void sgen_init_internal_allocator (void) { int i, size; for (i = 0; i < INTERNAL_MEM_MAX; ++i) fixed_type_allocator_indexes [i] = -1; for (i = 0; i < NUM_ALLOCATORS; ++i) { allocator_block_sizes [i] = block_size (allocator_sizes [i]); mono_lock_free_allocator_init_size_class (&size_classes [i], allocator_sizes [i], allocator_block_sizes [i]); mono_lock_free_allocator_init_allocator (&allocators [i], &size_classes [i], MONO_MEM_ACCOUNT_SGEN_INTERNAL); } for (size = mono_pagesize (); size <= LOCK_FREE_ALLOC_SB_MAX_SIZE; size <<= 1) { int max_size = (LOCK_FREE_ALLOC_SB_USABLE_SIZE (size) / 2) & ~(SIZEOF_VOID_P - 1); /* * we assert that allocator_sizes contains the biggest possible object size * per block which has to be an aligned address. * (4K => 2040, 8k => 4088, 16k => 8184 on 64bits), * so that we do not get different block sizes for sizes that should go to the same one */ g_assert (allocator_sizes [index_for_size (max_size)] == max_size); g_assert (block_size (max_size) == size); if (size < LOCK_FREE_ALLOC_SB_MAX_SIZE) g_assert (block_size (max_size + 1) == size << 1); } }
/*
 * Check the internal invariants of a superblock descriptor: that it is
 * not on the available-descriptor list, that its slot size matches its
 * size class, that its state tag agrees with its free-slot count, and
 * that its free list is in range and acyclic.
 * If PRINT is true, violations are reported via g_print instead of
 * aborting (presumably the g_assert_OR_PRINT behavior — the
 * `if (linked [index]) break;` below only matters in that mode).
 */
static void
descriptor_check_consistency (Descriptor *desc, gboolean print)
{
	int count = desc->anchor.data.count;
	/* Number of slots that fit in the superblock's usable area. */
	int max_count = LOCK_FREE_ALLOC_SB_USABLE_SIZE (desc->block_size) / desc->slot_size;
	/* Stack-allocated seen-bitmap used to detect free-list cycles. */
	gboolean* linked = g_newa (gboolean, max_count);
	int i, last;
	unsigned int index;

#ifndef DESC_AVAIL_DUMMY
	Descriptor *avail;

	/* A live descriptor must not also sit on the retired/available list. */
	for (avail = desc_avail; avail; avail = avail->next)
		g_assert_OR_PRINT (desc != avail, "descriptor is in the available list\n");
#endif

	g_assert_OR_PRINT (desc->slot_size == desc->heap->sc->slot_size, "slot size doesn't match size class\n");

	if (print)
		g_print ("descriptor %p is ", desc);

	/* The state tag must be consistent with the free-slot count. */
	switch (desc->anchor.data.state) {
	case STATE_FULL:
		if (print)
			g_print ("full\n");
		g_assert_OR_PRINT (count == 0, "count is not zero: %d\n", count);
		break;
	case STATE_PARTIAL:
		if (print)
			g_print ("partial\n");
		g_assert_OR_PRINT (count < max_count, "count too high: is %d but must be below %d\n", count, max_count);
		break;
	case STATE_EMPTY:
		if (print)
			g_print ("empty\n");
		g_assert_OR_PRINT (count == max_count, "count is wrong: is %d but should be %d\n", count, max_count);
		break;
	default:
		g_assert_OR_PRINT (FALSE, "invalid state\n");
	}

	for (i = 0; i < max_count; ++i)
		linked [i] = FALSE;

	/* Walk the free list: each free slot's first word holds the index of
	 * the next free slot. */
	index = desc->anchor.data.avail;
	last = -1;
	for (i = 0; i < count; ++i) {
		gpointer addr = (char*)desc->sb + index * desc->slot_size;
		/* NOTE(review): index is unsigned, so `index >= 0` is always true;
		 * only the upper bound is effective. */
		g_assert_OR_PRINT (index >= 0 && index < max_count, "index %d for %dth available slot, linked from %d, not in range [0 .. %d)\n", index, i, last, max_count);
		g_assert_OR_PRINT (!linked [index], "%dth available slot %d linked twice\n", i, index);
		/* In print mode the assert above doesn't abort, so stop here to
		 * avoid looping forever on a cyclic free list. */
		if (linked [index])
			break;
		linked [index] = TRUE;
		last = index;
		index = *(unsigned int*)addr;
	}
}
/*
 * Initialize a lock-free allocator size class for SLOT_SIZE-byte
 * objects carved out of BLOCK_SIZE-byte superblocks.
 */
void
mono_lock_free_allocator_init_size_class (MonoLockFreeAllocSizeClass *sc, unsigned int slot_size, unsigned int block_size)
{
	/* The superblock size must be a nonzero power of two. */
	g_assert (block_size > 0);
	g_assert ((block_size & (block_size - 1)) == 0);
	/* At least two slots have to fit into a superblock's usable area. */
	g_assert (slot_size * 2 <= LOCK_FREE_ALLOC_SB_USABLE_SIZE (block_size));

	sc->slot_size = slot_size;
	sc->block_size = block_size;
	mono_lock_free_queue_init (&sc->partial);
}
/*
 * Free PTR back into its superblock's lock-free free list.
 * BLOCK_SIZE must be the superblock size the pointer was allocated
 * with; the owning descriptor is found via the superblock header.
 * The anchor (free-list head, count, state) is updated with a CAS
 * retry loop.  A FULL block becomes PARTIAL; a block whose count
 * reaches max_count becomes EMPTY and may be retired.
 */
void
mono_lock_free_free (gpointer ptr, size_t block_size)
{
	Anchor old_anchor, new_anchor;
	Descriptor *desc;
	gpointer sb;
	MonoLockFreeAllocator *heap = NULL;

	/* The descriptor pointer is stored in the superblock header. */
	desc = *(Descriptor**) sb_header_for_addr (ptr, block_size);
	g_assert (block_size == desc->block_size);

	sb = desc->sb;

	do {
		new_anchor = old_anchor = *(volatile Anchor*)&desc->anchor.value;
		/* Push the freed slot onto the head of the free list: its first
		 * word holds the index of the previous head. */
		*(unsigned int*)ptr = old_anchor.data.avail;
		new_anchor.data.avail = ((char*)ptr - (char*)sb) / desc->slot_size;
		g_assert (new_anchor.data.avail < LOCK_FREE_ALLOC_SB_USABLE_SIZE (block_size) / desc->slot_size);

		if (old_anchor.data.state == STATE_FULL)
			new_anchor.data.state = STATE_PARTIAL;

		/* All slots free again: the block becomes EMPTY and is a
		 * candidate for retirement below. */
		if (++new_anchor.data.count == desc->max_count) {
			heap = desc->heap;
			new_anchor.data.state = STATE_EMPTY;
		}
	} while (!set_anchor (desc, old_anchor, new_anchor));

	if (new_anchor.data.state == STATE_EMPTY) {
		g_assert (old_anchor.data.state != STATE_EMPTY);

		if (InterlockedCompareExchangePointer ((gpointer * volatile)&heap->active, NULL, desc) == desc) {
			/* We own it, so we free it. */
			desc_retire (desc);
		} else {
			/*
			 * Somebody else must free it, so we do some
			 * freeing for others.
			 */
			list_remove_empty_desc (heap->sc);
		}
	} else if (old_anchor.data.state == STATE_FULL) {
		/*
		 * Nobody owned it, now we do, so we need to give it
		 * back.
		 */
		g_assert (new_anchor.data.state == STATE_PARTIAL);

		if (InterlockedCompareExchangePointer ((gpointer * volatile)&desc->heap->active, desc, NULL) != NULL)
			heap_put_partial (desc);
	}
}
/*
 * Pop one slot from the heap's active superblock, or from a partial
 * one if there is no active block.  Returns the slot address, or NULL
 * if neither an active nor a partial block is available (the caller
 * presumably falls back to allocating a new superblock).
 * Ownership of the descriptor is taken by CAS-ing heap->active to
 * NULL; the anchor is then updated with a CAS retry loop.
 */
static gpointer
alloc_from_active_or_partial (MonoLockFreeAllocator *heap)
{
	Descriptor *desc;
	Anchor old_anchor, new_anchor;
	gpointer addr;

 retry:
	desc = heap->active;
	if (desc) {
		/* Claim exclusive ownership of the active block; if somebody
		 * else got it first, start over. */
		if (InterlockedCompareExchangePointer ((gpointer * volatile)&heap->active, NULL, desc) != desc)
			goto retry;
	} else {
		desc = heap_get_partial (heap);
		if (!desc)
			return NULL;
	}

	/* Now we own the desc. */

	do {
		unsigned int next;

		new_anchor = old_anchor = *(volatile Anchor*)&desc->anchor.value;
		if (old_anchor.data.state == STATE_EMPTY) {
			/* We must free it because we own it. */
			desc_retire (desc);
			goto retry;
		}
		g_assert (old_anchor.data.state == STATE_PARTIAL);
		g_assert (old_anchor.data.count > 0);

		/* The head of the free list is the slot we hand out; its first
		 * word holds the index of the next free slot. */
		addr = (char*)desc->sb + old_anchor.data.avail * desc->slot_size;

		mono_memory_read_barrier ();

		next = *(unsigned int*)addr;
		g_assert (next < LOCK_FREE_ALLOC_SB_USABLE_SIZE (desc->block_size) / desc->slot_size);

		new_anchor.data.avail = next;
		--new_anchor.data.count;

		if (new_anchor.data.count == 0)
			new_anchor.data.state = STATE_FULL;
	} while (!set_anchor (desc, old_anchor, new_anchor));

	/* If the desc is partial we have to give it back. */
	if (new_anchor.data.state == STATE_PARTIAL) {
		if (InterlockedCompareExchangePointer ((gpointer * volatile)&heap->active, desc, NULL) != NULL)
			heap_put_partial (desc);
	}

	return addr;
}
/*
 * Pick the smallest superblock size (a power of two between the page
 * size and LOCK_FREE_ALLOC_SB_MAX_SIZE) whose usable area fits at
 * least two slots of SLOT_SIZE bytes.
 *
 * Fix: round the slot size up to pointer alignment before sizing the
 * block.  Slots are handed out at pointer-aligned offsets, so an
 * unaligned slot size must map to the same superblock size as its
 * aligned equivalent; otherwise sizes that should share a block get
 * different block sizes.
 */
static size_t
block_size (size_t slot_size)
{
	static int pagesize = -1;
	int size;
	/* Round up to pointer size (sizeof (void *) is a power of two). */
	size_t aligned_slot_size = (slot_size + sizeof (void *) - 1) & ~(sizeof (void *) - 1);

	if (pagesize == -1)
		pagesize = mono_pagesize ();

	for (size = pagesize; size < LOCK_FREE_ALLOC_SB_MAX_SIZE; size <<= 1) {
		if (aligned_slot_size * 2 <= LOCK_FREE_ALLOC_SB_USABLE_SIZE (size))
			return size;
	}
	/* Even the largest superblock must be used for oversized slots. */
	return LOCK_FREE_ALLOC_SB_MAX_SIZE;
}
/*
 * Return the smallest power-of-two superblock size, from the page size
 * up to LOCK_FREE_ALLOC_SB_MAX_SIZE, whose usable area can hold at
 * least two pointer-aligned slots of SLOT_SIZE bytes.
 */
static size_t
block_size (size_t slot_size)
{
	static int pagesize = -1;
	int candidate;
	/* Size the block for the pointer-aligned slot so that unaligned
	 * sizes map to the same superblock as their aligned equivalent. */
	size_t aligned_slot_size = SGEN_ALIGN_UP_TO (slot_size, SIZEOF_VOID_P);

	if (pagesize == -1)
		pagesize = mono_pagesize ();

	candidate = pagesize;
	while (candidate < LOCK_FREE_ALLOC_SB_MAX_SIZE) {
		if (aligned_slot_size * 2 <= LOCK_FREE_ALLOC_SB_USABLE_SIZE (candidate))
			return candidate;
		candidate <<= 1;
	}
	/* Oversized slots fall through to the largest superblock. */
	return LOCK_FREE_ALLOC_SB_MAX_SIZE;
}
/*
 * Allocate a fresh superblock for HEAP, take its slot 0 for the caller
 * and try to install the new descriptor as the heap's active block.
 * Returns the address of slot 0, or NULL if another thread installed
 * an active block first, in which case the descriptor is retired.
 *
 * Fix: drop the redundant second `desc->slot_size = heap->sc->slot_size;`
 * store — the field is already set in the combined assignment above.
 */
static gpointer
alloc_from_new_sb (MonoLockFreeAllocator *heap)
{
	unsigned int slot_size, block_size, count, i;
	Descriptor *desc = desc_alloc (heap->account_type);

	slot_size = desc->slot_size = heap->sc->slot_size;
	block_size = desc->block_size = heap->sc->block_size;
	count = LOCK_FREE_ALLOC_SB_USABLE_SIZE (block_size) / slot_size;
	desc->heap = heap;
	/*
	 * Setting avail to 1 because 0 is the block we're allocating
	 * right away.
	 */
	desc->anchor.data.avail = 1;
	desc->max_count = count;

	desc->anchor.data.count = desc->max_count - 1;
	desc->anchor.data.state = STATE_PARTIAL;

	desc->sb = alloc_sb (desc);

	/*
	 * Organize blocks into linked list: each free slot's first word
	 * holds the index of the next free slot.  The list is bounded by
	 * anchor.data.count, so the final link (to slot 0, which is being
	 * handed out) is never followed.
	 */
	for (i = 1; i < count - 1; ++i)
		*(unsigned int*)((char*)desc->sb + i * slot_size) = i + 1;
	*(unsigned int*)((char*)desc->sb + (count - 1) * slot_size) = 0;

	/* Publish the initialized superblock before making it visible. */
	mono_memory_write_barrier ();

	/* Make it active or free it again. */
	if (mono_atomic_cas_ptr ((volatile gpointer *)&heap->active, desc, NULL) == NULL) {
		return desc->sb;
	} else {
		desc->anchor.data.state = STATE_EMPTY;
		desc_retire (desc);
		return NULL;
	}
}
/*
 * Initialize the SGen internal allocator: reset the fixed-type index
 * table, set up one lock-free size class + allocator pair per slot
 * size, and sanity-check the size-to-superblock mapping.
 */
void
sgen_init_internal_allocator (void)
{
	int i, size;

	/* -1 marks fixed types whose allocator index is not yet assigned. */
	for (i = 0; i < INTERNAL_MEM_MAX; ++i)
		fixed_type_allocator_indexes [i] = -1;

	for (i = 0; i < NUM_ALLOCATORS; ++i) {
		allocator_block_sizes [i] = block_size (allocator_sizes [i]);
		mono_lock_free_allocator_init_size_class (&size_classes [i], allocator_sizes [i], allocator_block_sizes [i]);
		mono_lock_free_allocator_init_allocator (&allocators [i], &size_classes [i]);
	}

	for (size = mono_pagesize (); size <= LOCK_FREE_ALLOC_SB_MAX_SIZE; size <<= 1) {
		/* NOTE(review): max_size is not rounded down to SIZEOF_VOID_P
		 * alignment here — confirm the allocator_sizes entries really
		 * equal the raw half-usable-size values. */
		int max_size = LOCK_FREE_ALLOC_SB_USABLE_SIZE (size) / 2;
		/*
		 * we assert that allocator_sizes contains the biggest possible object size
		 * per block (4K => 4080 / 2 = 2040, 8k => 8176 / 2 = 4088, 16k => 16368 / 2 = 8184 on 64bits),
		 * so that we do not get different block sizes for sizes that should go to the same one
		 */
		g_assert (allocator_sizes [index_for_size (max_size)] == max_size);
	}
}