void sgen_init_internal_allocator (void) { int i, size; for (i = 0; i < INTERNAL_MEM_MAX; ++i) fixed_type_allocator_indexes [i] = -1; for (i = 0; i < NUM_ALLOCATORS; ++i) { allocator_block_sizes [i] = block_size (allocator_sizes [i]); mono_lock_free_allocator_init_size_class (&size_classes [i], allocator_sizes [i], allocator_block_sizes [i]); mono_lock_free_allocator_init_allocator (&allocators [i], &size_classes [i], MONO_MEM_ACCOUNT_SGEN_INTERNAL); } for (size = mono_pagesize (); size <= LOCK_FREE_ALLOC_SB_MAX_SIZE; size <<= 1) { int max_size = (LOCK_FREE_ALLOC_SB_USABLE_SIZE (size) / 2) & ~(SIZEOF_VOID_P - 1); /* * we assert that allocator_sizes contains the biggest possible object size * per block which has to be an aligned address. * (4K => 2040, 8k => 4088, 16k => 8184 on 64bits), * so that we do not get different block sizes for sizes that should go to the same one */ g_assert (allocator_sizes [index_for_size (max_size)] == max_size); g_assert (block_size (max_size) == size); if (size < LOCK_FREE_ALLOC_SB_MAX_SIZE) g_assert (block_size (max_size + 1) == size << 1); } }
/*
 * Allocate zeroed internal memory of an arbitrary size. Requests larger
 * than the biggest size class go straight to the OS; everything else is
 * served by the matching lock-free allocator and zeroed here.
 * NOTE(review): assert_on_failure is currently unused — failure always
 * goes through sgen_assert_memory_alloc; confirm whether that is intended.
 */
void*
sgen_alloc_internal_dynamic (size_t size, int type, gboolean assert_on_failure)
{
	void *p;

	if (size > allocator_sizes [NUM_ALLOCATORS - 1]) {
		/* Oversized request: bypass the size classes entirely. */
		p = sgen_alloc_os_memory (size, (SgenAllocFlags)(SGEN_ALLOC_INTERNAL | SGEN_ALLOC_ACTIVATE), NULL, MONO_MEM_ACCOUNT_SGEN_INTERNAL);
		if (!p)
			sgen_assert_memory_alloc (NULL, size, description_for_type (type));
	} else {
		int index = index_for_size (size);

#ifdef HEAVY_STATISTICS
		++ allocator_sizes_stats [index];
#endif
		p = mono_lock_free_alloc (&allocators [index]);
		if (!p)
			sgen_assert_memory_alloc (NULL, size, description_for_type (type));
		/* Callers expect zeroed memory. */
		memset (p, 0, size);
	}

	SGEN_ASSERT (0, !(((mword)p) & (sizeof(gpointer) - 1)), "Why do we allocate unaligned addresses ?");
	return p;
}
/*
 * Bind a fixed internal memory type to the size class for `size`.
 * A type may be registered more than once, but only with a size that
 * maps to the same slot as the first registration.
 */
void
sgen_register_fixed_internal_mem_type (int type, size_t size)
{
	int slot;

	g_assert (type >= 0 && type < INTERNAL_MEM_MAX);

	slot = index_for_size (size);
	g_assert (slot >= 0);

	if (fixed_type_allocator_indexes [type] != -1) {
		/* Re-registration must resolve to the same size class. */
		g_assert (fixed_type_allocator_indexes [type] == slot);
		return;
	}
	fixed_type_allocator_indexes [type] = slot;
}
/*
 * Allocate zeroed internal memory of an arbitrary size. Requests larger
 * than the biggest size class go straight to the OS; everything else is
 * served by the matching lock-free allocator and zeroed here.
 * Returns NULL if the lock-free allocator fails.
 */
void*
sgen_alloc_internal_dynamic (size_t size, int type)
{
	int index;
	void *p;

	/* Oversized request: bypass the size classes entirely. */
	if (size > allocator_sizes [NUM_ALLOCATORS - 1])
		return sgen_alloc_os_memory (size, TRUE);

	index = index_for_size (size);

	p = mono_lock_free_alloc (&allocators [index]);
	/*
	 * FIX: the original called memset on the result unconditionally,
	 * which is undefined behavior when the allocation fails and p is
	 * NULL. Propagate the failure to the caller instead.
	 */
	if (!p)
		return NULL;
	/* Callers expect zeroed memory. */
	memset (p, 0, size);
	return p;
}
/*
 * Free memory obtained from sgen_alloc_internal_dynamic. Sizes above
 * the largest size class were OS allocations; everything else goes back
 * to the lock-free allocator. Freeing NULL is a no-op.
 */
void
sgen_free_internal_dynamic (void *addr, size_t size, int type)
{
	if (!addr)
		return;

	/*
	 * FIX: the original wrote `return sgen_free_os_memory (...);` —
	 * returning an expression from a void function is a C constraint
	 * violation (C11 6.8.6.4). Call and return separately.
	 */
	if (size > allocator_sizes [NUM_ALLOCATORS - 1]) {
		sgen_free_os_memory (addr, size);
		return;
	}

	/*
	 * FIX: the original assigned index_for_size's result to an unused
	 * local. Keep the call (it may validate the size internally — TODO
	 * confirm against index_for_size) but discard the value explicitly.
	 */
	(void) index_for_size (size);

	mono_lock_free_free (addr);
}
/*
 * Bind a fixed internal memory type to the size class for `size`.
 * The size must fit the largest size class. Registering the same type
 * twice with a size that maps to a different slot is a fatal error.
 */
void
sgen_register_fixed_internal_mem_type (int type, size_t size)
{
	int slot;

	g_assert (type >= 0 && type < INTERNAL_MEM_MAX);
	g_assert (size <= allocator_sizes [NUM_ALLOCATORS - 1]);

	slot = index_for_size (size);
	g_assert (slot >= 0);

	/* First registration: record the slot and we are done. */
	if (fixed_type_allocator_indexes [type] == -1) {
		fixed_type_allocator_indexes [type] = slot;
		return;
	}

	/* Re-registration must resolve to the same size class. */
	if (fixed_type_allocator_indexes [type] != slot)
		g_error ("Invalid double registration of type %d old slot %d new slot %d", type, fixed_type_allocator_indexes [type], slot);
}
/*
 * Allocate zeroed internal memory of an arbitrary size. Requests larger
 * than the biggest size class go straight to the OS; everything else is
 * served by the matching lock-free allocator and zeroed here.
 * NOTE(review): assert_on_failure is currently unused — failure always
 * goes through sgen_assert_memory_alloc; confirm whether that is intended.
 */
void*
sgen_alloc_internal_dynamic (size_t size, int type, gboolean assert_on_failure)
{
	void *result;

	if (size > allocator_sizes [NUM_ALLOCATORS - 1]) {
		/* Oversized request: bypass the size classes entirely. */
		result = sgen_alloc_os_memory (size, SGEN_ALLOC_INTERNAL | SGEN_ALLOC_ACTIVATE, NULL);
		if (!result)
			sgen_assert_memory_alloc (NULL, description_for_type (type));
	} else {
		int slot = index_for_size (size);

		result = mono_lock_free_alloc (&allocators [slot]);
		if (!result)
			sgen_assert_memory_alloc (NULL, description_for_type (type));
		/* Callers expect zeroed memory. */
		memset (result, 0, size);
	}
	return result;
}
void sgen_init_internal_allocator (void) { int i, size; for (i = 0; i < INTERNAL_MEM_MAX; ++i) fixed_type_allocator_indexes [i] = -1; for (i = 0; i < NUM_ALLOCATORS; ++i) { allocator_block_sizes [i] = block_size (allocator_sizes [i]); mono_lock_free_allocator_init_size_class (&size_classes [i], allocator_sizes [i], allocator_block_sizes [i]); mono_lock_free_allocator_init_allocator (&allocators [i], &size_classes [i]); } for (size = mono_pagesize (); size <= LOCK_FREE_ALLOC_SB_MAX_SIZE; size <<= 1) { int max_size = LOCK_FREE_ALLOC_SB_USABLE_SIZE (size) / 2; /* * we assert that allocator_sizes contains the biggest possible object size * per block (4K => 4080 / 2 = 2040, 8k => 8176 / 2 = 4088, 16k => 16368 / 2 = 8184 on 64bits), * so that we do not get different block sizes for sizes that should go to the same one */ g_assert (allocator_sizes [index_for_size (max_size)] == max_size); } }
/*
 * Allocate zeroed internal memory of an arbitrary size. Requests larger
 * than the biggest size class go straight to the OS; everything else is
 * served by the matching lock-free allocator and zeroed here.
 * NOTE(review): assert_on_failure is currently unused — failure always
 * goes through sgen_assert_memory_alloc; confirm whether that is intended.
 */
void*
sgen_alloc_internal_dynamic (size_t size, int type, gboolean assert_on_failure)
{
	void *result;
	int slot;

	/* Oversized request: bypass the size classes entirely. */
	if (size > allocator_sizes [NUM_ALLOCATORS - 1]) {
		result = sgen_alloc_os_memory (size, (SgenAllocFlags)(SGEN_ALLOC_INTERNAL | SGEN_ALLOC_ACTIVATE), NULL);
		if (!result)
			sgen_assert_memory_alloc (NULL, size, description_for_type (type));
		return result;
	}

	slot = index_for_size (size);
#ifdef HEAVY_STATISTICS
	++ allocator_sizes_stats [slot];
#endif
	result = mono_lock_free_alloc (&allocators [slot]);
	if (!result)
		sgen_assert_memory_alloc (NULL, size, description_for_type (type));
	/* Callers expect zeroed memory. */
	memset (result, 0, size);
	return result;
}