static void
free_sb (gpointer sb, size_t block_size)
{
	gpointer sb_header = sb_header_for_addr (sb, block_size);
	g_assert ((char*)sb_header + LOCK_FREE_ALLOC_SB_HEADER_SIZE == sb);

	mono_vfree (sb_header, block_size);

	//g_print ("free sb %p\n", sb_header);
}
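/*
 * A minimal sketch of the sb_header_for_addr () helper assumed above: if
 * superblocks are allocated block_size-aligned (e.g. via mono_valloc_aligned),
 * the header is recovered by masking off the low bits of any address inside
 * the superblock. This assumes block_size is a power of two; the assertion in
 * free_sb () then holds because the payload starts
 * LOCK_FREE_ALLOC_SB_HEADER_SIZE bytes past the header.
 */
static gpointer
sb_header_for_addr (gpointer addr, size_t block_size)
{
	return (gpointer)((size_t)addr & ~(block_size - 1));
}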
/*
 * Free the memory returned by sgen_alloc_os_memory (), returning it to the OS.
 */
void
sgen_free_os_memory (void *addr, size_t size, SgenAllocFlags flags)
{
	g_assert (!(flags & ~SGEN_ALLOC_HEAP));

	mono_vfree (addr, size);

	SGEN_ATOMIC_ADD_P (total_alloc, -(gssize)size);
	total_alloc_max = MAX (total_alloc_max, total_alloc);
}
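/*
 * For context, a hedged sketch of the allocation side that the function above
 * undoes: reserve OS memory and bump the same accounting counters. The real
 * sgen_alloc_os_memory () also handles SGEN_ALLOC_ACTIVATE and allocation
 * failure reporting; the three-argument mono_valloc () form used elsewhere in
 * this section is assumed here.
 */
void*
sgen_alloc_os_memory_sketch (size_t size, SgenAllocFlags flags)
{
	void *ptr;

	g_assert (!(flags & ~SGEN_ALLOC_HEAP));

	ptr = mono_valloc (NULL, size, prot_flags_for_activate (TRUE));
	if (ptr) {
		SGEN_ATOMIC_ADD_P (total_alloc, size);
		total_alloc_max = MAX (total_alloc_max, total_alloc);
	}
	return ptr;
}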
void*
mono_valloc_aligned (size_t size, size_t alignment, int flags)
{
	/* Over-allocate by the alignment so the block can be placed on an aligned address */
	char *mem = mono_valloc (NULL, size + alignment, flags);
	char *aligned;

	if (!mem)
		return NULL;

	aligned = aligned_address (mem, size, alignment);

	/* Return the unused head and tail of the mapping to the OS */
	if (aligned > mem)
		mono_vfree (mem, aligned - mem);
	if (aligned + size < mem + size + alignment)
		mono_vfree (aligned + size, (mem + size + alignment) - (aligned + size));

	return aligned;
}
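/*
 * A sketch of the aligned_address () helper used above: round mem up to the
 * next multiple of alignment (assumed to be a power of two) and check that
 * the aligned block still fits inside the over-allocated region.
 */
static char*
aligned_address (char *mem, size_t size, size_t alignment)
{
	char *aligned = (char*)(((size_t)mem + alignment - 1) & ~(alignment - 1));
	g_assert (aligned >= mem && aligned + size <= mem + size + alignment);
	return aligned;
}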
/*
 * Free the memory returned by sgen_alloc_os_memory (), returning it to the OS.
 */
void
sgen_free_os_memory (void *addr, size_t size, SgenAllocFlags flags)
{
	g_assert (!(flags & ~SGEN_ALLOC_HEAP));

	mono_vfree (addr, size);

	SGEN_ATOMIC_ADD_P (total_alloc, -(gssize)size);

	if (flags & SGEN_ALLOC_HEAP)
		MONO_GC_HEAP_FREE ((mword)addr, size);
}
static Descriptor*
desc_alloc (MonoMemAccountType type)
{
	MonoThreadHazardPointers *hp = mono_hazard_pointer_get ();
	Descriptor *desc;

	for (;;) {
		gboolean success;

		desc = (Descriptor *) mono_get_hazardous_pointer ((volatile gpointer *)&desc_avail, hp, 1);
		if (desc) {
			Descriptor *next = desc->next;
			success = (mono_atomic_cas_ptr ((volatile gpointer *)&desc_avail, next, desc) == desc);
		} else {
			size_t desc_size = sizeof (Descriptor);
			Descriptor *d;
			int i;

			desc = (Descriptor *) mono_valloc (NULL, desc_size * NUM_DESC_BATCH, prot_flags_for_activate (TRUE), type);
			g_assertf (desc, "Failed to allocate memory for the lock free allocator");

			/* Organize into linked list. */
			d = desc;
			for (i = 0; i < NUM_DESC_BATCH; ++i) {
				Descriptor *next = (i == (NUM_DESC_BATCH - 1)) ? NULL : (Descriptor*)((char*)desc + ((i + 1) * desc_size));
				d->next = next;
				mono_lock_free_queue_node_init (&d->node, TRUE);
				d = next;
			}

			mono_memory_write_barrier ();

			success = (mono_atomic_cas_ptr ((volatile gpointer *)&desc_avail, desc->next, NULL) == NULL);

			if (!success)
				mono_vfree (desc, desc_size * NUM_DESC_BATCH, type);
		}

		mono_hazard_pointer_clear (hp, 1);

		if (success)
			break;
	}

	g_assert (!desc->in_use);
	desc->in_use = TRUE;

	return desc;
}
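/*
 * A hedged sketch of the matching release path (not the runtime's actual
 * desc_retire (), which also goes through a lock-free queue): mark the
 * descriptor unused and CAS-push it back onto desc_avail so the loop above
 * can recycle it. Pushing never dereferences the old head, so no hazard
 * pointer is needed on this side.
 */
static void
desc_release_sketch (Descriptor *desc)
{
	Descriptor *old;

	g_assert (desc->in_use);
	desc->in_use = FALSE;
	do {
		old = desc_avail;
		desc->next = old;
		mono_memory_write_barrier ();
	} while (mono_atomic_cas_ptr ((volatile gpointer *)&desc_avail, desc, old) != old);
}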
static void
codechunk_vfree (void *ptr, guint32 size)
{
	GSList *freelist;

	mono_os_mutex_lock (&valloc_mutex);
	freelist = (GSList *) g_hash_table_lookup (valloc_freelists, GUINT_TO_POINTER (size));
	if (!freelist || g_slist_length (freelist) < VALLOC_FREELIST_SIZE) {
		freelist = g_slist_prepend (freelist, ptr);
		g_hash_table_insert (valloc_freelists, GUINT_TO_POINTER (size), freelist);
	} else {
		mono_vfree (ptr, size, MONO_MEM_ACCOUNT_CODE);
	}
	mono_os_mutex_unlock (&valloc_mutex);
}
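/*
 * A hedged sketch of the allocation counterpart: the real codechunk_valloc ()
 * presumably tries to recycle a cached block of the same size from
 * valloc_freelists before mapping fresh memory. The exact mmap flags and the
 * zeroing of reused blocks are assumptions here.
 */
static void*
codechunk_valloc_sketch (void *preferred, guint32 size)
{
	void *ptr;
	GSList *freelist;

	mono_os_mutex_lock (&valloc_mutex);
	freelist = (GSList *) g_hash_table_lookup (valloc_freelists, GUINT_TO_POINTER (size));
	if (freelist) {
		/* Reuse a cached block of exactly this size. */
		ptr = freelist->data;
		memset (ptr, 0, size);
		freelist = g_slist_delete_link (freelist, freelist);
		g_hash_table_insert (valloc_freelists, GUINT_TO_POINTER (size), freelist);
	} else {
		ptr = mono_valloc (preferred, size, MONO_MMAP_READ | MONO_MMAP_WRITE | MONO_MMAP_EXEC, MONO_MEM_ACCOUNT_CODE);
		if (!ptr && preferred)
			ptr = mono_valloc (NULL, size, MONO_MMAP_READ | MONO_MMAP_WRITE | MONO_MMAP_EXEC, MONO_MEM_ACCOUNT_CODE);
	}
	mono_os_mutex_unlock (&valloc_mutex);
	return ptr;
}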
static void
codechunk_vfree (void *ptr, guint32 size)
{
	GSList *freelist;

	EnterCriticalSection (&valloc_mutex);
	freelist = g_hash_table_lookup (valloc_freelists, GUINT_TO_POINTER (size));
	if (!freelist || g_slist_length (freelist) < VALLOC_FREELIST_SIZE) {
		freelist = g_slist_prepend (freelist, ptr);
		g_hash_table_insert (valloc_freelists, GUINT_TO_POINTER (size), freelist);
	} else {
		mono_vfree (ptr, size);
	}
	LeaveCriticalSection (&valloc_mutex);
}
static Descriptor*
desc_alloc (void)
{
	MonoThreadHazardPointers *hp = mono_hazard_pointer_get ();
	Descriptor *desc;

	for (;;) {
		gboolean success;

		desc = (Descriptor *) get_hazardous_pointer ((gpointer * volatile)&desc_avail, hp, 1);
		if (desc) {
			Descriptor *next = desc->next;
			success = (InterlockedCompareExchangePointer ((gpointer * volatile)&desc_avail, next, desc) == desc);
		} else {
			size_t desc_size = sizeof (Descriptor);
			Descriptor *d;
			int i;

			desc = (Descriptor *) mono_valloc (NULL, desc_size * NUM_DESC_BATCH, prot_flags_for_activate (TRUE));
			/* The batch walk below dereferences the result, so fail hard on OOM. */
			g_assert (desc);

			/* Organize into linked list. */
			d = desc;
			for (i = 0; i < NUM_DESC_BATCH; ++i) {
				Descriptor *next = (i == (NUM_DESC_BATCH - 1)) ? NULL : (Descriptor*)((char*)desc + ((i + 1) * desc_size));
				d->next = next;
				mono_lock_free_queue_node_init (&d->node, TRUE);
				d = next;
			}

			mono_memory_write_barrier ();

			success = (InterlockedCompareExchangePointer ((gpointer * volatile)&desc_avail, desc->next, NULL) == NULL);

			if (!success)
				mono_vfree (desc, desc_size * NUM_DESC_BATCH);
		}

		mono_hazard_pointer_clear (hp, 1);

		if (success)
			break;
	}

	g_assert (!desc->in_use);
	desc->in_use = TRUE;

	return desc;
}
static void
codechunk_cleanup (void)
{
	GHashTableIter iter;
	gpointer key, value;

	if (!valloc_freelists)
		return;

	g_hash_table_iter_init (&iter, valloc_freelists);
	while (g_hash_table_iter_next (&iter, &key, &value)) {
		GSList *freelist = (GSList *) value;
		GSList *l;

		for (l = freelist; l; l = l->next) {
			mono_vfree (l->data, GPOINTER_TO_UINT (key), MONO_MEM_ACCOUNT_CODE);
		}
		g_slist_free (freelist);
	}
	g_hash_table_destroy (valloc_freelists);
}
static CodeChunk*
new_codechunk (CodeChunk *last, int dynamic, int size)
{
	int minsize, flags = CODE_FLAG_MMAP;
	int chunk_size, bsize = 0;
	int pagesize, valloc_granule;
	CodeChunk *chunk;
	void *ptr;

#ifdef FORCE_MALLOC
	flags = CODE_FLAG_MALLOC;
#endif

	pagesize = mono_pagesize ();
	valloc_granule = mono_valloc_granule ();

	if (dynamic) {
		chunk_size = size;
		flags = CODE_FLAG_MALLOC;
	} else {
		minsize = MAX (pagesize * MIN_PAGES, valloc_granule);
		if (size < minsize)
			chunk_size = minsize;
		else {
			/* Allocate MIN_ALIGN-1 more than we need so we can still */
			/* guarantee MIN_ALIGN alignment for individual allocs    */
			/* from mono_code_manager_reserve_align.                  */
			size += MIN_ALIGN - 1;
			size &= ~(MIN_ALIGN - 1);
			chunk_size = size;
			chunk_size += valloc_granule - 1;
			chunk_size &= ~(valloc_granule - 1);
		}
	}
#ifdef BIND_ROOM
	if (dynamic)
		/* Reserve more space since there are no other chunks we might use if this one gets full */
		bsize = (chunk_size * 2) / BIND_ROOM;
	else
		bsize = chunk_size / BIND_ROOM;
	if (bsize < MIN_BSIZE)
		bsize = MIN_BSIZE;
	bsize += MIN_ALIGN - 1;
	bsize &= ~(MIN_ALIGN - 1);
	if (chunk_size - size < bsize) {
		chunk_size = size + bsize;
		if (!dynamic) {
			chunk_size += valloc_granule - 1;
			chunk_size &= ~(valloc_granule - 1);
		}
	}
#endif

	if (flags == CODE_FLAG_MALLOC) {
		ptr = dlmemalign (MIN_ALIGN, chunk_size + MIN_ALIGN - 1);
		if (!ptr)
			return NULL;
	} else {
		/* Try to allocate code chunks next to each other to help the VM */
		ptr = NULL;
		if (last)
			ptr = codechunk_valloc ((guint8*)last->data + last->size, chunk_size);
		if (!ptr)
			ptr = codechunk_valloc (NULL, chunk_size);
		if (!ptr)
			return NULL;
	}

	if (flags == CODE_FLAG_MALLOC) {
#ifdef BIND_ROOM
		/* Make sure the thunks area is zeroed */
		memset (ptr, 0, bsize);
#endif
	}

	chunk = (CodeChunk *) g_malloc (sizeof (CodeChunk));
	if (!chunk) {
		if (flags == CODE_FLAG_MALLOC)
			dlfree (ptr);
		else
			mono_vfree (ptr, chunk_size, MONO_MEM_ACCOUNT_CODE);
		return NULL;
	}
	chunk->next = NULL;
	chunk->size = chunk_size;
	chunk->data = (char *) ptr;
	chunk->flags = flags;
	chunk->pos = bsize;
	chunk->bsize = bsize;
	if (code_manager_callbacks.chunk_new)
		code_manager_callbacks.chunk_new ((gpointer)chunk->data, chunk->size);
	mono_profiler_code_chunk_new ((gpointer) chunk->data, chunk->size);

	code_memory_used += chunk_size;
	mono_runtime_resource_check_limit (MONO_RESOURCE_JIT_CODE, code_memory_used);
	/*printf ("code chunk at: %p\n", ptr);*/
	return chunk;
}
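/*
 * The size-rounding idiom used repeatedly above, isolated for clarity: round
 * n up to a power-of-two boundary by adding boundary-1 and masking off the
 * low bits. For example, with valloc_granule = 4096, a 5000-byte request
 * rounds up to 8192.
 */
static inline int
round_up (int n, int boundary)
{
	return (n + boundary - 1) & ~(boundary - 1);
}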
static void
mono_sgen_free_os_memory (void *addr, size_t size)
{
	mono_vfree (addr, size);
}
/*
 * Free the memory returned by sgen_alloc_os_memory (), returning it to the OS.
 */
void
sgen_free_os_memory (void *addr, size_t size)
{
	mono_vfree (addr, size);
	SGEN_ATOMIC_ADD_P (total_alloc, -(gssize)size);
}