/**
 * mono_mempool_alloc0:
 *
 * Same as mono_mempool_alloc, but fills the returned memory with zero.
 */
gpointer
mono_mempool_alloc0 (MonoMemPool *pool, guint size)
{
	gpointer rval;

#ifdef MALLOC_ALLOCATION
	/* Debug build: delegate entirely to mono_mempool_alloc. */
	rval = mono_mempool_alloc (pool, size);
#else
	/* Round the request up to the pool's alignment granularity. */
	size = (size + MEM_ALIGN - 1) & ~(MEM_ALIGN - 1);

	/* Fast path: bump-allocate from the current block. */
	rval = pool->pos;
	pool->pos = (guint8*)rval + size;

	/* Overflowed the current block: fall back to the slow path, which
	 * grows the pool. Note pool->pos has already been bumped past
	 * pool->end at this point; the slow path deals with that. */
	if (G_UNLIKELY (pool->pos >= pool->end)) {
		rval = mono_mempool_alloc (pool, size);
	}
#ifdef TRACE_ALLOCATIONS
	else if (pool == mono_get_corlib ()->mempool) {
		mono_backtrace (size);
	}
#endif
#endif

	/* `size` was rounded up above, so this may zero a few alignment bytes
	 * beyond the caller's request — still within the reserved space. */
	memset (rval, 0, size);
	return rval;
}
/**
 * mono_mempool_alloc:
 * \param pool the memory pool to use
 * \param size size of the memory block
 *
 * Allocates a new block of memory in \p pool .
 *
 * \returns the address of a newly allocated memory block.
 */
gpointer
/* Parentheses around the name keep a same-named function-like macro,
 * if one is defined, from being expanded here. */
(mono_mempool_alloc) (MonoMemPool *pool, guint size)
{
	gpointer rval = pool->pos; // Return value

	// Normal case: Just bump up pos pointer and we are done
	size = ALIGN_SIZE (size);
	pool->pos = (guint8*)rval + size;

#ifdef TRACE_ALLOCATIONS
	if (pool == mono_get_corlib ()->mempool) {
		mono_backtrace (size);
	}
#endif

	// If we have just overflowed the current block, we need to back up and try again.
	if (G_UNLIKELY (pool->pos >= pool->end)) {
		pool->pos -= size; // Back out

		// For large objects, allocate the object into its own block.
		// (In individual allocation mode, the constant will be 0 and this path will always be taken)
		if (size >= MONO_MEMPOOL_PREFER_INDIVIDUAL_ALLOCATION_SIZE) {
			guint new_size = SIZEOF_MEM_POOL + size;
			MonoMemPool *np = (MonoMemPool *)g_malloc (new_size);

			// Link the dedicated block in *after* the head; the head keeps
			// serving bump allocations from its remaining space.
			np->next = pool->next;
			np->size = new_size;
			pool->next = np;
			pool->d.allocated += new_size;
			UnlockedAdd64 (&total_bytes_allocated, new_size);

			rval = (guint8*)np + SIZEOF_MEM_POOL;
		} else {
			// Notice: any unused memory at the end of the old head becomes simply abandoned
			// in this case until the mempool is freed (see Bugzilla #35136)
			guint new_size = get_next_size (pool, size);
			MonoMemPool *np = (MonoMemPool *)g_malloc (new_size);

			// The new block becomes the active bump-allocation region:
			// pool->pos/pool->end now point into it.
			np->next = pool->next;
			np->size = new_size;
			pool->next = np;
			pool->pos = (guint8*)np + SIZEOF_MEM_POOL;
			pool->end = (guint8*)np + new_size;
			pool->d.allocated += new_size;
			UnlockedAdd64 (&total_bytes_allocated, new_size);

			rval = pool->pos;
			pool->pos += size;
		}
	}

	return rval;
}
/*
 * add_record:
 * \param record_kind  what kind of event is being recorded
 * \param kind         which runtime lock the event concerns
 * \param lock         address of the lock object
 *
 * Appends one CSV-formatted record — thread id, record kind, lock kind,
 * lock address and 5 stack frames — to \c trace_file, then flushes.
 * No-op when tracing is disabled (\c trace_file is NULL).
 *
 * Fix: use an enum constant instead of `const int` for the frame count;
 * in C a `const int` is not a constant expression, so the original
 * `gpointer frames[no_frames]` was silently a VLA.
 */
static void
add_record (RecordType record_kind, RuntimeLocks kind, gpointer lock)
{
	enum { NO_FRAMES = 6 };
	gpointer frames [NO_FRAMES];
	char *msg;
	int i;

	if (!trace_file)
		return;

	memset (frames, 0, sizeof (frames));
	mono_backtrace (frames, NO_FRAMES);

	/* Rebase frame addresses so traces are comparable across ASLR runs. */
	for (i = 0; i < NO_FRAMES; ++i)
		frames [i] = (gpointer)((size_t)frames [i] - base_address);

	/* We only dump 5 frames, which should be more than enough for most
	 * analysis (frames [0] is presumably this function itself — hence
	 * starting at [1]; confirm against mono_backtrace's convention). */
	msg = g_strdup_printf ("%x,%d,%d,%p,%p,%p,%p,%p,%p\n",
		(guint32)mono_native_thread_id_get (), record_kind, kind, lock,
		frames [1], frames [2], frames [3], frames [4], frames [5]);
	fwrite (msg, strlen (msg), 1, trace_file);
	fflush (trace_file);
	g_free (msg);
}
/**
 * mono_mempool_alloc0:
 *
 * same as \c mono_mempool_alloc, but fills memory with zero.
 */
gpointer
/* Parentheses around the name keep a same-named function-like macro,
 * if one is defined, from being expanded here. */
(mono_mempool_alloc0) (MonoMemPool *pool, guint size)
{
	gpointer rval;

	// For the fast path, repeat the first few lines of mono_mempool_alloc
	size = ALIGN_SIZE (size);
	rval = pool->pos;
	pool->pos = (guint8*)rval + size;

	// If that doesn't work fall back on mono_mempool_alloc to handle new chunk allocation.
	// (pool->pos has already been bumped past pool->end here; mono_mempool_alloc
	// backs out its own bump before growing the pool.)
	if (G_UNLIKELY (pool->pos >= pool->end)) {
		rval = mono_mempool_alloc (pool, size);
	}
#ifdef TRACE_ALLOCATIONS
	else if (pool == mono_get_corlib ()->mempool) {
		mono_backtrace (size);
	}
#endif

	// `size` was aligned up above, so this may zero a few bytes past the
	// caller's request — still within the space reserved for this allocation.
	memset (rval, 0, size);
	return rval;
}
/**
 * mono_mempool_alloc:
 * @pool: the memory pool to use
 * @size: size of the memory block
 *
 * Allocates a new block of memory in @pool.
 *
 * Returns: the address of a newly allocated memory block.
 */
gpointer
mono_mempool_alloc (MonoMemPool *pool, guint size)
{
	gpointer rval;

	/* Round the request up to the pool's alignment granularity. */
	size = (size + MEM_ALIGN - 1) & ~(MEM_ALIGN - 1);

#ifdef MALLOC_ALLOCATION
	{
		/* Debug mode: every allocation gets its own malloc'd chunk.
		 * Fix: allocate room for the Chunk header *in addition to* the
		 * caller's request — the original g_malloc (size) returned a
		 * payload that was sizeof (Chunk) bytes too small, so callers
		 * writing `size` bytes overflowed the heap block. */
		Chunk *c = g_malloc (sizeof (Chunk) + size);

		c->next = pool->chunks;
		pool->chunks = c;
		c->size = size; /* usable payload bytes */

		pool->allocated += sizeof (Chunk) + size;

		rval = ((guint8*)c) + sizeof (Chunk);
	}
#else
	/* Fast path: bump-allocate from the current block. */
	rval = pool->pos;
	pool->pos = (guint8*)rval + size;

#ifdef TRACE_ALLOCATIONS
	if (pool == mono_get_corlib ()->mempool) {
		mono_backtrace (size);
	}
#endif

	/* Overflowed the current block: back out the bump and grow the pool. */
	if (G_UNLIKELY (pool->pos >= pool->end)) {
		pool->pos -= size;

		if (size >= 4096) {
			/* Large request: give it a dedicated block, linked in after
			 * the head; the head keeps serving bump allocations. */
			MonoMemPool *np = g_malloc (sizeof (MonoMemPool) + size);

			np->next = pool->next;
			pool->next = np;
			np->pos = (guint8*)np + sizeof (MonoMemPool);
			np->size = sizeof (MonoMemPool) + size;
			np->end = np->pos + np->size - sizeof (MonoMemPool);
			pool->d.allocated += sizeof (MonoMemPool) + size;
			/* NOTE(review): non-atomic update of a global counter — racy if
			 * pools are used from multiple threads; confirm callers lock. */
			total_bytes_allocated += sizeof (MonoMemPool) + size;
			return (guint8*)np + sizeof (MonoMemPool);
		} else {
			/* Normal growth: the new block becomes the active bump region
			 * (pool->pos/pool->end now point into it); any unused tail of
			 * the old block is abandoned until the pool is destroyed. */
			int new_size = get_next_size (pool, size);
			MonoMemPool *np = g_malloc (new_size);

			np->next = pool->next;
			pool->next = np;
			pool->pos = (guint8*)np + sizeof (MonoMemPool);
			np->pos = (guint8*)np + sizeof (MonoMemPool);
			np->size = new_size;
			np->end = np->pos; /* np's own pos/end are unused while active via pool */
			pool->end = pool->pos + new_size - sizeof (MonoMemPool);
			pool->d.allocated += new_size;
			total_bytes_allocated += new_size;

			rval = pool->pos;
			pool->pos += size;
		}
	}
#endif

	return rval;
}