Example #1
region newsubregion(region parent)
{
  // fprintf(stderr, "## newsubregion\n");
  char *first;
  region r;

  first = (char *)alloc_single_page(NULL);
  preclear(first + PAGE_HEADER_SIZE, RPAGESIZE - PAGE_HEADER_SIZE);

#ifdef STAGGER_RSTART
  /* stagger regions across cache lines a bit */
  rstart += 64;
#if RPAGESIZE < 1024
#error RPAGESIZE must be at least 1024, or change the next if.
#endif
  if (rstart >= 16 * 64) rstart = 0;
#endif
  r = (region)(first + rstart + PAGE_HEADER_SIZE);
  VALGRIND_MAKE_WRITABLE(r, sizeof(*r));
  postclear(r, sizeof *r);
  initregion(r);

  if (parent)
    link_region(r, parent);

  // fprintf(stderr, "## create mempool %p\n", r);
  VALGRIND_CREATE_MEMPOOL(r, 0, 0);
  ++num_regions_active;
  ++num_regions_created;
  return r;
}
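All of the examples collected here follow the same Memcheck protocol: the first argument of VALGRIND_CREATE_MEMPOOL is just an opaque anchor address identifying the pool, and every chunk the allocator later hands out must be reported with VALGRIND_MEMPOOL_ALLOC (and either given back with VALGRIND_MEMPOOL_FREE or forgotten wholesale with VALGRIND_DESTROY_MEMPOOL). The minimal sketch below shows that lifecycle on a hypothetical bump allocator; the bump_arena type and its functions are illustration only and are not taken from any of the projects quoted on this page.

#include <stdlib.h>
#include <valgrind/memcheck.h>   /* VALGRIND_* client requests */

/* Hypothetical bump allocator, used only to illustrate the mempool protocol. */
typedef struct bump_arena {
    char  *base;
    size_t used;
    size_t size;
} bump_arena;

static bump_arena *arena_new(size_t size)
{
    bump_arena *a = malloc(sizeof(*a));
    a->base = malloc(size);
    a->used = 0;
    a->size = size;
    /* Register the pool: no red zones, memory is not handed out pre-zeroed. */
    VALGRIND_CREATE_MEMPOOL(a, 0, 0);
    /* Hide the backing buffer; only chunks handed out below become addressable. */
    VALGRIND_MAKE_MEM_NOACCESS(a->base, a->size);
    return a;
}

static void *arena_alloc(bump_arena *a, size_t n)
{
    if (a->used + n > a->size)
        return NULL;
    void *p = a->base + a->used;
    a->used += n;
    /* Tell Memcheck this chunk now belongs to the pool anchored at `a`. */
    VALGRIND_MEMPOOL_ALLOC(a, p, n);
    return p;
}

static void arena_free(bump_arena *a, void *p)
{
    /* A bump allocator cannot reuse the space, but Memcheck should still
     * treat any later access to the chunk as an error. */
    VALGRIND_MEMPOOL_FREE(a, p);
}

static void arena_destroy(bump_arena *a)
{
    /* Forget all outstanding chunks in one step, then release the memory. */
    VALGRIND_DESTROY_MEMPOOL(a);
    free(a->base);
    free(a);
}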
Example #2
/*@-internalglobs@*/
rpmioPool rpmioNewPool(const char * name, size_t size, int limit, int flags,
		char * (*dbg) (void *item),
		void (*init) (void *item),
		void (*fini) (void *item))
	/*@*/
{
    rpmioPool pool = xcalloc(1, sizeof(*pool));
#if defined(WITH_VALGRIND)
    static int rzB = 0;		/* size of red-zones (if any) */
    static int is_zeroed = 0;	/* does pool return zero'd allocations? */
    rzB = rzB;			/* XXX CentOS5 valgrind doesn't use. */
    is_zeroed = is_zeroed;	/* XXX CentOS5 valgrind doesn't use. */
#endif
    VALGRIND_CREATE_MEMPOOL(pool, rzB, is_zeroed);
    pool->have = yarnNewLock(0);
    pool->pool = NULL;
    pool->head = NULL;
    pool->tail = &pool->head;
    pool->size = size;
    pool->limit = limit;
    pool->flags = flags;
    pool->dbg = (void *) dbg;
    pool->init = init;
    pool->fini = fini;
    pool->reused = 0;
    pool->made = 0;
    pool->name = name;
    pool->zlog = NULL;
    rpmlog(RPMLOG_DEBUG, D_("pool %s:\tcreated size %u limit %d flags %d\n"), pool->name, (unsigned)pool->size, pool->limit, pool->flags);
    return pool;
}
Example #3
void MemoryPool::init(void* memory, size_t length)
{
    // hvlad: we do not use placement new[] here because:
    // a) by the standard, placement new[] may add some (unknown!) overhead and use
    // part of the allocated memory for its own bookkeeping. For example, MSVC reserves
    // the first array slot to store the item count and returns an advanced pointer.
    // In our case that means freeObjects != memory, and an access violation when
    // freeObjects' memory is deallocated, since freeObjects no longer points to the
    // parent pool.
    // b) the constructor of AtomicPointer does nothing except zero the memory, and a
    // plain memset does that much faster. The destructor of AtomicPointer is empty
    // and we don't need to call it. This behavior is unlikely to change.
    //
    // While we could work around (a) by storing `memory` so it can be released
    // correctly later, we can't predict in a portable way how much overhead
    // placement new[] would need.

    freeObjects = (FreeChainPtr*) memory;
    memset(freeObjects, 0, length * sizeof(void*));
    bigHunks = NULL;
    smallHunks = NULL;
    freeBlocks.nextLarger = freeBlocks.priorSmaller = &freeBlocks;
    junk.nextLarger = junk.priorSmaller = &junk;
    blocksAllocated = 0;
    blocksActive = 0;

#ifdef USE_VALGRIND
    delayedFreeCount = 0;
    delayedFreePos = 0;

    VALGRIND_CREATE_MEMPOOL(this, VALGRIND_REDZONE, 0);
#endif
}
Example #4
/**
 * Clear for reuse, avoids re-allocation when an arena may
 * otherwise be free'd and recreated.
 */
void BLI_memarena_clear(MemArena *ma)
{
	if (ma->bufs) {
		unsigned char *curbuf_prev;
		size_t curbuf_used;

		if (ma->bufs->next) {
			BLI_linklist_freeN(ma->bufs->next);
			ma->bufs->next = NULL;
		}

		curbuf_prev = ma->curbuf;
		ma->curbuf = ma->bufs->link;
		memarena_curbuf_align(ma);

		/* restore to original size */
		curbuf_used = (size_t)(curbuf_prev - ma->curbuf);
		ma->cursize += curbuf_used;

		if (ma->use_calloc) {
			memset(ma->curbuf, 0, curbuf_used);
		}
	}

#ifdef WITH_MEM_VALGRIND
	VALGRIND_DESTROY_MEMPOOL(ma);
	VALGRIND_CREATE_MEMPOOL(ma, 0, false);
#endif

}
Example #5
/*--------------------
 * MemoryContextCreate
 *		Context-type-independent part of context creation.
 *
 * This is only intended to be called by context-type-specific
 * context creation routines, not by the unwashed masses.
 *
 * The context creation procedure is a little bit tricky because
 * we want to be sure that we don't leave the context tree invalid
 * in case of failure (such as insufficient memory to allocate the
 * context node itself).  The procedure goes like this:
 *	1.  Context-type-specific routine first calls MemoryContextCreate(),
 *		passing the appropriate tag/size/methods values (the methods
 *		pointer will ordinarily point to statically allocated data).
 *		The parent and name parameters usually come from the caller.
 *	2.  MemoryContextCreate() attempts to allocate the context node,
 *		plus space for the name.  If this fails we can ereport() with no
 *		damage done.
 *	3.  We fill in all of the type-independent MemoryContext fields.
 *	4.  We call the type-specific init routine (using the methods pointer).
 *		The init routine is required to make the node minimally valid
 *		with zero chance of failure --- it can't allocate more memory,
 *		for example.
 *	5.  Now we have a minimally valid node that can behave correctly
 *		when told to reset or delete itself.  We link the node to its
 *		parent (if any), making the node part of the context tree.
 *	6.  We return to the context-type-specific routine, which finishes
 *		up type-specific initialization.  This routine can now do things
 *		that might fail (like allocate more memory), so long as it's
 *		sure the node is left in a state that delete will handle.
 *
 * This protocol doesn't prevent us from leaking memory if step 6 fails
 * during creation of a top-level context, since there's no parent link
 * in that case.  However, if you run out of memory while you're building
 * a top-level context, you might as well go home anyway...
 *
 * Normally, the context node and the name are allocated from
 * TopMemoryContext (NOT from the parent context, since the node must
 * survive resets of its parent context!).  However, this routine is itself
 * used to create TopMemoryContext!  If we see that TopMemoryContext is NULL,
 * we assume we are creating TopMemoryContext and use malloc() to allocate
 * the node.
 *
 * Note that the name field of a MemoryContext does not point to
 * separately-allocated storage, so it should not be freed at context
 * deletion.
 *--------------------
 */
MemoryContext
MemoryContextCreate(NodeTag tag, Size size,
					MemoryContextMethods *methods,
					MemoryContext parent,
					const char *name)
{
	MemoryContext node;
	Size		needed = size + strlen(name) + 1;

	/* creating new memory contexts is not allowed in a critical section */
	Assert(CritSectionCount == 0);

	/* Get space for node and name */
	if (TopMemoryContext != NULL)
	{
		/* Normal case: allocate the node in TopMemoryContext */
		node = (MemoryContext) MemoryContextAlloc(TopMemoryContext,
												  needed);
	}
	else
	{
		/* Special case for startup: use good ol' malloc */
		node = (MemoryContext) malloc(needed);
		Assert(node != NULL);
	}

	/* Initialize the node as best we can */
	MemSet(node, 0, size);
	node->type = tag;
	node->methods = methods;
	node->parent = NULL;		/* for the moment */
	node->firstchild = NULL;
	node->prevchild = NULL;
	node->nextchild = NULL;
	node->isReset = true;
	node->name = ((char *) node) + size;
	strcpy(node->name, name);

	/* Type-specific routine finishes any other essential initialization */
	(*node->methods->init) (node);

	/* OK to link node to parent (if any) */
	/* Could use MemoryContextSetParent here, but doesn't seem worthwhile */
	if (parent)
	{
		node->parent = parent;
		node->nextchild = parent->firstchild;
		if (parent->firstchild != NULL)
			parent->firstchild->prevchild = node;
		parent->firstchild = node;
		/* inherit allowInCritSection flag from parent */
		node->allowInCritSection = parent->allowInCritSection;
	}

	VALGRIND_CREATE_MEMPOOL(node, 0, false);

	/* Return to type-specific creation routine to finish up */
	return node;
}
Example #6
static void trace_init()
{
  marker=1;
  VALGRIND_CREATE_MEMPOOL(&marker, REDZONE_SIZE, 1);
  if(getenv("ARENA_SZ")) {
    pos = ARENA_SIZE - atoi(getenv("ARENA_SZ"));
  }
}
Example #7
    void FixedAlloc::CreateChunk(bool canFail)
    {
        // Allocate a new block
        m_numBlocks++;

        vmpi_spin_lock_t *lock = NULL;
        if(m_isFixedAllocSafe) {
            lock = &((FixedAllocSafe*)this)->m_spinlock;
            VMPI_lockRelease(lock);
        }

        FixedBlock* b = (FixedBlock*) m_heap->Alloc(1, GCHeap::kExpand | (canFail ? GCHeap::kCanFail : 0));
        VALGRIND_CREATE_MEMPOOL(b,  0/*redZoneSize*/, 0/*zeroed*/);

        // treat the block header as an allocation so reads/writes of it are okay
        VALGRIND_MEMPOOL_ALLOC(b, b, (char*)b->items - (char*)b);

        if(lock != NULL)
            VMPI_lockAcquire(lock);

        if(!b)
            return;

        b->numAlloc = 0;
        b->size = (uint16_t)m_itemSize;
        b->firstFree = 0;
        b->nextItem = b->items;
        b->alloc = this;

#ifdef GCDEBUG
        // Deleted and unused memory is poisoned, this is important for leak diagnostics.
        if (!RUNNING_ON_VALGRIND)
            VMPI_memset(b->items, uint8_t(GCHeap::FXFreedPoison), m_itemSize * m_itemsPerBlock);
#endif

        // Link the block at the end of the list.
        b->prev = m_lastBlock;
        b->next = 0;
        if (m_lastBlock)
            m_lastBlock->next = b;
        if (!m_firstBlock)
            m_firstBlock = b;
        m_lastBlock = b;

        // Add our new ChunkBlock to the firstFree list (which should
        // be empty but might not be, because we let go of the lock above)
        if (m_firstFree)
        {
            GCAssert(m_firstFree->prevFree == 0);
            m_firstFree->prevFree = b;
        }
        b->nextFree = m_firstFree;
        b->prevFree = 0;
        m_firstFree = b;

        return;
    }
Example #8
		Nursery(size_t size, unsigned gen, GC *next)
		 : m_size(size),
		   m_free(size),
		   m_min_free(size/4),
		   m_data_blocks(0),
		   m_generation(gen),
		   m_next(next)
		{
			m_data = m_next_pos = (uint8_t*)malloc(size);
			VALGRIND_CREATE_MEMPOOL(m_data, 0, false);
			m_remembered_end = m_remembered_set = (Remembered*)(m_data + size);
		}
Example #9
MemArena *BLI_memarena_new(const size_t bufsize, const char *name)
{
	MemArena *ma = MEM_callocN(sizeof(*ma), "memarena");
	ma->bufsize = bufsize;
	ma->align = 8;
	ma->name = name;

#ifdef WITH_MEM_VALGRIND
	VALGRIND_CREATE_MEMPOOL(ma, 0, false);
#endif

	return ma;
}
Example #10
void slist_init(struct simple_list* slist, size_t key_size, size_t element_size, size_t initial_size) {
    slist->key   = NULL;
    slist->value = NULL;
    slist->size = 0;
    slist->capacity = 0;
    slist->key_size = key_size;
    slist->element_size = element_size;
    slist_alloc(slist, initial_size);

#ifndef NVALGRIND
    VALGRIND_CREATE_MEMPOOL(slist->value, 0, 0);
#endif
}
Example #11
struct pool *
allocate_pool()
{
  struct pool *p = malloc(sizeof(struct pool));
  assert(p);
  p->allocated = 4096;
  p->used = 0;
  p->buf = malloc(p->allocated);
  assert(p->buf);
  memset(p->buf, 0, p->allocated);
  VALGRIND_CREATE_MEMPOOL(p, 0, 0);
  VALGRIND_MAKE_NOACCESS(p->buf, p->allocated);
  return p;
}
Example #12
/*
 * MemoryContextResetOnly
 *		Release all space allocated within a context.
 *		Nothing is done to the context's descendant contexts.
 */
void
MemoryContextResetOnly(MemoryContext context)
{
	AssertArg(MemoryContextIsValid(context));

	/* Nothing to do if no pallocs since startup or last reset */
	if (!context->isReset)
	{
		MemoryContextCallResetCallbacks(context);
		(*context->methods->reset) (context);
		context->isReset = true;
		VALGRIND_DESTROY_MEMPOOL(context);
		VALGRIND_CREATE_MEMPOOL(context, 0, false);
	}
}
Example #13
ContextMemoryManager::ContextMemoryManager() : d_indexChunkList(0) {
  // Create initial chunk
  d_chunkList.push_back((char*)malloc(chunkSizeBytes));
  d_nextFree = d_chunkList.back();
  if(d_nextFree == NULL) {
    throw std::bad_alloc();
  }
  d_endChunk = d_nextFree + chunkSizeBytes;

#ifdef CVC4_VALGRIND
  VALGRIND_CREATE_MEMPOOL(this, 0, false);
  VALGRIND_MAKE_MEM_NOACCESS(d_nextFree, chunkSizeBytes);
  d_allocations.push_back(std::vector<char*>());
#endif /* CVC4_VALGRIND */
}
Example #14
static void*
pool_alloc (void)
{
	Pool *pool;
	void *pages, *item;
	size_t len, i;

	/* A pool with an available item */
	for (pool = all_pools; pool; pool = pool->next) {
		if (unused_peek (&pool->unused))
			break;
	}

	/* Create a new pool */
	if (pool == NULL) {
		len = getpagesize () * 2;
		pages = mmap (0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
		if (pages == MAP_FAILED)
			return NULL;

		/* Fill in the block header, and include it in the block list */
		pool = pages;
		pool->next = all_pools;
		all_pools = pool;
		pool->length = len;
		pool->used = 0;
		pool->unused = NULL;

		/* Fill block with unused items */
		pool->n_items = (len - sizeof (Pool)) / sizeof (Item);
		for (i = 0; i < pool->n_items; ++i)
			unused_push (&pool->unused, pool->items + i);

#ifdef WITH_VALGRIND
		VALGRIND_CREATE_MEMPOOL(pool, 0, 0);
#endif
	}

	++pool->used;
	ASSERT (unused_peek (&pool->unused));
	item = unused_pop (&pool->unused);

#ifdef WITH_VALGRIND
	VALGRIND_MEMPOOL_ALLOC (pool, item, sizeof (Item));
#endif

	return memset (item, 0, sizeof (Item));
}
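pool_alloc() above only shows the allocation side; the matching release path would report the chunk to Memcheck with VALGRIND_MEMPOOL_FREE before pushing the item back onto the pool's unused list. The sketch below writes such a pool_free() against the Pool/Item fields and helpers that appear in the example above; the pool-lookup loop is an assumption for illustration and is not taken from the same project.

static void
pool_free (void *item)
{
	Pool *pool;
	char *ptr = item;

	/* Find the pool this item was carved from. */
	for (pool = all_pools; pool; pool = pool->next) {
		if (ptr >= (char *)pool->items &&
		    ptr < (char *)pool->items + pool->n_items * sizeof (Item))
			break;
	}
	ASSERT (pool != NULL);

#ifdef WITH_VALGRIND
	/* The chunk is no longer live; later reads/writes should be reported. */
	VALGRIND_MEMPOOL_FREE (pool, item);
#endif

	/* Return the item to the pool's free list. */
	unused_push (&pool->unused, item);
	--pool->used;
}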
Example #15
void push(pool *p)
{
   level_list *l;

   if(USE_MMAP)
      l = (level_list *)mmap(0, sizeof(level_list),
                             PROT_READ|PROT_WRITE|PROT_EXEC,
                             MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
   else
      l = (level_list *)malloc(sizeof(level_list));

   l->next = p->levels;
   l->where = p->where;
   VALGRIND_CREATE_MEMPOOL(l->where, REDZONE_SIZE, 0);
   p->levels = l;
}
Example #16
static void *
eina_one_big_init(const char *context,
                  EINA_UNUSED const char *option,
                  va_list args)
{
   One_Big *pool;
   int item_size;
   size_t length;

   length = context ? strlen(context) + 1 : 0;

   pool = calloc(1, sizeof (One_Big) + length);
   if (!pool)
      return NULL;

   item_size = va_arg(args, int);

   pool->item_size = eina_mempool_alignof(item_size);
   pool->max = va_arg(args, int);

   pool->offset_to_item_inlist = pool->item_size;
   if (pool->offset_to_item_inlist % (int)sizeof(void *) != 0)
     {
        pool->offset_to_item_inlist =
          (((pool->offset_to_item_inlist / (int)sizeof(void *)) + 1) *
           (int)sizeof(void *));
     }

   if (length)
     {
        pool->name = (const char *)(pool + 1);
        memcpy((char *)pool->name, context, length);
     }

#ifdef EINA_HAVE_DEBUG_THREADS
   pool->self = eina_thread_self();
#endif
   eina_lock_new(&pool->mutex);

#ifndef NVALGRIND
   VALGRIND_CREATE_MEMPOOL(pool, 0, 1);
#endif

   return pool;
}
Example #17
/*
 * MemoryContextReset
 *		Release all space allocated within a context and its descendants,
 *		but don't delete the contexts themselves.
 *
 * The type-specific reset routine handles the context itself, but we
 * have to do the recursion for the children.
 */
void
MemoryContextReset(MemoryContext context)
{
	AssertArg(MemoryContextIsValid(context));

	/* save a function call in common case where there are no children */
	if (context->firstchild != NULL)
		MemoryContextResetChildren(context);

	/* Nothing to do if no pallocs since startup or last reset */
	if (!context->isReset)
	{
		(*context->methods->reset) (context);
		context->isReset = true;
		VALGRIND_DESTROY_MEMPOOL(context);
		VALGRIND_CREATE_MEMPOOL(context, 0, false);
	}
}
Example #18
int psmi_sysbuf_init(void)
{
	int i;
	uint32_t block_sizes[] = { 256, 512, 1024,
		2048, 4096, 8192, (uint32_t) -1 };
	uint32_t replenishing_rate[] = { 128, 64, 32, 16, 8, 4, 0 };

	if (psmi_sysbuf.is_initialized)
		return PSM_OK;

	for (i = 0; i < MM_NUM_OF_POOLS; i++) {
		psmi_sysbuf.handler_index[i].block_size = block_sizes[i];
		psmi_sysbuf.handler_index[i].current_available = 0;
		psmi_sysbuf.handler_index[i].free_list = NULL;
		psmi_sysbuf.handler_index[i].total_alloc = 0;
		psmi_sysbuf.handler_index[i].replenishing_rate =
			replenishing_rate[i];

		if (block_sizes[i] == -1) {
			psmi_assert_always(replenishing_rate[i] == 0);
			psmi_sysbuf.handler_index[i].flags =
				MM_FLAG_TRANSIENT;
		} else {
			psmi_assert_always(replenishing_rate[i] > 0);
			psmi_sysbuf.handler_index[i].flags = MM_FLAG_NONE;
		}
	}

	VALGRIND_CREATE_MEMPOOL(&psmi_sysbuf, PSM_VALGRIND_REDZONE_SZ,
				PSM_VALGRIND_MEM_UNDEFINED);

	/* Hit once on each block size so we have a pool that's allocated */
	for (i = 0; i < MM_NUM_OF_POOLS; i++) {
		void *ptr;
		if (block_sizes[i] == -1)
			continue;
		ptr = psmi_sysbuf_alloc(block_sizes[i]);
		psmi_sysbuf_free(ptr);
	}

	return PSM_OK;
}
Example #19
static void twdInitOnce(void *arg)
{
    epicsThreadId tid;

    tLock = epicsMutexMustCreate();
    mLock = epicsMutexMustCreate();
    fLock = epicsMutexMustCreate();
    ellInit(&fList);
    VALGRIND_CREATE_MEMPOOL(&fList, 0, 0);

    twdCtl = twdctlRun;
    loopEvent = epicsEventMustCreate(epicsEventEmpty);
    exitEvent = epicsEventMustCreate(epicsEventEmpty);

    tid = epicsThreadCreate("taskwd", epicsThreadPriorityLow,
         epicsThreadGetStackSize(epicsThreadStackSmall),
         twdTask, NULL);
    if (tid == 0)
        cantProceed("Failed to spawn task watchdog thread\n");

    epicsAtExit(twdShutdown, NULL);
}
Example #20
ucs_status_t ucs_mpool_init(ucs_mpool_t *mp, size_t priv_size,
                            size_t elem_size, size_t align_offset, size_t alignment,
                            unsigned elems_per_chunk, unsigned max_elems,
                            ucs_mpool_ops_t *ops, const char *name)
{
    /* Check input values */
    if ((elem_size == 0) || (align_offset > elem_size) ||
        (alignment == 0) || !ucs_is_pow2(alignment) ||
        (elems_per_chunk == 0) || (max_elems < elems_per_chunk))
    {
        ucs_error("Invalid memory pool parameter(s)");
        return UCS_ERR_INVALID_PARAM;
    }

    mp->data = ucs_malloc(sizeof(*mp->data) + priv_size, "mpool_data");
    if (mp->data == NULL) {
        ucs_error("Failed to allocate memory pool slow-path area");
        return UCS_ERR_NO_MEMORY;
    }

    mp->freelist           = NULL;
    mp->data->elem_size    = sizeof(ucs_mpool_elem_t) + elem_size;
    mp->data->alignment    = alignment;
    mp->data->align_offset = sizeof(ucs_mpool_elem_t) + align_offset;
    mp->data->quota        = max_elems;
    mp->data->tail         = NULL;
    mp->data->chunk_size   = sizeof(ucs_mpool_chunk_t) + alignment +
                             elems_per_chunk * ucs_mpool_elem_total_size(mp->data);
    mp->data->chunks       = NULL;
    mp->data->ops          = ops;
    mp->data->name         = strdup(name);

    VALGRIND_CREATE_MEMPOOL(mp, 0, 0);

    ucs_debug("mpool %s: align %u, maxelems %u, elemsize %u",
              ucs_mpool_name(mp), mp->data->alignment, max_elems, mp->data->elem_size);
    return UCS_OK;
}
Example #21
/* Creates the allocator data structure with bins. */
MVMFixedSizeAlloc * MVM_fixed_size_create(MVMThreadContext *tc) {
    int init_stat;
#ifdef MVM_VALGRIND_SUPPORT
    int bin_no;
#endif
    MVMFixedSizeAlloc *al = MVM_malloc(sizeof(MVMFixedSizeAlloc));
    al->size_classes = MVM_calloc(MVM_FSA_BINS, sizeof(MVMFixedSizeAllocSizeClass));
    if ((init_stat = uv_mutex_init(&(al->complex_alloc_mutex))) < 0)
        MVM_exception_throw_adhoc(tc, "Failed to initialize mutex: %s",
            uv_strerror(init_stat));
    al->freelist_spin = 0;
    al->free_at_next_safepoint_overflows = NULL;

    /* All other places where we use valgrind macros are very likely
     * thrown out by dead code elimination. Not 100% sure about this,
     * so we ifdef it out. */
#ifdef MVM_VALGRIND_SUPPORT
    for (bin_no = 0; bin_no < MVM_FSA_BINS; bin_no++)
        VALGRIND_CREATE_MEMPOOL(&al->size_classes[bin_no], MVM_FSA_REDZONE_BYTES, 0);
#endif

    return al;
}
Example #22
void valgrindCreateMempool(MM_GCExtensionsBase *extensions, MM_EnvironmentBase *env, uintptr_t poolAddr)
{
    // is_zeroed = 1 lets valgrind know that objects will be defined when allocated
    VALGRIND_CREATE_MEMPOOL(poolAddr, 0, 1);
    extensions->valgrindMempoolAddr = poolAddr;

    MUTEX_INIT(extensions->memcheckHashTableMutex);
    MUTEX_ENTER(extensions->memcheckHashTableMutex);
    const char *tableName = "MemcheckWrapper";
    uint32_t entrySize = sizeof(uintptr_t);

    extensions->memcheckHashTable = hashTableNew(env->getPortLibrary(),
                                                 tableName,
                                                 0,
                                                 entrySize,
                                                 0,
                                                 0,
                                                 OMRMEM_CATEGORY_VM,
                                                 hashFn,
                                                 hashEqualFn,
                                                 0,
                                                 0);
    MUTEX_EXIT(extensions->memcheckHashTableMutex);
}
Example #23
/*
 ** ArenaAllocate() -- allocate space from an arena pool
 **
 ** Description: ArenaAllocate() allocates space from an arena
 ** pool.
 **
 ** First try to satisfy the request from arenas starting at
 ** pool->current.
 **
 ** If there is not enough space in the arena pool->current, try
 ** to claim an arena, on a first fit basis, from the global
 ** freelist (arena_freelist).
 **
 ** If no arena in arena_freelist is suitable, then try to
 ** allocate a new arena from the heap.
 **
 ** Returns: pointer to allocated space or NULL
 **
 */
void *ArenaAllocate(ArenaPool *pool, unsigned int nb)
{
    Arena *a;
    char *rp;     /* returned pointer */

#ifdef DEBUG_ARENA_MALLOC
    assert((nb & pool->mask) == 0);
#endif

    nb = (uword)ARENA_ALIGN(pool, nb); /* force alignment */

    /* attempt to allocate from arenas at pool->current */
    {
        a = pool->current;
        do {
            if (a->avail + nb <= a->limit)  {
                pool->current = a;
                rp = (char *)a->avail;
                a->avail += nb;
                VALGRIND_MEMPOOL_ALLOC(a->base, rp, nb);
                return rp;
            }
        } while (NULL != (a = a->next));
    }

    /* attempt to allocate from arena_freelist */
    {
        Arena *p; /* previous pointer, for unlinking from freelist */

        for (a = p = arena_freelist; a != NULL; p = a, a = a->next) {
            if (a->base + nb <= a->limit)  {
                if (p == arena_freelist) {
                    arena_freelist = a->next;
                } else {
                    p->next = a->next;
                }
                a->avail = a->base;
                rp = (char *)a->avail;
                a->avail += nb;
                VALGRIND_MEMPOOL_ALLOC(a->base, rp, nb);
                /* the newly allocated arena is linked after pool->current
                 *  and becomes pool->current */
                a->next = pool->current->next;
                pool->current->next = a;
                pool->current = a;
                if (0 == pool->first.next) {
                    pool->first.next = a;
                }
                freelist_count--;
                return (rp);
            }
        }
    }

    /* attempt to allocate from the heap */
    {
        unsigned int sz;
#if HAVE_MMAP
        if (pool->cumul > pool->largealloc) {
            // High memory pressure. Switch to a fractional allocation strategy
            // so that malloc gets a chance to successfully trim us down when it's over.
            sz = qMin(pool->cumul / 12, MAX_DISCRETE_ALLOCATION(pool));
#ifdef DEBUG_ARENA_MALLOC
            printf("allocating %d bytes (fractional strategy)\n", sz);
#endif
        } else
#endif
            sz = pool->arenasize > nb ? pool->arenasize : nb;
        sz += sizeof * a + pool->mask; /* header and alignment slop */
        pool->cumul += sz;
#ifdef DEBUG_ARENA_MALLOC
        i++;
        printf("Malloc: %d\n", i);
#endif
        a = (Arena *)malloc(sz);
        if (a)  {
            a->limit = (uword)a + sz;
            a->base = a->avail = (uword)ARENA_ALIGN(pool, a + 1);
            VALGRIND_CREATE_MEMPOOL(a->base, 0, 0);
            rp = (char *)a->avail;
            a->avail += nb;
            VALGRIND_MEMPOOL_ALLOC(a->base, rp, nb);

            /* the newly allocated arena is linked after pool->current
            *  and becomes pool->current */
            a->next = pool->current->next;
            pool->current->next = a;
            pool->current = a;
            if (!pool->first.next) {
                pool->first.next = a;
            }
            return (rp);
        }
    }

    /* we got to here, and there's no memory to allocate */
    return (0);
} /* --- end ArenaAllocate() --- */
Example #24
    void* GCLargeAlloc::Alloc(size_t requestSize, int flags)
#endif
    {
#ifdef DEBUG
        m_gc->heap->CheckForOOMAbortAllocation();
#endif
        GCHeap::CheckForAllocSizeOverflow(requestSize, sizeof(LargeBlock)+GCHeap::kBlockSize);

        int blocks = (int)((requestSize+sizeof(LargeBlock)+GCHeap::kBlockSize-1) / GCHeap::kBlockSize);
        uint32_t computedSize = blocks*GCHeap::kBlockSize - sizeof(LargeBlock);

        // Allocation must be signalled before we allocate because no GC work must be allowed to
        // come between an allocation and an initialization - if it does, we may crash, as
        // GCFinalizedObject subclasses may not have a valid vtable, but the GC depends on them
        // having it.  In principle we could signal allocation late but only set the object
        // flags after signaling, but we might still cause trouble for the profiler, which also
        // depends on non-interruptibility.

        m_gc->SignalAllocWork(computedSize);

        // Pointer-containing memory is always zeroed (see bug 594533).
        if((flags&GC::kContainsPointers) != 0)
            flags |= GC::kZero;

        LargeBlock *block = (LargeBlock*) m_gc->AllocBlock(blocks, PageMap::kGCLargeAllocPageFirst,
                                                           (flags&GC::kZero) != 0, (flags&GC::kCanFail) != 0);
        void *item = NULL;

        if (block)
        {
            // Code below uses these optimizations
            GCAssert((unsigned long)GC::kFinalize == (unsigned long)kFinalizable);
            GCAssert((unsigned long)GC::kInternalExact == (unsigned long)kVirtualGCTrace);
            
            gcbits_t flagbits0 = 0;
            gcbits_t flagbits1 = 0;

#if defined VMCFG_EXACT_TRACING
            flagbits0 = (flags & (GC::kFinalize|GC::kInternalExact));
#elif defined VMCFG_SELECTABLE_EXACT_TRACING
            flagbits0 = (flags & (GC::kFinalize|m_gc->runtimeSelectableExactnessFlag));  // 0 or GC::kInternalExact
#else
            flagbits0 = (flags & GC::kFinalize);
#endif

            VALGRIND_CREATE_MEMPOOL(block, /*rdzone*/0, (flags&GC::kZero) != 0);
            VALGRIND_MEMPOOL_ALLOC(block, block, sizeof(LargeBlock));

            block->gc = this->m_gc;
            block->alloc= this;
            block->next = m_blocks;
            block->size = computedSize;
            block->bibopTag = 0;
#ifdef MMGC_FASTBITS
            block->bitsShift = 12;     // Always use bits[0]
#endif
            block->containsPointers = ((flags&GC::kContainsPointers) != 0) ? 1 : 0;
            block->rcobject = ((flags&GC::kRCObject) != 0) ? 1 : 0;
            block->bits = block->flags;
            m_blocks = block;

            item = block->GetObject();

            if(m_gc->collecting && !m_startedFinalize)
                flagbits0 |= kMark;

            block->flags[0] = flagbits0;
            block->flags[1] = flagbits1;
#ifdef _DEBUG
            (void)originalSize;
            if (flags & GC::kZero)
            {
                if (!RUNNING_ON_VALGRIND)
                {
                    // AllocBlock should take care of this
                    for(int i=0, n=(int)(requestSize/sizeof(int)); i<n; i++) {
                        if(((int*)item)[i] != 0)
                            GCAssert(false);
                    }
                }
            }
#endif

            // see comments in GCAlloc about using full size instead of ask size
            VALGRIND_MEMPOOL_ALLOC(block, item, computedSize);

#ifdef MMGC_HOOKS
            GCHeap* heap = GCHeap::GetGCHeap();
            if(heap->HooksEnabled()) {
                size_t userSize = block->size - DebugSize();
#ifdef MMGC_MEMORY_PROFILER
                m_totalAskSize += originalSize;
                heap->AllocHook(GetUserPointer(item), originalSize, userSize, /*managed=*/true);
#else
                heap->AllocHook(GetUserPointer(item), 0, userSize, /*managed=*/true);
#endif
            }
#endif
        }
        return item;
    }
Example #25
    GCAlloc::GCBlock* GCAlloc::CreateChunk(int flags)
    {
        // Too many definitions of kBlockSize, make sure they're at least in sync.

        GCAssert(uint32_t(kBlockSize) == GCHeap::kBlockSize);

        // Get bitmap space; this may trigger OOM handling.

        gcbits_t* bits = m_bitsInPage ? NULL : (gcbits_t*)m_gc->AllocBits(m_numBitmapBytes, m_sizeClassIndex);

        // Allocate a new block; this may trigger OOM handling (though that
        // won't affect the bitmap space, which is not GC'd individually).

        GCBlock* b = (GCBlock*) m_gc->AllocBlock(1, PageMap::kGCAllocPage, /*zero*/true,  (flags&GC::kCanFail) != 0);

        if (b)
        {
            VALGRIND_CREATE_MEMPOOL(b, 0/*redZoneSize*/, 1/*zeroed*/);

            // treat block header as a separate allocation
            VALGRIND_MEMPOOL_ALLOC(b, b, sizeof(GCBlock));


            b->gc = m_gc;
            b->alloc = this;
            b->size = m_itemSize;
            b->slowFlags = 0;
            if(m_gc->collecting && m_finalized)
                b->finalizeState = m_gc->finalizedValue;
            else
                b->finalizeState = !m_gc->finalizedValue;

            b->bibopTag = m_bibopTag;

#ifdef MMGC_FASTBITS
            b->bitsShift = (uint8_t) m_bitsShift;
#endif
            b->containsPointers = ContainsPointers();
            b->rcobject = ContainsRCObjects();

            if (m_bitsInPage)
                b->bits = (gcbits_t*)b + sizeof(GCBlock);
            else
                b->bits = bits;

            // ditto for in page bits
            if (m_bitsInPage) {
                VALGRIND_MEMPOOL_ALLOC(b, b->bits, m_numBitmapBytes);
            }

            // Link the block at the end of the list
            b->prev = m_lastBlock;
            b->next = 0;

            if (m_lastBlock) {
                m_lastBlock->next = b;
            }
            if (!m_firstBlock) {
                m_firstBlock = b;
            }
            m_lastBlock = b;

            // Add our new ChunkBlock to the firstFree list (which should be empty)
            if (m_firstFree)
            {
                GCAssert(m_firstFree->prevFree == 0);
                m_firstFree->prevFree = b;
            }
            b->nextFree = m_firstFree;
            b->prevFree = 0;
            m_firstFree = b;

            // calculate back from end (better alignment, no dead space at end)
            b->items = (char*)b+GCHeap::kBlockSize - m_itemsPerBlock * m_itemSize;
            b->numFree = (short)m_itemsPerBlock;

            // explode the new block onto its free list
            //
            // We must make the object look free, which means poisoning it properly and setting
            // the mark bits correctly.

            b->firstFree = b->items;
            void** p = (void**)(void*)b->items;
            int limit = m_itemsPerBlock-1;
#ifdef MMGC_HOOKS
            GCHeap* heap = GCHeap::GetGCHeap();
#endif
            for ( int i=0 ; i < limit ; i++ ) {
#ifdef MMGC_HOOKS
#ifdef MMGC_MEMORY_INFO // DebugSize is 0 if MEMORY_INFO is off, so we get an "obviously true" warning from GCC.
                GCAssert(m_itemSize >= DebugSize());
#endif
                if(heap->HooksEnabled())
                    heap->PseudoFreeHook(GetUserPointer(p), m_itemSize - DebugSize(), uint8_t(GCHeap::GCSweptPoison));
#endif
                p = FLSeed(p, (char*)p + m_itemSize);
            }
#ifdef MMGC_HOOKS
            if(heap->HooksEnabled())
                heap->PseudoFreeHook(GetUserPointer(p), m_itemSize - DebugSize(), uint8_t(GCHeap::GCSweptPoison));
#endif
            p[0] = NULL;

            // Set all the mark bits to 'free'
            
            GCAssert(sizeof(gcbits_t) == 1);
            GCAssert(kFreelist == 3);
            GCAssert(m_numBitmapBytes % 4 == 0);
            
            uint32_t *pbits = (uint32_t*)(void *)b->bits;
            for(int i=0, n=m_numBitmapBytes>>2; i < n; i++)
                pbits[i] = 0x03030303;

#ifdef MMGC_MEMORY_INFO
            VerifyFreeBlockIntegrity(b->firstFree, m_itemSize);
#endif
        }
        else {
            if (bits)