Example #1
/*
 * MemoryContextAllocExtended
 *		Allocate space within the specified context using the given flags.
 */
void *
MemoryContextAllocExtended(MemoryContext context, Size size, int flags)
{
	void	   *ret;

	AssertArg(MemoryContextIsValid(context));
	AssertNotInCriticalSection(context);

	if (((flags & MCXT_ALLOC_HUGE) != 0 && !AllocHugeSizeIsValid(size)) ||
		((flags & MCXT_ALLOC_HUGE) == 0 && !AllocSizeIsValid(size)))
		elog(ERROR, "invalid memory alloc request size %zu", size);

	context->isReset = false;

	ret = (*context->methods->alloc) (context, size);
	if (ret == NULL)
	{
		if ((flags & MCXT_ALLOC_NO_OOM) == 0)
		{
			MemoryContextStats(TopMemoryContext);
			ereport(ERROR,
					(errcode(ERRCODE_OUT_OF_MEMORY),
					 errmsg("out of memory"),
					 errdetail("Failed on request of size %zu.", size)));
		}
		return NULL;
	}

	VALGRIND_MEMPOOL_ALLOC(context, ret, size);

	if ((flags & MCXT_ALLOC_ZERO) != 0)
		MemSetAligned(ret, 0, size);

	return ret;
}
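A minimal caller-side sketch may help tie the flags together. It is hedged: it assumes PostgreSQL's stock memutils.h flag names (the ones the snippet above tests for) and is not part of any example on this page. It requests zeroed memory and has allocation failure reported as NULL instead of a raised ERROR.

/* Hypothetical caller, not taken from the snippet above. */
Size        nbytes = 64 * 1024;
char       *buf;

buf = MemoryContextAllocExtended(CurrentMemoryContext, nbytes,
                                 MCXT_ALLOC_NO_OOM | MCXT_ALLOC_ZERO);
if (buf == NULL)
    elog(LOG, "could not allocate %zu bytes", nbytes);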
Example #2
void *
palloc0(Size size)
{
	/* duplicates MemoryContextAllocZero to avoid increased overhead */
	void	   *ret;

	AssertArg(MemoryContextIsValid(CurrentMemoryContext));
	AssertNotInCriticalSection(CurrentMemoryContext);

	if (!AllocSizeIsValid(size))
		elog(ERROR, "invalid memory alloc request size %zu", size);

	CurrentMemoryContext->isReset = false;

	ret = (*CurrentMemoryContext->methods->alloc) (CurrentMemoryContext, size);
	if (ret == NULL)
	{
		MemoryContextStats(TopMemoryContext);
		ereport(ERROR,
				(errcode(ERRCODE_OUT_OF_MEMORY),
				 errmsg("out of memory"),
				 errdetail("Failed on request of size %zu.", size)));
	}

	VALGRIND_MEMPOOL_ALLOC(CurrentMemoryContext, ret, size);

	MemSetAligned(ret, 0, size);

	return ret;
}
Example #3
File: mcxt.c Project: ysd001/pgpool2
/*
 * MemoryContextAllocHuge
 *		Allocate (possibly-expansive) space within the specified context.
 *
 * See considerations in comment at MaxAllocHugeSize.
 */
void *
MemoryContextAllocHuge(MemoryContext context, Size size)
{
	void	   *ret;

	AssertArg(MemoryContextIsValid(context));
	AssertNotInCriticalSection(context);

	if (!AllocHugeSizeIsValid(size))
		elog(ERROR, "invalid memory alloc request size %zu", size);

	context->isReset = false;

	ret = (*context->methods->alloc) (context, size);
	if (ret == NULL)
	{
		MemoryContextStats(TopMemoryContext);
		ereport(ERROR,
				(errcode(ERRCODE_OUT_OF_MEMORY),
				 errmsg("out of memory"),
				 errdetail("Failed on request of size %zu.", size)));
	}

	VALGRIND_MEMPOOL_ALLOC(context, ret, size);

	return ret;
}
Example #4
void *BLI_memarena_alloc(MemArena *ma, size_t size)
{
	void *ptr;

	/* ensure proper alignment by rounding
	 * size up to a multiple of the arena alignment */
	size = PADUP(size, ma->align);

	if (UNLIKELY(size > ma->cursize)) {
		if (size > ma->bufsize - (ma->align - 1)) {
			ma->cursize = PADUP(size + 1, ma->align);
		}
		else {
			ma->cursize = ma->bufsize;
		}

		ma->curbuf = (ma->use_calloc ? MEM_callocN : MEM_mallocN)(ma->cursize, ma->name);
		BLI_linklist_prepend(&ma->bufs, ma->curbuf);
		memarena_curbuf_align(ma);
	}

	ptr = ma->curbuf;
	ma->curbuf += size;
	ma->cursize -= size;

#ifdef WITH_MEM_VALGRIND
	VALGRIND_MEMPOOL_ALLOC(ma, ptr, size);
#endif

	return ptr;
}
Example #5
File: taskwd.c Project: ukaea/epics
static void freeNode(union twdNode *pn)
{
    /* Retire the whole node, then re-register just its ELLNODE header so the
     * free-list links stay addressable to Memcheck while the node is pooled. */
    VALGRIND_MEMPOOL_FREE(&fList, pn);
    VALGRIND_MEMPOOL_ALLOC(&fList, pn, sizeof(ELLNODE));
    epicsMutexMustLock(fLock);
    ellAdd(&fList, (void *)pn);
    epicsMutexUnlock(fLock);
}
Example #6
char *allocate(pool *p, int size)
{
   char *where;
   /* Carve the request out of the pool, leaving a red zone on each side. */
   p->left -= size + (REDZONE_SIZE*2);
   where = p->where + REDZONE_SIZE;
   p->where += size + (REDZONE_SIZE*2);
   VALGRIND_MEMPOOL_ALLOC(p->levels->where, where, size);
   return where;
}
Example #7
void * 
allocate_from_pool(struct pool *p, size_t n)
{
  void *a = p->buf + p->used;
  assert(p->used + n < p->allocated);
  VALGRIND_MEMPOOL_ALLOC(p, a, n);
  p->used += n;
  return a;
}
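Taken together, the snippets on this page all follow the same Memcheck mempool lifecycle. Below is a self-contained sketch of that lifecycle using only the stock client requests from valgrind/memcheck.h; the demo_pool struct and demo function are illustrative, not taken from any of the projects above.

#include <stdlib.h>
#include <valgrind/memcheck.h>

struct demo_pool { char *buf; size_t used, allocated; };

static void demo(void)
{
    struct demo_pool p = { malloc(4096), 0, 4096 };

    VALGRIND_CREATE_MEMPOOL(&p, 0, 0);      /* no red zones, not pre-zeroed */

    char *a = p.buf + p.used;               /* bump-pointer allocation */
    p.used += 128;
    VALGRIND_MEMPOOL_ALLOC(&p, a, 128);     /* a..a+127 becomes addressable */

    VALGRIND_MEMPOOL_FREE(&p, a);           /* chunk becomes noaccess again */

    VALGRIND_DESTROY_MEMPOOL(&p);           /* forget the pool's bookkeeping */
    free(p.buf);
}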
Example #8
    void FixedAlloc::CreateChunk(bool canFail)
    {
        // Allocate a new block
        m_numBlocks++;

        vmpi_spin_lock_t *lock = NULL;
        if(m_isFixedAllocSafe) {
            lock = &((FixedAllocSafe*)this)->m_spinlock;
            VMPI_lockRelease(lock);
        }

        FixedBlock* b = (FixedBlock*) m_heap->Alloc(1, GCHeap::kExpand | (canFail ? GCHeap::kCanFail : 0));
        VALGRIND_CREATE_MEMPOOL(b,  0/*redZoneSize*/, 0/*zeroed*/);

        // treat the block header as an allocation so reads and writes of it are okay
        VALGRIND_MEMPOOL_ALLOC(b, b, (char*)b->items - (char*)b);

        if(lock != NULL)
            VMPI_lockAcquire(lock);

        if(!b)
            return;

        b->numAlloc = 0;
        b->size = (uint16_t)m_itemSize;
        b->firstFree = 0;
        b->nextItem = b->items;
        b->alloc = this;

#ifdef GCDEBUG
        // Deleted and unused memory is poisoned; this is important for leak diagnostics.
        if (!RUNNING_ON_VALGRIND)
            VMPI_memset(b->items, uint8_t(GCHeap::FXFreedPoison), m_itemSize * m_itemsPerBlock);
#endif

        // Link the block at the end of the list.
        b->prev = m_lastBlock;
        b->next = 0;
        if (m_lastBlock)
            m_lastBlock->next = b;
        if (!m_firstBlock)
            m_firstBlock = b;
        m_lastBlock = b;

        // Add our new ChunkBlock to the firstFree list (which should
        // be empty, but might not be because we let go of the lock above)
        if (m_firstFree)
        {
            GCAssert(m_firstFree->prevFree == 0);
            m_firstFree->prevFree = b;
        }
        b->nextFree = m_firstFree;
        b->prevFree = 0;
        m_firstFree = b;

        return;
    }
Example #9
void *malloc(size_t s) {
  if(!marker) trace_init();
  if(s>0x7fffffff) return 0;
  size_t *p = real_malloc(s+sizeof(s));
  if(!p) return p;
  *p++ = s;
  void *result = p;
  VALGRIND_MEMPOOL_ALLOC(&marker, result, s);
  return result;
}
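A matching free() for the wrapper above would have to step back over the stored size word before releasing the raw block. Here is a sketch under the same assumptions; real_free is the hypothetical counterpart of real_malloc, and marker is the pool handle from the snippet.

void free(void *ptr) {
  if (!ptr) return;
  size_t *p = (size_t *)ptr - 1;        /* recover the hidden size header */
  VALGRIND_MEMPOOL_FREE(&marker, ptr);  /* user area becomes noaccess */
  real_free(p);                         /* hypothetical, resolved like real_malloc */
}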
Example #10
/*@-internalglobs@*/
rpmioItem rpmioGetPool(rpmioPool pool, size_t size)
{
    rpmioItem item;

    if (pool != NULL) {
	/* if can't create any more, wait for a space to show up */
	yarnPossess(pool->have);
	if (pool->limit == 0)
	    yarnWaitFor(pool->have, NOT_TO_BE, 0);

	/* if a space is available, pull it from the list and return it */
	if (pool->head != NULL) {
	    item = pool->head;
	    pool->head = item->pool;	/* XXX pool == next */
	    if (pool->head == NULL)
		pool->tail = &pool->head;
	    pool->reused++;
	    item->pool = pool;		/* remember the pool this belongs to */
	    yarnTwist(pool->have, BY, -1);      /* one less in pool */
	    VALGRIND_MEMPOOL_ALLOC(pool,
		item + 1,
		size - sizeof(struct rpmioItem_s));
	    return item;
	}

	/* nothing available, don't want to wait, make a new item */
	assert(pool->limit != 0);
	if (pool->limit > 0)
	    pool->limit--;
	pool->made++;
	yarnRelease(pool->have);
    }

    item = xcalloc(1, size);
    item->use = yarnNewLock(0);		/* XXX newref? */
    item->pool = pool;
    VALGRIND_MEMPOOL_ALLOC(pool,
	item + 1,
	size - sizeof(struct rpmioItem_s));
    return item;
}
Example #11
void valgrindMempoolAlloc(MM_GCExtensionsBase *extensions, uintptr_t baseAddress, uintptr_t size)
{
#if defined(VALGRIND_REQUEST_LOGS)
    VALGRIND_PRINTF_BACKTRACE("Allocating an object at 0x%lx of size %lu\n", baseAddress, size);
#endif /* defined(VALGRIND_REQUEST_LOGS) */

    /* Allocate object in Valgrind memory pool. */
    VALGRIND_MEMPOOL_ALLOC(extensions->valgrindMempoolAddr, baseAddress, size);
    MUTEX_ENTER(extensions->memcheckHashTableMutex);
    hashTableAdd(extensions->memcheckHashTable, &baseAddress);
    MUTEX_EXIT(extensions->memcheckHashTableMutex);
}
Example #12
inline char *__rc_rstralloc(region r, size_t size)
{
  void *mem, *dummy;

  record_alloc(size);

  qalloc(r, &r->normal, &dummy, 0, 1, &mem, size, RALIGNMENT, 0);
  // fprintf(stderr, "## __rc_rstralloc: r=%p, mem=%p, size=%d\n", r, mem, size);
  // VALGRIND_DO_QUICK_LEAK_CHECK;
  VALGRIND_MEMPOOL_ALLOC(r, mem, size);

  return mem;
}
Example #13
static void*
pool_alloc (void)
{
	Pool *pool;
	void *pages, *item;
	size_t len, i;

	/* A pool with an available item */
	for (pool = all_pools; pool; pool = pool->next) {
		if (unused_peek (&pool->unused))
			break;
	}

	/* Create a new pool */
	if (pool == NULL) {
		len = getpagesize () * 2;
		pages = mmap (0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
		if (pages == MAP_FAILED)
			return NULL;

		/* Fill in the block header, and include it in the block list */
		pool = pages;
		pool->next = all_pools;
		all_pools = pool;
		pool->length = len;
		pool->used = 0;
		pool->unused = NULL;

		/* Fill block with unused items */
		pool->n_items = (len - sizeof (Pool)) / sizeof (Item);
		for (i = 0; i < pool->n_items; ++i)
			unused_push (&pool->unused, pool->items + i);

#ifdef WITH_VALGRIND
		VALGRIND_CREATE_MEMPOOL(pool, 0, 0);
#endif
	}

	++pool->used;
	ASSERT (unused_peek (&pool->unused));
	item = unused_pop (&pool->unused);

#ifdef WITH_VALGRIND
	VALGRIND_MEMPOOL_ALLOC (pool, item, sizeof (Item));
#endif

	return memset (item, 0, sizeof (Item));
}
Example #14
File: taskwd.c Project: ukaea/epics
static union twdNode *newNode(void)
{
    union twdNode *pn;

    epicsMutexMustLock(fLock);
    pn = (union twdNode *)ellGet(&fList);
    if (pn) {
        VALGRIND_MEMPOOL_FREE(&fList, pn);
    }
    epicsMutexUnlock(fLock);
    if (!pn)
        pn = calloc(1, sizeof(union twdNode));
    if (pn)
        VALGRIND_MEMPOOL_ALLOC(&fList, pn, sizeof(*pn));
    return pn;
}
Example #15
File: slist.c Project: DPMI/libcap_utils
void* slist_put(struct simple_list* slist, void* key) {
    if ( slist->size == slist->capacity ) {
        slist_alloc(slist, /* growth = */ slist->capacity);
    }

    unsigned int index = slist->size;
    slist->key[index] = key;
    slist->size++;
    void* ptr = slist_get(slist, index);

#ifndef NVALGRIND
    VALGRIND_MEMPOOL_ALLOC(slist->value, ptr, slist->element_size);
#endif

    return ptr;
}
Example #16
void *__rc_ralloc_small0(region r, size_t size)
{
  char *mem2;

  mem2 = PALIGN(r->normal.page.allocfrom, RALIGNMENT);
  if (mem2 + size >= r->normal.page.end)
    return __rc_typed_ralloc(r, size, 0);

  record_alloc(size);

  r->normal.page.allocfrom = mem2 + size + REDZONE;
  // VALGRIND_DO_QUICK_LEAK_CHECK;
  VALGRIND_MEMPOOL_ALLOC(r, mem2, size);
  postclear(mem2, size);

  return mem2;
}
Example #17
File: mpool.c Project: bbenton/ucx
void ucs_mpool_cleanup(ucs_mpool_t *mp, int leak_check)
{
    ucs_mpool_chunk_t *chunk, *next_chunk;
    ucs_mpool_elem_t *elem, *next_elem;
    ucs_mpool_data_t *data = mp->data;
    void *obj;

    /* Cleanup all elements in the freelist and set their header to NULL to mark
     * them as released for the leak check.
     */
    next_elem = mp->freelist;
    while (next_elem != NULL) {
        elem = next_elem;
        VALGRIND_MAKE_MEM_DEFINED(elem, sizeof *elem);
        next_elem = elem->next;
        if (data->ops->obj_cleanup != NULL) {
            obj = elem + 1;
            VALGRIND_MEMPOOL_ALLOC(mp, obj, mp->data->elem_size - sizeof(ucs_mpool_elem_t));
            VALGRIND_MAKE_MEM_DEFINED(obj, mp->data->elem_size - sizeof(ucs_mpool_elem_t));
            data->ops->obj_cleanup(mp, obj);
            VALGRIND_MEMPOOL_FREE(mp, obj);
        }
        elem->mpool = NULL;
    }

    /*
     * Go over all elements in the chunks and make sure they were on the freelist.
     * Then, release the chunk.
     */
    next_chunk = data->chunks;
    while (next_chunk != NULL) {
        chunk      = next_chunk;
        next_chunk = chunk->next;

        if (leak_check) {
            ucs_mpool_chunk_leak_check(mp, chunk);
        }
        data->ops->chunk_release(mp, chunk);
    }

    VALGRIND_DESTROY_MEMPOOL(mp);
    ucs_debug("mpool %s destroyed", ucs_mpool_name(mp));

    free(data->name);
    ucs_free(data);
}
Example #18
void valgrindResizeObject(MM_GCExtensionsBase *extensions, uintptr_t baseAddress, uintptr_t oldSize, uintptr_t newSize)
{

#if defined(VALGRIND_REQUEST_LOGS)
    VALGRIND_PRINTF_BACKTRACE("Resizing an object at 0x%lx from size %d to %d\n", baseAddress, (int)oldSize, (int)newSize);
#endif /* defined(VALGRIND_REQUEST_LOGS) */

    /* We could have used VALGRIND_MEMPOOL_CHANGE request to let Valgrind know of moved object
    but it is very slow without an internal hack. (https://bugs.kde.org/show_bug.cgi?id=366817)*/
    // VALGRIND_CHECK_MEM_IS_DEFINED(baseAddress, oldSize);

    /* Valgrind already knows the former size of the object allocated at baseAddress,
       so on a free request it will mark the area from baseAddress to baseAddress+oldSize-1
       as noaccess, which is what we want. */
    VALGRIND_MEMPOOL_FREE(extensions->valgrindMempoolAddr, baseAddress);

    /* And we don't need to remove and re-add the same address in extensions->_allocatedObjects */
    VALGRIND_MEMPOOL_ALLOC(extensions->valgrindMempoolAddr, baseAddress, newSize);
}
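For comparison, the single-request form that the comment above rejects on performance grounds would look like this. VALGRIND_MEMPOOL_CHANGE is a stock memcheck.h client request; this is a sketch of the alternative, not what the project actually does.

VALGRIND_MEMPOOL_CHANGE(extensions->valgrindMempoolAddr,
                        baseAddress,   /* old address */
                        baseAddress,   /* new address: the object did not move */
                        newSize);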
Example #19
/*
 * MemoryContextAlloc
 *		Allocate space within the specified context.
 *
 * This could be turned into a macro, but we'd have to import
 * nodes/memnodes.h into postgres.h which seems a bad idea.
 */
void *
MemoryContextAlloc(MemoryContext context, Size size)
{
	void	   *ret;

	AssertArg(MemoryContextIsValid(context));
	AssertNotInCriticalSection(context);

	if (!AllocSizeIsValid(size))
		elog(ERROR, "invalid memory alloc request size %zu", size);

	context->isReset = false;

	ret = (*context->methods->alloc) (context, size);
	VALGRIND_MEMPOOL_ALLOC(context, ret, size);

	return ret;
}
Example #20
/*
 * MemoryContextAllocHuge
 *		Allocate (possibly-expansive) space within the specified context.
 *
 * See considerations in comment at MaxAllocHugeSize.
 */
void *
MemoryContextAllocHuge(MemoryContext context, Size size)
{
	void	   *ret;

	AssertArg(MemoryContextIsValid(context));

	if (!AllocHugeSizeIsValid(size))
		elog(ERROR, "invalid memory alloc request size %lu",
			 (unsigned long) size);

	context->isReset = false;

	ret = (*context->methods->alloc) (context, size);
	VALGRIND_MEMPOOL_ALLOC(context, ret, size);

	return ret;
}
Example #21
		inline void *raw_alloc(size_t object_size, ValueType type, bool pinned)
		{
			size_t data_size   = sizeof(Data) + object_size;
			if (!pinned && data_size < m_free)
			{
				Data *data = (Data*)m_next_pos;
				VALGRIND_MEMPOOL_ALLOC(m_data, m_next_pos, data_size);
				m_next_pos += data_size;
				m_free -= data_size;
				m_data_blocks++;
				data->init(object_size, type, m_generation);
				TRACE_PRINTF(TRACE_ALLOC, TRACE_SPAM,
					"Nursery%u Alloc %u type %x ... got %p ... alloc return %p\n",
					m_generation, (unsigned)object_size, type, data, data->m_data);
				return data->m_data;
			} else {
				return m_next->raw_alloc(object_size, type, pinned);
			}
		}
Example #22
		void write_ptr(void *obj, tField &field, const tVal &val)
		{
			assert(sizeof(val) == sizeof(uintptr_t));
			if ((!obj || generation_of(obj) > m_generation) && is_generation(val, m_generation))
			{
				// TODO: What to do when this happens? Maybe it should be a deque anyways.
				if (m_free < sizeof(Remembered))
					throw std::runtime_error("No free space for remembered set! PANIC!");

				TRACE_PRINTF(TRACE_GC, TRACE_DEBUG, "Write barrier (%u), adding: %p(%s) <- %p(%s)\n", m_generation, &field, obj && is_generation(&obj, m_generation)? "yes":"no", *(void**)&val, is_generation(val, m_generation)? "yes":"no");

				m_free -= sizeof(Remembered);
				Remembered *r = --m_remembered_set;
				VALGRIND_MEMPOOL_ALLOC(m_data, r, sizeof(Remembered));
				r->in_object = obj;
				r->location = (uintptr_t*)&field;
				r->val = *(uintptr_t*)&val;
			}
		}
Example #23
void *
palloc(Size size)
{
	/* duplicates MemoryContextAlloc to avoid increased overhead */
	void	   *ret;

	AssertArg(MemoryContextIsValid(CurrentMemoryContext));

	if (!AllocSizeIsValid(size))
		elog(ERROR, "invalid memory alloc request size %lu",
			 (unsigned long) size);

	CurrentMemoryContext->isReset = false;

	ret = (*CurrentMemoryContext->methods->alloc) (CurrentMemoryContext, size);
	VALGRIND_MEMPOOL_ALLOC(CurrentMemoryContext, ret, size);

	return ret;
}
Example #24
void *
palloc0(Size size)
{
	/* duplicates MemoryContextAllocZero to avoid increased overhead */
	void	   *ret;

	AssertArg(MemoryContextIsValid(CurrentMemoryContext));
	AssertNotInCriticalSection(CurrentMemoryContext);

	if (!AllocSizeIsValid(size))
		elog(ERROR, "invalid memory alloc request size %zu", size);

	CurrentMemoryContext->isReset = false;

	ret = (*CurrentMemoryContext->methods->alloc) (CurrentMemoryContext, size);
	VALGRIND_MEMPOOL_ALLOC(CurrentMemoryContext, ret, size);

	MemSetAligned(ret, 0, size);

	return ret;
}
Example #25
void* ContextMemoryManager::newData(size_t size) {
  // Use next available free location in current chunk
  void* res = (void*)d_nextFree;
  d_nextFree += size;
  // Check if the request is too big for the chunk
  if(d_nextFree > d_endChunk) {
    newChunk();
    res = (void*)d_nextFree;
    d_nextFree += size;
    AlwaysAssert(d_nextFree <= d_endChunk,
                 "Request is bigger than memory chunk size");
  }
  Debug("context") << "ContextMemoryManager::newData(" << size
                   << ") returning " << res << " at level "
                   << d_chunkList.size() << std::endl;

#ifdef CVC4_VALGRIND
  VALGRIND_MEMPOOL_ALLOC(this, static_cast<char*>(res), size);
  d_allocations.back().push_back(static_cast<char*>(res));
#endif /* CVC4_VALGRIND */

  return res;
}
Example #26
inline static
char *internal_rstrextend(region r, const char *old, size_t newsize,
			  int needsclear)
{
  /* For now we don't attempt to extend the old storage area */
  void *newmem, *hdr;
  unsigned long *oldhdr, oldsize;

  record_alloc(newsize);

  qalloc(r, &r->normal, &hdr, sizeof(unsigned long), ALIGNMENT_LONG,
	 &newmem, newsize, RALIGNMENT, 0);

  VALGRIND_MEMPOOL_ALLOC(r, newmem, newsize);

  /* If we don't do this we can't find the header: */
  hdr = (char *)newmem - sizeof(unsigned long);

  *(unsigned long *)hdr = newsize;

  if (old)
    {
      oldhdr = (unsigned long *)(old - ALIGNMENT_LONG);
      oldsize = *oldhdr;

      if (oldsize > newsize)
	oldsize = newsize;
      else if (needsclear)
	clear((char *)newmem + oldsize, newsize - oldsize);
      memcpy(newmem, old, oldsize);
    }
  else if (needsclear)
    clear(newmem, newsize);

  return newmem;
}
Example #27
void *psmi_sysbuf_alloc(uint32_t alloc_size)
{
	struct psmi_mem_ctrl *mm_handler = psmi_sysbuf.handler_index;
	struct psmi_mem_block_ctrl *new_block;
	int replenishing;

	while (mm_handler->block_size < alloc_size)
		mm_handler++;

	replenishing = mm_handler->replenishing_rate;

	if (mm_handler->current_available == 0) { /* allocate more buffers */
		if (mm_handler->flags & MM_FLAG_TRANSIENT) {
			uint32_t newsz = alloc_size +
				sizeof(struct psmi_mem_block_ctrl) +
				PSM_VALGRIND_REDZONE_SZ;
			new_block = psmi_malloc(PSMI_EP_NONE,
					UNEXPECTED_BUFFERS, newsz);

			if (new_block) {
				new_block->mem_handler = mm_handler;
				new_block++;
				mm_handler->total_alloc++;
				psmi_sysbuf.mem_ctrl_total_bytes += newsz;
				VALGRIND_MEMPOOL_ALLOC(&psmi_sysbuf, new_block,
						       alloc_size);
			}
			return new_block;
		}

		do {
			uint32_t newsz =
			    mm_handler->block_size +
			    sizeof(struct psmi_mem_block_ctrl) +
			    PSM_VALGRIND_REDZONE_SZ;

			new_block = psmi_malloc(PSMI_EP_NONE,
					UNEXPECTED_BUFFERS, newsz);
			psmi_sysbuf.mem_ctrl_total_bytes += newsz;

			if (new_block) {
				mm_handler->current_available++;
				mm_handler->total_alloc++;

				new_block->next = mm_handler->free_list;
				mm_handler->free_list = new_block;
			}

		} while (--replenishing && new_block);
	}

	if (mm_handler->current_available) {
		mm_handler->current_available--;

		new_block = mm_handler->free_list;
		mm_handler->free_list = new_block->next;

		new_block->mem_handler = mm_handler;
		new_block++;

		VALGRIND_MEMPOOL_ALLOC(&psmi_sysbuf, new_block,
				mm_handler->block_size);
		return new_block;
	}

	return NULL;
}
Example #28
File: arena.cpp Project: KDE/khtml
/*
 ** ArenaAllocate() -- allocate space from an arena pool
 **
 ** Description: ArenaAllocate() allocates space from an arena
 ** pool.
 **
 ** First try to satisfy the request from arenas starting at
 ** pool->current.
 **
 ** If there is not enough space in the arena pool->current, try
 ** to claim an arena, on a first fit basis, from the global
 ** freelist (arena_freelist).
 **
 ** If no arena in arena_freelist is suitable, then try to
 ** allocate a new arena from the heap.
 **
 ** Returns: pointer to allocated space or NULL
 **
 */
void *ArenaAllocate(ArenaPool *pool, unsigned int nb)
{
    Arena *a;
    char *rp;     /* returned pointer */

#ifdef DEBUG_ARENA_MALLOC
    assert((nb & pool->mask) == 0);
#endif

    nb = (uword)ARENA_ALIGN(pool, nb); /* force alignment */

    /* attempt to allocate from arenas at pool->current */
    {
        a = pool->current;
        do {
            if (a->avail + nb <= a->limit)  {
                pool->current = a;
                rp = (char *)a->avail;
                a->avail += nb;
                VALGRIND_MEMPOOL_ALLOC(a->base, rp, nb);
                return rp;
            }
        } while (NULL != (a = a->next));
    }

    /* attempt to allocate from arena_freelist */
    {
        Arena *p; /* previous pointer, for unlinking from freelist */

        for (a = p = arena_freelist; a != NULL; p = a, a = a->next) {
            if (a->base + nb <= a->limit)  {
                if (p == arena_freelist) {
                    arena_freelist = a->next;
                } else {
                    p->next = a->next;
                }
                a->avail = a->base;
                rp = (char *)a->avail;
                a->avail += nb;
                VALGRIND_MEMPOOL_ALLOC(a->base, rp, nb);
                /* the newly allocated arena is linked after pool->current
                 *  and becomes pool->current */
                a->next = pool->current->next;
                pool->current->next = a;
                pool->current = a;
                if (0 == pool->first.next) {
                    pool->first.next = a;
                }
                freelist_count--;
                return (rp);
            }
        }
    }

    /* attempt to allocate from the heap */
    {
        unsigned int sz;
#if HAVE_MMAP
        if (pool->cumul > pool->largealloc) {
            // High memory pressure. Switch to a fractional allocation strategy
            // so that malloc gets a chance to successfully trim us down when it's over.
            sz = qMin(pool->cumul / 12, MAX_DISCRETE_ALLOCATION(pool));
#ifdef DEBUG_ARENA_MALLOC
            printf("allocating %d bytes (fractional strategy)\n", sz);
#endif
        } else
#endif
            sz = pool->arenasize > nb ? pool->arenasize : nb;
        sz += sizeof * a + pool->mask; /* header and alignment slop */
        pool->cumul += sz;
#ifdef DEBUG_ARENA_MALLOC
        i++;
        printf("Malloc: %d\n", i);
#endif
        a = (Arena *)malloc(sz);
        if (a)  {
            a->limit = (uword)a + sz;
            a->base = a->avail = (uword)ARENA_ALIGN(pool, a + 1);
            VALGRIND_CREATE_MEMPOOL(a->base, 0, 0);
            rp = (char *)a->avail;
            a->avail += nb;
            VALGRIND_MEMPOOL_ALLOC(a->base, rp, nb);

            /* the newly allocated arena is linked after pool->current
            *  and becomes pool->current */
            a->next = pool->current->next;
            pool->current->next = a;
            pool->current = a;
            if (!pool->first.next) {
                pool->first.next = a;
            }
            return (rp);
        }
    }

    /* we got to here, and there's no memory to allocate */
    return (0);
} /* --- end ArenaAllocate() --- */
Example #29
File: GCAlloc.cpp Project: AdiKo/avmplus
    GCAlloc::GCBlock* GCAlloc::CreateChunk(int flags)
    {
        // Too many definitions of kBlockSize, make sure they're at least in sync.

        GCAssert(uint32_t(kBlockSize) == GCHeap::kBlockSize);

        // Get bitmap space; this may trigger OOM handling.

        gcbits_t* bits = m_bitsInPage ? NULL : (gcbits_t*)m_gc->AllocBits(m_numBitmapBytes, m_sizeClassIndex);

        // Allocate a new block; this may trigger OOM handling (though that
        // won't affect the bitmap space, which is not GC'd individually).

        GCBlock* b = (GCBlock*) m_gc->AllocBlock(1, PageMap::kGCAllocPage, /*zero*/true,  (flags&GC::kCanFail) != 0);

        if (b)
        {
            VALGRIND_CREATE_MEMPOOL(b, 0/*redZoneSize*/, 1/*zeroed*/);

            // treat block header as a separate allocation
            VALGRIND_MEMPOOL_ALLOC(b, b, sizeof(GCBlock));


            b->gc = m_gc;
            b->alloc = this;
            b->size = m_itemSize;
            b->slowFlags = 0;
            if(m_gc->collecting && m_finalized)
                b->finalizeState = m_gc->finalizedValue;
            else
                b->finalizeState = !m_gc->finalizedValue;

            b->bibopTag = m_bibopTag;

#ifdef MMGC_FASTBITS
            b->bitsShift = (uint8_t) m_bitsShift;
#endif
            b->containsPointers = ContainsPointers();
            b->rcobject = ContainsRCObjects();

            if (m_bitsInPage)
                b->bits = (gcbits_t*)b + sizeof(GCBlock);
            else
                b->bits = bits;

            // ditto for in page bits
            if (m_bitsInPage) {
                VALGRIND_MEMPOOL_ALLOC(b, b->bits, m_numBitmapBytes);
            }

            // Link the block at the end of the list
            b->prev = m_lastBlock;
            b->next = 0;

            if (m_lastBlock) {
                m_lastBlock->next = b;
            }
            if (!m_firstBlock) {
                m_firstBlock = b;
            }
            m_lastBlock = b;

            // Add our new ChunkBlock to the firstFree list (which should be empty)
            if (m_firstFree)
            {
                GCAssert(m_firstFree->prevFree == 0);
                m_firstFree->prevFree = b;
            }
            b->nextFree = m_firstFree;
            b->prevFree = 0;
            m_firstFree = b;

            // calculate back from end (better alignment, no dead space at end)
            b->items = (char*)b+GCHeap::kBlockSize - m_itemsPerBlock * m_itemSize;
            b->numFree = (short)m_itemsPerBlock;

            // explode the new block onto its free list
            //
            // We must make the object look free, which means poisoning it properly and setting
            // the mark bits correctly.

            b->firstFree = b->items;
            void** p = (void**)(void*)b->items;
            int limit = m_itemsPerBlock-1;
#ifdef MMGC_HOOKS
            GCHeap* heap = GCHeap::GetGCHeap();
#endif
            for ( int i=0 ; i < limit ; i++ ) {
#ifdef MMGC_HOOKS
#ifdef MMGC_MEMORY_INFO // DebugSize is 0 if MEMORY_INFO is off, so we get an "obviously true" warning from GCC.
                GCAssert(m_itemSize >= DebugSize());
#endif
                if(heap->HooksEnabled())
                    heap->PseudoFreeHook(GetUserPointer(p), m_itemSize - DebugSize(), uint8_t(GCHeap::GCSweptPoison));
#endif
                p = FLSeed(p, (char*)p + m_itemSize);
            }
#ifdef MMGC_HOOKS
            if(heap->HooksEnabled())
                heap->PseudoFreeHook(GetUserPointer(p), m_itemSize - DebugSize(), uint8_t(GCHeap::GCSweptPoison));
#endif
            p[0] = NULL;

            // Set all the mark bits to 'free'
            
            GCAssert(sizeof(gcbits_t) == 1);
            GCAssert(kFreelist == 3);
            GCAssert(m_numBitmapBytes % 4 == 0);
            
            uint32_t *pbits = (uint32_t*)(void *)b->bits;
            for(int i=0, n=m_numBitmapBytes>>2; i < n; i++)
                pbits[i] = 0x03030303;

#ifdef MMGC_MEMORY_INFO
            VerifyFreeBlockIntegrity(b->firstFree, m_itemSize);
#endif
        }
        else {
            // Allocation failed; give back the bitmap space, if any was obtained.
            if (bits)
                m_gc->FreeBits(bits, m_sizeClassIndex);
        }

        return b;
    }
Example #30
File: GCLargeAlloc.cpp Project: bsdf/trx
#ifdef MMGC_MEMORY_INFO
    // MEMORY_INFO builds also receive the original ask size (see m_totalAskSize below).
    void* GCLargeAlloc::Alloc(size_t originalSize, size_t requestSize, int flags)
#else
    void* GCLargeAlloc::Alloc(size_t requestSize, int flags)
#endif
    {
#ifdef DEBUG
        m_gc->heap->CheckForOOMAbortAllocation();
#endif
        GCHeap::CheckForAllocSizeOverflow(requestSize, sizeof(LargeBlock)+GCHeap::kBlockSize);

        int blocks = (int)((requestSize+sizeof(LargeBlock)+GCHeap::kBlockSize-1) / GCHeap::kBlockSize);
        uint32_t computedSize = blocks*GCHeap::kBlockSize - sizeof(LargeBlock);

        // Allocation must be signalled before we allocate because no GC work must be allowed to
        // come between an allocation and an initialization - if it does, we may crash, as
        // GCFinalizedObject subclasses may not have a valid vtable, but the GC depends on them
        // having it.  In principle we could signal allocation late but only set the object
        // flags after signaling, but we might still cause trouble for the profiler, which also
        // depends on non-interruptibility.

        m_gc->SignalAllocWork(computedSize);

        // Pointer containing memory is always zeroed (see bug 594533).
        if((flags&GC::kContainsPointers) != 0)
            flags |= GC::kZero;

        LargeBlock *block = (LargeBlock*) m_gc->AllocBlock(blocks, PageMap::kGCLargeAllocPageFirst,
                                                           (flags&GC::kZero) != 0, (flags&GC::kCanFail) != 0);
        void *item = NULL;

        if (block)
        {
            // Code below uses these optimizations
            GCAssert((unsigned long)GC::kFinalize == (unsigned long)kFinalizable);
            GCAssert((unsigned long)GC::kInternalExact == (unsigned long)kVirtualGCTrace);
            
            gcbits_t flagbits0 = 0;
            gcbits_t flagbits1 = 0;

#if defined VMCFG_EXACT_TRACING
            flagbits0 = (flags & (GC::kFinalize|GC::kInternalExact));
#elif defined VMCFG_SELECTABLE_EXACT_TRACING
            flagbits0 = (flags & (GC::kFinalize|m_gc->runtimeSelectableExactnessFlag));  // 0 or GC::kInternalExact
#else
            flagbits0 = (flags & GC::kFinalize);
#endif

            VALGRIND_CREATE_MEMPOOL(block, /*rdzone*/0, (flags&GC::kZero) != 0);
            VALGRIND_MEMPOOL_ALLOC(block, block, sizeof(LargeBlock));

            block->gc = this->m_gc;
            block->alloc= this;
            block->next = m_blocks;
            block->size = computedSize;
            block->bibopTag = 0;
#ifdef MMGC_FASTBITS
            block->bitsShift = 12;     // Always use bits[0]
#endif
            block->containsPointers = ((flags&GC::kContainsPointers) != 0) ? 1 : 0;
            block->rcobject = ((flags&GC::kRCObject) != 0) ? 1 : 0;
            block->bits = block->flags;
            m_blocks = block;

            item = block->GetObject();

            if(m_gc->collecting && !m_startedFinalize)
                flagbits0 |= kMark;

            block->flags[0] = flagbits0;
            block->flags[1] = flagbits1;
#ifdef _DEBUG
            (void)originalSize;
            if (flags & GC::kZero)
            {
                if (!RUNNING_ON_VALGRIND)
                {
                    // AllocBlock should take care of this
                    for(int i=0, n=(int)(requestSize/sizeof(int)); i<n; i++) {
                        if(((int*)item)[i] != 0)
                            GCAssert(false);
                    }
                }
            }
#endif

            // see comments in GCAlloc about using full size instead of ask size
            VALGRIND_MEMPOOL_ALLOC(block, item, computedSize);

#ifdef MMGC_HOOKS
            GCHeap* heap = GCHeap::GetGCHeap();
            if(heap->HooksEnabled()) {
                size_t userSize = block->size - DebugSize();
#ifdef MMGC_MEMORY_PROFILER
                m_totalAskSize += originalSize;
                heap->AllocHook(GetUserPointer(item), originalSize, userSize, /*managed=*/true);
#else
                heap->AllocHook(GetUserPointer(item), 0, userSize, /*managed=*/true);
#endif
            }
#endif
        }
        return item;
    }