/**
 * Clear for reuse, avoids re-allocation when an arena may
 * otherwise be free'd and recreated.
 */
void BLI_memarena_clear(MemArena *ma)
{
	if (ma->bufs) {
		unsigned char *curbuf_prev;
		size_t curbuf_used;

		if (ma->bufs->next) {
			BLI_linklist_freeN(ma->bufs->next);
			ma->bufs->next = NULL;
		}

		curbuf_prev = ma->curbuf;
		ma->curbuf = ma->bufs->link;
		memarena_curbuf_align(ma);

		/* restore to original size */
		curbuf_used = (size_t)(curbuf_prev - ma->curbuf);
		ma->cursize += curbuf_used;

		if (ma->use_calloc) {
			memset(ma->curbuf, 0, curbuf_used);
		}
	}

#ifdef WITH_MEM_VALGRIND
	VALGRIND_DESTROY_MEMPOOL(ma);
	VALGRIND_CREATE_MEMPOOL(ma, 0, false);
#endif

}
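
This clear-for-reuse pattern (see also Examples 11 and 18 below) relies on a Memcheck idiom: destroying the pool and immediately re-creating it makes Valgrind forget every chunk in one call, without freeing the backing memory. A minimal sketch of that reset idiom, assuming a hypothetical arena_t whose single buffer is recycled (not code from any project above):

#include <valgrind/memcheck.h>

typedef struct { unsigned char *buf; size_t used, size; } arena_t;

static void arena_reset(arena_t *a)
{
    a->used = 0;
    /* Drop every chunk Memcheck tracked for this pool, then start a
     * fresh, empty pool at the same anchor address. */
    VALGRIND_DESTROY_MEMPOOL(a);
    VALGRIND_CREATE_MEMPOOL(a, 0, 0);
    /* The recycled buffer is not addressable again until re-allocated. */
    VALGRIND_MAKE_MEM_NOACCESS(a->buf, a->size);
}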
Example 2
rpmioPool rpmioFreePool(rpmioPool pool)
	/*@globals _rpmioPool @*/
	/*@modifies _rpmioPool @*/
{
    if (pool == NULL) {
	pool = _rpmioPool;
	_rpmioPool = NULL;
    }
    if (pool != NULL) {
	rpmioItem item;
	int count = 0;
	yarnPossess(pool->have);
	while ((item = pool->head) != NULL) {
	    pool->head = item->pool;	/* XXX pool == next */
	    if (item->use != NULL)
		item->use = yarnFreeLock(item->use);
	    item = _free(item);
	    count++;
	}
	yarnRelease(pool->have);
	pool->have = yarnFreeLock(pool->have);
	rpmlog(RPMLOG_DEBUG, D_("pool %s:\treused %d, alloc'd %d, free'd %d items.\n"), pool->name, pool->reused, pool->made, count);
#ifdef	NOTYET
	assert(pool->made == count);
#else
	if (pool->made != count)
	    rpmlog(RPMLOG_WARNING, D_("pool %s: FIXME: made %d, count %d\nNote: This is a harmless memory leak discovered while exiting, relax ...\n"), pool->name, pool->made, count);
#endif
	(void) _free(pool);
	VALGRIND_DESTROY_MEMPOOL(pool);
    }
    return NULL;
}
Example 3
/*
 * MemoryContextDelete
 *		Delete a context and its descendants, and release all space
 *		allocated therein.
 *
 * The type-specific delete routine removes all subsidiary storage
 * for the context, but we have to delete the context node itself,
 * as well as recurse to get the children.  We must also delink the
 * node from its parent, if it has one.
 */
void
MemoryContextDelete(MemoryContext context)
{
	AssertArg(MemoryContextIsValid(context));
	/* We had better not be deleting TopMemoryContext ... */
	Assert(context != TopMemoryContext);
	/* And not CurrentMemoryContext, either */
	Assert(context != CurrentMemoryContext);

	MemoryContextDeleteChildren(context);

	/*
	 * It's not entirely clear whether 'tis better to do this before or after
	 * delinking the context; but an error in a callback will likely result in
	 * leaking the whole context (if it's not a root context) if we do it
	 * after, so let's do it before.
	 */
	MemoryContextCallResetCallbacks(context);

	/*
	 * We delink the context from its parent before deleting it, so that if
	 * there's an error we won't have deleted/busted contexts still attached
	 * to the context tree.  Better a leak than a crash.
	 */
	MemoryContextSetParent(context, NULL);

	(*context->methods->delete_context) (context);
	VALGRIND_DESTROY_MEMPOOL(context);
	pfree(context);
}
Example 4
int main( int argc, char** argv )
{
  struct cell *cells_local[N];
  int arg;
  size_t i;
  struct pool *p      = allocate_pool();
  struct cell **cells = static_roots ? cells_static : cells_local;

  assert(argc == 2);
  assert(argv[1]);
  assert(strlen(argv[1]) == 1);
  assert(argv[1][0] >= '0' && argv[1][0] <= '5');
  arg = atoi( argv[1] );
  set_flags( arg );

  memset(cells_static, 0, sizeof(cells_static));
  memset(cells_local,  0, sizeof(cells_local));

  for (i = 0; i < N; ++i) {
    cells[i] = allocate_from_pool(p, sizeof(struct cell));  
  }

  if (trim_pool)
    VALGRIND_MEMPOOL_TRIM(p,
                          p->buf + (10 * sizeof(struct cell)),
                          20 * sizeof(struct cell) + 2);

  if (destroy_pool)
    VALGRIND_DESTROY_MEMPOOL(p);

  return 0;
}
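
The allocate_pool and allocate_from_pool helpers above are elided from the test. Under Memcheck they presumably pair each allocation with a client request; a sketch of what such helpers could look like, with hypothetical names and an assumed pool layout (only the buf field is actually referenced by the test):

#include <stdlib.h>
#include <valgrind/memcheck.h>

struct pool { char *buf; size_t used, size; };

static struct pool *pool_new(size_t size)
{
    struct pool *p = malloc(sizeof *p);
    p->buf  = malloc(size);
    p->used = 0;
    p->size = size;
    VALGRIND_CREATE_MEMPOOL(p, /*rzB=*/0, /*is_zeroed=*/0);
    VALGRIND_MAKE_MEM_NOACCESS(p->buf, size);  /* hide the raw backing store */
    return p;
}

static void *pool_alloc(struct pool *p, size_t n)
{
    void *ptr = p->buf + p->used;
    p->used += n;
    VALGRIND_MEMPOOL_ALLOC(p, ptr, n);  /* register ptr..ptr+n as one chunk */
    return ptr;
}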
Example 5
    void GCLargeAlloc::Free(const void *item)
    {
        LargeBlock *b = GetLargeBlock(item);

#ifdef GCDEBUG
        // RCObject have contract that they must clean themselves, since they
        // have to scan themselves to decrement other RCObjects they might as well
        // clean themselves too, better than suffering a memset later
        if(b->rcobject)
            m_gc->RCObjectZeroCheck((RCObject*)GetUserPointer(item));
#endif


        // We can't allow free'ing something during Sweeping, otherwise alloc counters
        // get decremented twice and destructors will be called twice.
        GCAssert(m_gc->collecting == false || m_gc->marking == true);
        if (m_gc->marking && (m_gc->collecting || IsProtectedAgainstFree(b))) {
            m_gc->AbortFree(GetUserPointer(item));
            return;
        }

        m_gc->policy.signalFreeWork(b->size);

#ifdef MMGC_HOOKS
        GCHeap* heap = GCHeap::GetGCHeap();
        if(heap->HooksEnabled())
        {
            const void* p = GetUserPointer(item);
            size_t userSize = GC::Size(p);
#ifdef MMGC_MEMORY_PROFILER
            if(heap->GetProfiler())
                m_totalAskSize -= heap->GetProfiler()->GetAskSize(p);
#endif
            heap->FinalizeHook(p, userSize);
            heap->FreeHook(p, userSize, uint8_t(GCHeap::GCFreedPoison));
        }
#endif

        if(b->flags[0] & kHasWeakRef)
            m_gc->ClearWeakRef(GetUserPointer(item));

        LargeBlock **prev = &m_blocks;
        while(*prev)
        {
            if(b == *prev)
            {
                *prev = Next(b);
                size_t numBlocks = b->GetNumBlocks();
                m_totalAllocatedBytes -= b->size;
                VALGRIND_MEMPOOL_FREE(b, b);
                VALGRIND_MEMPOOL_FREE(b, item);
                VALGRIND_DESTROY_MEMPOOL(b);
                m_gc->FreeBlock(b, (uint32_t)numBlocks, m_partitionIndex);
                return;
            }
            prev = (LargeBlock**)(&(*prev)->next);
        }
        GCAssertMsg(false, "Bad free!");
    }
Example 6
void BLI_memarena_free(MemArena *ma)
{
	BLI_linklist_freeN(ma->bufs);

#ifdef WITH_MEM_VALGRIND
	VALGRIND_DESTROY_MEMPOOL(ma);
#endif

	MEM_freeN(ma);
}
Example 7
void slist_free(struct simple_list* slist) {
#ifndef NVALGRIND
    VALGRIND_DESTROY_MEMPOOL(slist->value);
#endif

    slist_clear(slist);
    free(slist->key);
    free(slist->value);
    slist->capacity = 0;
}
Example 8
void pop(pool *p)
{
   level_list *l = p->levels;
   p->levels = l->next;
   VALGRIND_DESTROY_MEMPOOL(l->where);
   VALGRIND_MAKE_MEM_NOACCESS(l->where, p->where-l->where);
   p->where = l->where;
   if(USE_MMAP)
      munmap(l, sizeof(level_list));
   else
      free(l);
}
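
The matching push is not shown; for the pop above to balance, it must record the current high-water mark as a new level and anchor a mempool at that address. A hedged sketch under those assumptions (types reconstructed from the fields pop dereferences; node allocation simplified to malloc, where the real code evidently also supports mmap via USE_MMAP):

#include <stdlib.h>
#include <valgrind/memcheck.h>

typedef struct level_list { struct level_list *next; char *where; } level_list;
typedef struct pool { level_list *levels; char *where; } pool;  /* assumed layout */

void push(pool *p)
{
    level_list *l = malloc(sizeof(level_list));
    l->next  = p->levels;
    l->where = p->where;
    p->levels = l;
    /* Anchor a fresh Memcheck pool at the saved mark, so pop() can discard
     * the whole level with a single VALGRIND_DESTROY_MEMPOOL. */
    VALGRIND_CREATE_MEMPOOL(l->where, 0, 0);
}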
Example 9
File: taskwd.c Project: ukaea/epics
static void twdShutdown(void *arg)
{
    ELLNODE *cur;
    twdCtl = twdctlExit;
    epicsEventSignal(loopEvent);
    epicsEventWait(exitEvent);
    while ((cur = ellGet(&fList)) != NULL) {
        VALGRIND_MEMPOOL_FREE(&fList, cur);
        free(cur);
    }
    VALGRIND_DESTROY_MEMPOOL(&fList);
}
Example 10
void valgrindDestroyMempool(MM_GCExtensionsBase *extensions)
{
    if (extensions->valgrindMempoolAddr != 0)
    {
        //All objects should have been freed by now!
        VALGRIND_DESTROY_MEMPOOL(extensions->valgrindMempoolAddr);
        MUTEX_ENTER(extensions->memcheckHashTableMutex);
        extensions->valgrindMempoolAddr = 0;
        hashTableFree(extensions->memcheckHashTable);
        extensions->memcheckHashTable = NULL;
        MUTEX_EXIT(extensions->memcheckHashTableMutex);
        MUTEX_DESTROY(extensions->memcheckHashTableMutex);
    }
}
Example 11
/*
 * MemoryContextResetOnly
 *		Release all space allocated within a context.
 *		Nothing is done to the context's descendant contexts.
 */
void
MemoryContextResetOnly(MemoryContext context)
{
	AssertArg(MemoryContextIsValid(context));

	/* Nothing to do if no pallocs since startup or last reset */
	if (!context->isReset)
	{
		MemoryContextCallResetCallbacks(context);
		(*context->methods->reset) (context);
		context->isReset = true;
		VALGRIND_DESTROY_MEMPOOL(context);
		VALGRIND_CREATE_MEMPOOL(context, 0, false);
	}
}
Example 12
ContextMemoryManager::~ContextMemoryManager() {
#ifdef CVC4_VALGRIND
  VALGRIND_DESTROY_MEMPOOL(this);
#endif /* CVC4_VALGRIND */

  // Delete all chunks
  while(!d_chunkList.empty()) {
    free(d_chunkList.back());
    d_chunkList.pop_back();
  }
  while(!d_freeChunks.empty()) {
    free(d_freeChunks.back());
    d_freeChunks.pop_back();
  }
}
Example 13
    void FixedAlloc::FreeChunk(FixedBlock* b)
    {
        if ( ((b->prevFree && (b->prevFree->nextFree!=b))) ||
            ((b->nextFree && (b->nextFree->prevFree!=b))) )
            VMPI_abort();

        m_numBlocks--;

        // Unlink the block from the list
        if (b == m_firstBlock)
            m_firstBlock = b->next;
        else
            b->prev->next = b->next;

        if (b == m_lastBlock)
            m_lastBlock = b->prev;
        else
            b->next->prev = b->prev;

        // If this is the first free block, pick a new one...
        if ( m_firstFree == b )
            m_firstFree = b->nextFree;
        else if (b->prevFree)
            b->prevFree->nextFree = b->nextFree;

        if (b->nextFree)
            b->nextFree->prevFree = b->prevFree;

        // Any lock can't be held across the call to FreeNoProfile, so if there
        // is a lock obtain it, release it, and then reacquire it.  This works
        // because Destroy caches no state across the call to FreeChunk.

        vmpi_spin_lock_t *lock = NULL;

        if(m_isFixedAllocSafe) {
            lock = &((FixedAllocSafe*)this)->m_spinlock;
            VMPI_lockRelease(lock);
        }

        // Free the memory
        m_heap->FreeNoProfile(b);

        if(lock != NULL)
            VMPI_lockAcquire(lock);

        VALGRIND_MEMPOOL_FREE(b, b);
        VALGRIND_DESTROY_MEMPOOL(b);
    }
Example 14
static void delregion(region r)
{
  nochildren(r);
  VALGRIND_DESTROY_MEMPOOL(r);
  VALGRIND_MAKE_WRITABLE(r, sizeof(struct page));
  free_all_pages(r, &r->normal);
  // VALGRIND_DO_QUICK_LEAK_CHECK;
  // VALGRIND_FREELIKE_BLOCK(r, 0);
  // fprintf(stderr, "##delregion: r->normal.page.end = %p\n", r->normal.page.end);
  // fprintf(stderr, "##           r+1 = %p\n", r+1);
  if (r->normal.page.end) {
    // VALGRIND_MAKE_NOACCESS((char*)(r+1), r->normal.page.end - (char*)(r+1));
  }
  // VALGRIND_MAKE_NOACCESS(r, sizeof(*r));
  --num_regions_active;
}
Example 15
static void
eina_one_big_shutdown(void *data)
{
   One_Big *pool = data;

   if (!pool) return;
   if (!eina_lock_take(&pool->mutex))
     {
#ifdef EINA_HAVE_DEBUG_THREADS
        assert(eina_thread_equal(pool->self, eina_thread_self()));
#endif
     }

   if (pool->over > 0)
     {
// FIXME: should we warn here? one_big mempool exceeded its alloc and now
// mempool is cleaning up the mess created. be quiet for now as we were before
// but edje seems to be a big offender at the moment! bad cedric! :)
//        WRN(
//            "Pool [%s] over by %i. cleaning up for you", 
//            pool->name, pool->over);
        while (pool->over_list)
          {
             Eina_Inlist *il = pool->over_list;
             void *ptr = OVER_MEM_FROM_LIST(pool, il);
             pool->over_list = eina_inlist_remove(pool->over_list, il);
             free(ptr);
             pool->over--;
          }
     }
   if (pool->over > 0)
     {
        WRN(
            "Pool [%s] still over by %i\n", 
            pool->name, pool->over);
     }

#ifndef NVALGRIND
   VALGRIND_DESTROY_MEMPOOL(pool);
#endif

   if (pool->base) free(pool->base);

   eina_lock_release(&pool->mutex);
   eina_lock_free(&pool->mutex);
   free(pool);
}
Example 16
File: mpool.c Project: bbenton/ucx
void ucs_mpool_cleanup(ucs_mpool_t *mp, int leak_check)
{
    ucs_mpool_chunk_t *chunk, *next_chunk;
    ucs_mpool_elem_t *elem, *next_elem;
    ucs_mpool_data_t *data = mp->data;
    void *obj;

    /* Cleanup all elements in the freelist and set their header to NULL to mark
     * them as released for the leak check.
     */
    next_elem = mp->freelist;
    while (next_elem != NULL) {
        elem = next_elem;
        VALGRIND_MAKE_MEM_DEFINED(elem, sizeof *elem);
        next_elem = elem->next;
        if (data->ops->obj_cleanup != NULL) {
            obj = elem + 1;
            VALGRIND_MEMPOOL_ALLOC(mp, obj, mp->data->elem_size - sizeof(ucs_mpool_elem_t));
            VALGRIND_MAKE_MEM_DEFINED(obj, mp->data->elem_size - sizeof(ucs_mpool_elem_t));
            data->ops->obj_cleanup(mp, obj);
            VALGRIND_MEMPOOL_FREE(mp, obj);
        }
        elem->mpool = NULL;
    }

    /*
     * Go over all elements in the chunks and make sure they were on the freelist.
     * Then, release the chunk.
     */
    next_chunk = data->chunks;
    while (next_chunk != NULL) {
        chunk      = next_chunk;
        next_chunk = chunk->next;

        if (leak_check) {
            ucs_mpool_chunk_leak_check(mp, chunk);
        }
        data->ops->chunk_release(mp, chunk);
    }

    VALGRIND_DESTROY_MEMPOOL(mp);
    ucs_debug("mpool %s destroyed", ucs_mpool_name(mp));

    free(data->name);
    ucs_free(data);
}
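
Note the trick inside the freelist walk above: before obj_cleanup is called on an element that is logically free, the element is temporarily re-registered with VALGRIND_MEMPOOL_ALLOC and marked defined, then released again, so Memcheck permits the callback to touch it. The same pattern in isolation, with hypothetical names:

#include <valgrind/memcheck.h>

static void cleanup_free_obj(void *pool, void *obj, size_t size,
                             void (*dtor)(void *))
{
    VALGRIND_MEMPOOL_ALLOC(pool, obj, size);  /* make it addressable again */
    VALGRIND_MAKE_MEM_DEFINED(obj, size);     /* and defined, for the dtor */
    dtor(obj);
    VALGRIND_MEMPOOL_FREE(pool, obj);         /* back to the freed state */
}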
Example 17
MemoryPool::~MemoryPool(void)
{
    pool_destroying = true;

    decrement_usage(used_memory.value());
    decrement_mapping(mapped_memory.value());

#ifdef USE_VALGRIND
    VALGRIND_DESTROY_MEMPOOL(this);

    // Do not forget to discard stack traces for delayed free blocks
    for (size_t i = 0; i < delayedFreeCount; i++)
    {
        MemBlock* block = delayedFree[i];
        void* object = &block->body;

        VALGRIND_DISCARD(
            VALGRIND_MAKE_MEM_DEFINED(block, OFFSET(MemBlock*, body)));
        VALGRIND_DISCARD(
            VALGRIND_MAKE_WRITABLE(object, block->length));
    }
#endif

    if (parent)
    {
        MemoryPool::release(freeObjects);
    }
    else
    {
        releaseRaw(pool_destroying, freeObjects, ((threshold + roundingSize) / roundingSize) * sizeof(void*));
    }
    freeObjects = NULL;

    for (MemSmallHunk* hunk; hunk = smallHunks;)
    {
        smallHunks = hunk->nextHunk;
        releaseRaw(pool_destroying, hunk, minAllocation);
    }

    for (MemBigHunk* hunk; hunk = bigHunks;)
    {
        bigHunks = hunk->nextHunk;
        releaseRaw(pool_destroying, hunk, hunk->length);
    }
}
Example 18
/*
 * MemoryContextReset
 *		Release all space allocated within a context and its descendants,
 *		but don't delete the contexts themselves.
 *
 * The type-specific reset routine handles the context itself, but we
 * have to do the recursion for the children.
 */
void
MemoryContextReset(MemoryContext context)
{
	AssertArg(MemoryContextIsValid(context));

	/* save a function call in common case where there are no children */
	if (context->firstchild != NULL)
		MemoryContextResetChildren(context);

	/* Nothing to do if no pallocs since startup or last reset */
	if (!context->isReset)
	{
		(*context->methods->reset) (context);
		context->isReset = true;
		VALGRIND_DESTROY_MEMPOOL(context);
		VALGRIND_CREATE_MEMPOOL(context, 0, false);
	}
}
Example 19
static void
pool_free (void* item)
{
	Pool *pool, **at;
	char *ptr, *beg, *end;

	ptr = item;

	/* Find which block this one belongs to */
	for (at = &all_pools, pool = *at; pool; at = &pool->next, pool = *at) {
		beg = (char*)pool->items;
		end = (char*)pool + pool->length - sizeof (Item);
		if (ptr >= beg && ptr <= end) {
			ASSERT ((ptr - beg) % sizeof (Item) == 0);
			break;
		}
	}

	/* Otherwise invalid meta */
	ASSERT (at);
	ASSERT (pool);
	ASSERT (pool->used > 0);

	/* No more meta cells used in this block, remove from list, destroy */
	if (pool->used == 1) {
		*at = pool->next;

#ifdef WITH_VALGRIND
		VALGRIND_DESTROY_MEMPOOL (pool);
#endif

		munmap (pool, pool->length);
		return;
	}

#ifdef WITH_VALGRIND
	VALGRIND_MEMPOOL_FREE (pool, item);
	VALGRIND_MAKE_MEM_UNDEFINED (item, sizeof (Item));
#endif

	--pool->used;
	memset (item, 0xCD, sizeof (Item));
	unused_push (&pool->unused, item);
}
Example 20
void psmi_sysbuf_fini(void)
{
	struct psmi_mem_block_ctrl *block;
	struct psmi_mem_ctrl *handler_index;
	int i;

	if (!psmi_sysbuf.is_initialized)
		return;

	VALGRIND_DESTROY_MEMPOOL(&psmi_sysbuf);

	handler_index = psmi_sysbuf.handler_index;
	for (i = 0; i < MM_NUM_OF_POOLS; i++) {
		while ((block = handler_index[i].free_list) != NULL) {
			handler_index[i].free_list = block->next;
			psmi_free(block);
		}
	}
}
Example 21
mod_export void
old_heaps(Heap old)
{
    Heap h, n;

    queue_signals();
    for (h = heaps; h; h = n) {
	n = h->next;
	DPUTS(h->sp, "BUG: old_heaps() with pushed heaps");
#ifdef ZSH_HEAP_DEBUG
	if (heap_debug_verbosity & HDV_FREE) {
	    fprintf(stderr, "HEAP DEBUG: heap " HEAPID_FMT
		    "freed in old_heaps().\n", h->heap_id);
	}
#endif
#ifdef USE_MMAP
	munmap((void *) h, h->size);
#else
	zfree(h, HEAPSIZE);
#endif
#ifdef ZSH_VALGRIND
	VALGRIND_DESTROY_MEMPOOL((char *)h);
#endif
    }
    heaps = old;
#ifdef ZSH_HEAP_DEBUG
    if (heap_debug_verbosity & HDV_OLD) {
	fprintf(stderr, "HEAP DEBUG: heap " HEAPID_FMT
		"restored.\n", heaps->heap_id);
    }
    {
	Heap myold = heaps_saved ? getlinknode(heaps_saved) : NULL;
	if (old != myold)
	{
	    fprintf(stderr, "HEAP DEBUG: invalid old heap " HEAPID_FMT
		    ", expecting " HEAPID_FMT ".\n", old->heap_id,
		    myold->heap_id);
	}
    }
#endif
    fheap = NULL;
    unqueue_signals();
}
Example 22
/* Destroys the global fixed size allocator data structure and all of
 * the memory held within it. */
void MVM_fixed_size_destroy(MVMFixedSizeAlloc *al) {
    int bin_no;

    for (bin_no = 0; bin_no < MVM_FSA_BINS; bin_no++) {
        int page_no;
        int num_pages = al->size_classes[bin_no].num_pages;

        VALGRIND_DESTROY_MEMPOOL(&al->size_classes[bin_no]);

        for (page_no = 0; page_no < num_pages; page_no++) {
            MVM_free(al->size_classes[bin_no].pages[page_no]);
        }
        MVM_free(al->size_classes[bin_no].pages);
    }
    uv_mutex_destroy(&(al->complex_alloc_mutex));

    MVM_free(al->size_classes);
    MVM_free(al);
}
Example 23
/*
 * MemoryContextDelete
 *		Delete a context and its descendants, and release all space
 *		allocated therein.
 *
 * The type-specific delete routine removes all subsidiary storage
 * for the context, but we have to delete the context node itself,
 * as well as recurse to get the children.	We must also delink the
 * node from its parent, if it has one.
 */
void
MemoryContextDelete(MemoryContext context)
{
	AssertArg(MemoryContextIsValid(context));
	/* We had better not be deleting TopMemoryContext ... */
	Assert(context != TopMemoryContext);
	/* And not CurrentMemoryContext, either */
	Assert(context != CurrentMemoryContext);

	MemoryContextDeleteChildren(context);

	/*
	 * We delink the context from its parent before deleting it, so that if
	 * there's an error we won't have deleted/busted contexts still attached
	 * to the context tree.  Better a leak than a crash.
	 */
	MemoryContextSetParent(context, NULL);

	(*context->methods->delete_context) (context);
	VALGRIND_DESTROY_MEMPOOL(context);
	pfree(context);
}
Example 24
File: arena.cpp Project: KDE/khtml
/*
 * Free tail arenas linked after head, which may not be the true list head.
 * Reset pool->current to point to head in case it pointed at a tail arena.
 */
static void FreeArenaList(ArenaPool *pool, Arena *head, bool reallyFree)
{
    Arena **ap, *a;

    ap = &head->next;
    a = *ap;
    if (!a) {
        return;
    }

#ifdef DEBUG_ARENA_MALLOC
    printf("****** Freeing arena pool. Total allocated memory: %d\n", pool->cumul);

    do {
        assert(a->base <= a->avail && a->avail <= a->limit);
        a->avail = a->base;
        CLEAR_UNUSED(a);
    } while ((a = a->next) != 0);
    a = *ap;
#endif

    if (freelist_count >= FREELIST_MAX) {
        reallyFree = true;
    }

    if (reallyFree) {
        do {
            *ap = a->next;
            VALGRIND_DESTROY_MEMPOOL(a->base);
            CLEAR_ARENA(a);
#ifdef DEBUG_ARENA_MALLOC
            if (a) {
                i--;
                printf("Free: %d\n", i);
            }
#endif
            free(a); a = 0;
        } while ((a = *ap) != 0);
    } else {
        /* Insert as much of the arena chain as we can hold at the front of the freelist. */
        do {
            ap = &(*ap)->next;
            freelist_count++;
        } while (*ap && freelist_count < FREELIST_MAX);

        /* Get rid of excess */
        if (*ap) {
            Arena *xa, *n;
            for (xa = *ap; xa; xa = n) {
                VALGRIND_DESTROY_MEMPOOL(xa->base);
                n = xa->next;
#ifdef DEBUG_ARENA_MALLOC
                i--;
                printf("Free: %d\n", i);
#endif
                CLEAR_ARENA(xa);
                free(xa);
            }
        }
        *ap = arena_freelist;
        arena_freelist = a;
        head->next = 0;
    }
    pool->current = head;
}