void operator()( int id ) const {
        rml::MemPoolPolicy pol(CrossThreadGetMem, CrossThreadPutMem);
        const int objLen = 10*id;

        pool_create_v1(id, &pol, &pool[id]);
        obj[id] = (char*)pool_malloc(pool[id], objLen);
        ASSERT(obj[id], NULL);
        memset(obj[id], id, objLen);

        {
            const size_t lrgSz = 2*16*1024;
            void *ptrLarge = pool_malloc(pool[id], lrgSz);
            ASSERT(ptrLarge, NULL);
            memset(ptrLarge, 1, lrgSz);

            // consume all small objects
            while (pool_malloc(pool[id], 5*1024))
                ;
            // releasing the large object gives a chance to allocate more
            pool_free(pool[id], ptrLarge);

            ASSERT(pool_malloc(pool[id], 5*1024), NULL);
        }

        barrier.wait();
        int myPool = number_of_threads-id-1;
        for (int i=0; i<10*myPool; i++)
            ASSERT(myPool==obj[myPool][i], NULL);
        pool_free(pool[myPool], obj[myPool]);
        pool_destroy(pool[myPool]);
    }
Example #2
static void
sec_block_destroy (Block *block)
{
	Block *bl, **at;
	Cell *cell;

	ASSERT (block);
	ASSERT (block->words);
	ASSERT (block->used == 0);

	/* Remove from the list */
	for (at = &all_blocks, bl = *at; bl; at = &bl->next, bl = *at) {
		if (bl == block) {
			*at = block->next;
			break;
		}
	}

	/* Must have been found */
	ASSERT (bl == block);

	/* Release all the meta data cells */
	while (block->unused) {
		cell = block->unused;
		sec_remove_cell_ring (&block->unused, cell);
		pool_free (cell);
	}

	/* Release all pages of secure memory */
	sec_release_pages (block->words, block->n_words * sizeof (word_t));

	pool_free (block);
}
void TestFixedBufferPool()
{
    void *ptrs[7];
    rml::MemPoolPolicy pol(fixedBufGetMem, NULL, 0, /*fixedSizePool=*/true,
                           /*keepMemTillDestroy=*/false);
    rml::MemoryPool *pool;

    pool_create_v1(0, &pol, &pool);
    void *largeObj = pool_malloc(pool, 7*1024*1024);
    ASSERT(largeObj, NULL);
    pool_free(pool, largeObj);

    for (int i=0; i<7; i++) {
        ptrs[i] = pool_malloc(pool, 1024*1024);
        ASSERT(ptrs[i], NULL);
    }
    for (int i=0; i<7; i++)
        pool_free(pool, ptrs[i]);

    largeObj = pool_malloc(pool, 7*1024*1024);
    ASSERT(largeObj, NULL);
    pool_free(pool, largeObj);

    pool_destroy(pool);
}
// single pool shared by different threads
void TestSharedPool()
{
    rml::MemPoolPolicy pol(getMallocMem, putMallocMem);
    rml::MemoryPool *pool;

    pool_create_v1(0, &pol, &pool);
    void **crossThread = new void*[MaxThread * SharedPoolRun::OBJ_CNT];
    void **afterTerm = new void*[MaxThread * SharedPoolRun::OBJ_CNT];

    for (int p=MinThread; p<=MaxThread; p++) {
        SharedPoolRun::init(p, pool, crossThread, afterTerm);
        SharedPoolRun thr;

        void *hugeObj = pool_malloc(pool, 10*1024*1024);
        ASSERT(hugeObj, NULL);

        NativeParallelFor( p, thr );

        pool_free(pool, hugeObj);
        for (int i=0; i<p*SharedPoolRun::OBJ_CNT; i++)
            pool_free(pool, afterTerm[i]);
    }
    delete []afterTerm;
    delete []crossThread;

    pool_destroy(pool);
    ASSERT(!liveRegions, "Expected all regions to be released.");
}
    void operator()( int id ) const {
        const int ITERS = 1000;
        void *local[ITERS];

        startB.wait();
        for (int i=id*OBJ_CNT; i<(id+1)*OBJ_CNT; i++) {
            afterTerm[i] = pool_malloc(pool, i%2? 8*1024 : 9*1024);
            memset(afterTerm[i], i, i%2? 8*1024 : 9*1024);
            crossThread[i] = pool_malloc(pool, i%2? 9*1024 : 8*1024);
            memset(crossThread[i], i, i%2? 9*1024 : 8*1024);
        }

        for (int i=1; i<ITERS; i+=2) {
            local[i-1] = pool_malloc(pool, 6*1024);
            memset(local[i-1], i, 6*1024);
            local[i] = pool_malloc(pool, 16*1024);
            memset(local[i], i, 16*1024);
        }
        mallocDone.wait();
        int myVictim = threadNum-id-1;
        for (int i=myVictim*OBJ_CNT; i<(myVictim+1)*OBJ_CNT; i++)
            pool_free(pool, crossThread[i]);
        for (int i=0; i<ITERS; i++)
            pool_free(pool, local[i]);
    }
Example #6
ast_t* ast_scope_decl(ast_t* node, string* identifier) {
  ast_t* ret = 0;
  array* arr = 0;

  char* cstr = identifier->value;

  array* scopes = ast_get_scopes(node);
  if (!scopes)
    return 0;

  log_silly("searching in %lu scopes", scopes->length);

  ast_t* scope;
  for (u64 i = 0; i < scopes->length; ++i) {
    scope = (ast_t*)scopes->values[i];

    ret = (ast_t*)hash_get(scope->block.variables, cstr);
    if (ret) {
      pool_free(scopes);
      return ret;
    }

    // TODO REVIEW why only the first one? can't remember why!
    arr = hash_get(scope->block.functions, cstr);
    if (arr) {
      pool_free(scopes);
      return (ast_t*)array_get(arr, 0);
    }
  }

  pool_free(scopes);

  return 0;
}
Example #7
/**  De-allocates the interface/variables memory allocated using module_alloc
 *
 */
int module_free(module_t *module) {
	int i;
	mdebug("module_id=%d\n",module->id);
	if (!module) return -1;
	if (module->inputs) {
		if (pool_free(module->inputs)) return -1;
		module->inputs = NULL;
	}
	if (module->outputs) {
		if (pool_free(module->outputs)) return -1;
		module->outputs = NULL;
	}
	if (module->variables) {
		for (i=0;i<module->nof_variables;i++) {
			if (variable_free(&module->variables[i])) {
				return -1;
			}
		}
		if (pool_free(module->variables)) return -1;
		module->variables = NULL;
	}
	module->nof_inputs = 0;
	module->nof_outputs = 0;
	module->nof_variables = 0;
	return 0;
}
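A minimal calling sketch for the function above; the module is assumed to have been set up with module_alloc() (whose exact signature is project-specific and not shown here), and the point is that module_free() propagates pool_free()/variable_free() failures, so its return value should be checked:

	/* Sketch: 'module' came from module_alloc(); layout details are assumed. */
	if (module_free(module)) {
		mdebug("failed to release resources for module_id=%d\n", module->id);
		return -1;
	}
	/* inputs, outputs and variables are now NULL and their counters are reset to 0 */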
Example #8
void
chunk_bulb_free(struct bulb_t *b)
{
    assert(b);
    pool_free(b->buf);
    pool_free(b);
}
Example #9
void
et_free_tree_force (struct et_node *t)
{
  pool_free (et_occurrences, t->rightmost_occ);
  if (t->parent_occ)
    pool_free (et_occurrences, t->parent_occ);
  pool_free (et_nodes, t);
}
Example #10
/**
 * @brief Remove and free all (key,val) couples from the hash store
 *
 * This function removes all (key,val) couples from the hashtable and
 * frees the stored data using the supplied function
 *
 * @param[in,out] ht        The hashtable to be cleared of all entries
 * @param[in]     free_func The function with which to free the contents
 *                          of each entry
 *
 * @return HASHTABLE_SUCCESS or errors
 */
hash_error_t
hashtable_delall(struct hash_table *ht,
		 int (*free_func)(struct gsh_buffdesc,
				  struct gsh_buffdesc))
{
	/* Successive partition numbers */
	uint32_t index = 0;

	for (index = 0; index < ht->parameter.index_size; index++) {
		/* The root of each successive partition */
		struct rbt_head *root = &ht->partitions[index].rbt;
		/* Pointer to node in tree for removal */
		struct rbt_node *cursor = NULL;

		PTHREAD_RWLOCK_wrlock(&ht->partitions[index].lock);

		/* Continue until there are no more entries in the red-black
		   tree */
		while ((cursor = RBT_LEFTMOST(root)) != NULL) {
			/* Pointer to the key and value descriptors
			   for each successive entry */
			struct hash_data *data = NULL;
			/* Aliased pointer to node, for freeing
			   buffers after removal from tree */
			struct rbt_node *holder = cursor;
			/* Buffer descriptor for key, as stored */
			struct gsh_buffdesc key;
			/* Buffer descriptor for value, as stored */
			struct gsh_buffdesc val;
			/* Return code from the free function.  Zero
			   on failure */
			int rc = 0;

			RBT_UNLINK(root, cursor);
			data = RBT_OPAQ(holder);

			key = data->key;
			val = data->val;

			pool_free(ht->data_pool, data);
			pool_free(ht->node_pool, holder);
			--ht->partitions[index].count;
			rc = free_func(key, val);

			if (rc == 0) {
				PTHREAD_RWLOCK_unlock(&ht->partitions[index].
						      lock);
				return HASHTABLE_ERROR_DELALL_FAIL;
			}
		}
		PTHREAD_RWLOCK_unlock(&ht->partitions[index].lock);
	}

	return HASHTABLE_SUCCESS;
}
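The free_func contract follows from the loop above: the callback receives the stored key and value descriptors by value and must return nonzero on success, since a zero return aborts the sweep with HASHTABLE_ERROR_DELALL_FAIL. A minimal sketch of such a callback, assuming the payloads are plain heap allocations and that struct gsh_buffdesc exposes an addr member:

static int free_kv(struct gsh_buffdesc key, struct gsh_buffdesc val)
{
	/* Sketch only: assumes key/val buffers were allocated with gsh_malloc(). */
	gsh_free(key.addr);
	gsh_free(val.addr);
	return 1;	/* nonzero == success, keep sweeping */
}

	/* ... later, when tearing down the table ... */
	hash_error_t err = hashtable_delall(ht, free_kv);	/* HASHTABLE_SUCCESS on a full sweep */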
Example #11
/* match will find a child in the parent, and either replace (if it's an insert) or remove (if data is NULL) */
int xdb_act(xdbcache xc, jid owner, char *ns, char *act, char *match, xmlnode data)
{
    xdbcache newx;
	pool p;

    if(xc == NULL || owner == NULL || ns == NULL)
    {
        fprintf(stderr,"Programming Error: xdb_set() called with NULL\n");
        return 1;
    }

    
    log_debug(ZONE,"XDB SET");

    /* init this newx */
	p = pool_new();
	newx = pmalloco(p, sizeof(_xdbcache));
    newx->i = xc->i;
    newx->set = 1;
    newx->data = data;
    newx->ns = ns;
    newx->act = act;
    newx->match = match;
    newx->owner = owner;
    newx->sent = time(NULL);
    newx->preblock = 0; /* flag */


    pthread_mutex_lock(&(xc->sem));
    newx->id = xc->id++; 
    newx->next = xc->next;
    newx->prev = xc;
    newx->next->prev = newx;
    xc->next = newx; 
    pthread_mutex_unlock(&(xc->sem));

    /* send it on its way */
    xdb_deliver(xc->i, newx,0);

    /* if it hasn't already returned, we should block here until it returns */
    while (newx->preblock != 1) usleep(10);


    /* if it didn't actually get set, flag that */
    if(newx->data == NULL) {
	  pool_free(p);
	  return 1;
	}

    xmlnode_free(newx->data);

	pool_free(p);

    return 0;
}
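A short usage sketch based on the comment above the function: a non-NULL xmlnode inserts or replaces the child selected by match, while NULL data asks the backend to remove it (the namespace, action and match strings below are placeholders, not values from the original code):

    /* insert or replace the matching child under the owner's stored data;
       a nonzero return means the set did not go through */
    if (xdb_act(xc, owner, "jabber:iq:roster", "insert", "jid=user@host", item))
        log_debug(ZONE, "xdb insert failed");

    /* passing NULL data requests removal of the matching child instead */
    xdb_act(xc, owner, "jabber:iq:roster", "insert", "jid=user@host", NULL);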
Example #12
void
http_keyval_free(pool_t *pool, http_keyval_t *node) {
    http_keyval_t *next; 

    for(; node; node = next ) {
	next = node->next;
	pool_free(pool, node->key);
	pool_free(pool, node->val);
	pool_free(pool, node);
    }
}
Example #13
void
et_free_tree (struct et_node *t)
{
  while (t->son)
    et_split (t->son);

  if (t->father)
    et_split (t);

  pool_free (et_occurrences, t->rightmost_occ);
  pool_free (et_nodes, t);
}
Example #14
static Block* 
sec_block_create (size_t size,
                  const char *during_tag)
{
	Block *block;
	Cell *cell;

	ASSERT (during_tag);

	/* We can force all memory to be malloced */
	if (getenv ("SECMEM_FORCE_FALLBACK"))
		return NULL;

	block = pool_alloc ();
	if (!block)
		return NULL;

	cell = pool_alloc ();
	if (!cell) {
		pool_free (block);
		return NULL;
	}

	/* The size above is a minimum, we're free to go bigger */
	if (size < DEFAULT_BLOCK_SIZE)
		size = DEFAULT_BLOCK_SIZE;
		
	block->words = sec_acquire_pages (&size, during_tag);
	block->n_words = size / sizeof (word_t);
	if (!block->words) {
		pool_free (block);
		pool_free (cell);
		return NULL;
	}
	
#ifdef WITH_VALGRIND
	VALGRIND_MAKE_MEM_DEFINED (block->words, size);
#endif
	
	/* The first cell to allocate from */
	cell->words = block->words;
	cell->n_words = block->n_words;
	cell->requested = 0;
	sec_write_guards (cell);
	sec_insert_cell_ring (&block->unused_cells, cell);

	block->next = all_blocks;
	all_blocks = block;
	
	return block;
}
Example #15
int32_t dec_session_ref(nfs41_session_t *session)
{
    int i;
    int32_t refcnt = atomic_dec_int32_t(&session->refcount);

    if (refcnt == 0) {

        /* Unlink the session from the client's list of
           sessions */
        PTHREAD_MUTEX_lock(&session->clientid_record->cid_mutex);
        glist_del(&session->session_link);
        PTHREAD_MUTEX_unlock(&session->clientid_record->cid_mutex);

        /* Decrement our reference to the clientid record */
        dec_client_id_ref(session->clientid_record);
        /* Destroy this session's mutexes and condition variable */

        for (i = 0; i < NFS41_NB_SLOTS; i++)
            PTHREAD_MUTEX_destroy(&session->slots[i].lock);

        PTHREAD_COND_destroy(&session->cb_cond);
        PTHREAD_MUTEX_destroy(&session->cb_mutex);

        /* Destroy the session's back channel (if any) */
        if (session->flags & session_bc_up)
            nfs_rpc_destroy_chan(&session->cb_chan);

        /* Free the memory for the session */
        pool_free(nfs41_session_pool, session);
    }

    return refcnt;
}
Example #16
bool journal_delete(journal_t journal, journal_operation_t oper, string * name)
{
	struct journal_entry_def entry;
	journal_entry del;
	void **slot;

	CHECK_MUTEX_LOCKED(journal->mutex);

	entry.oper = oper;
	entry.name = *name;
	slot =
		htab_find_slot_with_hash(journal->htab, &entry, JOURNAL_HASH(&entry),
								 NO_INSERT);
	if (!slot)
		return false;

	del = (journal_entry) *slot;
	if (del->next)
		del->next->prev = del->prev;
	else
		journal->last = del->prev;
	if (del->prev)
		del->prev->next = del->next;
	else
		journal->first = del->next;

	free(del->name.str);
	zfsd_mutex_lock(&journal_mutex);
	pool_free(journal_pool, del);
	zfsd_mutex_unlock(&journal_mutex);
	htab_clear_slot(journal->htab, slot);

	return true;
}
Example #17
/** fetch user data */
user_t user_load(sm_t sm, jid_t jid) {
    user_t user;

    /* already loaded */
    user = xhash_get(sm->users, jid_user(jid));
    if(user != NULL) {
        log_debug(ZONE, "returning previously-created user data for %s", jid_user(jid));
        return user;
    }

    /* make a new one */
    user = _user_alloc(sm, jid);

    /* get modules to setup */
    if(mm_user_load(sm->mm, user) != 0) {
        log_debug(ZONE, "modules failed user load for %s", jid_user(jid));
        pool_free(user->p);
        return NULL;
    }

    /* save them for later */
    xhash_put(sm->users, jid_user(user->jid), (void *) user);

    log_debug(ZONE, "loaded user data for %s", jid_user(jid));

    return user;
}
Example #18
int main(void)
{
	configfile_t *configfile;
	struct mycontext context;

	context.current_end_token = 0;
	context.permissions = 0;

	context.pool = pool_new(NULL);
	configfile =
	    dotconf_create("./context.conf", options, (void *)&context,
			   CASE_INSENSITIVE);
	if (!configfile) {
		fprintf(stderr, "Error opening configuration file\n");
		return 1;
	}
	configfile->errorhandler = (dotconf_errorhandler_t) error_handler;
	configfile->contextchecker = (dotconf_contextchecker_t) context_checker;
	if (dotconf_command_loop(configfile) == 0)
		fprintf(stderr, "Error reading configuration file\n");

	dotconf_cleanup(configfile);
	pool_free(context.pool);

	return 0;
}
Example #19
int
main(int argc, char **argv)
{
  int c, flags = 0;
  char *attrname = 0;
  
  Pool *pool = pool_create();
  Repo *repo = repo_create(pool, "<stdin>");

  while ((c = getopt(argc, argv, "hn:")) >= 0)
    {   
      switch(c)
	{
	case 'h':
	  usage(0);
	  break;
	case 'n':
	  attrname = optarg;
	  break;
	default:
	  usage(1);
	  break;
	}
    }
  repo_add_deltainfoxml(repo, stdin, flags);
  tool_write(repo, 0, attrname);
  pool_free(pool);
  exit(0);
}
Example #20
result js_session_free(void *arg)
{
	session s = (session) arg;

	pool_free(s->p);
	return r_UNREG;
}
Example #21
bool journal_delete_entry(journal_t journal, journal_entry entry)
{
	void **slot;

	CHECK_MUTEX_LOCKED(journal->mutex);

	slot = htab_find_slot_with_hash(journal->htab, entry, JOURNAL_HASH(entry),
									NO_INSERT);
	if (!slot)
		return false;

	if (entry->next)
		entry->next->prev = entry->prev;
	else
		journal->last = entry->prev;
	if (entry->prev)
		entry->prev->next = entry->next;
	else
		journal->first = entry->next;

	free(entry->name.str);
	zfsd_mutex_lock(&journal_mutex);
	pool_free(journal_pool, entry);
	zfsd_mutex_unlock(&journal_mutex);
	htab_clear_slot(journal->htab, slot);

	return true;
}
Example #22
void pool_close(POOL_T *pPool, int wait_for_ret) {

  TIME_VAL tv0 = timer_GetTime();

  if(!pPool) {
    return;
  }

  if(pPool->pInUse) {
    pPool->destroy_onempty = 1;

    if(wait_for_ret) {
      LOG(X_DEBUG("pool_close %s waiting for resources to be returned"), (pPool->descr ? pPool->descr : ""));
      while(pPool->pInUse) {
        if(wait_for_ret > 0 && (timer_GetTime() - tv0) / TIME_VAL_MS > wait_for_ret) {
          LOG(X_WARNING("pool_close %s aborting wait for resources to be returned"), (pPool->descr ? pPool->descr : ""));
          break;
        }
        usleep(50000);
      }
      LOG(X_DEBUG("pool_close %s done waiting for resources to be returned"), (pPool->descr ? pPool->descr : ""));
    } else {
      LOG(X_DEBUG("pool_close %s delaying deallocation until resources returned"), 
        (pPool->descr ? pPool->descr : ""));
      return;
    }

  }

  pool_free(pPool);

}
Example #23
/**
 * Destroys the UDP TX scheduler, which must no longer be attached to anything.
 */
void
udp_sched_free(udp_sched_t *us)
{
	udp_sched_check(us);
	unsigned i;

	/*
	 * TX stacks are asynchronously collected, so we need to force collection
	 * now to make sure nobody references us any longer.
	 */

	tx_collect();

	g_assert(0 == hash_list_length(us->stacks));

	for (i = 0; i < N_ITEMS(us->lifo); i++) {
		udp_sched_drop_all(us, &us->lifo[i]);
	}
	udp_sched_tx_release(us);
	udp_sched_seen_clear(us);
	pool_free(us->txpool);
	hset_free_null(&us->seen);
	hash_list_free(&us->stacks);
	udp_sched_clear_sockets(us);

	us->magic = 0;
	WFREE(us);
}
Example #24
/*
 * Deallocates a HacheItem created via HacheItemCreate.
 *
 * This function will not remove the item from the HacheTable so be sure to
 * call HacheTableDel() first if appropriate.
 */
static void HacheItemDestroy(HacheTable *h, HacheItem *hi, int deallocate_data) {
    assert(hi->h == h);

    if (!(h->options & HASH_NONVOLATILE_KEYS) || (h->options & HASH_OWN_KEYS))
	if (hi->key)
	    free(hi->key);

    if (deallocate_data) {
	if (h->del) {
	    h->del(h->clientdata, hi->data);
	} else if (hi->data.p) {
	    free(hi->data.p);
	}
    }

    if (hi->in_use_next)
	hi->in_use_next->in_use_prev = hi->in_use_prev;
    if (hi->in_use_prev)
	hi->in_use_prev->in_use_next = hi->in_use_next;
    if (h->in_use == hi)
	h->in_use = hi->in_use_next;

    
    if (h->options & HASH_POOL_ITEMS) 
    	pool_free(h->hi_pool, hi);
    else if (hi)
	free(hi);

    h->nused--;
}
Example #25
void xhash_free(xht h)
{
/*    log_debug(ZONE,"hash free %X",h); */

    if(h != NULL)
        pool_free(h->p);
}
static void TestEntries()
{
    const int SZ = 4;
    const int ALGN = 4;
    size_t size[SZ] = {8, 8000, 9000, 100*1024};
    size_t algn[ALGN] = {8, 64, 4*1024, 8*1024*1024};

    rml::MemPoolPolicy pol(getGranMem, putGranMem);
    currGranularity = 1; // do not check granularity in this test
    rml::MemoryPool *pool;

    pool_create_v1(0, &pol, &pool);
    for (int i=0; i<SZ; i++)
        for (int j=0; j<ALGN; j++) {
            char *p = (char*)pool_aligned_malloc(pool, size[i], algn[j]);
            ASSERT(p && 0==((uintptr_t)p & (algn[j]-1)), NULL);
            memset(p, j, size[i]);

            size_t curr_algn = algn[rand() % ALGN];
            size_t curr_sz = size[rand() % SZ];
            char *p1 = (char*)pool_aligned_realloc(pool, p, curr_sz, curr_algn);
            ASSERT(p1 && 0==((uintptr_t)p1 & (curr_algn-1)), NULL);
            ASSERT(memEqual(p1, min(size[i], curr_sz), j), NULL);

            memset(p1, j+1, curr_sz);
            size_t curr_sz1 = size[rand() % SZ];
            char *p2 = (char*)pool_realloc(pool, p1, curr_sz1);
            ASSERT(p2, NULL);
            ASSERT(memEqual(p2, min(curr_sz1, curr_sz), j+1), NULL);

            pool_free(pool, p2);
        }

    pool_destroy(pool);
}
Example #27
VOID
KernelFreeMemory
    (
        PVOID                       pMemoryBlock
    )
{
    BITS                            critLevel;
    BOOLEAN                         bSucceeded      = TRUE;

    /* DH: why could this be called with a NULL pointer? */
    /*KernelAssert(pMemoryBlock != NULL);*/
    if ( pMemoryBlock == NULL )
    {
        return;
    }

    critLevel = atmos_startcritical();

    bSucceeded = pool_free(gAnscDynMemoryPoolId, pMemoryBlock);

    atmos_endcritical(critLevel);

    if ( !bSucceeded )
    {
        KernelTrace2
            (
                KERNEL_DBG_LEVEL_WARNING,
                KERNEL_DBG_MASK_MEMORY,
                "KernelFreeMemory -- failed to free memory block 0x%X.\n",
                pMemoryBlock
            );
    }

    return;
}
Example #28
int main(void)
{
	pool p;
	pthread_t t1, t2;
	size_t i;

	if( (p = pool_new(NELEM)) == NULL)
		return 77;

	/* Add some items to the pool */
	for(i = 0; i < NELEM; i++) {
		void* dummy = (void*)(i + 1);
		pool_add(p, dummy);
	}

	/* Start the threads */
	pthread_create(&t1, NULL, tfn, p);
	pthread_create(&t2, NULL, tfn, p);

	/* Wait for the threads to finish */
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);

	pool_free(p, NULL);
	return 0;
}
Example #29
int
main(int argc, char **argv)
{
  int c, flags = 0;
  const char *query = 0;
  
  Pool *pool = pool_create();
  Repo *repo = repo_create(pool, "<stdin>");

  while ((c = getopt (argc, argv, "hq:")) >= 0)
    {
      switch(c)
        {
        case 'h':
          usage(0);
          break;
        case 'q':
	  query = optarg;
          break;
	default:
          usage(1);
          break;
        }
    }
  repo_add_repomdxml(repo, stdin, flags);
  if (query)
    doquery(pool, repo, query);
  else
    tool_write(repo, 0, 0);
  pool_free(pool);
  exit(0);
}
Example #30
// does pool_free(pool, *dst), then *dst = pool_malloc(pool, len) and copies buf into it
void*
pool_copy_buf(pool_t *pool, void **dst, const void *buf, size_t len) {
    pool_free(pool, *dst);
    *dst = pool_malloc(pool, len);
    memcpy(*dst, buf, len);
    return *dst;
}
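A usage sketch of the helper above; the surrounding struct and field are made up for illustration, and the point is that the previous buffer held in *dst is released back to the same pool before the fresh copy is installed, so repeated calls do not leak:

/* Sketch: header_t and its 'value' field are hypothetical; assumes this
 * pool_free() tolerates a NULL pointer the first time value is still unset. */
typedef struct { void *value; } header_t;

static void
header_set(pool_t *pool, header_t *hdr, const char *text) {
    pool_copy_buf(pool, &hdr->value, text, strlen(text) + 1);
}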