Example #1
/*
 * bucket_group_destroy -- (internal) destroys bucket group instance
 */
static void
bucket_group_destroy(struct bucket **buckets)
{
	for (int i = 0; i < MAX_ALLOCATION_CLASSES; ++i)
		if (buckets[i] != NULL)
			bucket_delete(buckets[i]);
}
Example #2
/*
 * heap_cleanup -- cleans up the volatile heap state
 */
void
heap_cleanup(struct palloc_heap *heap)
{
	struct heap_rt *rt = heap->rt;

	alloc_class_collection_delete(rt->alloc_classes);

	bucket_delete(rt->default_bucket);

	for (unsigned i = 0; i < rt->ncaches; ++i)
		bucket_group_destroy(rt->caches[i].buckets);

	for (int i = 0; i < MAX_RUN_LOCKS; ++i)
		util_mutex_destroy(&rt->run_locks[i]);

	Free(rt->caches);

	for (int i = 0; i < MAX_ALLOCATION_CLASSES; ++i) {
		recycler_delete(rt->recyclers[i]);
	}

	VALGRIND_DO_DESTROY_MEMPOOL(heap->layout);

	Free(rt);
	heap->rt = NULL;
}
Example #3
void lruhash_delete(struct lruhash *table)
{
    size_t i;
    if(!table)
        return;
    lock_basic_destroy(&table->lock);
    for(i=0; i<table->size; i++)
        bucket_delete(table, &table->array[i]);
    free(table->array);
    free(table);
}
Example #4
void lruhash_clear(struct lruhash* table)
{
    size_t i;
    if(!table)
        return;

    lock_basic_lock(&table->lock);
    for(i=0; i<table->size; i++) {
        bucket_delete(table, &table->array[i]);
    }
    table->lru_head = NULL;
    table->lru_tail = NULL;
    table->num = 0;
    table->space_used = 0;
    lock_basic_unlock(&table->lock);
}
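
For context, a hedged sketch of how a caller might use the two routines above (table construction and the entry callbacks are assumed to be set up elsewhere and are not part of these excerpts): lruhash_clear() empties a live table under its lock, while lruhash_delete() destroys the lock first and therefore assumes no other thread can still reach the table.

/* hypothetical call sites only; lruhash construction is assumed elsewhere */
static void cache_flush(struct lruhash *cache)
{
    lruhash_clear(cache);    /* drop all entries, keep the lock and bucket array */
}

static void cache_shutdown(struct lruhash *cache)
{
    /* safe only once no other thread uses the table: the lock is destroyed
     * before the buckets are walked and the array is freed */
    lruhash_delete(cache);
}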
Example #5
/*
 * heap_cleanup -- cleans up the volatile heap state
 *
 * If successful, function returns zero. Otherwise an error number is returned.
 */
int
heap_cleanup(PMEMobjpool *pop)
{
	for (int i = 0; i < MAX_BUCKETS; ++i)
		bucket_delete(pop->heap->buckets[i]);

	for (int i = 0; i < MAX_RUN_LOCKS; ++i)
		pthread_mutex_destroy(&pop->heap->run_locks[i]);

	Free(pop->heap->bucket_map);

	Free(pop->heap);

	pop->heap = NULL;

	return 0;
}
Example #6
htable* htable_destroy(htable* ht) { // {{{

	size_t i = ht->bucketc;

	while (i--) {
		bucket* head = ht->bucketv[i];
		while (head) {
			bucket* next = head->next;
			bucket_delete(head);
			head = next;
		}
	}

	ht->entries = 0;
	ht->bucketc = 0;
	free(ht->bucketv);

	return ht;

} // }}}
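
Note that htable_destroy() frees the bucket chains and the bucket array but returns the table pointer rather than freeing it, so releasing the struct itself is left to the owner. A hypothetical call site, assuming the table was heap-allocated by a matching constructor:

free(htable_destroy(ht));  /* destroy hands the emptied struct back to be freed */
ht = NULL;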
Example #7
/*
 * heap_create_alloc_class_buckets -- (internal) allocates all cache bucket
 * instances of the specified type
 */
static int
heap_create_alloc_class_buckets(struct palloc_heap *heap, struct alloc_class *c)
{
	struct heap_rt *h = heap->rt;
	int i;
	for (i = 0; i < (int)h->ncaches; ++i) {
		h->caches[i].buckets[c->id] = bucket_new(
			container_new_seglists(heap), c);
		if (h->caches[i].buckets[c->id] == NULL)
			goto error_cache_bucket_new;
	}

	return 0;

error_cache_bucket_new:
	for (i -= 1; i >= 0; --i) {
		bucket_delete(h->caches[i].buckets[c->id]);
	}

	return -1;
}
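
The error path above uses the common C idiom of unwinding only the instances created before the failure. A generic, self-contained sketch of that idiom follows; resource_new()/resource_delete() are placeholders standing in for bucket_new()/bucket_delete(), not functions from this codebase.

#include <stdlib.h>

/* placeholder allocator pair, standing in for bucket_new()/bucket_delete() */
static void *resource_new(void) { return malloc(64); }
static void resource_delete(void *r) { free(r); }

static int create_all(void *res[], size_t n)
{
	size_t i;

	for (i = 0; i < n; ++i) {
		res[i] = resource_new();
		if (res[i] == NULL)
			goto error;
	}
	return 0;

error:
	while (i-- > 0)              /* release only the entries that succeeded */
		resource_delete(res[i]);
	return -1;
}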
Example #8
int htable_unset(htable* ht, void* key, size_t keylen, void** data, size_t *dlen) { // {{{

	int hval;
	bucket** b = htable_find_pbucket(ht, key, keylen, &hval);

	if (!*b)
		return HTABLE_NOT_FOUND;

	bucket* next = b[0]->next;

	*data = b[0]->data;
	*dlen = b[0]->dlen;
	b[0]->data = NULL;

	bucket_delete(*b);

	*b = next;

	ht->entries--;

	return HTABLE_FOUND;
} // }}}
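
Because htable_unset() NULLs b[0]->data before calling bucket_delete(), ownership of the stored data passes to the caller. A hypothetical call site (htable, HTABLE_FOUND, and the key come from the surrounding library, which is assumed here):

#include <stdlib.h>

/* hypothetical wrapper around the library call above */
static void drop_entry(htable *ht, void *key, size_t keylen)
{
	void *data;
	size_t dlen;

	/* on HTABLE_FOUND the caller now owns data and must free it;
	 * bucket_delete() above only released the bucket node itself */
	if (htable_unset(ht, key, keylen, &data, &dlen) == HTABLE_FOUND)
		free(data);
}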
Example #9
File: kv.c Project: skuanr/sheepdog
/* return SD_RES_NO_VDI if the bucket does not exist */
int kv_delete_bucket(const char *account, const char *bucket)
{
	uint32_t account_vid, vid;
	char vdi_name[SD_MAX_VDI_LEN];
	int ret;

	ret = sd_lookup_vdi(account, &account_vid);
	if (ret != SD_RES_SUCCESS) {
		sd_err("Failed to find account %s", account);
		return ret;
	}

	sys->cdrv->lock(account_vid);
	snprintf(vdi_name, SD_MAX_VDI_LEN, "%s/%s", account, bucket);

	ret = sd_lookup_vdi(vdi_name, &vid);
	if (ret != SD_RES_SUCCESS)
		goto out;
	ret = bucket_delete(account, account_vid, bucket);
out:
	sys->cdrv->unlock(account_vid);
	return ret;
}
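
The lock-then-single-exit shape here (take the lock, jump to one out: label on any failure so the unlock always runs) is a common C pattern. A generic, self-contained sketch of it using pthreads, not the sheepdog locking driver; do_lookup()/do_delete() are placeholders.

#include <pthread.h>

/* placeholder steps standing in for sd_lookup_vdi()/bucket_delete() */
static int do_lookup(void) { return 0; }
static int do_delete(void) { return 0; }

static int delete_under_lock(pthread_mutex_t *lock)
{
	int ret;

	pthread_mutex_lock(lock);

	ret = do_lookup();
	if (ret != 0)
		goto out;         /* failure: still falls through to the unlock */

	ret = do_delete();
out:
	pthread_mutex_unlock(lock);
	return ret;
}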
Example #10
/*
 * heap_buckets_init -- (internal) initializes bucket instances
 */
static int
heap_buckets_init(PMEMobjpool *pop)
{
	struct pmalloc_heap *h = pop->heap;
	int i;

	bucket_proto[0].unit_max = RUN_UNIT_MAX;

	/*
	 * To make use of every single bit available in the run, the unit size
	 * would have to be calculated using the following expression:
	 * (RUNSIZE / (MAX_BITMAP_VALUES * BITS_PER_VALUE)), but to preserve
	 * cacheline alignment a little bit of memory at the end of the run
	 * is left unused.
	 */
	bucket_proto[0].unit_size = MIN_RUN_SIZE;

	for (i = 1; i < MAX_BUCKETS - 1; ++i) {
		bucket_proto[i].unit_max = RUN_UNIT_MAX;
		bucket_proto[i].unit_size =
				bucket_proto[i - 1].unit_size *
				bucket_proto[i - 1].unit_max;
	}

	bucket_proto[i].unit_max = -1;
	bucket_proto[i].unit_size = CHUNKSIZE;

	h->last_run_max_size = bucket_proto[i - 1].unit_size *
				(bucket_proto[i - 1].unit_max - 1);

	h->bucket_map = Malloc(sizeof (*h->bucket_map) * h->last_run_max_size);
	if (h->bucket_map == NULL)
		goto error_bucket_map_malloc;

	for (i = 0; i < MAX_BUCKETS; ++i) {
		h->buckets[i] = bucket_new(bucket_proto[i].unit_size,
					bucket_proto[i].unit_max);
		if (h->buckets[i] == NULL)
			goto error_bucket_new;
	}

	/* XXX better way to fill the bucket map */
	for (i = 0; i < h->last_run_max_size; ++i) {
		for (int j = 0; j < MAX_BUCKETS - 1; ++j) {
			/*
			 * Skip the last unit, so that the distribution
			 * of buckets in the map is better.
			 */
			if ((bucket_proto[j].unit_size *
				((bucket_proto[j].unit_max - 1))) >= i) {
				h->bucket_map[i] = h->buckets[j];
				break;
			}
		}
	}

	heap_populate_buckets(pop);

	return 0;

error_bucket_new:
	Free(h->bucket_map);

	for (i = i - 1; i >= 0; --i)
		bucket_delete(h->buckets[i]);
error_bucket_map_malloc:

	return ENOMEM;
}
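
For illustration, the setup loop above grows each bucket's unit size geometrically (unit_size[i] = unit_size[i-1] * unit_max), with the last bucket switching to whole chunks. A standalone sketch of that progression using placeholder constants; the project's real MIN_RUN_SIZE, RUN_UNIT_MAX, and bucket count are defined elsewhere and may differ.

#include <stdio.h>

/* placeholder values chosen only to show the progression */
#define MIN_RUN_SIZE 128
#define RUN_UNIT_MAX 8
#define NBUCKETS 5

int main(void)
{
	unsigned long unit_size = MIN_RUN_SIZE;

	for (int i = 0; i < NBUCKETS; ++i) {
		printf("bucket %d: unit_size=%lu, unit_max=%d\n",
			i, unit_size, RUN_UNIT_MAX);
		unit_size *= RUN_UNIT_MAX; /* next class holds unit_max units of this one */
	}
	return 0;
}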