Example #1
/*
 * heap_cleanup -- cleans up the volatile heap state
 */
void
heap_cleanup(struct palloc_heap *heap)
{
	struct heap_rt *rt = heap->rt;

	alloc_class_collection_delete(rt->alloc_classes);

	bucket_delete(rt->default_bucket);

	for (unsigned i = 0; i < rt->ncaches; ++i)
		bucket_group_destroy(rt->caches[i].buckets);

	for (int i = 0; i < MAX_RUN_LOCKS; ++i)
		util_mutex_destroy(&rt->run_locks[i]);

	Free(rt->caches);

	for (int i = 0; i < MAX_ALLOCATION_CLASSES; ++i) {
		recycler_delete(rt->recyclers[i]);
	}

	VALGRIND_DO_DESTROY_MEMPOOL(heap->layout);

	Free(rt);
	heap->rt = NULL;
}
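
For reference, the runtime state torn down above has roughly the shape sketched below, reconstructed from the fields touched in this example and in heap_boot (Example #3). The member order, the mutex type and the placeholder array sizes are assumptions, not the library's actual definition.

#include <pthread.h>

/* Placeholder sizes; the real values are defined in the heap headers. */
#define MAX_RUN_LOCKS		1024
#define MAX_ALLOCATION_CLASSES	255

struct alloc_class_collection;
struct bucket;
struct bucket_cache;
struct recycler;

/* Reconstructed from usage in heap_cleanup()/heap_boot(); not verbatim. */
struct heap_rt {
	struct alloc_class_collection *alloc_classes;
	struct bucket *default_bucket;
	unsigned ncaches;			/* number of bucket caches */
	struct bucket_cache *caches;		/* array of ncaches entries */
	pthread_mutex_t run_locks[MAX_RUN_LOCKS];	/* mutex type assumed */
	struct recycler *recyclers[MAX_ALLOCATION_CLASSES];
	unsigned max_zone;
	unsigned zones_exhausted;
};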
Example #2
/*
 * alloc_class_collection_new -- creates a new collection of allocation classes
 */
struct alloc_class_collection *
alloc_class_collection_new(void)
{
	LOG(10, NULL);

	struct alloc_class_collection *ac = Zalloc(sizeof(*ac));
	if (ac == NULL)
		return NULL;

	memset(ac->aclasses, 0, sizeof(ac->aclasses));

	ac->granularity = ALLOC_BLOCK_SIZE;
	ac->last_run_max_size = MAX_RUN_SIZE;
	ac->fail_on_missing_class = 0;
	ac->autogenerate_on_missing_class = 1;

	size_t maps_size = (MAX_RUN_SIZE / ac->granularity) + 1;

	if ((ac->class_map_by_alloc_size = Malloc(maps_size)) == NULL)
		goto error;
	if ((ac->class_map_by_unit_size = cuckoo_new()) == NULL)
		goto error;

	memset(ac->class_map_by_alloc_size, 0xFF, maps_size);

	if (alloc_class_new(-1, ac, CLASS_HUGE, HEADER_COMPACT,
		CHUNKSIZE, 0, 1) == NULL)
		goto error;

	struct alloc_class *predefined_class =
		alloc_class_new(-1, ac, CLASS_RUN, HEADER_COMPACT,
			MIN_RUN_SIZE, 0, 1);
	if (predefined_class == NULL)
		goto error;

	for (size_t i = 0; i < FIRST_GENERATED_CLASS_SIZE / ac->granularity;
		++i) {
		ac->class_map_by_alloc_size[i] = predefined_class->id;
	}

	/*
	 * Based on the defined categories, a set of allocation classes is
	 * created. The unit size of those classes depends on the category's
	 * initial size and step; a worked example of this stepping is
	 * sketched after this function.
	 */
	size_t granularity_mask = ALLOC_BLOCK_SIZE_GEN - 1;
	for (int c = 1; c < MAX_ALLOC_CATEGORIES; ++c) {
		size_t n = categories[c - 1].size + ALLOC_BLOCK_SIZE_GEN;
		do {
			if (alloc_class_find_or_create(ac, n) == NULL)
				goto error;

			float stepf = (float)n * categories[c].step;
			size_t stepi = (size_t)stepf;
			stepi = (stepf - (float)stepi < FLT_EPSILON) ?
				stepi : stepi + 1;

			n += (stepi + (granularity_mask)) & ~granularity_mask;
		} while (n <= categories[c].size);
	}

	/*
	 * Find the largest alloc class and use its unit size as the run
	 * allocation threshold.
	 */
	uint8_t largest_aclass_slot;
	for (largest_aclass_slot = MAX_ALLOCATION_CLASSES - 1;
			largest_aclass_slot > 0 &&
			ac->aclasses[largest_aclass_slot] == NULL;
			--largest_aclass_slot) {
		/* intentional NOP */
	}

	struct alloc_class *c = ac->aclasses[largest_aclass_slot];

	/*
	 * The actual run might contain fewer unit blocks than the theoretical
	 * maximum. This may be the case for very large unit sizes.
	 */
	size_t real_unit_max = c->run.bitmap_nallocs < RUN_UNIT_MAX_ALLOC ?
		c->run.bitmap_nallocs : RUN_UNIT_MAX_ALLOC;

	size_t theoretical_run_max_size = c->unit_size * real_unit_max;

	ac->last_run_max_size = MAX_RUN_SIZE > theoretical_run_max_size ?
		theoretical_run_max_size : MAX_RUN_SIZE;

#ifdef DEBUG
	/*
	 * Verify that the class map maps each allocation class's unit size
	 * back to that class. This must be true for the default allocation
	 * classes, otherwise duplicate buckets will be created.
	 */
	for (size_t i = 0; i < MAX_ALLOCATION_CLASSES; ++i) {
		struct alloc_class *c = ac->aclasses[i];

		if (c != NULL && c->type == CLASS_RUN) {
			ASSERTeq(i, c->id);
			ASSERTeq(alloc_class_by_run(ac, c->unit_size,
				c->flags, c->run.size_idx), c);
		}
	}
#endif

	return ac;

error:
	alloc_class_collection_delete(ac);

	return NULL;
}
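
The stepping arithmetic in the generation loop above (round the fractional step up to a whole number of bytes, then up to the next multiple of the generation block size) can be seen in isolation in the sketch below. The block size, starting size and step value are made-up numbers chosen for illustration, not the constants used by the library.

#include <stdio.h>
#include <stddef.h>
#include <float.h>

#define BLOCK_SIZE_GEN 8	/* stands in for ALLOC_BLOCK_SIZE_GEN */

static size_t
next_class_size(size_t n, float step)
{
	size_t mask = BLOCK_SIZE_GEN - 1;

	/* ceil(n * step), computed the same way as in the loop above */
	float stepf = (float)n * step;
	size_t stepi = (size_t)stepf;
	stepi = (stepf - (float)stepi < FLT_EPSILON) ? stepi : stepi + 1;

	/* round the step up to the next multiple of the block size */
	return n + ((stepi + mask) & ~mask);
}

int
main(void)
{
	size_t n = 128;	/* hypothetical starting size for a category */

	for (int i = 0; i < 5; ++i) {
		printf("%zu\n", n);	/* prints 128, 136, 144, 152, 160 */
		n = next_class_size(n, 0.05f);
	}

	return 0;
}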
Example #3
/*
 * heap_boot -- opens the heap region of the pmemobj pool
 *
 * If successful, the function returns zero; otherwise an error number is returned.
 */
int
heap_boot(struct palloc_heap *heap, void *heap_start, uint64_t heap_size,
		uint64_t run_id, void *base, struct pmem_ops *p_ops)
{
	struct heap_rt *h = Malloc(sizeof(*h));
	int err;
	if (h == NULL) {
		err = ENOMEM;
		goto error_heap_malloc;
	}

	h->alloc_classes = alloc_class_collection_new();
	if (h->alloc_classes == NULL) {
		err = ENOMEM;
		goto error_alloc_classes_new;
	}

	h->ncaches = heap_get_ncaches();
	h->caches = Malloc(sizeof(struct bucket_cache) * h->ncaches);
	if (h->caches == NULL) {
		err = ENOMEM;
		goto error_heap_cache_malloc;
	}

	h->max_zone = heap_max_zone(heap_size);
	h->zones_exhausted = 0;

	for (int i = 0; i < MAX_RUN_LOCKS; ++i)
		util_mutex_init(&h->run_locks[i], NULL);

	heap->run_id = run_id;
	heap->p_ops = *p_ops;
	heap->layout = heap_start;
	heap->rt = h;
	heap->size = heap_size;
	heap->base = base;
	VALGRIND_DO_CREATE_MEMPOOL(heap->layout, 0, 0);

	for (unsigned i = 0; i < h->ncaches; ++i)
		bucket_group_init(h->caches[i].buckets);

	size_t rec_i;
	for (rec_i = 0; rec_i < MAX_ALLOCATION_CLASSES; ++rec_i) {
		if ((h->recyclers[rec_i] = recycler_new(heap)) == NULL) {
			err = ENOMEM;
			goto error_recycler_new;
		}
	}

	return 0;

error_recycler_new:
	Free(h->caches);
	for (size_t i = 0; i < rec_i; ++i)
		recycler_delete(h->recyclers[i]);
error_heap_cache_malloc:
	alloc_class_collection_delete(h->alloc_classes);
error_alloc_classes_new:
	Free(h);
	heap->rt = NULL;
error_heap_malloc:
	return err;
}
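
Putting Examples #1 and #3 together, a minimal boot/cleanup cycle might be driven as sketched below. The caller and the way it obtains the mapped region, base pointer and pmem_ops are assumptions for illustration; only heap_boot and heap_cleanup come from the snippets above.

/*
 * Hypothetical caller, for illustration only; heap_start, base and p_ops
 * would normally come from the pool's mapping and persistence layer.
 */
static int
heap_demo(struct palloc_heap *heap, void *heap_start, uint64_t heap_size,
	uint64_t run_id, void *base, struct pmem_ops *p_ops)
{
	int err = heap_boot(heap, heap_start, heap_size, run_id, base, p_ops);
	if (err != 0)
		return err;	/* errno-style code, e.g. ENOMEM */

	/* ... the volatile heap state is usable here ... */

	heap_cleanup(heap);	/* releases everything heap_boot set up */

	return 0;
}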