Example #1
/*
 * heap_buckets_init -- (internal) initializes bucket instances
 */
int
heap_buckets_init(struct palloc_heap *heap)
{
	struct heap_rt *h = heap->rt;

	/* instantiate buckets for every registered allocation class */
	for (uint8_t i = 0; i < MAX_ALLOCATION_CLASSES; ++i) {
		struct alloc_class *c = alloc_class_by_id(h->alloc_classes, i);
		if (c != NULL) {
			if (heap_create_alloc_class_buckets(heap, c) != 0)
				goto error_bucket_create;
		}
	}

	h->default_bucket = bucket_new(container_new_ctree(heap),
		alloc_class_by_id(h->alloc_classes, DEFAULT_ALLOC_CLASS_ID));

	if (h->default_bucket == NULL)
		goto error_bucket_create;

	heap_populate_buckets(heap);

	return 0;

error_bucket_create:
	/* unwind: tear down any bucket groups created so far */
	for (unsigned i = 0; i < h->ncaches; ++i)
		bucket_group_destroy(h->caches[i].buckets);

	return -1;
}
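
Example #1 uses the classic goto-unwind idiom: any failure jumps to a single label that tears down whatever was built up to that point. A minimal standalone sketch of the same pattern, with hypothetical resources (pair_init and its two allocations) standing in for the per-class buckets and the default bucket:

#include <stdlib.h>

/*
 * Sketch of the goto-unwind idiom from Example #1; the names here
 * are illustrative stand-ins, not part of the library.
 */
struct pair {
	void *a;
	void *b;
};

static int
pair_init(struct pair *p)
{
	p->a = malloc(16);
	if (p->a == NULL)
		goto err_a;

	p->b = malloc(32);
	if (p->b == NULL)
		goto err_b;

	return 0; /* both resources live; caller owns them */

err_b:
	free(p->a);
err_a:
	return -1;
}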
Example #2
/*
 * heap_ensure_bucket_filled -- (internal) refills the bucket if needed
 */
static void
heap_ensure_bucket_filled(PMEMobjpool *pop, struct bucket *b, int force)
{
	if (!force && !bucket_is_empty(b))
		return;

	if (!bucket_is_small(b)) {
		/* not much to do here apart from using the next zone */
		heap_populate_buckets(pop);
		return;
	}

	struct bucket *def_bucket = heap_get_default_bucket(pop);

	struct memory_block m = {0, 0, 1, 0};
	if (heap_get_bestfit_block(pop, def_bucket, &m) != 0)
		return; /* OOM */

	ASSERT(m.block_off == 0);

	heap_populate_run_bucket(pop, b, m.chunk_id, m.zone_id);
}
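
The brace initializer `{0, 0, 1, 0}` in Example #2 depends on the declaration order of the fields in struct memory_block. Assuming the conventional layout (chunk_id, zone_id, size_idx, block_off), a designated-initializer form makes the intent explicit; treat the field order as an assumption, not something verified here:

/*
 * Sketch: the same request spelled out with designated initializers;
 * the field layout is assumed from how m is used in Example #2.
 */
struct memory_block m = {
	.chunk_id = 0,
	.zone_id = 0,
	.size_idx = 1,	/* ask the default bucket for one whole chunk */
	.block_off = 0,
};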
Example #3
/*
 * heap_ensure_huge_bucket_filled --
 *	(internal) refills the default bucket if needed
 */
static int
heap_ensure_huge_bucket_filled(struct palloc_heap *heap)
{
	return (heap_reclaim_garbage(heap) == 0 ||
		heap_populate_buckets(heap) == 0) ? 0 : ENOMEM;
}
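
The short-circuit OR in Example #3 compresses a two-step fallback: try to reclaim garbage first, and only claim a fresh zone when reclamation yields nothing. Unrolled into explicit control flow (a behavior-preserving sketch of the same logic):

#include <errno.h>

/*
 * Sketch: heap_ensure_huge_bucket_filled() with the short-circuit
 * OR unrolled; the behavior is identical to Example #3.
 */
static int
heap_ensure_huge_bucket_filled_explicit(struct palloc_heap *heap)
{
	/* cheapest source first: recycle freed chunks */
	if (heap_reclaim_garbage(heap) == 0)
		return 0;

	/* fall back to claiming the next unused zone */
	if (heap_populate_buckets(heap) == 0)
		return 0;

	return ENOMEM; /* both sources exhausted */
}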
Example #4
/*
 * heap_buckets_init -- (internal) initializes bucket instances
 */
static int
heap_buckets_init(PMEMobjpool *pop)
{
	struct pmalloc_heap *h = pop->heap;
	int i;

	bucket_proto[0].unit_max = RUN_UNIT_MAX;

	/*
	 * To make use of every single bit available in the run, the unit
	 * size would have to be calculated using the following expression:
	 * (RUNSIZE / (MAX_BITMAP_VALUES * BITS_PER_VALUE)), but to preserve
	 * cacheline alignment a little bit of memory at the end of the run
	 * is left unused.
	 */
	bucket_proto[0].unit_size = MIN_RUN_SIZE;

	for (i = 1; i < MAX_BUCKETS - 1; ++i) {
		bucket_proto[i].unit_max = RUN_UNIT_MAX;
		bucket_proto[i].unit_size =
				bucket_proto[i - 1].unit_size *
				bucket_proto[i - 1].unit_max;
	}

	/* the last bucket serves huge allocations in chunk-sized units */
	bucket_proto[i].unit_max = -1;
	bucket_proto[i].unit_size = CHUNKSIZE;

	/* largest request still serviced from a run, not a whole chunk */
	h->last_run_max_size = bucket_proto[i - 1].unit_size *
				(bucket_proto[i - 1].unit_max - 1);

	h->bucket_map = Malloc(sizeof (*h->bucket_map) * h->last_run_max_size);
	if (h->bucket_map == NULL)
		goto error_bucket_map_malloc;

	for (i = 0; i < MAX_BUCKETS; ++i) {
		h->buckets[i] = bucket_new(bucket_proto[i].unit_size,
					bucket_proto[i].unit_max);
		if (h->buckets[i] == NULL)
			goto error_bucket_new;
	}

	/* XXX better way to fill the bucket map */
	for (i = 0; i < h->last_run_max_size; ++i) {
		for (int j = 0; j < MAX_BUCKETS - 1; ++j) {
			/*
			 * Skip the last unit so that the distribution
			 * of buckets in the map is better.
			 */
			if (bucket_proto[j].unit_size *
				(bucket_proto[j].unit_max - 1) >= i) {
				h->bucket_map[i] = h->buckets[j];
				break;
			}
		}
	}

	heap_populate_buckets(pop);

	return 0;

error_bucket_new:
	Free(h->bucket_map);

	for (i = i - 1; i >= 0; --i)
		bucket_delete(h->buckets[i]);
error_bucket_map_malloc:

	return ENOMEM;
}
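
The size classes in Example #4 form a geometric progression: each run bucket's unit_size is the previous bucket's unit_size multiplied by its unit_max. A self-contained sketch of that arithmetic, using placeholder constants rather than the library's real values:

#include <stdio.h>

/* illustrative placeholders, not the library's actual constants */
#define MIN_RUN_SIZE	128
#define RUN_UNIT_MAX	8
#define MAX_BUCKETS	4

int
main(void)
{
	size_t unit_size[MAX_BUCKETS];

	/* same recurrence as Example #4: size[i] = size[i-1] * max */
	unit_size[0] = MIN_RUN_SIZE;
	for (int i = 1; i < MAX_BUCKETS - 1; ++i)
		unit_size[i] = unit_size[i - 1] * RUN_UNIT_MAX;

	/* with these placeholders: 128, 1024, 8192 */
	for (int i = 0; i < MAX_BUCKETS - 1; ++i)
		printf("bucket %d: unit_size = %zu\n", i, unit_size[i]);

	/* mirrors last_run_max_size: 8192 * (8 - 1) = 57344 */
	printf("last_run_max_size = %zu\n",
		unit_size[MAX_BUCKETS - 2] * (RUN_UNIT_MAX - 1));

	return 0;
}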