Example #1
File: heap.c  Project: tomaszkapela/nvml
/*
 * heap_coalesce_huge -- finds neighbours of a huge block, removes them from the
 *	volatile state and returns the resulting block
 */
struct memory_block
heap_coalesce_huge(struct palloc_heap *heap, const struct memory_block *m)
{
	const struct memory_block *blocks[3] = {NULL, m, NULL};

	struct bucket *b = heap_get_default_bucket(heap);

	struct memory_block prev = MEMORY_BLOCK_NONE;
	if (heap_get_adjacent_free_block(heap, m, &prev, 1) == 0 &&
		b->c_ops->get_rm_exact(b->container, &prev) == 0) {
		blocks[0] = &prev;
	}

	struct memory_block next = MEMORY_BLOCK_NONE;
	if (heap_get_adjacent_free_block(heap, m, &next, 0) == 0 &&
		b->c_ops->get_rm_exact(b->container, &next) == 0) {
		blocks[2] = &next;
	}

	return heap_coalesce(heap, blocks, 3);
}
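
For context, a minimal sketch (not part of the project) of how a huge-block free path might drive this coalescing, loosely modelled on the call in heap_reclaim_run shown in Example #4 below; heap_free_huge_sketch is a hypothetical name and locking of the default bucket is omitted.

/*
 * heap_free_huge_sketch -- (illustrative sketch, not project code) coalesces
 *	a freed huge block with its free neighbours and returns the merged
 *	block to the default bucket
 */
static void
heap_free_huge_sketch(struct palloc_heap *heap, const struct memory_block *m)
{
	struct bucket *defb = heap_get_default_bucket(heap);

	/* merge with adjacent free blocks pulled out of the default bucket */
	struct memory_block nb = heap_coalesce_huge(heap, m);

	/* hand the merged block back for future huge allocations */
	bucket_insert_block(defb, &nb);
}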
Example #2
File: heap.c  Project: sudkannan/nvml
/*
 * heap_ensure_bucket_filled -- (internal) refills the bucket if needed
 */
static void
heap_ensure_bucket_filled(PMEMobjpool *pop, struct bucket *b, int force)
{
	if (!force && !bucket_is_empty(b))
		return;

	if (!bucket_is_small(b)) {
		/* not much to do here apart from using the next zone */
		heap_populate_buckets(pop);
		return;
	}

	struct bucket *def_bucket = heap_get_default_bucket(pop);

	struct memory_block m = {0, 0, 1, 0};
	if (heap_get_bestfit_block(pop, def_bucket, &m) != 0)
		return; /* OOM */

	ASSERT(m.block_off == 0);

	heap_populate_run_bucket(pop, b, m.chunk_id, m.zone_id);
}
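
A minimal illustrative sketch of a caller, assuming the same internal declarations are in scope: refill the bucket if it has run dry, then ask it for a best-fit block. heap_get_block_sketch is a hypothetical name; the return convention (0 on success, nonzero on OOM) follows heap_get_bestfit_block above.

/*
 * heap_get_block_sketch -- (illustrative sketch, not project code) refills
 *	the bucket when empty and requests a best-fit block of the given
 *	size_idx
 */
static int
heap_get_block_sketch(PMEMobjpool *pop, struct bucket *b, uint32_t size_idx)
{
	struct memory_block m = {0, 0, 0, 0};
	m.size_idx = size_idx;

	/* refill only if the bucket is actually empty (force == 0) */
	heap_ensure_bucket_filled(pop, b, 0);

	/* nonzero means the heap could not satisfy the request (OOM) */
	return heap_get_bestfit_block(pop, b, &m);
}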
Example #3
File: heap.c  Project: tomaszkapela/nvml
/*
 * heap_ensure_run_bucket_filled -- (internal) refills the bucket if needed
 */
static int
heap_ensure_run_bucket_filled(struct palloc_heap *heap, struct bucket *b,
	uint32_t units)
{
	ASSERTeq(b->aclass->type, CLASS_RUN);

	if (b->is_active) {
		b->c_ops->rm_all(b->container);
		b->active_memory_block.m_ops
			->claim_revoke(&b->active_memory_block);

		b->is_active = 0;
	}

	struct heap_rt *h = heap->rt;
	struct memory_block m = MEMORY_BLOCK_NONE;

	if (recycler_get(h->recyclers[b->aclass->id], &m) == 0) {
		pthread_mutex_t *lock = m.m_ops->get_lock(&m);

		util_mutex_lock(lock);
		heap_reuse_run(heap, b, &m);
		util_mutex_unlock(lock);

		b->active_memory_block = m;
		b->is_active = 1;

		return 0;
	}

	m.size_idx = b->aclass->run.size_idx;

	/* cannot reuse an existing run, create a new one */
	struct bucket *defb = heap_get_default_bucket(heap);
	util_mutex_lock(&defb->lock);
	if (heap_get_bestfit_block(heap, defb, &m) == 0) {
		ASSERTeq(m.block_off, 0);

		heap_create_run(heap, b, &m);

		b->active_memory_block = m;
		b->is_active = 1;

		util_mutex_unlock(&defb->lock);
		return 0;
	}
	util_mutex_unlock(&defb->lock);

	/*
	 * Try the recycler again; the previous call to heap_get_bestfit_block
	 * for huge chunks might have reclaimed some unused runs.
	 */
	if (recycler_get(h->recyclers[b->aclass->id], &m) == 0) {
		pthread_mutex_t *lock = m.m_ops->get_lock(&m);
		util_mutex_lock(lock);
		heap_reuse_run(heap, b, &m);
		util_mutex_unlock(lock);

		/*
		 * To check whether the run returned by the recycler can actually
		 * satisfy our request, we attempt to retrieve a block from it.
		 * This is not ideal and should be replaced by a different
		 * heuristic once proper memory block scoring is implemented.
		 */
		struct memory_block tmp = MEMORY_BLOCK_NONE;
		tmp.size_idx = units;
		if (b->c_ops->get_rm_bestfit(b->container, &tmp) != 0) {
			b->c_ops->rm_all(b->container);
			m.m_ops->claim_revoke(&m);
			return ENOMEM;
		} else {
			bucket_insert_block(b, &tmp);
		}

		b->active_memory_block = m;
		b->is_active = 1;

		return 0;
	}

	return ENOMEM;
}
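
To illustrate the intended calling convention, a hypothetical retry loop around this function; heap_get_run_block_sketch and its error handling are assumptions, not project code.

/*
 * heap_get_run_block_sketch -- (illustrative sketch, not project code) keeps
 *	attaching recycled or new runs to the bucket until it can hand out a
 *	block of the requested number of units
 */
static int
heap_get_run_block_sketch(struct palloc_heap *heap, struct bucket *b,
	uint32_t units, struct memory_block *out)
{
	struct memory_block m = MEMORY_BLOCK_NONE;
	m.size_idx = units;

	/* try the bucket first; refill it with a run on every miss */
	while (b->c_ops->get_rm_bestfit(b->container, &m) != 0) {
		if (heap_ensure_run_bucket_filled(heap, b, units) != 0)
			return ENOMEM; /* no run can provide the requested units */
	}

	*out = m;
	return 0;
}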
Example #4
File: heap.c  Project: tomaszkapela/nvml
/*
 * heap_reclaim_run -- checks the run for available memory if unclaimed.
 *
 * Returns 1 if the chunk was reclaimed, 0 otherwise.
 */
static int
heap_reclaim_run(struct palloc_heap *heap, struct chunk_run *run,
	struct memory_block *m)
{
	if (m->m_ops->claim(m) != 0)
		return 0; /* this run already has an owner */

	struct alloc_class *c = alloc_class_get_create_by_unit_size(
		heap->rt->alloc_classes, run->block_size);
	if (c == NULL)
		return 0;

	ASSERTeq(c->type, CLASS_RUN);

	pthread_mutex_t *lock = m->m_ops->get_lock(m);
	util_mutex_lock(lock);

	unsigned i;
	unsigned nval = c->run.bitmap_nval;
	for (i = 0; nval > 0 && i < nval - 1; ++i)
		if (run->bitmap[i] != 0)
			break;

	int empty = (i == (nval - 1)) &&
		(run->bitmap[i] == c->run.bitmap_lastval);
	if (empty) {
		struct zone *z = ZID_TO_ZONE(heap->layout, m->zone_id);
		struct chunk_header *hdr = &z->chunk_headers[m->chunk_id];
		struct bucket *defb = heap_get_default_bucket(heap);

		/*
		 * The redo log ptr can be NULL if we are sure that there's only
		 * one persistent value modification in the entire operation
		 * context.
		 */
		struct operation_context ctx;
		operation_init(&ctx, heap->base, NULL, NULL);
		ctx.p_ops = &heap->p_ops;

		struct memory_block nb = MEMORY_BLOCK_NONE;
		nb.chunk_id = m->chunk_id;
		nb.zone_id = m->zone_id;
		nb.block_off = 0;
		nb.size_idx = m->size_idx;

		heap_chunk_init(heap, hdr, CHUNK_TYPE_FREE, nb.size_idx);
		memblock_rebuild_state(heap, &nb);

		nb = heap_coalesce_huge(heap, &nb);
		nb.m_ops->prep_hdr(&nb, MEMBLOCK_FREE, &ctx);

		operation_process(&ctx);

		bucket_insert_block(defb, &nb);

		*m = nb;
	} else {
		recycler_put(heap->rt->recyclers[c->id], m);
	}

	util_mutex_unlock(lock);

	return empty;
}
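
Finally, a short sketch of how a caller might react to the return value; heap_try_reclaim_sketch is a hypothetical wrapper, not project code.

/*
 * heap_try_reclaim_sketch -- (illustrative sketch, not project code) shows
 *	the calling convention of heap_reclaim_run
 */
static int
heap_try_reclaim_sketch(struct palloc_heap *heap, struct chunk_run *run,
	struct memory_block *m)
{
	if (heap_reclaim_run(heap, run, m)) {
		/*
		 * The run was completely free: *m now describes the coalesced
		 * huge block, which heap_reclaim_run already inserted into the
		 * default bucket.
		 */
		return 1;
	}

	/*
	 * The run was already claimed, had no matching allocation class, or
	 * still holds live blocks (in which case it was handed to the
	 * recycler for reuse by run buckets).
	 */
	return 0;
}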