Example #1
/*
 * palloc_exec_actions -- perform the provided free/alloc operations
 */
static void
palloc_exec_actions(struct palloc_heap *heap,
	struct operation_context *ctx,
	struct pobj_action_internal *actv,
	int actvcnt)
{
	/*
	 * The operations array is sorted so that proper lock ordering is
	 * ensured.
	 */
	qsort(actv, (size_t)actvcnt, sizeof(struct pobj_action_internal),
		palloc_action_compare);

	struct pobj_action_internal *act;
	for (int i = 0; i < actvcnt; ++i) {
		act = &actv[i];

		/*
		 * This lock must be held for the duration between the creation
		 * of the allocation metadata updates in the operation context
		 * and the operation processing. This is because a different
		 * thread might operate on the same 8-byte value of the run
		 * bitmap and override allocation performed by this thread.
		 */
		if (i == 0 || act->lock != actv[i - 1].lock) {
			if (act->lock)
				util_mutex_lock(act->lock);
		}

		action_funcs[act->type].exec(heap, act, ctx);
	}

	/* wait for all the headers to be persistent */
	pmemops_drain(&heap->p_ops);

	operation_process(ctx);

	for (int i = 0; i < actvcnt; ++i) {
		act = &actv[i];

		action_funcs[act->type].on_process(heap, act);

		if (i == 0 || act->lock != actv[i - 1].lock) {
			if (act->lock)
				util_mutex_unlock(act->lock);
		}
	}

	for (int i = 0; i < actvcnt; ++i) {
		act = &actv[i];

		action_funcs[act->type].on_unlock(heap, act);
	}
}
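What makes this safe in the presence of other threads is the sort at the top: ordering the actions by lock address gives every thread the same canonical acquisition order, and taking each distinct lock only once per run of equal values avoids self-deadlock on duplicates. Below is a minimal, self-contained sketch of that pattern using plain pthread mutexes; the names (struct action, exec_actions) are illustrative, not part of the PMDK API.

#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>

struct action {
	pthread_mutex_t *lock;	/* may be NULL for lock-free actions */
	void (*exec)(struct action *a);
};

/* order actions by lock address to get a canonical locking order */
static int
action_cmp(const void *lhs, const void *rhs)
{
	uintptr_t l = (uintptr_t)((const struct action *)lhs)->lock;
	uintptr_t r = (uintptr_t)((const struct action *)rhs)->lock;

	if (l < r)
		return -1;
	if (l > r)
		return 1;
	return 0;
}

static void
exec_actions(struct action *actv, size_t cnt)
{
	qsort(actv, cnt, sizeof(actv[0]), action_cmp);

	for (size_t i = 0; i < cnt; ++i) {
		/* take each distinct lock once per run of equal locks */
		if ((i == 0 || actv[i].lock != actv[i - 1].lock) &&
				actv[i].lock != NULL)
			pthread_mutex_lock(actv[i].lock);

		actv[i].exec(&actv[i]);
	}

	for (size_t i = 0; i < cnt; ++i) {
		/* the array is still sorted, so the same test works here */
		if ((i == 0 || actv[i].lock != actv[i - 1].lock) &&
				actv[i].lock != NULL)
			pthread_mutex_unlock(actv[i].lock);
	}
}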
Example #2
/*
 * heap_init_free_chunk -- initializes free chunk transient state
 */
static void
heap_init_free_chunk(struct palloc_heap *heap,
	struct chunk_header *hdr,
	struct memory_block *m)
{
	struct operation_context ctx;
	operation_init(&ctx, heap->base, NULL, NULL);
	ctx.p_ops = &heap->p_ops;
	heap_chunk_write_footer(hdr, hdr->size_idx);
	/*
	 * Perform coalescing just in case there
	 * are any neighbouring free chunks.
	 */
	struct memory_block nm = heap_coalesce_huge(heap, m);
	if (nm.chunk_id != m->chunk_id) {
		m->m_ops->prep_hdr(&nm, MEMBLOCK_FREE, &ctx);
		operation_process(&ctx);
	}
	*m = nm;
	bucket_insert_block(heap->rt->default_bucket, m);
}
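The call to heap_coalesce_huge is what keeps the free-chunk picture defragmented: the freed chunk is folded together with any adjacent free chunks before it lands in the default bucket. A deliberately simplified sketch of the span computation follows, assuming unit-size chunks tracked in a flat free-flag array; the real heap walks whole runs via chunk headers and footers, and the names here are illustrative.

#include <stdint.h>

/*
 * coalesce_span -- (sketch) find the maximal run of consecutive free
 * chunks containing chunk 'id'; *first and *count describe the merged
 * block that would replace them
 */
static void
coalesce_span(const uint8_t *is_free, uint32_t nchunks, uint32_t id,
	uint32_t *first, uint32_t *count)
{
	uint32_t lo = id;
	uint32_t hi = id;

	/* walk left over free neighbours */
	while (lo > 0 && is_free[lo - 1])
		lo--;

	/* walk right over free neighbours */
	while (hi + 1 < nchunks && is_free[hi + 1])
		hi++;

	*first = lo;
	*count = hi - lo + 1;
}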
Example #3
static void
test_heap()
{
	struct mock_pop *mpop = Malloc(MOCK_POOL_SIZE);
	PMEMobjpool *pop = &mpop->p;
	memset(pop, 0, MOCK_POOL_SIZE);
	pop->size = MOCK_POOL_SIZE;
	pop->heap_size = MOCK_POOL_SIZE - sizeof(PMEMobjpool);
	pop->heap_offset = (uint64_t)((uint64_t)&mpop->heap - (uint64_t)mpop);
	pop->persist = obj_heap_persist;

	UT_ASSERT(heap_check(pop) != 0);
	UT_ASSERT(heap_init(pop) == 0);
	UT_ASSERT(heap_boot(pop) == 0);
	UT_ASSERT(pop->heap != NULL);

	struct bucket *b_small = heap_get_best_bucket(pop, 1);
	struct bucket *b_big = heap_get_best_bucket(pop, 2048);

	UT_ASSERT(b_small->unit_size < b_big->unit_size);

	struct bucket *b_def = heap_get_best_bucket(pop, CHUNKSIZE);
	UT_ASSERT(b_def->unit_size == CHUNKSIZE);

	/* the newly created buckets should be run buckets */
	UT_ASSERT(b_small->type == BUCKET_RUN);
	UT_ASSERT(b_big->type == BUCKET_RUN);

	struct memory_block blocks[MAX_BLOCKS] = {
		{0, 0, 1, 0},
		{0, 0, 1, 0},
		{0, 0, 1, 0}
	};

	for (int i = 0; i < MAX_BLOCKS; ++i) {
		heap_get_bestfit_block(pop, b_def, &blocks[i]);
		UT_ASSERT(blocks[i].block_off == 0);
	}

	struct memory_block *blocksp[MAX_BLOCKS] = {NULL};

	struct memory_block prev;
	heap_get_adjacent_free_block(pop, b_def, &prev, blocks[1], 1);
	UT_ASSERT(prev.chunk_id == blocks[0].chunk_id);
	blocksp[0] = &prev;

	struct memory_block cnt;
	heap_get_adjacent_free_block(pop, b_def, &cnt, blocks[0], 0);
	UT_ASSERT(cnt.chunk_id == blocks[1].chunk_id);
	blocksp[1] = &cnt;

	struct memory_block next;
	heap_get_adjacent_free_block(pop, b_def, &next, blocks[1], 0);
	UT_ASSERT(next.chunk_id == blocks[2].chunk_id);
	blocksp[2] = &next;

	struct operation_context *ctx = operation_init(pop, NULL);
	struct memory_block result =
		heap_coalesce(pop, blocksp, MAX_BLOCKS, HEAP_OP_FREE, ctx);
	operation_process(ctx);
	operation_delete(ctx);

	UT_ASSERT(result.size_idx == 3);
	UT_ASSERT(result.chunk_id == prev.chunk_id);

	UT_ASSERT(heap_check(pop) == 0);
	heap_cleanup(pop);
	UT_ASSERT(pop->heap == NULL);

	Free(mpop);
}
Example #4
/*
 * heap_reclaim_run -- checks an unclaimed run for available memory.
 *
 * Returns 1 if the chunk was reclaimed, 0 otherwise.
 */
static int
heap_reclaim_run(struct palloc_heap *heap, struct chunk_run *run,
	struct memory_block *m)
{
	if (m->m_ops->claim(m) != 0)
		return 0; /* this run already has an owner */

	struct alloc_class *c = alloc_class_get_create_by_unit_size(
		heap->rt->alloc_classes, run->block_size);
	if (c == NULL)
		return 0;

	ASSERTeq(c->type, CLASS_RUN);

	pthread_mutex_t *lock = m->m_ops->get_lock(m);
	util_mutex_lock(lock);

	unsigned i;
	unsigned nval = c->run.bitmap_nval;
	for (i = 0; nval > 0 && i < nval - 1; ++i)
		if (run->bitmap[i] != 0)
			break;

	int empty = (i == (nval - 1)) &&
		(run->bitmap[i] == c->run.bitmap_lastval);
	if (empty) {
		struct zone *z = ZID_TO_ZONE(heap->layout, m->zone_id);
		struct chunk_header *hdr = &z->chunk_headers[m->chunk_id];
		struct bucket *defb = heap_get_default_bucket(heap);

		/*
		 * The redo log ptr can be NULL if we are sure that there's only
		 * one persistent value modification in the entire operation
		 * context.
		 */
		struct operation_context ctx;
		operation_init(&ctx, heap->base, NULL, NULL);
		ctx.p_ops = &heap->p_ops;

		struct memory_block nb = MEMORY_BLOCK_NONE;
		nb.chunk_id = m->chunk_id;
		nb.zone_id = m->zone_id;
		nb.block_off = 0;
		nb.size_idx = m->size_idx;

		heap_chunk_init(heap, hdr, CHUNK_TYPE_FREE, nb.size_idx);
		memblock_rebuild_state(heap, &nb);

		nb = heap_coalesce_huge(heap, &nb);
		nb.m_ops->prep_hdr(&nb, MEMBLOCK_FREE, &ctx);

		operation_process(&ctx);

		bucket_insert_block(defb, &nb);

		*m = nb;
	} else {
		recycler_put(heap->rt->recyclers[c->id], m);
	}

	util_mutex_unlock(lock);

	return empty;
}
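The emptiness test above relies on two conventions: a set bit marks an allocated block, and the unused padding bits of the final bitmap word are permanently set, so a fully free run has all-zero leading words and a last word exactly equal to that constant mask (bitmap_lastval). A standalone sketch of the same check, with illustrative names:

#include <stdint.h>

/*
 * run_is_empty -- (sketch) nonzero if no block in the run is allocated
 *
 * 'lastval' holds the permanently set padding bits of the final word.
 */
static int
run_is_empty(const uint64_t *bitmap, unsigned nval, uint64_t lastval)
{
	if (nval == 0)
		return 1;

	/* any set bit in a leading word means an allocated block */
	for (unsigned i = 0; i < nval - 1; ++i)
		if (bitmap[i] != 0)
			return 0;

	/* the last word may contain only the padding bits */
	return bitmap[nval - 1] == lastval;
}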
Example #5
/*
 * palloc_operation -- persistent memory operation. Takes a NULL pointer
 *	or an existing memory block and modifies it to occupy at least
 *	'size' bytes.
 *
 * The malloc, free and realloc routines are implemented in the context of this
 * common operation which encompasses all of the functionality usually done
 * separately in those methods.
 *
 * The first thing that needs to be done is determining which memory blocks
 * will be affected by the operation - this varies depending on whether the
 * operation will need to modify or free an existing block and/or allocate
 * a new one.
 *
 * Simplified allocation process flow is as follows:
 *	- reserve a new block in the transient heap
 *	- prepare the new block
 *	- create redo log of required modifications
 *		- chunk metadata
 *		- offset of the new object
 *	- commit and process the redo log
 *
 * And similarly, the deallocation process:
 *	- create redo log of required modifications
 *		- reverse the chunk metadata back to the 'free' state
 *		- set the destination of the object offset to zero
 *	- commit and process the redo log
 *	- return the memory block back to the free blocks transient heap
 *
 * Reallocation is a combination of the above, with one additional step
 * of copying the old content.
 */
int
palloc_operation(struct palloc_heap *heap,
	uint64_t off, uint64_t *dest_off, size_t size,
	palloc_constr constructor, void *arg,
	struct operation_context *ctx)
{
	struct bucket *b = NULL;
	struct allocation_header *alloc = NULL;
	struct memory_block existing_block = {0, 0, 0, 0};
	struct memory_block new_block = {0, 0, 0, 0};
	struct memory_block reclaimed_block = {0, 0, 0, 0};

	int ret = 0;

	/*
	 * These two locks are responsible for protecting the metadata of the
	 * persistent representation of a chunk. Depending on the operation and
	 * the type of the chunk, they might be NULL.
	 */
	pthread_mutex_t *existing_block_lock = NULL;
	pthread_mutex_t *new_block_lock = NULL;

	size_t sizeh = size + sizeof(struct allocation_header);

	/*
	 * A nonzero offset of an existing block means this operation is either
	 * a free or a realloc - either way the offset of the object needs to
	 * be translated into the structure that all of the heap methods
	 * operate on.
	 */
	if (off != 0) {
		alloc = ALLOC_GET_HEADER(heap, off);
		existing_block = get_mblock_from_alloc(heap, alloc);
		/*
		 * This lock must be held until the operation is processed
		 * successfully, because other threads might operate on the
		 * same bitmap value.
		 */
		existing_block_lock = MEMBLOCK_OPS(AUTO, &existing_block)->
				get_lock(&existing_block, heap);
		if (existing_block_lock != NULL)
			util_mutex_lock(existing_block_lock);

#ifdef DEBUG
		if (MEMBLOCK_OPS(AUTO,
			&existing_block)->get_state(&existing_block, heap) !=
				MEMBLOCK_ALLOCATED) {
			ERR("Double free or heap corruption");
			ASSERT(0);
		}
#endif /* DEBUG */

		/*
		 * The memory block must be returned to the originating bucket,
		 * otherwise coalescing of neighbouring blocks will be rendered
		 * impossible.
		 *
		 * If the block was allocated in a different incarnation of the
		 * heap (i.e. the application was restarted) and the chunk the
		 * allocation comes from has not yet been processed, the
		 * originating bucket does not exist and all of the otherwise
		 * necessary volatile heap modifications won't be performed for
		 * this memory block.
		 */
		b = heap_get_chunk_bucket(heap, alloc->chunk_id,
				alloc->zone_id);
	}

	/* if allocation or reallocation, reserve new memory */
	if (size != 0) {
		/* reallocation to exactly the same size, which is a no-op */
		if (alloc != NULL && alloc->size == sizeh)
			goto out;

		errno = alloc_reserve_block(heap, &new_block, sizeh);
		if (errno != 0) {
			ret = -1;
			goto out;
		}
	}


	/*
	 * The offset value which is to be written to the destination pointer
	 * provided by the caller.
	 */
	uint64_t offset_value = 0;

	/* lock and persistently free the existing memory block */
	if (!MEMORY_BLOCK_IS_EMPTY(existing_block)) {
		/*
		 * This method will insert new entries into the operation
		 * context which will, after processing, update the chunk
		 * metadata to 'free' - it also takes care of all the necessary
		 * coalescing of blocks.
		 * Even though the transient state of the heap is used during
		 * this method to locate neighbouring blocks, it isn't modified.
		 *
		 * The reclaimed block is the coalesced memory block that the
		 * free resulted in; to prevent a volatile memory leak it needs
		 * to be inserted into the corresponding bucket.
		 */
		reclaimed_block = heap_free_block(heap, b, existing_block, ctx);
		offset_value = 0;
	}

	if (!MEMORY_BLOCK_IS_EMPTY(new_block)) {
		if (alloc_prep_block(heap, new_block, constructor,
				arg, &offset_value) != 0) {
			/*
			 * The constructor returned a non-zero value, which
			 * means the memory block reservation has to be rolled
			 * back.
			 */
			struct bucket *new_bucket = heap_get_chunk_bucket(heap,
				new_block.chunk_id, new_block.zone_id);
			ASSERTne(new_bucket, NULL);

			/*
			 * Omitting the context in this method results in
			 * coalescing of blocks without affecting the persistent
			 * heap state.
			 */
			new_block = heap_free_block(heap, new_bucket,
					new_block, NULL);
			CNT_OP(new_bucket, insert, heap, new_block);

			if (new_bucket->type == BUCKET_RUN)
				heap_degrade_run_if_empty(heap,
					new_bucket, new_block);

			errno = ECANCELED;
			ret = -1;
			goto out;
		}

		/*
		 * This lock must be held for the duration between the creation
		 * of the allocation metadata updates in the operation context
		 * and the operation processing. This is because a different
		 * thread might operate on the same 8-byte value of the run
		 * bitmap and override allocation performed by this thread.
		 */
		new_block_lock = MEMBLOCK_OPS(AUTO, &new_block)->
			get_lock(&new_block, heap);

		/* the locks might be identical in the case of realloc */
		if (new_block_lock == existing_block_lock)
			new_block_lock = NULL;

		if (new_block_lock != NULL)
			util_mutex_lock(new_block_lock);

#ifdef DEBUG
		if (MEMBLOCK_OPS(AUTO,
			&new_block)->get_state(&new_block, heap) !=
				MEMBLOCK_FREE) {
			ERR("Double free or heap corruption");
			ASSERT(0);
		}
#endif /* DEBUG */

		/*
		 * The actual required metadata modifications are chunk-type
		 * dependent, but it is always a modification of a single
		 * 8-byte value - either flipping a few bits in a bitmap or
		 * changing a chunk type from free to used.
		 */
		MEMBLOCK_OPS(AUTO, &new_block)->prep_hdr(&new_block,
				heap, MEMBLOCK_ALLOCATED, ctx);
	}

	/* not in-place realloc */
	if (!MEMORY_BLOCK_IS_EMPTY(existing_block) &&
		!MEMORY_BLOCK_IS_EMPTY(new_block)) {
		size_t old_size = alloc->size;
		size_t to_cpy = old_size > sizeh ? sizeh : old_size;
		VALGRIND_ADD_TO_TX(PMALLOC_OFF_TO_PTR(heap, offset_value),
			to_cpy - ALLOC_OFF);
		pmemops_memcpy_persist(&heap->p_ops,
			PMALLOC_OFF_TO_PTR(heap, offset_value),
			PMALLOC_OFF_TO_PTR(heap, off),
			to_cpy - ALLOC_OFF);
		VALGRIND_REMOVE_FROM_TX(PMALLOC_OFF_TO_PTR(heap, offset_value),
			to_cpy - ALLOC_OFF);
	}

	/*
	 * If the caller provided a destination value to update, it needs to be
	 * modified atomically alongside the heap metadata, and so the
	 * operation context must be used.
	 * The actual offset value depends on the operation type.
	 */
	if (dest_off != NULL)
		operation_add_entry(ctx, dest_off, offset_value, OPERATION_SET);

	operation_process(ctx);

	/*
	 * After the operation has succeeded, the persistent state is in order,
	 * but in some cases it might not be in sync with its transient
	 * representation.
	 */
	if (!MEMORY_BLOCK_IS_EMPTY(existing_block)) {
		VALGRIND_DO_MEMPOOL_FREE(heap->layout,
			(char *)heap_get_block_data(heap, existing_block)
			+ ALLOC_OFF);

		/* we might have been operating on inactive run */
		if (b != NULL) {
			/*
			 * Even though the initial condition is to check
			 * whether the existing block exists it's important to
			 * use the 'reclaimed block' - it is the coalesced one
			 * and reflects the current persistent heap state,
			 * whereas the existing block reflects the state from
			 * before this operation started.
			 */
			CNT_OP(b, insert, heap, reclaimed_block);

			/*
			 * Degrading of a run means turning it back into a chunk
			 * in case it's no longer needed.
			 * It might be tempting to defer this operation until
			 * such time that the chunk is actually needed, but
			 * right now the decision is to keep the persistent heap
			 * state as clean as possible - and that means not
			 * leaving unused data around.
			 */
			if (b->type == BUCKET_RUN)
				heap_degrade_run_if_empty(heap, b,
					reclaimed_block);
		}
	}

out:
	if (new_block_lock != NULL)
		util_mutex_unlock(new_block_lock);

	if (existing_block_lock != NULL)
		util_mutex_unlock(existing_block_lock);

	return ret;
}
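Everything in this routine funnels through the operation context, which behaves like a redo log: each required 8-byte store (a bitmap word, a chunk header, the caller's destination offset) is first recorded with operation_add_entry(), and operation_process() then applies the whole set. Below is a minimal volatile-memory sketch of that record-then-apply pattern; the names are hypothetical, and a persistent implementation would additionally persist and checksum the log before applying it, so that recovery can replay it after a crash.

#include <stddef.h>
#include <stdint.h>

#define MAX_ENTRIES 8

struct op_entry {
	uint64_t *dst;	/* destination of the 8-byte store */
	uint64_t value;	/* value to be written */
};

struct op_ctx {
	struct op_entry entries[MAX_ENTRIES];
	size_t nentries;
};

/* stage a store without performing it yet */
static int
op_add_entry(struct op_ctx *ctx, uint64_t *dst, uint64_t value)
{
	if (ctx->nentries == MAX_ENTRIES)
		return -1;

	ctx->entries[ctx->nentries].dst = dst;
	ctx->entries[ctx->nentries].value = value;
	ctx->nentries++;

	return 0;
}

/* apply all staged stores; a pmem version would persist the log first */
static void
op_process(struct op_ctx *ctx)
{
	for (size_t i = 0; i < ctx->nentries; ++i)
		*ctx->entries[i].dst = ctx->entries[i].value;

	ctx->nentries = 0;
}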