Example #1
/*
 * persist_alloc -- (internal) performs a persistent allocation of the
 *	memory block previously reserved by the volatile bucket
 */
static int
persist_alloc(PMEMobjpool *pop, struct lane_section *lane,
	struct memory_block m, uint64_t real_size, uint64_t *off,
	void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg),
	void *arg, uint64_t data_off)
{
	int err;

#ifdef DEBUG
	if (heap_block_is_allocated(pop, m)) {
		ERR("heap corruption");
		ASSERT(0);
	}
#endif /* DEBUG */

	uint64_t op_result = 0;

	void *block_data = heap_get_block_data(pop, m);
	void *datap = (char *)block_data + sizeof (struct allocation_header);
	void *userdatap = (char *)datap + data_off;

	ASSERT((uint64_t)block_data % _POBJ_CL_ALIGNMENT == 0);

	/* mark everything (including headers) as accessible */
	VALGRIND_DO_MAKE_MEM_UNDEFINED(pop, block_data, real_size);
	/* mark space as allocated */
	VALGRIND_DO_MEMPOOL_ALLOC(pop, userdatap,
			real_size -
			sizeof (struct allocation_header) - data_off);

	alloc_write_header(pop, block_data, m.chunk_id, m.zone_id, real_size);

	if (constructor != NULL)
		constructor(pop, userdatap, arg);

	if ((err = heap_lock_if_run(pop, m)) != 0) {
		VALGRIND_DO_MEMPOOL_FREE(pop, userdatap);
		return err;
	}

	void *hdr = heap_get_block_header(pop, m, HEAP_OP_ALLOC, &op_result);

	struct allocator_lane_section *sec =
		(struct allocator_lane_section *)lane->layout;

	redo_log_store(pop, sec->redo, ALLOC_OP_REDO_PTR_OFFSET,
		pop_offset(pop, off), pop_offset(pop, datap));
	redo_log_store_last(pop, sec->redo, ALLOC_OP_REDO_HEADER,
		pop_offset(pop, hdr), op_result);

	redo_log_process(pop, sec->redo, MAX_ALLOC_OP_REDO);

	if (heap_unlock_if_run(pop, m) != 0) {
		ERR("Failed to release run lock");
		ASSERT(0);
	}

	return err;
}
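
The persistent half of persist_alloc() above leans entirely on the redo log: the user's offset field and the chunk header are both queued as 8-byte stores (redo_log_store(), redo_log_store_last()) and then applied together by redo_log_process(), so a crash can never leave one updated without the other. What follows is a minimal, self-contained sketch of that general technique, not the library's redo_log_* implementation; the flush() stub stands in for whatever flush/drain primitive real persistent-memory code would use, and destinations are assumed 8-byte aligned so that bit 0 of the offset is free for the finish flag.

#include <stdint.h>
#include <stddef.h>
#include <string.h>

#define REDO_FINISH_FLAG	((uint64_t)1)	/* marks the last entry */
#define REDO_NUM_ENTRIES	4

struct redo_entry {
	uint64_t offset;	/* destination, relative to 'base' */
	uint64_t value;		/* 8-byte value to be written there */
};

/* stand-in for flushing a range of persistent memory */
static void
flush(const void *addr, size_t len)
{
	(void) addr;
	(void) len;
}

static void
redo_store(struct redo_entry *redo, size_t idx, uint64_t offset,
	uint64_t value, int last)
{
	redo[idx].value = value;
	/* the finish flag travels with the offset of the final entry */
	redo[idx].offset = offset | (last ? REDO_FINISH_FLAG : 0);
	flush(&redo[idx], sizeof (redo[idx]));
}

/*
 * Replaying the log is idempotent: whether it runs right after the last
 * store or again during recovery after a crash, the same set of 8-byte
 * writes is performed, so both destinations always change together.
 */
static void
redo_process(char *base, struct redo_entry *redo)
{
	/* an incomplete log (no finish flag anywhere) must not be replayed */
	size_t last = 0;
	while (last < REDO_NUM_ENTRIES &&
			!(redo[last].offset & REDO_FINISH_FLAG))
		last++;
	if (last == REDO_NUM_ENTRIES)
		return;

	for (size_t i = 0; i <= last; ++i) {
		uint64_t off = redo[i].offset & ~REDO_FINISH_FLAG;
		memcpy(base + off, &redo[i].value, sizeof (uint64_t));
		flush(base + off, sizeof (uint64_t));
	}

	/* invalidate the log so recovery does not apply it a second time */
	memset(redo, 0, sizeof (redo[0]) * REDO_NUM_ENTRIES);
	flush(redo, sizeof (redo[0]) * REDO_NUM_ENTRIES);
}

A caller would mirror the store/store_last/process triplet above: redo_store(redo, 0, off_a, val_a, 0); redo_store(redo, 1, off_b, val_b, 1); redo_process(base, redo);.
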
Example #2
/*
 * pfree -- deallocates a memory block previously allocated by pmalloc
 *
 * A zero value is written persistently into the off variable.
 */
void
pfree(PMEMobjpool *pop, uint64_t *off, uint64_t data_off)
{
	struct allocation_header *alloc = alloc_get_header(pop, *off);

	struct lane_section *lane;
	lane_hold(pop, &lane, LANE_SECTION_ALLOCATOR);

	struct bucket *b = heap_get_chunk_bucket(pop,
		alloc->chunk_id, alloc->zone_id);

	struct memory_block m = get_mblock_from_alloc(pop, alloc);

#ifdef DEBUG
	if (!heap_block_is_allocated(pop, m)) {
		ERR("Double free or heap corruption");
		ASSERT(0);
	}
#endif /* DEBUG */

	heap_lock_if_run(pop, m);

	uint64_t op_result;
	void *hdr;
	struct memory_block res = heap_free_block(pop, b, m, &hdr, &op_result);

	struct allocator_lane_section *sec =
		(struct allocator_lane_section *)lane->layout;

	redo_log_store(pop, sec->redo, ALLOC_OP_REDO_PTR_OFFSET,
		pop_offset(pop, off), 0);
	redo_log_store_last(pop, sec->redo, ALLOC_OP_REDO_HEADER,
		pop_offset(pop, hdr), op_result);

	redo_log_process(pop, sec->redo, MAX_ALLOC_OP_REDO);

	heap_unlock_if_run(pop, m);

	VALGRIND_DO_MEMPOOL_FREE(pop,
			(char *)alloc + sizeof (*alloc) + data_off);

	/* we might have been operating on an inactive run */
	if (b != NULL) {
		CNT_OP(b, insert, pop, res);

		if (b->type == BUCKET_RUN)
			heap_degrade_run_if_empty(pop, b, res);
	}

	lane_release(pop);
}
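
A hedged usage sketch for the void variant of pfree() above: the caller hands over a pointer to the persistent field that holds the object's offset, so that zeroing the field and releasing the block happen through the same redo log. struct my_root and free_foo() are illustrative names, not part of the library.

/* example persistent layout holding the offset of a pmalloc'd object */
struct my_root {
	uint64_t foo_off;	/* 0 means "not allocated" */
};

static void
free_foo(PMEMobjpool *pop, struct my_root *root)
{
	if (root->foo_off == 0)
		return;	/* nothing to free */

	/*
	 * data_off == 0 assumes the user data starts right after
	 * struct allocation_header, with no extra per-object header.
	 */
	pfree(pop, &root->foo_off, 0);

	/* at this point root->foo_off has been persistently set to zero */
}
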
Example #3
/*
 * pfree -- deallocates a memory block previously allocated by pmalloc
 *
 * A zero value is written persistently into the off variable.
 *
 * If successful, the function returns zero. Otherwise, an error number
 * is returned.
 */
int
pfree(PMEMobjpool *pop, uint64_t *off, uint64_t data_off)
{
	struct allocation_header *alloc = alloc_get_header(pop, *off);

	int err = 0;

	struct lane_section *lane;
	if ((err = lane_hold(pop, &lane, LANE_SECTION_ALLOCATOR)) != 0)
		return err;

	struct bucket *b = heap_get_chunk_bucket(pop,
		alloc->chunk_id, alloc->zone_id);

	struct memory_block m = get_mblock_from_alloc(pop, b, alloc);

#ifdef DEBUG
	if (!heap_block_is_allocated(pop, m)) {
		ERR("Double free or heap corruption");
		ASSERT(0);
	}
#endif /* DEBUG */

	if ((err = heap_lock_if_run(pop, m)) != 0)
		goto out;

	uint64_t op_result;
	void *hdr;
	struct memory_block res = heap_free_block(pop, b, m, &hdr, &op_result);

	struct allocator_lane_section *sec =
		(struct allocator_lane_section *)lane->layout;

	redo_log_store(pop, sec->redo, ALLOC_OP_REDO_PTR_OFFSET,
		pop_offset(pop, off), 0);
	redo_log_store_last(pop, sec->redo, ALLOC_OP_REDO_HEADER,
		pop_offset(pop, hdr), op_result);

	redo_log_process(pop, sec->redo, MAX_ALLOC_OP_REDO);

	if (heap_unlock_if_run(pop, m) != 0) {
		ERR("Failed to release run lock");
		ASSERT(0);
	}

	VALGRIND_DO_MEMPOOL_FREE(pop,
			(char *)alloc + sizeof (*alloc) + data_off);

	bucket_insert_block(pop, b, res);

	if (bucket_is_small(b) && heap_degrade_run_if_empty(pop, b, res) != 0) {
		ERR("Failed to degrade run");
		ASSERT(0);
	}

out:
	if (lane_release(pop) != 0) {
		ERR("Failed to release the lane");
		ASSERT(0);
	}

	return err;
}
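
Every variant of pfree() shown here brackets the persistent work between lane_hold() and lane_release(). The helper below isolates that idiom as a sketch; with_allocator_lane() and the do_work callback are illustrative, while the lane_hold()/lane_release() calls and their error handling mirror the code above.

static int
with_allocator_lane(PMEMobjpool *pop,
	int (*do_work)(PMEMobjpool *pop, struct lane_section *lane, void *arg),
	void *arg)
{
	struct lane_section *lane;
	int err;

	/* acquire a per-thread lane with an allocator redo-log section */
	if ((err = lane_hold(pop, &lane, LANE_SECTION_ALLOCATOR)) != 0)
		return err;

	err = do_work(pop, lane, arg);

	/* failing to release the lane indicates a bug, not a user error */
	if (lane_release(pop) != 0) {
		ERR("Failed to release the lane");
		ASSERT(0);
	}

	return err;
}
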
Example #4
/*
 * pfree -- deallocates a memory block previously allocated by pmalloc
 *
 * A zero value is written persistently into the off variable.
 *
 * If successful, the function returns zero. Otherwise, an error number
 * is returned.
 */
int
pfree(PMEMobjpool *pop, uint64_t *off)
{
	struct allocation_header *alloc = alloc_get_header(pop, *off);

	int err = 0;

	struct lane_section *lane;
	if ((err = lane_hold(pop, &lane, LANE_SECTION_ALLOCATOR)) != 0)
		return err;

	struct bucket *b = heap_get_best_bucket(pop, alloc->size);

	struct memory_block m = get_mblock_from_alloc(pop, b, alloc);

#ifdef DEBUG
	if (!heap_block_is_allocated(pop, m)) {
		ERR("Double free or heap corruption");
		ASSERT(0);
	}
#endif /* DEBUG */

	if ((err = heap_lock_if_run(pop, m)) != 0)
		goto out;

	uint64_t op_result;
	void *hdr;
	struct memory_block res = heap_free_block(pop, b, m, &hdr, &op_result);

	struct allocator_lane_section *sec =
		(struct allocator_lane_section *)lane->layout;

	redo_log_store(pop, sec->redo, ALLOC_OP_REDO_PTR_OFFSET,
		pop_offset(pop, off), 0);
	redo_log_store_last(pop, sec->redo, ALLOC_OP_REDO_HEADER,
		pop_offset(pop, hdr), op_result);

	redo_log_process(pop, sec->redo, MAX_ALLOC_OP_REDO);

	/*
	 * There's no point in rolling back redo log changes because the
	 * volatile errors don't break the persistent state.
	 */
	if (bucket_insert_block(b, res) != 0) {
		ERR("Failed to update the heap volatile state");
		ASSERT(0);
	}

	if (heap_unlock_if_run(pop, m) != 0) {
		ERR("Failed to release run lock");
		ASSERT(0);
	}

	if (bucket_is_small(b) && heap_degrade_run_if_empty(pop, b, res) != 0) {
		ERR("Failed to degrade run");
		ASSERT(0);
	}

out:
	if (lane_release(pop) != 0) {
		ERR("Failed to release the lane");
		ASSERT(0);
	}

	return err;
}
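
The comment above pairs pfree() with pmalloc(). A round trip could look like the sketch below; the pmalloc() signature used here (pool, destination offset, size) is an assumption inferred from pfree(), and struct my_root is the illustrative layout from the earlier sketch.

static int
alloc_and_free_foo(PMEMobjpool *pop, struct my_root *root, size_t size)
{
	int err;

	/* assumed pmalloc(): allocates and persistently stores the offset */
	if ((err = pmalloc(pop, &root->foo_off, size)) != 0)
		return err;

	/* ... use the object located at pool base + root->foo_off ... */

	/* release the block and persistently zero foo_off in one operation */
	return pfree(pop, &root->foo_off);
}
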
Example #5
/*
 * palloc_operation -- persistent memory operation. Takes a zero offset or
 *	an existing memory block and modifies it to occupy at least 'size'
 *	bytes.
 *
 * The malloc, free and realloc routines are implemented in the context of this
 * common operation which encompasses all of the functionality usually done
 * separately in those methods.
 *
 * The first thing that needs to be done is determining which memory blocks
 * will be affected by the operation - this varies depending on whether the
 * operation will need to modify or free an existing block and/or allocate
 * a new one.
 *
 * Simplified allocation process flow is as follows:
 *	- reserve a new block in the transient heap
 *	- prepare the new block
 *	- create redo log of required modifications
 *		- chunk metadata
 *		- offset of the new object
 *	- commit and process the redo log
 *
 * And similarly, the deallocation process:
 *	- create redo log of required modifications
 *		- reverse the chunk metadata back to the 'free' state
 *		- set the destination of the object offset to zero
 *	- commit and process the redo log
 *	- return the memory block back to the free blocks transient heap
 *
 * Reallocation is a combination of the above, with one additional step
 * of copying the old content into the new block.
 */
int
palloc_operation(struct palloc_heap *heap,
	uint64_t off, uint64_t *dest_off, size_t size,
	palloc_constr constructor, void *arg,
	struct operation_context *ctx)
{
	struct bucket *b = NULL;
	struct allocation_header *alloc = NULL;
	struct memory_block existing_block = {0, 0, 0, 0};
	struct memory_block new_block = {0, 0, 0, 0};
	struct memory_block reclaimed_block = {0, 0, 0, 0};

	size_t sizeh = size + sizeof(struct allocation_header);

	/*
	 * The offset of an existing block can be nonzero, which means this
	 * operation is either a free or a realloc - either way the offset of
	 * the object needs to be translated into the memory block structure
	 * that all of the heap methods operate on.
	 */
	if (off != 0) {
		alloc = ALLOC_GET_HEADER(heap, off);
		/*
		 * The memory block must be returned to the originating bucket,
		 * otherwise coalescing of neighbouring blocks will be rendered
		 * impossible.
		 *
		 * If the block was allocated in a different incarnation of the
		 * heap (i.e. the application was restarted) and the chunk the
		 * allocation comes from was not yet processed, the originating
		 * bucket does not exist and all of the otherwise necessary
		 * volatile heap modifications won't be performed for this
		 * memory block.
		 */
		b = heap_get_chunk_bucket(heap, alloc->chunk_id,
				alloc->zone_id);
		existing_block = get_mblock_from_alloc(heap, alloc);
	}

	/* if allocation or reallocation, reserve new memory */
	if (size != 0) {
		/* reallocation to exactly the same size is a no-op */
		if (alloc != NULL && alloc->size == sizeh)
			return 0;

		errno = alloc_reserve_block(heap, &new_block, sizeh);
		if (errno != 0)
			return -1;
	}


	/*
	 * The offset value which is to be written to the destination pointer
	 * provided by the caller.
	 */
	uint64_t offset_value = 0;

	/* lock and persistently free the existing memory block */
	if (!MEMORY_BLOCK_IS_EMPTY(existing_block)) {
#ifdef DEBUG
		if (!heap_block_is_allocated(heap, existing_block)) {
			ERR("Double free or heap corruption");
			ASSERT(0);
		}
#endif /* DEBUG */

		/*
		 * This lock must be held until the operation is processed
		 * successfully, because other threads might operate on the
		 * same bitmap value.
		 */
		MEMBLOCK_OPS(AUTO, &existing_block)->
				lock(&existing_block, heap);

		/*
		 * This method will insert new entries into the operation
		 * context which will, after processing, update the chunk
		 * metadata to 'free' - it also takes care of all the necessary
		 * coalescing of blocks.
		 * Even though the transient state of the heap is used during
		 * this method to locate neighbouring blocks, it isn't modified.
		 *
		 * The reclaimed block is the coalesced memory block that the
		 * free resulted in; to prevent a volatile memory leak it needs
		 * to be inserted into the corresponding bucket.
		 */
		reclaimed_block = heap_free_block(heap, b, existing_block, ctx);
		offset_value = 0;
	}

	if (!MEMORY_BLOCK_IS_EMPTY(new_block)) {
#ifdef DEBUG
		if (heap_block_is_allocated(heap, new_block)) {
			ERR("heap corruption");
			ASSERT(0);
		}
#endif /* DEBUG */

		if (alloc_prep_block(heap, new_block, constructor,
				arg, &offset_value) != 0) {
			/*
			 * The constructor returned a non-zero value, which
			 * means the memory block reservation has to be rolled
			 * back.
			 */
			struct bucket *new_bucket = heap_get_chunk_bucket(heap,
				new_block.chunk_id, new_block.zone_id);
			ASSERTne(new_bucket, NULL);

			/*
			 * Omitting the context in this method results in
			 * coalescing of blocks without affecting the persistent
			 * heap state.
			 */
			new_block = heap_free_block(heap, new_bucket,
					new_block, NULL);
			CNT_OP(new_bucket, insert, heap, new_block);

			if (new_bucket->type == BUCKET_RUN)
				heap_degrade_run_if_empty(heap,
					new_bucket, new_block);

			errno = ECANCELED;
			return -1;
		}

		/*
		 * This lock must be held for the duration between the creation
		 * of the allocation metadata updates in the operation context
		 * and the operation processing. This is because a different
		 * thread might operate on the same 8-byte value of the run
		 * bitmap and override the allocation performed by this thread.
		 */
		MEMBLOCK_OPS(AUTO, &new_block)->lock(&new_block, heap);

		/*
		 * The actual required metadata modifications are chunk-type
		 * dependent, but it is always a modification of a single 8-byte
		 * value - either a few bits are flipped in a bitmap or the
		 * chunk type is changed from free to used.
		 */
		MEMBLOCK_OPS(AUTO, &new_block)->prep_hdr(&new_block,
				heap, HDR_OP_ALLOC, ctx);
	}

	/* not in-place realloc */
	if (!MEMORY_BLOCK_IS_EMPTY(existing_block) &&
		!MEMORY_BLOCK_IS_EMPTY(new_block)) {
		size_t old_size = alloc->size;
		size_t to_cpy = old_size > sizeh ? sizeh : old_size;
		VALGRIND_ADD_TO_TX(PMALLOC_OFF_TO_PTR(heap, offset_value),
			to_cpy - ALLOC_OFF);
		pmemops_memcpy_persist(&heap->p_ops,
			PMALLOC_OFF_TO_PTR(heap, offset_value),
			PMALLOC_OFF_TO_PTR(heap, off),
			to_cpy - ALLOC_OFF);
		VALGRIND_REMOVE_FROM_TX(PMALLOC_OFF_TO_PTR(heap, offset_value),
			to_cpy - ALLOC_OFF);
	}

	/*
	 * If the caller provided a destination value to update, it needs to be
	 * modified atomically alongside the heap metadata, and so the operation
	 * context must be used.
	 * The actual offset value depends on the operation type.
	 */
	if (dest_off != NULL)
		operation_add_entry(ctx, dest_off, offset_value, OPERATION_SET);

	operation_process(ctx);

	/*
	 * After the operation succeeded, the persistent state is all in order
	 * but in some cases it might not be in sync with its transient
	 * representation.
	 */

	if (!MEMORY_BLOCK_IS_EMPTY(new_block)) {
		/* new block run lock */
		MEMBLOCK_OPS(AUTO, &new_block)->unlock(&new_block, heap);
	}

	if (!MEMORY_BLOCK_IS_EMPTY(existing_block)) {
		/* existing (freed) run lock */
		MEMBLOCK_OPS(AUTO, &existing_block)->
				unlock(&existing_block, heap);

		VALGRIND_DO_MEMPOOL_FREE(heap->layout,
			(char *)heap_get_block_data(heap, existing_block)
			+ ALLOC_OFF);

		/* we might have been operating on an inactive run */
		if (b != NULL) {
			/*
			 * Even though the initial condition is to check
			 * whether the existing block exists, it's important to
			 * use the 'reclaimed block' - it is the coalesced one
			 * and reflects the current persistent heap state,
			 * whereas the existing block reflects the state from
			 * before this operation started.
			 */
			CNT_OP(b, insert, heap, reclaimed_block);
#ifdef DEBUG
			if (heap_block_is_allocated(heap, reclaimed_block)) {
				ERR("heap corruption");
				ASSERT(0);
			}
#endif /* DEBUG */
			/*
			 * Degrading a run means turning it back into a chunk
			 * when it's no longer needed.
			 * It might be tempting to defer this operation until
			 * such time that the chunk is actually needed, but
			 * right now the decision is to keep the persistent heap
			 * state as clean as possible - and that means not
			 * leaving unused data around.
			 */
			if (b->type == BUCKET_RUN)
				heap_degrade_run_if_empty(heap, b,
					reclaimed_block);
		}
	}

	return 0;
}
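
As the comment at the top of palloc_operation() says, malloc, free and realloc all reduce to this single operation, differing only in which of 'off' and 'size' are nonzero. The wrappers below sketch that mapping; their names and the assumption that the caller has already prepared an operation_context are illustrative, not the library's actual entry points.

/* allocation: no existing block (off == 0), only a new one is reserved */
static int
palloc_malloc_sketch(struct palloc_heap *heap, uint64_t *dest_off,
	size_t size, palloc_constr constructor, void *arg,
	struct operation_context *ctx)
{
	return palloc_operation(heap, 0, dest_off, size,
			constructor, arg, ctx);
}

/* deallocation: size == 0, the existing block is freed, *dest_off zeroed */
static int
palloc_free_sketch(struct palloc_heap *heap, uint64_t *dest_off,
	struct operation_context *ctx)
{
	return palloc_operation(heap, *dest_off, dest_off, 0, NULL, NULL, ctx);
}

/* reallocation: both nonzero, old content is copied into the new block */
static int
palloc_realloc_sketch(struct palloc_heap *heap, uint64_t *dest_off,
	size_t size, struct operation_context *ctx)
{
	return palloc_operation(heap, *dest_off, dest_off, size,
			NULL, NULL, ctx);
}
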