Example #1
/*
 * persist_alloc -- (internal) performs a persistent allocation of the
 *	memory block previously reserved by the volatile bucket
 */
static int
persist_alloc(PMEMobjpool *pop, struct lane_section *lane,
	struct memory_block m, uint64_t real_size, uint64_t *off,
	void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg),
	void *arg, uint64_t data_off)
{
	int err;

#ifdef DEBUG
	if (heap_block_is_allocated(pop, m)) {
		ERR("heap corruption");
		ASSERT(0);
	}
#endif /* DEBUG */

	uint64_t op_result = 0;

	void *block_data = heap_get_block_data(pop, m);
	void *datap = (char *)block_data + sizeof (struct allocation_header);
	void *userdatap = (char *)datap + data_off;

	ASSERT((uint64_t)block_data % _POBJ_CL_ALIGNMENT == 0);

	/* mark everything (including headers) as accessible */
	VALGRIND_DO_MAKE_MEM_UNDEFINED(pop, block_data, real_size);
	/* mark space as allocated */
	VALGRIND_DO_MEMPOOL_ALLOC(pop, userdatap,
			real_size -
			sizeof (struct allocation_header) - data_off);

	alloc_write_header(pop, block_data, m.chunk_id, m.zone_id, real_size);

	if (constructor != NULL)
		constructor(pop, userdatap, arg);

	if ((err = heap_lock_if_run(pop, m)) != 0) {
		VALGRIND_DO_MEMPOOL_FREE(pop, userdatap);
		return err;
	}

	void *hdr = heap_get_block_header(pop, m, HEAP_OP_ALLOC, &op_result);

	struct allocator_lane_section *sec =
		(struct allocator_lane_section *)lane->layout;

	redo_log_store(pop, sec->redo, ALLOC_OP_REDO_PTR_OFFSET,
		pop_offset(pop, off), pop_offset(pop, datap));
	redo_log_store_last(pop, sec->redo, ALLOC_OP_REDO_HEADER,
		pop_offset(pop, hdr), op_result);

	redo_log_process(pop, sec->redo, MAX_ALLOC_OP_REDO);

	if (heap_unlock_if_run(pop, m) != 0) {
		ERR("Failed to release run lock");
		ASSERT(0);
	}

	return err;
}
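
The pair of redo entries above is the publication step: the first stages the
user-visible offset, the second (stored as the last entry) stages the block
header update, and redo_log_process applies both together, so a crash can
never expose one write without the other. A minimal sketch of the pattern in
isolation, assuming only the redo_log_* calls already used above (the
function and parameter names are illustrative):

/*
 * example_redo_publish -- (sketch) stage two stores, then apply atomically
 */
static void
example_redo_publish(PMEMobjpool *pop, struct redo_log *redo,
	uint64_t dst_a, uint64_t val_a, uint64_t dst_b, uint64_t val_b)
{
	/* entry 0: staged, not yet applied */
	redo_log_store(pop, redo, 0, dst_a, val_a);
	/* entry 1: stored with the finish flag, closing the log */
	redo_log_store_last(pop, redo, 1, dst_b, val_b);
	/* apply both entries; recovery replays them if this is interrupted */
	redo_log_process(pop, redo, 2);
}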
Example #2
/*
 * redo_log_recover -- (internal) recovery of redo log
 *
 * The redo_log_recover shall be preceded by a redo_log_check call.
 */
void
redo_log_recover(PMEMobjpool *pop, struct redo_log *redo,
		size_t nentries)
{
	LOG(15, "redo %p nentries %zu", redo, nentries);

	size_t nflags = redo_log_nflags(redo, nentries);
	ASSERT(nflags < 2);

	if (nflags == 1)
		redo_log_process(pop, redo, nentries);
}
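
A minimal sketch of the contract stated above: validate the log before
recovering it. This assumes the three-argument redo_log_check variant used
by the test driver in Example #5 and treats REDO_NUM_ENTRIES as the log
capacity (both are assumptions, not part of this snippet):

/*
 * example_check_then_recover -- (sketch) check the redo log, then recover
 */
static void
example_check_then_recover(PMEMobjpool *pop, struct redo_log *redo)
{
	/* redo_log_check returns non-zero for an inconsistent log */
	if (redo_log_check(pop, redo, REDO_NUM_ENTRIES) != 0) {
		ERR("redo log inconsistent");
		return;
	}
	redo_log_recover(pop, redo, REDO_NUM_ENTRIES);
}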
Example #3
/*
 * pfree -- deallocates a memory block previously allocated by pmalloc
 *
 * A zero value is written persistently into the off variable.
 */
void
pfree(PMEMobjpool *pop, uint64_t *off, uint64_t data_off)
{
	struct allocation_header *alloc = alloc_get_header(pop, *off);

	struct lane_section *lane;
	lane_hold(pop, &lane, LANE_SECTION_ALLOCATOR);

	struct bucket *b = heap_get_chunk_bucket(pop,
		alloc->chunk_id, alloc->zone_id);

	struct memory_block m = get_mblock_from_alloc(pop, alloc);

#ifdef DEBUG
	if (!heap_block_is_allocated(pop, m)) {
		ERR("Double free or heap corruption");
		ASSERT(0);
	}
#endif /* DEBUG */

	heap_lock_if_run(pop, m);

	uint64_t op_result;
	void *hdr;
	struct memory_block res = heap_free_block(pop, b, m, &hdr, &op_result);

	struct allocator_lane_section *sec =
		(struct allocator_lane_section *)lane->layout;

	redo_log_store(pop, sec->redo, ALLOC_OP_REDO_PTR_OFFSET,
		pop_offset(pop, off), 0);
	redo_log_store_last(pop, sec->redo, ALLOC_OP_REDO_HEADER,
		pop_offset(pop, hdr), op_result);

	redo_log_process(pop, sec->redo, MAX_ALLOC_OP_REDO);

	heap_unlock_if_run(pop, m);

	VALGRIND_DO_MEMPOOL_FREE(pop,
			(char *)alloc + sizeof (*alloc) + data_off);

	/* we might have been operating on inactive run */
	if (b != NULL) {
		CNT_OP(b, insert, pop, res);

		if (b->type == BUCKET_RUN)
			heap_degrade_run_if_empty(pop, b, res);
	}

	lane_release(pop);
}
Example #4
/*
 * operation_process_persistent_redo -- (internal) process using redo
 */
static void
operation_process_persistent_redo(struct operation_context *ctx)
{
	struct operation_entry *e;

	size_t i;
	for (i = 0; i < ctx->nentries[ENTRY_PERSISTENT]; ++i) {
		e = &ctx->entries[ENTRY_PERSISTENT][i];

		redo_log_store(ctx->pop, ctx->redo, i,
			OBJ_PTR_TO_OFF(ctx->pop, e->ptr), e->value);
	}

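	/* assumption: at least one persistent entry was queued, so i >= 1 */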
	redo_log_set_last(ctx->pop, ctx->redo, i - 1);
	redo_log_process(ctx->pop, ctx->redo, i);
}
Example #5
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_redo_log");
	util_init();

	if (argc < 4)
		FATAL_USAGE();

	PMEMobjpool *pop = pmemobj_open_mock(argv[1]);
	UT_ASSERTne(pop, NULL);

	UT_ASSERTeq(util_is_zeroed((char *)pop->addr + PMEMOBJ_POOL_HDR_SIZE,
			pop->size - PMEMOBJ_POOL_HDR_SIZE), 1);

	char *end = NULL;
	errno = 0;
	size_t redo_size = strtoul(argv[2], &end, 0);
	if (errno || !end || *end != '\0')
		FATAL_USAGE();

	UT_ASSERT(pop->size >= redo_size * sizeof(struct redo_log));

	struct redo_log *redo = (struct redo_log *)pop->addr;

	uint64_t offset;
	uint64_t value;
	int i;
	int ret;
	size_t index;
	for (i = 3; i < argc; i++) {
		char *arg = argv[i];
		UT_ASSERTne(arg, NULL);

		switch (arg[0]) {
		case 's':
			if (sscanf(arg, "s:%zu:0x%lx:0x%lx",
					&index, &offset, &value) != 3)
				FATAL_USAGE();
			UT_OUT("s:%zu:0x%08lx:0x%08lx", index, offset, value);
			redo_log_store(pop, redo, index, offset, value);
			break;
		case 'f':
			if (sscanf(arg, "f:%zu:0x%lx:0x%lx",
					&index, &offset, &value) != 3)
				FATAL_USAGE();
			UT_OUT("f:%zu:0x%08lx:0x%08lx", index, offset, value);
			redo_log_store_last(pop, redo, index, offset,
					value);
			break;
		case 'F':
			if (sscanf(arg, "F:%zu", &index) != 1)
				FATAL_USAGE();
			UT_OUT("F:%zu", index);
			redo_log_set_last(pop, redo, index);
			break;
		case 'r':
			if (sscanf(arg, "r:0x%lx", &offset) != 1)
				FATAL_USAGE();

			uint64_t *valp = (uint64_t *)((uintptr_t)pop->addr
					+ offset);
			UT_OUT("r:0x%08lx:0x%08lx", offset, *valp);
			break;
		case 'e':
			if (sscanf(arg, "e:%zu", &index) != 1)
				FATAL_USAGE();

			struct redo_log *entry = redo + index;

			int flag = (entry->offset & REDO_FINISH_FLAG) ? 1 : 0;
			offset = entry->offset & REDO_FLAG_MASK;
			value = entry->value;

			UT_OUT("e:%zu:0x%08lx:%d:0x%08lx", index, offset,
					flag, value);
			break;
		case 'P':
			redo_log_process(pop, redo, redo_size);
			UT_OUT("P");
			break;
		case 'R':
			redo_log_recover(pop, redo, redo_size);
			UT_OUT("R");
			break;
		case 'C':
			ret = redo_log_check(pop, redo, redo_size);
			UT_OUT("C:%d", ret);
			break;
		default:
			FATAL_USAGE();
		}
	}

	pmemobj_close_mock(pop);

	DONE(NULL);
}
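
A hypothetical invocation of this driver (pool file name and hex values are
illustrative only): given a log capacity of 8 entries, the arguments below
store one entry, finish the log with a second, process it, and check its
consistency:

	./obj_redo_log pool.file 8 s:0:0x100:0x42 f:1:0x108:0x43 P C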
Example #6
/*
 * list_remove -- remove object from list
 *
 * pop          - pmemobj handle
 * pe_offset    - offset to list entry on user list relative to user data
 * head         - list head
 * oid          - target object ID
 */
int
list_remove(PMEMobjpool *pop,
	ssize_t pe_offset, struct list_head *head,
	PMEMoid oid)
{
	LOG(3, NULL);
	ASSERTne(head, NULL);

	int ret;

	struct lane_section *lane_section;

	lane_hold(pop, &lane_section, LANE_SECTION_LIST);

	ASSERTne(lane_section, NULL);
	ASSERTne(lane_section->layout, NULL);

	if ((ret = pmemobj_mutex_lock(pop, &head->lock))) {
		errno = ret;
		LOG(2, "pmemobj_mutex_lock failed");
		ret = -1;
		goto err;
	}

	struct lane_list_layout *section =
		(struct lane_list_layout *)lane_section->layout;
	struct redo_log *redo = section->redo;
	size_t redo_index = 0;

	struct list_entry *entry_ptr =
		(struct list_entry *)OBJ_OFF_TO_PTR(pop,
				oid.off + (size_t)pe_offset);

	struct list_args_remove args = {
		.pe_offset = (ssize_t)pe_offset,
		.head = head,
		.entry_ptr = entry_ptr,
		.obj_doffset = oid.off,
	};

	struct list_args_common args_common = {
		.obj_doffset = oid.off,
		.entry_ptr = entry_ptr,
		.pe_offset = (ssize_t)pe_offset,
	};

	/* remove element from user list */
	redo_index = list_remove_single(pop, redo, redo_index, &args);

	/* clear next and prev offsets in removing element using redo log */
	redo_index = list_fill_entry_redo_log(pop, redo, redo_index,
			&args_common, 0, 0, 0);

	redo_log_set_last(pop->redo, redo, redo_index - 1);

	redo_log_process(pop->redo, redo, REDO_NUM_ENTRIES);

	pmemobj_mutex_unlock_nofail(pop, &head->lock);
err:
	lane_release(pop);

	ASSERT(ret == 0 || ret == -1);
	return ret;
}
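
A minimal usage sketch (the node struct is hypothetical): pe_offset is the
byte offset of the embedded list_entry within the object's user data, so
offsetof from <stddef.h> expresses it directly:

struct example_node {
	struct list_entry entry;	/* links for the user list */
	uint64_t payload;
};

static int
example_remove(PMEMobjpool *pop, struct list_head *head, PMEMoid oid)
{
	return list_remove(pop, (ssize_t)offsetof(struct example_node, entry),
			head, oid);
}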

/*
 * list_move -- move object between two lists
 *
 * pop           - pmemobj handle
 * pe_offset_old - offset to old list entry relative to user data
 * head_old      - old list head
 * pe_offset_new - offset to new list entry relative to user data
 * head_new      - new list head
 * dest          - destination object ID
 * before        - before/after destination
 * oid           - target object ID
 */
int
list_move(PMEMobjpool *pop,
	size_t pe_offset_old, struct list_head *head_old,
	size_t pe_offset_new, struct list_head *head_new,
	PMEMoid dest, int before, PMEMoid oid)
{
	LOG(3, NULL);
	ASSERTne(head_old, NULL);
	ASSERTne(head_new, NULL);

	int ret;

	struct lane_section *lane_section;

	lane_hold(pop, &lane_section, LANE_SECTION_LIST);

	ASSERTne(lane_section, NULL);
	ASSERTne(lane_section->layout, NULL);

	/*
	 * Grab locks in specified order to avoid dead-locks.
	 *
	 * XXX performance improvement: initialize oob locks at pool opening
	 */
	if ((ret = list_mutexes_lock(pop, head_new, head_old))) {
		errno = ret;
		LOG(2, "list_mutexes_lock failed");
		ret = -1;
		goto err;
	}

	struct lane_list_layout *section =
		(struct lane_list_layout *)lane_section->layout;
	struct redo_log *redo = section->redo;
	size_t redo_index = 0;

	dest = list_get_dest(pop, head_new, dest,
		(ssize_t)pe_offset_new, before);

	struct list_entry *entry_ptr_old =
		(struct list_entry *)OBJ_OFF_TO_PTR(pop,
				oid.off + pe_offset_old);

	struct list_entry *entry_ptr_new =
		(struct list_entry *)OBJ_OFF_TO_PTR(pop,
				oid.off + pe_offset_new);

	struct list_entry *dest_entry_ptr =
		(struct list_entry *)OBJ_OFF_TO_PTR(pop,
				dest.off + pe_offset_new);

	if (head_old == head_new) {
		/* moving within the same list */

		if (dest.off == oid.off)
			goto unlock;

		if (before && dest_entry_ptr->pe_prev.off == oid.off) {
			if (head_old->pe_first.off != dest.off)
				goto unlock;

			redo_index = list_update_head(pop, redo, redo_index,
					head_old, oid.off);

			goto redo_last;
		}

		if (!before && dest_entry_ptr->pe_next.off == oid.off) {
			if (head_old->pe_first.off != oid.off)
				goto unlock;

			redo_index = list_update_head(pop, redo, redo_index,
					head_old, entry_ptr_old->pe_next.off);

			goto redo_last;
		}
	}

	ASSERT((ssize_t)pe_offset_old >= 0);
	struct list_args_remove args_remove = {
		.pe_offset = (ssize_t)pe_offset_old,
		.head = head_old,
		.entry_ptr = entry_ptr_old,
		.obj_doffset = oid.off,
	};

	struct list_args_insert args_insert = {
		.head = head_new,
		.dest = dest,
		.dest_entry_ptr = dest_entry_ptr,
		.before = before,
	};

	ASSERT((ssize_t)pe_offset_new >= 0);
	struct list_args_common args_common = {
		.obj_doffset = oid.off,
		.entry_ptr = entry_ptr_new,
		.pe_offset = (ssize_t)pe_offset_new,
	};

	uint64_t next_offset;
	uint64_t prev_offset;

	/* remove element from user list */
	redo_index = list_remove_single(pop, redo, redo_index, &args_remove);

	/* insert element to user list */
	redo_index = list_insert_user(pop, redo, redo_index, &args_insert,
			&args_common, &next_offset, &prev_offset);

	/* offsets differ, move is between different list entries - set uuid */
	int set_uuid = pe_offset_new != pe_offset_old ? 1 : 0;

	/* fill next and prev offsets of moving element using redo log */
	redo_index = list_fill_entry_redo_log(pop, redo, redo_index,
			&args_common, next_offset, prev_offset, set_uuid);

redo_last:
	redo_log_set_last(pop->redo, redo, redo_index - 1);

	redo_log_process(pop->redo, redo, REDO_NUM_ENTRIES);

unlock:
	list_mutexes_unlock(pop, head_new, head_old);
err:
	lane_release(pop);

	ASSERT(ret == 0 || ret == -1);
	return ret;
}

/*
 * lane_list_recovery -- (internal) recover the list section of the lane
 */
static int
lane_list_recovery(PMEMobjpool *pop, void *data, unsigned length)
{
	LOG(7, "list lane %p", data);

	struct lane_list_layout *section = data;
	ASSERT(sizeof(*section) <= length);

	redo_log_recover(pop->redo, section->redo, REDO_NUM_ENTRIES);

	if (section->obj_offset) {
		/* alloc or free recovery */
		pfree(pop, &section->obj_offset);
	}

	return 0;
}

/*
 * lane_list_check -- (internal) check consistency of lane
 */
static int
lane_list_check(PMEMobjpool *pop, void *data, unsigned length)
{
	LOG(3, "list lane %p", data);

	struct lane_list_layout *section = data;

	int ret = 0;
	if ((ret = redo_log_check(pop->redo,
			section->redo, REDO_NUM_ENTRIES)) != 0) {
		ERR("list lane: redo log check failed");
		ASSERT(ret == 0 || ret == -1);
		return ret;
	}

	if (section->obj_offset &&
	    !OBJ_OFF_FROM_HEAP(pop, section->obj_offset)) {
		ERR("list lane: invalid offset 0x%" PRIx64,
				section->obj_offset);

		return -1;
	}

	return 0;
}

/*
 * lane_list_construct_rt -- (internal) construct runtime part of list section
 */
static void *
lane_list_construct_rt(PMEMobjpool *pop)
{
	return NULL;
}

/*
 * lane_list_destroy_rt -- (internal) destroy runtime part of list section
 */
static void
lane_list_destroy_rt(PMEMobjpool *pop, void *rt)
{
	/* NOP */
}

/*
 * lane_list_boot -- global runtime init routine of list section
 */
static int
lane_list_boot(PMEMobjpool *pop)
{
	/* NOP */
	return 0;
}

/*
 * lane_list_cleanup -- global runtime cleanup routine of list section
 */
static int
lane_list_cleanup(PMEMobjpool *pop)
{
	/* NOP */
	return 0;
}

static struct section_operations list_ops = {
	.construct_rt = lane_list_construct_rt,
	.destroy_rt = lane_list_destroy_rt,
	.recover = lane_list_recovery,
	.check = lane_list_check,
	.boot = lane_list_boot,
	.cleanup = lane_list_cleanup,
};

SECTION_PARM(LANE_SECTION_LIST, &list_ops);
Example #7
/*
 * list_insert -- insert object to a single list
 *
 * pop          - pmemobj handle
 * pe_offset    - offset to list entry on user list relative to user data
 * head         - list head
 * dest         - destination object ID
 * before       - before/after destination
 * oid          - target object ID
 */
int
list_insert(PMEMobjpool *pop,
	ssize_t pe_offset, struct list_head *head,
	PMEMoid dest, int before,
	PMEMoid oid)
{
	LOG(3, NULL);
	ASSERTne(head, NULL);

	int ret;

	struct lane_section *lane_section;

	lane_hold(pop, &lane_section, LANE_SECTION_LIST);

	if ((ret = pmemobj_mutex_lock(pop, &head->lock))) {
		errno = ret;
		LOG(2, "pmemobj_mutex_lock failed");
		ret = -1;
		goto err;
	}

	ASSERTne(lane_section, NULL);
	ASSERTne(lane_section->layout, NULL);

	struct lane_list_layout *section =
		(struct lane_list_layout *)lane_section->layout;
	struct redo_log *redo = section->redo;
	size_t redo_index = 0;

	dest = list_get_dest(pop, head, dest, pe_offset, before);

	struct list_entry *entry_ptr =
		(struct list_entry *)OBJ_OFF_TO_PTR(pop,
			(uintptr_t)((ssize_t)oid.off + pe_offset));

	struct list_entry *dest_entry_ptr =
		(struct list_entry *)OBJ_OFF_TO_PTR(pop,
			(uintptr_t)((ssize_t)dest.off + pe_offset));

	struct list_args_insert args = {
		.dest = dest,
		.dest_entry_ptr = dest_entry_ptr,
		.head = head,
		.before = before,
	};

	struct list_args_common args_common = {
		.obj_doffset = oid.off,
		.entry_ptr = entry_ptr,
		.pe_offset = (ssize_t)pe_offset,
	};

	uint64_t next_offset;
	uint64_t prev_offset;

	/* insert element to user list */
	redo_index = list_insert_user(pop, redo, redo_index,
			&args, &args_common, &next_offset, &prev_offset);

	/* fill entry of existing element using redo log */
	redo_index = list_fill_entry_redo_log(pop, redo, redo_index,
			&args_common, next_offset, prev_offset, 1);

	redo_log_set_last(pop->redo, redo, redo_index - 1);

	redo_log_process(pop->redo, redo, REDO_NUM_ENTRIES);

	pmemobj_mutex_unlock_nofail(pop, &head->lock);
err:
	lane_release(pop);

	ASSERT(ret == 0 || ret == -1);
	return ret;
}

/*
 * list_remove_free -- remove an object from the user list and free it
 *
 * pop         - pmemobj pool handle
 * pe_offset   - offset to list entry on user list relative to user data
 * user_head   - user list head, *must* be locked
 * oidp        - pointer to target object ID
 */
static void
list_remove_free(PMEMobjpool *pop, size_t pe_offset,
	struct list_head *user_head, PMEMoid *oidp)
{
	LOG(3, NULL);
	ASSERT(user_head != NULL);

#ifdef DEBUG
	int r = pmemobj_mutex_assert_locked(pop, &user_head->lock);
	ASSERTeq(r, 0);
#endif

	struct lane_section *lane_section;

	lane_hold(pop, &lane_section, LANE_SECTION_LIST);

	ASSERTne(lane_section, NULL);
	ASSERTne(lane_section->layout, NULL);

	struct lane_list_layout *section =
		(struct lane_list_layout *)lane_section->layout;
	uint64_t sec_off_off = OBJ_PTR_TO_OFF(pop, &section->obj_offset);
	struct redo_log *redo = section->redo;
	size_t redo_index = 0;

	uint64_t obj_doffset = oidp->off;

	ASSERT((ssize_t)pe_offset >= 0);

	struct list_entry *entry_ptr =
		(struct list_entry *)OBJ_OFF_TO_PTR(pop,
				obj_doffset + pe_offset);

	struct list_args_remove args = {
		.pe_offset = (ssize_t)pe_offset,
		.head = user_head,
		.entry_ptr = entry_ptr,
		.obj_doffset = obj_doffset
	};

	/* remove from user list */
	redo_index = list_remove_single(pop, redo, redo_index, &args);

	/* clear the oid */
	if (OBJ_PTR_IS_VALID(pop, oidp))
		redo_index = list_set_oid_redo_log(pop, redo, redo_index,
				oidp, 0, 1);
	else
		oidp->off = 0;

	redo_log_store_last(pop->redo, redo, redo_index, sec_off_off,
			obj_doffset);

	redo_log_process(pop->redo, redo, REDO_NUM_ENTRIES);

	/*
	 * Don't need to fill next and prev offsets of removing element
	 * because the element is freed.
	 */
	pfree(pop, &section->obj_offset);

	lane_release(pop);
}

/*
 * list_remove_free_user -- remove an object from the user list and free it
 *
 * pop         - pmemobj pool handle
 * pe_offset   - offset to list entry on user list relative to user data
 * user_head   - user list head
 * oidp        - pointer to target object ID
 */
int
list_remove_free_user(PMEMobjpool *pop, size_t pe_offset,
	struct list_head *user_head, PMEMoid *oidp)
{
	LOG(3, NULL);

	int ret;
	if ((ret = pmemobj_mutex_lock(pop, &user_head->lock))) {
		errno = ret;
		LOG(2, "pmemobj_mutex_lock failed");
		return -1;
	}

	list_remove_free(pop, pe_offset, user_head, oidp);

	pmemobj_mutex_unlock_nofail(pop, &user_head->lock);

	return 0;
}
Example #8
/*
 * list_insert_new -- allocate and insert an element into the user list
 *
 * pop         - pmemobj pool handle
 * pe_offset   - offset to list entry on user list relative to user data
 * user_head   - user list head, must be locked
 * dest        - destination on user list
 * before      - insert before/after destination on user list
 * size        - size of allocation, will be increased by OBJ_OOB_SIZE
 * constructor - object's constructor
 * arg         - argument for object's constructor
 * oidp        - pointer to target object ID
 */
static int
list_insert_new(PMEMobjpool *pop,
	size_t pe_offset, struct list_head *user_head, PMEMoid dest, int before,
	size_t size, int (*constructor)(void *ctx, void *ptr,
	size_t usable_size, void *arg), void *arg, PMEMoid *oidp)
{
	LOG(3, NULL);
	ASSERT(user_head != NULL);

	int ret;

	struct lane_section *lane_section;

#ifdef DEBUG
	int r = pmemobj_mutex_assert_locked(pop, &user_head->lock);
	ASSERTeq(r, 0);
#endif

	lane_hold(pop, &lane_section, LANE_SECTION_LIST);

	ASSERTne(lane_section, NULL);
	ASSERTne(lane_section->layout, NULL);

	struct lane_list_layout *section =
		(struct lane_list_layout *)lane_section->layout;
	struct redo_log *redo = section->redo;
	size_t redo_index = 0;
	uint64_t sec_off_off = OBJ_PTR_TO_OFF(pop, &section->obj_offset);

	if (constructor) {
		if ((ret = pmalloc_construct(pop,
				&section->obj_offset, size,
				constructor, arg, 0, 0, 0))) {
			ERR("!pmalloc_construct");
			goto err_pmalloc;
		}
	} else {
		ret = pmalloc(pop, &section->obj_offset, size, 0, 0);
		if (ret) {
			ERR("!pmalloc");
			goto err_pmalloc;
		}
	}

	uint64_t obj_doffset = section->obj_offset;

	ASSERT((ssize_t)pe_offset >= 0);

	dest = list_get_dest(pop, user_head, dest,
		(ssize_t)pe_offset, before);

	struct list_entry *entry_ptr =
		(struct list_entry *)OBJ_OFF_TO_PTR(pop,
				obj_doffset + pe_offset);

	struct list_entry *dest_entry_ptr =
		(struct list_entry *)OBJ_OFF_TO_PTR(pop,
				dest.off + pe_offset);

	struct list_args_insert args = {
		.dest = dest,
		.dest_entry_ptr = dest_entry_ptr,
		.head = user_head,
		.before = before,
	};

	struct list_args_common args_common = {
		.obj_doffset = obj_doffset,
		.entry_ptr = entry_ptr,
		.pe_offset = (ssize_t)pe_offset,
	};

	uint64_t next_offset;
	uint64_t prev_offset;

	/* insert element to user list */
	redo_index = list_insert_user(pop,
		redo, redo_index, &args, &args_common,
		&next_offset, &prev_offset);

	/* don't need to use redo log for filling new element */
	list_fill_entry_persist(pop, entry_ptr,
			next_offset, prev_offset);

	if (oidp != NULL) {
		if (OBJ_PTR_IS_VALID(pop, oidp)) {
			redo_index = list_set_oid_redo_log(pop, redo,
					redo_index, oidp, obj_doffset, 0);
		} else {
			oidp->off = obj_doffset;
			oidp->pool_uuid_lo = pop->uuid_lo;
		}
	}

	/* clear the obj_offset in lane section */
	redo_log_store_last(pop->redo, redo, redo_index, sec_off_off, 0);

	redo_log_process(pop->redo, redo, REDO_NUM_ENTRIES);

	ret = 0;

err_pmalloc:
	lane_release(pop);

	ASSERT(ret == 0 || ret == -1);
	return ret;
}

/*
 * list_insert_new_user -- allocate and insert an element into the user list
 *
 * pop         - pmemobj pool handle
 * pe_offset   - offset to list entry on user list relative to user data
 * user_head   - user list head
 * dest        - destination on user list
 * before      - insert before/after destination on user list
 * size        - size of allocation, will be increased by OBJ_OOB_SIZE
 * constructor - object's constructor
 * arg         - argument for object's constructor
 * oidp        - pointer to target object ID
 */
int
list_insert_new_user(PMEMobjpool *pop,
	size_t pe_offset, struct list_head *user_head, PMEMoid dest, int before,
	size_t size, int (*constructor)(void *ctx, void *ptr,
	size_t usable_size, void *arg), void *arg, PMEMoid *oidp)
{
	int ret;
	if ((ret = pmemobj_mutex_lock(pop, &user_head->lock))) {
		errno = ret;
		LOG(2, "pmemobj_mutex_lock failed");
		return -1;
	}

	ret = list_insert_new(pop, pe_offset, user_head,
			dest, before, size, constructor, arg, oidp);

	pmemobj_mutex_unlock_nofail(pop, &user_head->lock);

	ASSERT(ret == 0 || ret == -1);
	return ret;
}
Example #9
int
pfree(PMEMobjpool *pop, uint64_t *off)
{
	struct allocation_header *alloc = alloc_get_header(pop, *off);

	struct bucket *b = heap_get_best_bucket(pop, alloc->size);

	int err = 0;

	struct memory_block m = get_mblock_from_alloc(pop, b, alloc);

	if ((err = heap_lock_if_run(pop, m)) != 0)
		return err;

#ifdef _EAP_ALLOC_OPTIMIZE
	/* skip the persistent free entirely when the optimization applies */
	if (is_alloc_free_opt_enable(alloc->size))
		goto error_lane_hold;
#endif

	uint64_t op_result;
	void *hdr;
	struct memory_block res = heap_free_block(pop, b, m, &hdr, &op_result);

	struct lane_section *lane;
	if ((err = lane_hold(pop, &lane, LANE_SECTION_ALLOCATOR)) != 0)
		goto error_lane_hold;

	struct allocator_lane_section *sec =
		(struct allocator_lane_section *)lane->layout;

	redo_log_store(pop, sec->redo, ALLOC_OP_REDO_PTR_OFFSET,
		pop_offset(pop, off), 0);
	redo_log_store_last(pop, sec->redo, ALLOC_OP_REDO_HEADER,
		pop_offset(pop, hdr), op_result);

	redo_log_process(pop, sec->redo, MAX_ALLOC_OP_REDO);

	if (lane_release(pop) != 0) {
		ERR("Failed to release the lane");
		ASSERT(0);
	}

	/*
	 * There's no point in rolling back redo log changes because the
	 * volatile errors don't break the persistent state.
	 */
	if (bucket_insert_block(b, res) != 0) {
		ERR("Failed to update the heap volatile state");
		ASSERT(0);
	}

	if (heap_unlock_if_run(pop, m) != 0) {
		ERR("Failed to release run lock");
		ASSERT(0);
	}

	if (bucket_is_small(b) && heap_degrade_run_if_empty(pop, b, res) != 0) {
		ERR("Failed to degrade run");
		ASSERT(0);
	}

	return 0;

error_lane_hold:
	if (heap_unlock_if_run(pop, m) != 0) {
		ERR("Failed to release run lock");
		ASSERT(0);
	}

	return err;
}
Example #10
/*
 * prealloc_construct -- resizes an existing memory block with a constructor
 *
 * The block offset is written persistently into the off variable, but only
 * after the constructor function has been called.
 *
 * Returns zero on success; otherwise an error number is returned.
 */
int
prealloc_construct(PMEMobjpool *pop, uint64_t *off, size_t size,
	void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg), void *arg,
	uint64_t data_off)
{
	if (size <= pmalloc_usable_size(pop, *off))
		return 0;

	size_t sizeh = size + sizeof (struct allocation_header);

	int err = 0;

	struct allocation_header *alloc = alloc_get_header(pop, *off);
	struct bucket *b = heap_get_best_bucket(pop, alloc->size);

	uint32_t add_size_idx = bucket_calc_units(b, sizeh - alloc->size);
	uint32_t new_size_idx = bucket_calc_units(b, sizeh);
	uint64_t real_size = new_size_idx * bucket_unit_size(b);

	struct memory_block cnt = get_mblock_from_alloc(pop, b, alloc);

	if ((err = heap_lock_if_run(pop, cnt)) != 0)
		return err;

	struct memory_block next = {0};
	if ((err = heap_get_adjacent_free_block(pop, &next, cnt, 0)) != 0)
		goto error;

	if (next.size_idx < add_size_idx) {
		err = ENOMEM;
		goto error;
	}

	if ((err = heap_get_exact_block(pop, b, &next,
		add_size_idx)) != 0)
		goto error;

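	/* coalesce the existing block and the carved-out neighbor into one */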
	struct memory_block *blocks[2] = {&cnt, &next};
	uint64_t op_result;
	void *hdr;
	struct memory_block m =
		heap_coalesce(pop, blocks, 2, HEAP_OP_ALLOC, &hdr, &op_result);

	void *block_data = heap_get_block_data(pop, m);
	void *datap = (char *)block_data + sizeof (struct allocation_header);
	if (constructor != NULL)
		constructor(pop, (char *)datap + data_off, arg);

	struct lane_section *lane;
	if ((err = lane_hold(pop, &lane, LANE_SECTION_ALLOCATOR)) != 0)
		goto error;

	struct allocator_lane_section *sec =
		(struct allocator_lane_section *)lane->layout;

	redo_log_store(pop, sec->redo, ALLOC_OP_REDO_PTR_OFFSET,
		pop_offset(pop, &alloc->size), real_size);
	redo_log_store_last(pop, sec->redo, ALLOC_OP_REDO_HEADER,
		pop_offset(pop, hdr), op_result);

	redo_log_process(pop, sec->redo, MAX_ALLOC_OP_REDO);

	if (lane_release(pop) != 0) {
		ERR("Failed to release the lane");
		ASSERT(0);
	}

	if (heap_unlock_if_run(pop, cnt) != 0) {
		ERR("Failed to release run lock");
		ASSERT(0);
	}

	return 0;

error:
	if (heap_unlock_if_run(pop, cnt) != 0) {
		ERR("Failed to release run lock");
		ASSERT(0);
	}

	return err;
}
Exemple #11
0
/*
 * pmalloc_construct -- allocates a new block of memory with a constructor
 *
 * The block offset is written persistently into the off variable, but only
 * after the constructor function has been called.
 *
 * Returns zero on success; otherwise an error number is returned.
 */
int
pmalloc_construct(PMEMobjpool *pop, uint64_t *off, size_t size,
	void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg),
	void *arg, uint64_t data_off)
{
	size_t sizeh = size + sizeof (struct allocation_header);

	struct bucket *b = heap_get_best_bucket(pop, sizeh);

	int err = 0;
	uint32_t units = bucket_calc_units(b, sizeh);

	struct memory_block m = {0, 0, units, 0};

	if ((err = heap_get_bestfit_block(pop, b, &m)) != 0)
		return err;

	uint64_t op_result = 0;

	void *block_data = heap_get_block_data(pop, m);
	void *datap = (char *)block_data + sizeof (struct allocation_header);

	ASSERT((uint64_t)block_data % _POBJ_CL_ALIGNMENT == 0);

	uint64_t real_size = bucket_unit_size(b) * units;

	alloc_write_header(pop, block_data, m.chunk_id, m.zone_id, real_size);

	if (constructor != NULL)
		constructor(pop, (char *)datap + data_off, arg);

	if ((err = heap_lock_if_run(pop, m)) != 0)
		return err;

	void *hdr = heap_get_block_header(pop, m, HEAP_OP_ALLOC, &op_result);

	struct lane_section *lane;
	if ((err = lane_hold(pop, &lane, LANE_SECTION_ALLOCATOR)) != 0)
		goto err_lane_hold;

	struct allocator_lane_section *sec =
		(struct allocator_lane_section *)lane->layout;

	redo_log_store(pop, sec->redo, ALLOC_OP_REDO_PTR_OFFSET,
		pop_offset(pop, off), pop_offset(pop, datap));
	redo_log_store_last(pop, sec->redo, ALLOC_OP_REDO_HEADER,
		pop_offset(pop, hdr), op_result);

	redo_log_process(pop, sec->redo, MAX_ALLOC_OP_REDO);

	if (lane_release(pop) != 0) {
		ERR("Failed to release the lane");
		ASSERT(0);
	}

	if (heap_unlock_if_run(pop, m) != 0) {
		ERR("Failed to release run lock");
		ASSERT(0);
	}

	return 0;

err_lane_hold:
	if (heap_unlock_if_run(pop, m) != 0) {
		ERR("Failed to release run lock");
		ASSERT(0);
	}

	if (bucket_insert_block(b, m) != 0) {
		ERR("Failed to recover heap volatile state");
		ASSERT(0);
	}

	return err;
}
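
A minimal usage sketch (names and the 128-byte size are illustrative) of
this variant's constructor contract: the constructor runs on the new block
before its offset is published through the redo log, so the object is fully
initialized by the time it becomes reachable. memset is from <string.h>;
pmemobj_persist is the libpmemobj flush-to-persistence helper:

static void
example_zero_ctor(PMEMobjpool *pop, void *ptr, void *arg)
{
	size_t *sizep = arg;
	/* initialize the object before its offset becomes visible */
	memset(ptr, 0, *sizep);
	pmemobj_persist(pop, ptr, *sizep);
}

static int
example_alloc(PMEMobjpool *pop, uint64_t *offp)
{
	size_t size = 128;
	return pmalloc_construct(pop, offp, size,
			example_zero_ctor, &size, 0);
}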
Example #12
/*
 * pfree -- deallocates a memory block previously allocated by pmalloc
 *
 * A zero value is written persistently into the off variable.
 *
 * Returns zero on success; otherwise an error number is returned.
 */
int
pfree(PMEMobjpool *pop, uint64_t *off, uint64_t data_off)
{
	struct allocation_header *alloc = alloc_get_header(pop, *off);

	int err = 0;

	struct lane_section *lane;
	if ((err = lane_hold(pop, &lane, LANE_SECTION_ALLOCATOR)) != 0)
		return err;

	struct bucket *b = heap_get_chunk_bucket(pop,
		alloc->chunk_id, alloc->zone_id);

	struct memory_block m = get_mblock_from_alloc(pop, b, alloc);

#ifdef DEBUG
	if (!heap_block_is_allocated(pop, m)) {
		ERR("Double free or heap corruption");
		ASSERT(0);
	}
#endif /* DEBUG */

	if ((err = heap_lock_if_run(pop, m)) != 0)
		goto out;

	uint64_t op_result;
	void *hdr;
	struct memory_block res = heap_free_block(pop, b, m, &hdr, &op_result);

	struct allocator_lane_section *sec =
		(struct allocator_lane_section *)lane->layout;

	redo_log_store(pop, sec->redo, ALLOC_OP_REDO_PTR_OFFSET,
		pop_offset(pop, off), 0);
	redo_log_store_last(pop, sec->redo, ALLOC_OP_REDO_HEADER,
		pop_offset(pop, hdr), op_result);

	redo_log_process(pop, sec->redo, MAX_ALLOC_OP_REDO);

	if (heap_unlock_if_run(pop, m) != 0) {
		ERR("Failed to release run lock");
		ASSERT(0);
	}

	VALGRIND_DO_MEMPOOL_FREE(pop,
			(char *)alloc + sizeof (*alloc) + data_off);

	bucket_insert_block(pop, b, res);

	if (bucket_is_small(b) && heap_degrade_run_if_empty(pop, b, res) != 0) {
		ERR("Failed to degrade run");
		ASSERT(0);
	}

out:
	if (lane_release(pop) != 0) {
		ERR("Failed to release the lane");
		ASSERT(0);
	}

	return err;
}
Example #13
/*
 * prealloc_construct -- resizes an existing memory block with a constructor
 *
 * The block offset is written persistently into the off variable, but only
 * after the constructor function has been called.
 *
 * Returns zero on success; otherwise an error number is returned.
 */
int
prealloc_construct(PMEMobjpool *pop, uint64_t *off, size_t size,
	void (*constructor)(PMEMobjpool *pop, void *ptr,
	size_t usable_size, void *arg), void *arg, uint64_t data_off)
{
	if (size <= pmalloc_usable_size(pop, *off))
		return 0;

	size_t sizeh = size + sizeof (struct allocation_header);

	int err;

	struct allocation_header *alloc = alloc_get_header(pop, *off);

	struct lane_section *lane;
	lane_hold(pop, &lane, LANE_SECTION_ALLOCATOR);

	struct bucket *b = heap_get_best_bucket(pop, alloc->size);

	uint32_t add_size_idx = b->calc_units(b, sizeh - alloc->size);
	uint32_t new_size_idx = b->calc_units(b, sizeh);
	uint64_t real_size = new_size_idx * b->unit_size;

	struct memory_block cnt = get_mblock_from_alloc(pop, alloc);

	heap_lock_if_run(pop, cnt);

	struct memory_block next = {0, 0, 0, 0};
	if ((err = heap_get_adjacent_free_block(pop, b, &next, cnt, 0)) != 0)
		goto out;

	if (next.size_idx < add_size_idx) {
		err = ENOMEM;
		goto out;
	}

	if ((err = heap_get_exact_block(pop, b, &next, add_size_idx)) != 0)
		goto out;

	struct memory_block *blocks[2] = {&cnt, &next};
	uint64_t op_result;
	void *hdr;
	struct memory_block m =
		heap_coalesce(pop, blocks, 2, HEAP_OP_ALLOC, &hdr, &op_result);

	void *block_data = heap_get_block_data(pop, m);
	void *datap = (char *)block_data + sizeof (struct allocation_header);
	void *userdatap = (char *)datap + data_off;

	/* mark new part as accessible and undefined */
	VALGRIND_DO_MAKE_MEM_UNDEFINED(pop, (char *)block_data + alloc->size,
			real_size - alloc->size);
	/* resize allocated space */
	VALGRIND_DO_MEMPOOL_CHANGE(pop, userdatap, userdatap,
		real_size - sizeof (struct allocation_header) - data_off);

	if (constructor != NULL)
		constructor(pop, userdatap,
			real_size - sizeof (struct allocation_header) -
			data_off, arg);

	struct allocator_lane_section *sec =
		(struct allocator_lane_section *)lane->layout;

	redo_log_store(pop, sec->redo, ALLOC_OP_REDO_PTR_OFFSET,
		pop_offset(pop, &alloc->size), real_size);
	redo_log_store_last(pop, sec->redo, ALLOC_OP_REDO_HEADER,
		pop_offset(pop, hdr), op_result);

	redo_log_process(pop, sec->redo, MAX_ALLOC_OP_REDO);

out:
	heap_unlock_if_run(pop, cnt);
	lane_release(pop);

	return err;
}
Example #14
/*
 * pfree -- deallocates a memory block previously allocated by pmalloc
 *
 * A zero value is written persistently into the off variable.
 *
 * Returns zero on success; otherwise an error number is returned.
 */
int
pfree(PMEMobjpool *pop, uint64_t *off)
{
	struct allocation_header *alloc = alloc_get_header(pop, *off);

	int err = 0;

	struct lane_section *lane;
	if ((err = lane_hold(pop, &lane, LANE_SECTION_ALLOCATOR)) != 0)
		return err;

	struct bucket *b = heap_get_best_bucket(pop, alloc->size);

	struct memory_block m = get_mblock_from_alloc(pop, b, alloc);

#ifdef DEBUG
	if (!heap_block_is_allocated(pop, m)) {
		ERR("Double free or heap corruption");
		ASSERT(0);
	}
#endif /* DEBUG */

	if ((err = heap_lock_if_run(pop, m)) != 0)
		goto out;

	uint64_t op_result;
	void *hdr;
	struct memory_block res = heap_free_block(pop, b, m, &hdr, &op_result);

	struct allocator_lane_section *sec =
		(struct allocator_lane_section *)lane->layout;

	redo_log_store(pop, sec->redo, ALLOC_OP_REDO_PTR_OFFSET,
		pop_offset(pop, off), 0);
	redo_log_store_last(pop, sec->redo, ALLOC_OP_REDO_HEADER,
		pop_offset(pop, hdr), op_result);

	redo_log_process(pop, sec->redo, MAX_ALLOC_OP_REDO);

	/*
	 * There's no point in rolling back redo log changes because the
	 * volatile errors don't break the persistent state.
	 */
	if (bucket_insert_block(b, res) != 0) {
		ERR("Failed to update the heap volatile state");
		ASSERT(0);
	}

	if (heap_unlock_if_run(pop, m) != 0) {
		ERR("Failed to release run lock");
		ASSERT(0);
	}

	if (bucket_is_small(b) && heap_degrade_run_if_empty(pop, b, res) != 0) {
		ERR("Failed to degrade run");
		ASSERT(0);
	}

out:
	if (lane_release(pop) != 0) {
		ERR("Failed to release the lane");
		ASSERT(0);
	}

	return err;
}