Example #1
/*
 * list_insert_after -- (internal) insert element at offset after an element
 */
static size_t
list_insert_after(PMEMobjpool *pop,
	struct redo_log *redo, size_t redo_index,
	struct list_args_insert *args, struct list_args_common *args_common,
	uint64_t *next_offset, uint64_t *prev_offset)
{
	LOG(15, NULL);

	/* current->next = dest->next and current->prev = dest */
	*next_offset = args->dest_entry_ptr->pe_next.off;
	*prev_offset = args->dest.off;

	/* dest->next = current and dest->next->prev = current */
	uint64_t dest_next_off = args->dest.off + NEXT_OFF;
	u64_add_offset(&dest_next_off, args_common->pe_offset);
	uint64_t dest_next_prev_off = args->dest_entry_ptr->pe_next.off +
					PREV_OFF;
	u64_add_offset(&dest_next_prev_off, args_common->pe_offset);

	redo_log_store(pop->redo, redo, redo_index + 0,
			dest_next_off, args_common->obj_doffset);
	redo_log_store(pop->redo, redo, redo_index + 1,
			dest_next_prev_off, args_common->obj_doffset);

	return redo_index + 2;
}
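The list helpers in this collection share one convention: each takes the next free redo-log index and returns the index just past the entries it consumed, so calls chain naturally. A minimal sketch of that pattern, composed from helpers shown below (the call sequence and local declarations are illustrative, not taken from the library):

	/*
	 * Illustrative sketch: chain the helpers on their returned index,
	 * then terminate and apply the accumulated redo log. Declarations
	 * of args, args_common, next_offset and prev_offset are omitted.
	 */
	size_t idx = 0;
	idx = list_insert_after(pop, redo, idx, &args, &args_common,
			&next_offset, &prev_offset);	/* consumes 2 entries */
	idx = list_fill_entry_redo_log(pop, redo, idx, &args_common,
			next_offset, prev_offset, 1);	/* consumes 2 more */
	redo_log_set_last(pop->redo, redo, idx - 1);	/* mark final entry */
	redo_log_process(pop->redo, redo, idx);		/* apply atomically */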
Example #2
/*
 * persist_alloc -- (internal) performs a persistent allocation of the
 *	memory block previously reserved by a volatile bucket
 */
static int
persist_alloc(PMEMobjpool *pop, struct lane_section *lane,
	struct memory_block m, uint64_t real_size, uint64_t *off,
	void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg),
	void *arg, uint64_t data_off)
{
	int err;

#ifdef DEBUG
	if (heap_block_is_allocated(pop, m)) {
		ERR("heap corruption");
		ASSERT(0);
	}
#endif /* DEBUG */

	uint64_t op_result = 0;

	void *block_data = heap_get_block_data(pop, m);
	void *datap = (char *)block_data + sizeof (struct allocation_header);
	void *userdatap = (char *)datap + data_off;

	ASSERT((uint64_t)block_data % _POBJ_CL_ALIGNMENT == 0);

	/* mark everything (including headers) as accessible */
	VALGRIND_DO_MAKE_MEM_UNDEFINED(pop, block_data, real_size);
	/* mark space as allocated */
	VALGRIND_DO_MEMPOOL_ALLOC(pop, userdatap,
			real_size -
			sizeof (struct allocation_header) - data_off);

	alloc_write_header(pop, block_data, m.chunk_id, m.zone_id, real_size);

	if (constructor != NULL)
		constructor(pop, userdatap, arg);

	if ((err = heap_lock_if_run(pop, m)) != 0) {
		VALGRIND_DO_MEMPOOL_FREE(pop, userdatap);
		return err;
	}

	void *hdr = heap_get_block_header(pop, m, HEAP_OP_ALLOC, &op_result);

	struct allocator_lane_section *sec =
		(struct allocator_lane_section *)lane->layout;

	redo_log_store(pop, sec->redo, ALLOC_OP_REDO_PTR_OFFSET,
		pop_offset(pop, off), pop_offset(pop, datap));
	redo_log_store_last(pop, sec->redo, ALLOC_OP_REDO_HEADER,
		pop_offset(pop, hdr), op_result);

	redo_log_process(pop, sec->redo, MAX_ALLOC_OP_REDO);

	if (heap_unlock_if_run(pop, m) != 0) {
		ERR("Failed to release run lock");
		ASSERT(0);
	}

	/* err is still zero here; the lock-failure path returned early */
	return err;
}
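Each allocator example publishes its changes the same way: plain redo_log_store calls, one redo_log_store_last call that writes the terminating entry, then redo_log_process to apply the whole log. The terminating entry is what makes the update atomic, as this illustrative crash timeline summarizes:

	/*
	 * Illustrative crash timeline for the publish sequence above:
	 *
	 *   crash before redo_log_store_last -> no terminating entry, the
	 *       log is incomplete, recovery skips it; neither *off nor the
	 *       block header is modified
	 *   crash after redo_log_store_last  -> recovery replays all
	 *       entries; the allocation appears fully completed
	 */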
Example #3
/*
 * list_fill_entry_redo_log -- (internal) fill new entry using redo log
 *
 * Used to update entry in existing object.
 */
static size_t
list_fill_entry_redo_log(PMEMobjpool *pop,
	struct redo_log *redo, size_t redo_index,
	struct list_args_common *args,
	uint64_t next_offset, uint64_t prev_offset, int set_uuid)
{
	LOG(15, NULL);
	struct pmem_ops *ops = &pop->p_ops;

	ASSERTne(args->entry_ptr, NULL);
	ASSERTne(args->obj_doffset, 0);

	if (set_uuid) {
		VALGRIND_ADD_TO_TX(&(args->entry_ptr->pe_next.pool_uuid_lo),
				sizeof(args->entry_ptr->pe_next.pool_uuid_lo));
		VALGRIND_ADD_TO_TX(&(args->entry_ptr->pe_prev.pool_uuid_lo),
				sizeof(args->entry_ptr->pe_prev.pool_uuid_lo));
		/* don't need to fill pool uuid using redo log */
		args->entry_ptr->pe_next.pool_uuid_lo = pop->uuid_lo;
		args->entry_ptr->pe_prev.pool_uuid_lo = pop->uuid_lo;
		VALGRIND_REMOVE_FROM_TX(
				&(args->entry_ptr->pe_next.pool_uuid_lo),
				sizeof(args->entry_ptr->pe_next.pool_uuid_lo));
		VALGRIND_REMOVE_FROM_TX(
				&(args->entry_ptr->pe_prev.pool_uuid_lo),
				sizeof(args->entry_ptr->pe_prev.pool_uuid_lo));
		pmemops_persist(ops, args->entry_ptr, sizeof(*args->entry_ptr));
	} else {
		ASSERTeq(args->entry_ptr->pe_next.pool_uuid_lo, pop->uuid_lo);
		ASSERTeq(args->entry_ptr->pe_prev.pool_uuid_lo, pop->uuid_lo);
	}

	/* set current->next and current->prev using redo log */
	uint64_t next_off_off = args->obj_doffset + NEXT_OFF;
	uint64_t prev_off_off = args->obj_doffset + PREV_OFF;
	u64_add_offset(&next_off_off, args->pe_offset);
	u64_add_offset(&prev_off_off, args->pe_offset);

	redo_log_store(pop->redo, redo, redo_index + 0, next_off_off,
			next_offset);
	redo_log_store(pop->redo, redo, redo_index + 1, prev_off_off,
			prev_offset);

	return redo_index + 2;
}
Example #4
/*
 * pfree -- deallocates a memory block previously allocated by pmalloc
 *
 * A zero value is written persistently into the off variable.
 */
void
pfree(PMEMobjpool *pop, uint64_t *off, uint64_t data_off)
{
	struct allocation_header *alloc = alloc_get_header(pop, *off);

	struct lane_section *lane;
	lane_hold(pop, &lane, LANE_SECTION_ALLOCATOR);

	struct bucket *b = heap_get_chunk_bucket(pop,
		alloc->chunk_id, alloc->zone_id);

	struct memory_block m = get_mblock_from_alloc(pop, alloc);

#ifdef DEBUG
	if (!heap_block_is_allocated(pop, m)) {
		ERR("Double free or heap corruption");
		ASSERT(0);
	}
#endif /* DEBUG */

	heap_lock_if_run(pop, m);

	uint64_t op_result;
	void *hdr;
	struct memory_block res = heap_free_block(pop, b, m, &hdr, &op_result);

	struct allocator_lane_section *sec =
		(struct allocator_lane_section *)lane->layout;

	redo_log_store(pop, sec->redo, ALLOC_OP_REDO_PTR_OFFSET,
		pop_offset(pop, off), 0);
	redo_log_store_last(pop, sec->redo, ALLOC_OP_REDO_HEADER,
		pop_offset(pop, hdr), op_result);

	redo_log_process(pop, sec->redo, MAX_ALLOC_OP_REDO);

	heap_unlock_if_run(pop, m);

	VALGRIND_DO_MEMPOOL_FREE(pop,
			(char *)alloc + sizeof (*alloc) + data_off);

	/* we might have been operating on an inactive run */
	if (b != NULL) {
		CNT_OP(b, insert, pop, res);

		if (b->type == BUCKET_RUN)
			heap_degrade_run_if_empty(pop, b, res);
	}

	lane_release(pop);
}
Example #5
/*
 * list_set_oid_redo_log -- (internal) set PMEMoid value using redo log
 */
static size_t
list_set_oid_redo_log(PMEMobjpool *pop,
	struct redo_log *redo, size_t redo_index,
	PMEMoid *oidp, uint64_t obj_doffset, int oidp_inited)
{
	ASSERT(OBJ_PTR_IS_VALID(pop, oidp));

	if (!oidp_inited || oidp->pool_uuid_lo != pop->uuid_lo) {
		if (oidp_inited)
			ASSERTeq(oidp->pool_uuid_lo, 0);
		uint64_t oid_uuid_off = OBJ_PTR_TO_OFF(pop,
				&oidp->pool_uuid_lo);
		redo_log_store(pop->redo, redo, redo_index, oid_uuid_off,
				pop->uuid_lo);
		redo_index += 1;
	}

	uint64_t oid_off_off = OBJ_PTR_TO_OFF(pop, &oidp->off);
	redo_log_store(pop->redo, redo, redo_index, oid_off_off,
			obj_doffset);

	return redo_index + 1;
}
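Because the pool-uuid entry is conditional, list_set_oid_redo_log consumes either one or two redo slots. Callers must therefore continue from the returned index rather than assume a fixed count, e.g. (illustrative):

	/* illustrative: the slot count varies, so chain on the return value */
	redo_index = list_set_oid_redo_log(pop, redo, redo_index,
			oidp, obj_doffset, oidp_inited);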
Example #6
/*
 * list_remove_single -- (internal) remove element from single list
 */
static size_t
list_remove_single(PMEMobjpool *pop,
	struct redo_log *redo, size_t redo_index,
	struct list_args_remove *args)
{
	LOG(15, NULL);

	if (args->entry_ptr->pe_next.off == args->obj_doffset) {
		/* only one element on list */
		ASSERTeq(args->head->pe_first.off, args->obj_doffset);
		ASSERTeq(args->entry_ptr->pe_prev.off, args->obj_doffset);

		return list_update_head(pop, redo, redo_index, args->head, 0);
	} else {
		/* set next->prev = prev and prev->next = next */
		uint64_t next_off = args->entry_ptr->pe_next.off;
		uint64_t next_prev_off = next_off + PREV_OFF;
		u64_add_offset(&next_prev_off, args->pe_offset);
		uint64_t prev_off = args->entry_ptr->pe_prev.off;
		uint64_t prev_next_off = prev_off + NEXT_OFF;
		u64_add_offset(&prev_next_off, args->pe_offset);

		redo_log_store(pop->redo, redo, redo_index + 0,
				next_prev_off, prev_off);
		redo_log_store(pop->redo, redo, redo_index + 1,
				prev_next_off, next_off);
		redo_index += 2;

		if (args->head->pe_first.off == args->obj_doffset) {
			/* the element being removed is the first one */
			return list_update_head(pop, redo, redo_index,
					args->head, next_off);
		} else {
			return redo_index;
		}
	}
}
Example #7
/*
 * list_update_head -- (internal) update pe_first entry in list head
 */
static size_t
list_update_head(PMEMobjpool *pop,
	struct redo_log *redo, size_t redo_index,
	struct list_head *head, uint64_t first_offset)
{
	LOG(15, NULL);

	uint64_t pe_first_off_off = OBJ_PTR_TO_OFF(pop, &head->pe_first.off);

	redo_log_store(pop->redo, redo, redo_index + 0,
			pe_first_off_off, first_offset);

	if (head->pe_first.pool_uuid_lo == 0) {
		uint64_t pe_first_uuid_off = OBJ_PTR_TO_OFF(pop,
				&head->pe_first.pool_uuid_lo);

		redo_log_store(pop->redo, redo, redo_index + 1,
				pe_first_uuid_off, pop->uuid_lo);

		return redo_index + 2;
	} else {
		return redo_index + 1;
	}
}
Example #8
/*
 * operation_process_persistent_redo -- (internal) process using redo
 */
static void
operation_process_persistent_redo(struct operation_context *ctx)
{
	struct operation_entry *e;

	size_t i;
	for (i = 0; i < ctx->nentries[ENTRY_PERSISTENT]; ++i) {
		e = &ctx->entries[ENTRY_PERSISTENT][i];

		redo_log_store(ctx->pop, ctx->redo, i,
			OBJ_PTR_TO_OFF(ctx->pop, e->ptr), e->value);
	}

	redo_log_set_last(ctx->pop, ctx->redo, i - 1);
	redo_log_process(ctx->pop, ctx->redo, i);
}
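One edge case worth noting: the redo_log_set_last(ctx->pop, ctx->redo, i - 1) call underflows when no persistent entries were queued, so the code above relies on callers queuing at least one. A defensive variant (a sketch, not the library's code) would bail out early:

	/* sketch: guard against an empty persistent entry set */
	if (ctx->nentries[ENTRY_PERSISTENT] == 0)
		return;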
Example #9
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_redo_log");
	util_init();

	if (argc < 4)
		FATAL_USAGE();

	PMEMobjpool *pop = pmemobj_open_mock(argv[1]);
	UT_ASSERTne(pop, NULL);

	UT_ASSERTeq(util_is_zeroed((char *)pop->addr + PMEMOBJ_POOL_HDR_SIZE,
			pop->size - PMEMOBJ_POOL_HDR_SIZE), 1);

	char *end = NULL;
	errno = 0;
	size_t redo_size = strtoul(argv[2], &end, 0);
	if (errno || !end || *end != '\0')
		FATAL_USAGE();

	UT_ASSERT(pop->size >= redo_size * sizeof(struct redo_log));

	struct redo_log *redo = (struct redo_log *)pop->addr;

	uint64_t offset;
	uint64_t value;
	int i;
	int ret;
	size_t index;
	for (i = 3; i < argc; i++) {
		char *arg = argv[i];
		UT_ASSERTne(arg, NULL);

		switch (arg[0]) {
		case 's':
			if (sscanf(arg, "s:%zu:0x%lx:0x%lx",
					&index, &offset, &value) != 3)
				FATAL_USAGE();
			UT_OUT("s:%zu:0x%08lx:0x%08lx", index, offset, value);
			redo_log_store(pop, redo, index, offset, value);
			break;
		case 'f':
			if (sscanf(arg, "f:%zu:0x%lx:0x%lx",
					&index, &offset, &value) != 3)
				FATAL_USAGE();
			UT_OUT("f:%zu:0x%08lx:0x%08lx", index, offset, value);
			redo_log_store_last(pop, redo, index, offset,
					value);
			break;
		case 'F':
			if (sscanf(arg, "F:%zu", &index) != 1)
				FATAL_USAGE();
			UT_OUT("F:%zu", index);
			redo_log_set_last(pop, redo, index);
			break;
		case 'r':
			if (sscanf(arg, "r:0x%lx", &offset) != 1)
				FATAL_USAGE();

			uint64_t *valp = (uint64_t *)((uintptr_t)pop->addr
					+ offset);
			UT_OUT("r:0x%08lx:0x%08lx", offset, *valp);
			break;
		case 'e':
			if (sscanf(arg, "e:%zu", &index) != 1)
				FATAL_USAGE();

			struct redo_log *entry = redo + index;

			int flag = (entry->offset & REDO_FINISH_FLAG) ? 1 : 0;
			offset = entry->offset & REDO_FLAG_MASK;
			value = entry->value;

			UT_OUT("e:%zu:0x%08lx:%d:0x%08lx", index, offset,
					flag, value);
			break;
		case 'P':
			redo_log_process(pop, redo, redo_size);
			UT_OUT("P");
			break;
		case 'R':
			redo_log_recover(pop, redo, redo_size);
			UT_OUT("R");
			break;
		case 'C':
			ret = redo_log_check(pop, redo, redo_size);
			UT_OUT("C:%d", ret);
			break;
		default:
			FATAL_USAGE();
		}
	}

	pmemobj_close_mock(pop);

	DONE(NULL);
}
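The test driver exposes the redo-log API through a small command language: s stores an entry, f stores an entry and marks it as the last one, F marks an existing entry as last, r reads a 64-bit value at a pool offset, e dumps an entry together with its finish flag, and P, R and C process, recover and check the log, respectively. A plausible invocation (file name, log size and addresses are illustrative):

	./obj_redo_log testfile 4 s:0:0x200:0x1 f:1:0x208:0x2 P r:0x200 r:0x208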
Example #10
int
pfree(PMEMobjpool *pop, uint64_t *off)
{
	struct allocation_header *alloc = alloc_get_header(pop, *off);

	struct bucket *b = heap_get_best_bucket(pop, alloc->size);

	int err = 0;

	struct memory_block m = get_mblock_from_alloc(pop, b, alloc);

	if ((err = heap_lock_if_run(pop, m)) != 0)
		return err;

#ifdef _EAP_ALLOC_OPTIMIZE
	/* when the allocation-free optimization applies, skip the free */
	if (is_alloc_free_opt_enable(alloc->size))
		goto error_lane_hold;
#endif

	uint64_t op_result;
	void *hdr;
	struct memory_block res = heap_free_block(pop, b, m, &hdr, &op_result);

	struct lane_section *lane;
	if ((err = lane_hold(pop, &lane, LANE_SECTION_ALLOCATOR)) != 0)
		goto error_lane_hold;

	struct allocator_lane_section *sec =
		(struct allocator_lane_section *)lane->layout;

	redo_log_store(pop, sec->redo, ALLOC_OP_REDO_PTR_OFFSET,
		pop_offset(pop, off), 0);
	redo_log_store_last(pop, sec->redo, ALLOC_OP_REDO_HEADER,
		pop_offset(pop, hdr), op_result);

	redo_log_process(pop, sec->redo, MAX_ALLOC_OP_REDO);

	if (lane_release(pop) != 0) {
		ERR("Failed to release the lane");
		ASSERT(0);
	}

	/*
	 * There's no point in rolling back redo log changes because the
	 * volatile errors don't break the persistent state.
	 */
	if (bucket_insert_block(b, res) != 0) {
		ERR("Failed to update the heap volatile state");
		ASSERT(0);
	}

	if (heap_unlock_if_run(pop, m) != 0) {
		ERR("Failed to release run lock");
		ASSERT(0);
	}

	if (bucket_is_small(b) && heap_degrade_run_if_empty(pop, b, res) != 0) {
		ERR("Failed to degrade run");
		ASSERT(0);
	}

	return 0;

error_lane_hold:
	if (heap_unlock_if_run(pop, m) != 0) {
		ERR("Failed to release run lock");
		ASSERT(0);
	}

	return err;
}
Example #11
/*
 * prealloc_construct -- resizes an existing memory block with a constructor
 *
 * The block offset is written persistently into the off variable, but only
 * after the constructor function has been called.
 *
 * If successful, the function returns zero. Otherwise, an error number
 * is returned.
 */
int
prealloc_construct(PMEMobjpool *pop, uint64_t *off, size_t size,
	void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg), void *arg,
	uint64_t data_off)
{
	if (size <= pmalloc_usable_size(pop, *off))
		return 0;

	size_t sizeh = size + sizeof (struct allocation_header);

	int err = 0;

	struct allocation_header *alloc = alloc_get_header(pop, *off);
	struct bucket *b = heap_get_best_bucket(pop, alloc->size);

	uint32_t add_size_idx = bucket_calc_units(b, sizeh - alloc->size);
	uint32_t new_size_idx = bucket_calc_units(b, sizeh);
	uint64_t real_size = new_size_idx * bucket_unit_size(b);

	struct memory_block cnt = get_mblock_from_alloc(pop, b, alloc);

	if ((err = heap_lock_if_run(pop, cnt)) != 0)
		return err;

	struct memory_block next = {0};
	if ((err = heap_get_adjacent_free_block(pop, &next, cnt, 0)) != 0)
		goto error;

	if (next.size_idx < add_size_idx) {
		err = ENOMEM;
		goto error;
	}

	if ((err = heap_get_exact_block(pop, b, &next,
		add_size_idx)) != 0)
		goto error;

	struct memory_block *blocks[2] = {&cnt, &next};
	uint64_t op_result;
	void *hdr;
	struct memory_block m =
		heap_coalesce(pop, blocks, 2, HEAP_OP_ALLOC, &hdr, &op_result);

	void *block_data = heap_get_block_data(pop, m);
	void *datap = (char *)block_data + sizeof (struct allocation_header);
	if (constructor != NULL)
		constructor(pop, (char *)datap + data_off, arg);

	struct lane_section *lane;
	if ((err = lane_hold(pop, &lane, LANE_SECTION_ALLOCATOR)) != 0)
		goto error;

	struct allocator_lane_section *sec =
		(struct allocator_lane_section *)lane->layout;

	redo_log_store(pop, sec->redo, ALLOC_OP_REDO_PTR_OFFSET,
		pop_offset(pop, &alloc->size), real_size);
	redo_log_store_last(pop, sec->redo, ALLOC_OP_REDO_HEADER,
		pop_offset(pop, hdr), op_result);

	redo_log_process(pop, sec->redo, MAX_ALLOC_OP_REDO);

	if (lane_release(pop) != 0) {
		ERR("Failed to release the lane");
		ASSERT(0);
	}

	if (heap_unlock_if_run(pop, cnt) != 0) {
		ERR("Failed to release run lock");
		ASSERT(0);
	}

	return 0;

error:
	if (heap_unlock_if_run(pop, cnt) != 0) {
		ERR("Failed to release run lock");
		ASSERT(0);
	}

	return err;
}
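Note that prealloc_construct grows a block strictly in place: the block immediately following cnt must be free and large enough. For example, growing a 3-unit allocation to a size that needs 5 units yields add_size_idx = 2; heap_get_exact_block then carves exactly 2 units off the neighbor, and heap_coalesce merges the two blocks into a single 5-unit block whose new header is published through the redo log.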
Example #12
/*
 * pmalloc_construct -- allocates a new block of memory with a constructor
 *
 * The block offset is written persistently into the off variable, but only
 * after the constructor function has been called.
 *
 * If successful, the function returns zero. Otherwise, an error number
 * is returned.
 */
int
pmalloc_construct(PMEMobjpool *pop, uint64_t *off, size_t size,
	void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg),
	void *arg, uint64_t data_off)
{
	size_t sizeh = size + sizeof (struct allocation_header);

	struct bucket *b = heap_get_best_bucket(pop, sizeh);

	int err = 0;
	uint32_t units = bucket_calc_units(b, sizeh);

	struct memory_block m = {0, 0, units, 0};

	if ((err = heap_get_bestfit_block(pop, b, &m)) != 0)
		return err;

	uint64_t op_result = 0;

	void *block_data = heap_get_block_data(pop, m);
	void *datap = (char *)block_data + sizeof (struct allocation_header);

	ASSERT((uint64_t)block_data % _POBJ_CL_ALIGNMENT == 0);

	uint64_t real_size = bucket_unit_size(b) * units;

	alloc_write_header(pop, block_data, m.chunk_id, m.zone_id, real_size);

	if (constructor != NULL)
		constructor(pop, (char *)datap + data_off, arg);

	if ((err = heap_lock_if_run(pop, m)) != 0)
		return err;

	void *hdr = heap_get_block_header(pop, m, HEAP_OP_ALLOC, &op_result);

	struct lane_section *lane;
	if ((err = lane_hold(pop, &lane, LANE_SECTION_ALLOCATOR)) != 0)
		goto err_lane_hold;

	struct allocator_lane_section *sec =
		(struct allocator_lane_section *)lane->layout;

	redo_log_store(pop, sec->redo, ALLOC_OP_REDO_PTR_OFFSET,
		pop_offset(pop, off), pop_offset(pop, datap));
	redo_log_store_last(pop, sec->redo, ALLOC_OP_REDO_HEADER,
		pop_offset(pop, hdr), op_result);

	redo_log_process(pop, sec->redo, MAX_ALLOC_OP_REDO);

	if (lane_release(pop) != 0) {
		ERR("Failed to release the lane");
		ASSERT(0);
	}

	if (heap_unlock_if_run(pop, m) != 0) {
		ERR("Failed to release run lock");
		ASSERT(0);
	}

	return 0;

err_lane_hold:
	if (heap_unlock_if_run(pop, m) != 0) {
		ERR("Failed to release run lock");
		ASSERT(0);
	}

	if (bucket_insert_block(b, m) != 0) {
		ERR("Failed to recover heap volatile state");
		ASSERT(0);
	}

	return err;
}
Example #13
/*
 * pfree -- deallocates a memory block previously allocated by pmalloc
 *
 * A zero value is written persistently into the off variable.
 *
 * If successful, the function returns zero. Otherwise, an error number
 * is returned.
 */
int
pfree(PMEMobjpool *pop, uint64_t *off, uint64_t data_off)
{
	struct allocation_header *alloc = alloc_get_header(pop, *off);

	int err = 0;

	struct lane_section *lane;
	if ((err = lane_hold(pop, &lane, LANE_SECTION_ALLOCATOR)) != 0)
		return err;

	struct bucket *b = heap_get_chunk_bucket(pop,
		alloc->chunk_id, alloc->zone_id);

	struct memory_block m = get_mblock_from_alloc(pop, b, alloc);

#ifdef DEBUG
	if (!heap_block_is_allocated(pop, m)) {
		ERR("Double free or heap corruption");
		ASSERT(0);
	}
#endif /* DEBUG */

	if ((err = heap_lock_if_run(pop, m)) != 0)
		goto out;

	uint64_t op_result;
	void *hdr;
	struct memory_block res = heap_free_block(pop, b, m, &hdr, &op_result);

	struct allocator_lane_section *sec =
		(struct allocator_lane_section *)lane->layout;

	redo_log_store(pop, sec->redo, ALLOC_OP_REDO_PTR_OFFSET,
		pop_offset(pop, off), 0);
	redo_log_store_last(pop, sec->redo, ALLOC_OP_REDO_HEADER,
		pop_offset(pop, hdr), op_result);

	redo_log_process(pop, sec->redo, MAX_ALLOC_OP_REDO);

	if (heap_unlock_if_run(pop, m) != 0) {
		ERR("Failed to release run lock");
		ASSERT(0);
	}

	VALGRIND_DO_MEMPOOL_FREE(pop,
			(char *)alloc + sizeof (*alloc) + data_off);

	bucket_insert_block(pop, b, res);

	if (bucket_is_small(b) && heap_degrade_run_if_empty(pop, b, res) != 0) {
		ERR("Failed to degrade run");
		ASSERT(0);
	}

out:
	if (lane_release(pop) != 0) {
		ERR("Failed to release the lane");
		ASSERT(0);
	}

	return err;
}
Example #14
/*
 * prealloc_construct -- resizes an existing memory block with a constructor
 *
 * The block offset is written persistently into the off variable, but only
 * after the constructor function has been called.
 *
 * If successful, the function returns zero. Otherwise, an error number
 * is returned.
 */
int
prealloc_construct(PMEMobjpool *pop, uint64_t *off, size_t size,
	void (*constructor)(PMEMobjpool *pop, void *ptr,
	size_t usable_size, void *arg), void *arg, uint64_t data_off)
{
	if (size <= pmalloc_usable_size(pop, *off))
		return 0;

	size_t sizeh = size + sizeof (struct allocation_header);

	int err;

	struct allocation_header *alloc = alloc_get_header(pop, *off);

	struct lane_section *lane;
	lane_hold(pop, &lane, LANE_SECTION_ALLOCATOR);

	struct bucket *b = heap_get_best_bucket(pop, alloc->size);

	uint32_t add_size_idx = b->calc_units(b, sizeh - alloc->size);
	uint32_t new_size_idx = b->calc_units(b, sizeh);
	uint64_t real_size = new_size_idx * b->unit_size;

	struct memory_block cnt = get_mblock_from_alloc(pop, alloc);

	heap_lock_if_run(pop, cnt);

	struct memory_block next = {0, 0, 0, 0};
	if ((err = heap_get_adjacent_free_block(pop, b, &next, cnt, 0)) != 0)
		goto out;

	if (next.size_idx < add_size_idx) {
		err = ENOMEM;
		goto out;
	}

	if ((err = heap_get_exact_block(pop, b, &next, add_size_idx)) != 0)
		goto out;

	struct memory_block *blocks[2] = {&cnt, &next};
	uint64_t op_result;
	void *hdr;
	struct memory_block m =
		heap_coalesce(pop, blocks, 2, HEAP_OP_ALLOC, &hdr, &op_result);

	void *block_data = heap_get_block_data(pop, m);
	void *datap = (char *)block_data + sizeof (struct allocation_header);
	void *userdatap = (char *)datap + data_off;

	/* mark new part as accessible and undefined */
	VALGRIND_DO_MAKE_MEM_UNDEFINED(pop, (char *)block_data + alloc->size,
			real_size - alloc->size);
	/* resize allocated space */
	VALGRIND_DO_MEMPOOL_CHANGE(pop, userdatap, userdatap,
		real_size - sizeof (struct allocation_header) - data_off);

	if (constructor != NULL)
		constructor(pop, userdatap,
			real_size - sizeof (struct allocation_header) -
			data_off, arg);

	struct allocator_lane_section *sec =
		(struct allocator_lane_section *)lane->layout;

	redo_log_store(pop, sec->redo, ALLOC_OP_REDO_PTR_OFFSET,
		pop_offset(pop, &alloc->size), real_size);
	redo_log_store_last(pop, sec->redo, ALLOC_OP_REDO_HEADER,
		pop_offset(pop, hdr), op_result);

	redo_log_process(pop, sec->redo, MAX_ALLOC_OP_REDO);

out:
	heap_unlock_if_run(pop, cnt);
	lane_release(pop);

	return err;
}
Example #15
/*
 * pfree -- deallocates a memory block previously allocated by pmalloc
 *
 * A zero value is written persistently into the off variable.
 *
 * If successful, the function returns zero. Otherwise, an error number
 * is returned.
 */
int
pfree(PMEMobjpool *pop, uint64_t *off)
{
	struct allocation_header *alloc = alloc_get_header(pop, *off);

	int err = 0;

	struct lane_section *lane;
	if ((err = lane_hold(pop, &lane, LANE_SECTION_ALLOCATOR)) != 0)
		return err;

	struct bucket *b = heap_get_best_bucket(pop, alloc->size);

	struct memory_block m = get_mblock_from_alloc(pop, b, alloc);

#ifdef DEBUG
	if (!heap_block_is_allocated(pop, m)) {
		ERR("Double free or heap corruption");
		ASSERT(0);
	}
#endif /* DEBUG */

	if ((err = heap_lock_if_run(pop, m)) != 0)
		goto out;

	uint64_t op_result;
	void *hdr;
	struct memory_block res = heap_free_block(pop, b, m, &hdr, &op_result);

	struct allocator_lane_section *sec =
		(struct allocator_lane_section *)lane->layout;

	redo_log_store(pop, sec->redo, ALLOC_OP_REDO_PTR_OFFSET,
		pop_offset(pop, off), 0);
	redo_log_store_last(pop, sec->redo, ALLOC_OP_REDO_HEADER,
		pop_offset(pop, hdr), op_result);

	redo_log_process(pop, sec->redo, MAX_ALLOC_OP_REDO);

	/*
	 * There's no point in rolling back redo log changes because the
	 * volatile errors don't break the persistent state.
	 */
	if (bucket_insert_block(b, res) != 0) {
		ERR("Failed to update the heap volatile state");
		ASSERT(0);
	}

	if (heap_unlock_if_run(pop, m) != 0) {
		ERR("Failed to release run lock");
		ASSERT(0);
	}

	if (bucket_is_small(b) && heap_degrade_run_if_empty(pop, b, res) != 0) {
		ERR("Failed to degrade run");
		ASSERT(0);
	}

out:
	if (lane_release(pop) != 0) {
		ERR("Failed to release the lane");
		ASSERT(0);
	}

	return err;
}