Example #1
/*
 * calc_block_offset -- (internal) calculates the block offset of an allocation
 */
static uint16_t
calc_block_offset(PMEMobjpool *pop, struct bucket *b,
	struct allocation_header *alloc)
{
	uint16_t block_off = 0;
	if (bucket_is_small(b)) {
		struct memory_block m = {alloc->chunk_id, alloc->zone_id, 0, 0};
		void *data = heap_get_block_data(pop, m);
		uintptr_t diff = (uintptr_t)alloc - (uintptr_t)data;
		block_off = diff / bucket_unit_size(b);
		ASSERT(diff % bucket_unit_size(b) == 0);
	}

	return block_off;
}
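
The computation is plain pointer arithmetic: the allocation's distance from the start of the run's data area, divided by the bucket's unit size. A minimal standalone sketch of the same idea follows; UNIT_SIZE and the flat run buffer are assumptions for illustration, not the real pmemobj layout.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define UNIT_SIZE 128u	/* hypothetical unit size */

static uint16_t
block_offset(const void *run_data, const void *alloc)
{
	uintptr_t diff = (uintptr_t)alloc - (uintptr_t)run_data;
	assert(diff % UNIT_SIZE == 0);	/* allocations begin on unit boundaries */
	return (uint16_t)(diff / UNIT_SIZE);
}

int
main(void)
{
	static char run[16 * UNIT_SIZE];
	printf("%u\n", (unsigned)block_offset(run, run + 3 * UNIT_SIZE)); /* 3 */
	return 0;
}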
Example #2
/*
 * heap_populate_run_bucket -- (internal) splits the run's bitmap into memory blocks
 */
static void
heap_populate_run_bucket(PMEMobjpool *pop, struct bucket *b,
	uint32_t chunk_id, uint32_t zone_id)
{
	struct pmalloc_heap *h = pop->heap;
	struct zone *z = &h->layout->zones[zone_id];
	struct chunk_header *hdr = &z->chunk_headers[chunk_id];
	struct chunk_run *run = (struct chunk_run *)&z->chunks[chunk_id];

	if (hdr->type != CHUNK_TYPE_RUN)
		heap_init_run(pop, b, hdr, run);

	ASSERT(hdr->size_idx == 1);
	ASSERT(bucket_unit_size(b) == run->block_size);

	uint16_t run_bits = RUNSIZE / run->block_size;
	ASSERT(run_bits < (MAX_BITMAP_VALUES * BITS_PER_VALUE));
	uint16_t block_off = 0;
	uint16_t block_size_idx = 0;

	for (int i = 0; i < bucket_bitmap_nval(b); ++i) {
		uint64_t v = run->bitmap[i];
		block_off = BITS_PER_VALUE * i;
		if (v == 0) {
			heap_run_insert(b, chunk_id, zone_id,
				BITS_PER_VALUE, block_off);
			continue;
		} else if (v == ~0L) {
			continue;
		}

		for (int j = 0; j < BITS_PER_VALUE; ++j) {
			if (BIT_IS_CLR(v, j)) {
				block_size_idx++;
			} else if (block_size_idx != 0) {
				heap_run_insert(b, chunk_id, zone_id,
					block_size_idx,
					block_off - block_size_idx);
				block_size_idx = 0;
			}

			if ((block_off++) == run_bits) {
				i = MAX_BITMAP_VALUES;
				break;
			}
		}

		if (block_size_idx != 0) {
			heap_run_insert(b, chunk_id, zone_id, block_size_idx,
				block_off - block_size_idx);
			block_size_idx = 0;
		}
	}
}
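
The scan is a standard bitmap walk: a word equal to 0 yields one maximal 64-block free range, a word of all ones is skipped, and anything in between is examined bit by bit, emitting a block for every maximal run of clear bits. The sketch below isolates that inner loop; BIT_IS_CLR and the printing are local stand-ins, and only the bit arithmetic mirrors the original.

#include <stdint.h>
#include <stdio.h>

#define BITS_PER_VALUE 64u
#define BIT_IS_CLR(v, b) (((v) & (1ULL << (b))) == 0)

/* print every maximal run of clear bits in v as (offset, length) */
static void
scan_free_runs(uint64_t v)
{
	unsigned run_len = 0;

	for (unsigned j = 0; j < BITS_PER_VALUE; ++j) {
		if (BIT_IS_CLR(v, j)) {
			run_len++;
		} else if (run_len != 0) {
			printf("free run: off %u len %u\n", j - run_len, run_len);
			run_len = 0;
		}
	}

	if (run_len != 0)	/* a run that reaches the end of the word */
		printf("free run: off %u len %u\n",
			BITS_PER_VALUE - run_len, run_len);
}

int
main(void)
{
	scan_free_runs(0xFFFFFFFF0000FF00ULL);
	return 0;
}

Note the flush after the loop: a run of clear bits that reaches the end of the word has no set bit to terminate it, so it must be emitted explicitly, exactly as heap_populate_run_bucket does after its inner loop.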
Example #3
File: bucket.c Project: jxy859/nvml
/*
 * bucket_insert_block -- inserts a new memory block into the container
 */
int
bucket_insert_block(PMEMobjpool *pop, struct bucket *b, struct memory_block m)
{
	ASSERT(m.chunk_id < MAX_CHUNK);
	ASSERT(m.zone_id < UINT16_MAX);
	ASSERT(m.size_idx != 0);

#ifdef USE_VG_MEMCHECK
	if (On_valgrind) {
		size_t rsize = m.size_idx * bucket_unit_size(b);
		void *block_data = heap_get_block_data(pop, m);
		VALGRIND_DO_MAKE_MEM_NOACCESS(pop, block_data, rsize);
	}
#endif

	uint64_t key = CHUNK_KEY_PACK(m.zone_id, m.chunk_id, m.block_off,
				m.size_idx);

	return ctree_insert(b->tree, key, 0);
}
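
CHUNK_KEY_PACK condenses the block's four coordinates into a single 64-bit key for the ctree. A sketch of one plausible packing follows; the 16-bit field widths and their order are assumptions for illustration and may not match the real macro.

#include <stdint.h>
#include <stdio.h>

/*
 * Pack four block coordinates into one 64-bit key. The 16-bit fields
 * and their order are an assumption made for this sketch.
 */
static uint64_t
key_pack(uint16_t zone_id, uint16_t chunk_id, uint16_t block_off,
	uint16_t size_idx)
{
	return ((uint64_t)zone_id << 48) | ((uint64_t)chunk_id << 32) |
		((uint64_t)block_off << 16) | size_idx;
}

int
main(void)
{
	/* prints 0x0001000200030004 */
	printf("0x%016llx\n", (unsigned long long)key_pack(1, 2, 3, 4));
	return 0;
}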
Example #4
/*
 * prealloc_construct -- resizes an existing memory block with a constructor
 *
 * The block offset is written persistently into the off variable, but only
 * after the constructor function has been called.
 *
 * If successful, the function returns zero; otherwise an error number is returned.
 */
int
prealloc_construct(PMEMobjpool *pop, uint64_t *off, size_t size,
	void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg), void *arg,
	uint64_t data_off)
{
	if (size <= pmalloc_usable_size(pop, *off))
		return 0;

	size_t sizeh = size + sizeof (struct allocation_header);

	int err = 0;

	struct allocation_header *alloc = alloc_get_header(pop, *off);
	struct bucket *b = heap_get_best_bucket(pop, alloc->size);

	uint32_t add_size_idx = bucket_calc_units(b, sizeh - alloc->size);
	uint32_t new_size_idx = bucket_calc_units(b, sizeh);
	uint64_t real_size = new_size_idx * bucket_unit_size(b);

	struct memory_block cnt = get_mblock_from_alloc(pop, b, alloc);

	if ((err = heap_lock_if_run(pop, cnt)) != 0)
		return err;

	struct memory_block next = {0};
	if ((err = heap_get_adjacent_free_block(pop, &next, cnt, 0)) != 0)
		goto error;

	if (next.size_idx < add_size_idx) {
		err = ENOMEM;
		goto error;
	}

	if ((err = heap_get_exact_block(pop, b, &next,
		add_size_idx)) != 0)
		goto error;

	struct memory_block *blocks[2] = {&cnt, &next};
	uint64_t op_result;
	void *hdr;
	struct memory_block m =
		heap_coalesce(pop, blocks, 2, HEAP_OP_ALLOC, &hdr, &op_result);

	void *block_data = heap_get_block_data(pop, m);
	void *datap = block_data + sizeof (struct allocation_header);
	if (constructor != NULL)
		constructor(pop, datap + data_off, arg);

	struct lane_section *lane;
	if ((err = lane_hold(pop, &lane, LANE_SECTION_ALLOCATOR)) != 0)
		goto error;

	struct allocator_lane_section *sec =
		(struct allocator_lane_section *)lane->layout;

	redo_log_store(pop, sec->redo, ALLOC_OP_REDO_PTR_OFFSET,
		pop_offset(pop, &alloc->size), real_size);
	redo_log_store_last(pop, sec->redo, ALLOC_OP_REDO_HEADER,
		pop_offset(pop, hdr), op_result);

	redo_log_process(pop, sec->redo, MAX_ALLOC_OP_REDO);

	if (lane_release(pop) != 0) {
		ERR("Failed to release the lane");
		ASSERT(0);
	}

	if (heap_unlock_if_run(pop, cnt) != 0) {
		ERR("Failed to release run lock");
		ASSERT(0);
	}

	return 0;

error:
	if (heap_unlock_if_run(pop, cnt) != 0) {
		ERR("Failed to release run lock");
		ASSERT(0);
	}

	return err;
}
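
A hypothetical caller, to make the contract concrete: the constructor sees the coalesced data area before the enlarged size is committed through the redo log, so a crash mid-constructor leaves the old allocation intact. This fragment assumes the project's internal headers; zero_new_space and grow_buffer are made-up names.

#include <string.h>

/* hypothetical constructor: zero the newly usable space */
static void
zero_new_space(PMEMobjpool *pop, void *ptr, void *arg)
{
	size_t new_size = *(size_t *)arg;
	memset(ptr, 0, new_size);	/* runs before the redo log commits */
	pop->persist(ptr, new_size);
}

/* hypothetical wrapper: grow the allocation at *offp to new_size bytes */
static int
grow_buffer(PMEMobjpool *pop, uint64_t *offp, size_t new_size)
{
	return prealloc_construct(pop, offp, new_size,
		zero_new_space, &new_size, 0);
}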
Example #5
/*
 * pmalloc_construct -- allocates a new block of memory with a constructor
 *
 * The block offset is written persistently into the off variable, but only
 * after the constructor function has been called.
 *
 * If successful, the function returns zero; otherwise an error number is returned.
 */
int
pmalloc_construct(PMEMobjpool *pop, uint64_t *off, size_t size,
	void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg),
	void *arg, uint64_t data_off)
{
	size_t sizeh = size + sizeof (struct allocation_header);

	struct bucket *b = heap_get_best_bucket(pop, sizeh);

	int err = 0;
	uint32_t units = bucket_calc_units(b, sizeh);

	struct memory_block m = {0, 0, units, 0};

	if ((err = heap_get_bestfit_block(pop, b, &m)) != 0)
		return err;

	uint64_t op_result = 0;

	void *block_data = heap_get_block_data(pop, m);
	void *datap = block_data + sizeof (struct allocation_header);

	ASSERT((uint64_t)block_data % _POBJ_CL_ALIGNMENT == 0);

	uint64_t real_size = bucket_unit_size(b) * units;

	alloc_write_header(pop, block_data, m.chunk_id, m.zone_id, real_size);

	if (constructor != NULL)
		constructor(pop, datap + data_off, arg);

	if ((err = heap_lock_if_run(pop, m)) != 0)
		return err;

	void *hdr = heap_get_block_header(pop, m, HEAP_OP_ALLOC, &op_result);

	struct lane_section *lane;
	if ((err = lane_hold(pop, &lane, LANE_SECTION_ALLOCATOR)) != 0)
		goto err_lane_hold;

	struct allocator_lane_section *sec =
		(struct allocator_lane_section *)lane->layout;

	redo_log_store(pop, sec->redo, ALLOC_OP_REDO_PTR_OFFSET,
		pop_offset(pop, off), pop_offset(pop, datap));
	redo_log_store_last(pop, sec->redo, ALLOC_OP_REDO_HEADER,
		pop_offset(pop, hdr), op_result);

	redo_log_process(pop, sec->redo, MAX_ALLOC_OP_REDO);

	if (lane_release(pop) != 0) {
		ERR("Failed to release the lane");
		ASSERT(0);
	}

	if (heap_unlock_if_run(pop, m) != 0) {
		ERR("Failed to release run lock");
		ASSERT(0);
	}

	return 0;

err_lane_hold:
	if (heap_unlock_if_run(pop, m) != 0) {
		ERR("Failed to release run lock");
		ASSERT(0);
	}

	if (bucket_insert_block(b, m) != 0) {
		ERR("Failed to recover heap volatile state");
		ASSERT(0);
	}

	return err;
}
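
Hypothetical usage, assuming the project's internal headers: because *off is published by the redo log only after the constructor returns, a crash during record_init can never expose a half-initialized record through the offset slot. struct record and record_alloc are invented for illustration.

#include <string.h>

/* invented record type, for illustration only */
struct record {
	uint64_t id;
	char payload[56];
};

static void
record_init(PMEMobjpool *pop, void *ptr, void *arg)
{
	struct record *r = ptr;
	r->id = *(uint64_t *)arg;
	memset(r->payload, 0, sizeof (r->payload));
	pop->persist(r, sizeof (*r));	/* flushed before *off is published */
}

/* allocate a record and store its offset into the persistent slot *rec_off */
static int
record_alloc(PMEMobjpool *pop, uint64_t *rec_off, uint64_t id)
{
	return pmalloc_construct(pop, rec_off, sizeof (struct record),
		record_init, &id, 0);
}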
Example #6
/*
 * pmalloc_construct -- allocates a new block of memory with a constructor
 *
 * The block offset is written persistently into the off variable, but only
 * after the constructor function has been called.
 *
 * If successful, the function returns zero; otherwise an error number is returned.
 */
int
pmalloc_construct(PMEMobjpool *pop, uint64_t *off, size_t size,
	void (*constructor)(PMEMobjpool *pop, void *ptr,
	size_t usable_size, void *arg), void *arg, uint64_t data_off)
{
	int err = 0;

	struct lane_section *lane;
	if ((err = lane_hold(pop, &lane, LANE_SECTION_ALLOCATOR)) != 0)
		return err;

	size_t sizeh = size + sizeof (struct allocation_header);

	struct bucket *b = heap_get_best_bucket(pop, sizeh);

	struct memory_block m = {0, 0, 0, 0};

	m.size_idx = bucket_calc_units(b, sizeh);

	err = heap_get_bestfit_block(pop, b, &m);

	if (err == ENOMEM && !bucket_is_small(b))
		goto out; /* there's only one huge bucket */

	if (err == ENOMEM) {
		/*
		 * There's no more memory available in the common heap or in
		 * this lane's cache; fall back to the auxiliary (shared) bucket.
		 */
		b = heap_get_auxiliary_bucket(pop, sizeh);
		err = heap_get_bestfit_block(pop, b, &m);
	}

	if (err == ENOMEM) {
		/*
		 * The auxiliary bucket cannot satisfy our request; borrow
		 * memory from other caches.
		 */
		heap_drain_to_auxiliary(pop, b, m.size_idx);
		err = heap_get_bestfit_block(pop, b, &m);
	}

	if (err == ENOMEM) {
		/* we are completely out of memory */
		goto out;
	}

	/*
	 * Now that the memory is reserved we can go ahead with making the
	 * allocation persistent.
	 */
	uint64_t real_size = bucket_unit_size(b) * m.size_idx;
	err = persist_alloc(pop, lane, m, real_size, off,
		constructor, arg, data_off);

out:
	if (lane_release(pop) != 0) {
		ERR("Failed to release the lane");
		ASSERT(0);
	}

	return err;
}
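
Stripped of heap details, this allocation path is a three-tier fallback: the per-lane bucket cache, then the shared auxiliary bucket, then other caches drained into the auxiliary bucket. The generic shape of that control flow, as a sketch with hypothetical source callbacks:

#include <errno.h>

/* a hypothetical allocation source: 0 on success, ENOMEM when empty */
typedef int (*alloc_source)(void *heap, void *out);

/* try each source in order, cheapest first, until one succeeds */
static int
tiered_alloc(void *heap, void *out, alloc_source *sources, int n)
{
	int err = ENOMEM;

	for (int i = 0; i < n && err == ENOMEM; ++i)
		err = sources[i](heap, out);

	return err;	/* ENOMEM only if every tier is exhausted */
}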
Example #7
/*
 * heap_chunk_init -- (internal) writes chunk header
 */
static void
heap_chunk_init(PMEMobjpool *pop, struct chunk_header *hdr,
	uint16_t type, uint32_t size_idx)
{
	struct chunk_header nhdr = {
		.type = type,
		.flags = 0,
		.size_idx = size_idx
	};
	*hdr = nhdr; /* write the entire header (8 bytes) at once */
	pop->persist(hdr, sizeof (*hdr));

	heap_chunk_write_footer(hdr, size_idx);
}

/*
 * heap_zone_init -- (internal) writes zone's first chunk and header
 */
static void
heap_zone_init(PMEMobjpool *pop, uint32_t zone_id)
{
	struct zone *z = &pop->heap->layout->zones[zone_id];
	uint32_t size_idx = get_zone_size_idx(zone_id, pop->heap->max_zone,
			pop->heap_size);

	heap_chunk_init(pop, &z->chunk_headers[0], CHUNK_TYPE_FREE, size_idx);

	struct zone_header nhdr = {
		.size_idx = size_idx,
		.magic = ZONE_HEADER_MAGIC,
	};
	z->header = nhdr;  /* write the entire header (8 bytes) at once */
	pop->persist(&z->header, sizeof (z->header));
}

/*
 * heap_init_run -- (internal) creates a run based on a chunk
 */
static void
heap_init_run(PMEMobjpool *pop, struct bucket *b, struct chunk_header *hdr,
	struct chunk_run *run)
{
	/* cover the chunk_run and chunk_header writes with valgrind transactions */
	VALGRIND_ADD_TO_TX(run, sizeof (*run));
	run->block_size = bucket_unit_size(b);
	pop->persist(&run->block_size, sizeof (run->block_size));

	ASSERT(hdr->type == CHUNK_TYPE_FREE);

	/* set all the bits */
	memset(run->bitmap, 0xFF, sizeof (run->bitmap));

	/* clear only the bits available for allocations from this bucket */
	memset(run->bitmap, 0, sizeof (uint64_t) * (bucket_bitmap_nval(b) - 1));
	run->bitmap[bucket_bitmap_nval(b) - 1] = bucket_bitmap_lastval(b);
	VALGRIND_REMOVE_FROM_TX(run, sizeof (*run));

	pop->persist(run->bitmap, sizeof (run->bitmap));

	VALGRIND_ADD_TO_TX(hdr, sizeof (*hdr));
	hdr->type = CHUNK_TYPE_RUN;
	VALGRIND_REMOVE_FROM_TX(hdr, sizeof (*hdr));

	pop->persist(hdr, sizeof (*hdr));
}
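
bucket_bitmap_nval and bucket_bitmap_lastval are not shown here, but heap_init_run pins down their meaning: the bucket uses nval bitmap words, and the last word keeps its surplus high bits set so blocks past the run's capacity are never treated as free. A sketch of one plausible computation, assuming 64-bit words and that nblocks is the number of usable blocks:

#include <stdint.h>
#include <stdio.h>

#define BITS_PER_VALUE 64u

/*
 * Given nblocks usable blocks, compute how many bitmap words the
 * bucket touches and the value of the last word, whose trailing
 * high bits stay set so they are never handed out.
 */
static void
bitmap_layout(unsigned nblocks, unsigned *nval, uint64_t *lastval)
{
	*nval = (nblocks + BITS_PER_VALUE - 1) / BITS_PER_VALUE;

	unsigned used = nblocks % BITS_PER_VALUE;
	*lastval = (used == 0) ? 0 : ~0ULL << used;
}

int
main(void)
{
	unsigned nval;
	uint64_t lastval;

	bitmap_layout(100, &nval, &lastval);
	/* prints nval=2 lastval=0xfffffff000000000 */
	printf("nval=%u lastval=0x%016llx\n", nval,
		(unsigned long long)lastval);
	return 0;
}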