Example #1
/*
 * heap_run_get_block -- (internal) returns next/prev memory block from run
 */
static int
heap_run_get_block(PMEMobjpool *pop, struct chunk_run *r,
	struct memory_block *mblock, uint16_t size_idx, uint16_t block_off,
	int prev)
{
	int v = block_off / BITS_PER_VALUE;
	int b = block_off % BITS_PER_VALUE;

	if (prev) {
		int i;
		for (i = b - 1;
			(i + 1) % RUN_UNIT_MAX && BIT_IS_CLR(r->bitmap[v], i);
			--i);

		mblock->block_off = (v * BITS_PER_VALUE) + (i + 1);
		mblock->size_idx = block_off - mblock->block_off;
	} else { /* next */
		int i;
		for (i = b + size_idx;
			i % RUN_UNIT_MAX && BIT_IS_CLR(r->bitmap[v], i);
			++i);

		mblock->block_off = block_off + size_idx;
		mblock->size_idx = i - (b + size_idx);
	}

	if (mblock->size_idx == 0)
		return ENOENT;

	return 0;
}
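The example leans on a few macros from the surrounding heap headers. A minimal sketch of what it assumes, with illustrative values (the real definitions live in the layout headers and may differ):

#include <stdint.h>

/* assumed: each bitmap word tracks 64 allocation units */
#define BITS_PER_VALUE 64
/* assumed: boundary at which the neighbor scan stops; value is illustrative */
#define RUN_UNIT_MAX 16
/* assumed: bit i of word a is zero, i.e. the unit is free */
#define BIT_IS_CLR(a, i) (!((a) & (1ULL << (i))))

With these, the prev branch walks left from the block until it hits a set bit or a RUN_UNIT_MAX boundary and reports that free neighbor in mblock; the next branch does the same to the right. A resulting size_idx of 0 means the adjacent unit is allocated, hence ENOENT.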
Example #2
/*
 * heap_populate_run_bucket -- (internal) split bitmap into memory blocks
 */
static void
heap_populate_run_bucket(PMEMobjpool *pop, struct bucket *b,
	uint32_t chunk_id, uint32_t zone_id)
{
	struct pmalloc_heap *h = pop->heap;
	struct zone *z = &h->layout->zones[zone_id];
	struct chunk_header *hdr = &z->chunk_headers[chunk_id];
	struct chunk_run *run = (struct chunk_run *)&z->chunks[chunk_id];

	if (hdr->type != CHUNK_TYPE_RUN)
		heap_init_run(pop, b, hdr, run);

	ASSERT(hdr->size_idx == 1);
	ASSERT(bucket_unit_size(b) == run->block_size);

	uint16_t run_bits = RUNSIZE / run->block_size;
	ASSERT(run_bits < (MAX_BITMAP_VALUES * BITS_PER_VALUE));
	uint16_t block_off = 0;
	uint16_t block_size_idx = 0;

	for (int i = 0; i < bucket_bitmap_nval(b); ++i) {
		uint64_t v = run->bitmap[i];
		block_off = BITS_PER_VALUE * i;
		if (v == 0) {
			heap_run_insert(b, chunk_id, zone_id,
				BITS_PER_VALUE, block_off);
			continue;
		} else if (v == ~0L) {
			continue;
		}

		for (int j = 0; j < BITS_PER_VALUE; ++j) {
			if (BIT_IS_CLR(v, j)) {
				block_size_idx++;
			} else if (block_size_idx != 0) {
				heap_run_insert(b, chunk_id, zone_id,
					block_size_idx,
					block_off - block_size_idx);
				block_size_idx = 0;
			}

			if ((block_off++) == run_bits) {
				i = MAX_BITMAP_VALUES;
				break;
			}
		}

		if (block_size_idx != 0) {
			heap_run_insert(b, chunk_id, zone_id, block_size_idx,
				block_off - block_size_idx);
			block_size_idx = 0;
		}
	}
}
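The core of this function is the inner loop, which turns each maximal run of clear bits in a 64-bit bitmap word into one free memory block. A self-contained sketch of just that decoding step, under the same BIT_IS_CLR assumption as above:

#include <stdint.h>
#include <stdio.h>

#define BITS_PER_VALUE 64U
#define BIT_IS_CLR(a, i) (!((a) & (1ULL << (i))))

/* toy version of the inner loop: report every maximal run of clear
 * (free) bits in a single bitmap word as an (offset, length) pair */
static void
report_free_runs(uint64_t v)
{
	unsigned run_len = 0;

	for (unsigned j = 0; j < BITS_PER_VALUE; ++j) {
		if (BIT_IS_CLR(v, j)) {
			run_len++;
		} else if (run_len != 0) {
			printf("free block: off %u len %u\n",
				j - run_len, run_len);
			run_len = 0;
		}
	}
	if (run_len != 0) /* a free run reaching the last bit */
		printf("free block: off %u len %u\n",
			BITS_PER_VALUE - run_len, run_len);
}

For example, report_free_runs(~0x00FF000000000000ULL) prints a single free block at offset 48 of length 8, which is exactly the shape heap_run_insert receives above.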
Example #3
/*
 * heap_run_foreach_object -- (internal) iterates through objects in a run
 */
int
heap_run_foreach_object(struct palloc_heap *heap, object_callback cb,
		void *arg, struct memory_block *m, struct alloc_class *c)
{
	if (c == NULL)
		return -1;

	uint16_t i = m->block_off / BITS_PER_VALUE;
	uint16_t block_start = m->block_off % BITS_PER_VALUE;
	uint16_t block_off;

	struct chunk_run *run = (struct chunk_run *)
		&ZID_TO_ZONE(heap->layout, m->zone_id)->chunks[m->chunk_id];

	for (; i < c->run.bitmap_nval; ++i) {
		uint64_t v = run->bitmap[i];
		block_off = (uint16_t)(BITS_PER_VALUE * i);

		for (uint16_t j = block_start; j < BITS_PER_VALUE; ) {
			if (block_off + j >= (uint16_t)c->run.bitmap_nallocs)
				break;

			if (!BIT_IS_CLR(v, j)) {
				m->block_off = (uint16_t)(block_off + j);

				/*
				 * The size index of this memory block cannot be
				 * retrieved at this time because the header
				 * might not be initialized in valgrind yet.
				 */
				m->size_idx = 0;

				if (cb(m, arg) != 0)
					return 1;

				m->size_idx = CALC_SIZE_IDX(c->unit_size,
					m->m_ops->get_real_size(m));
				j = (uint16_t)(j + m->size_idx);
			} else {
				++j;
			}
		}
		block_start = 0;
	}

	return 0;
}
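The caller drives this with an object_callback; any nonzero return stops the walk and surfaces as a return value of 1. A hypothetical callback that just counts live objects (the exact object_callback signature is assumed here):

/* hypothetical: counts allocated blocks visited during the walk */
static int
count_objects_cb(const struct memory_block *m, void *arg)
{
	size_t *count = arg;

	(void) m;
	*count += 1;
	return 0; /* nonzero would abort the iteration */
}

With size_t n = 0, a call like heap_run_foreach_object(heap, count_objects_cb, &n, m, c) leaves the number of allocated blocks in n.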
Example #4
/*
 * run_iterate_used -- iterates over used blocks in a run
 */
static int
run_iterate_used(const struct memory_block *m, object_callback cb, void *arg)
{
	uint32_t i = m->block_off / RUN_BITS_PER_VALUE;
	uint32_t block_start = m->block_off % RUN_BITS_PER_VALUE;
	uint32_t block_off;

	struct chunk_run *run = heap_get_chunk_run(m->heap, m);

	struct memory_block iter = *m;

	struct run_bitmap b;
	run_get_bitmap(m, &b);

	for (; i < b.nvalues; ++i) {
		uint64_t v = b.values[i];
		block_off = (uint32_t)(RUN_BITS_PER_VALUE * i);

		for (uint32_t j = block_start; j < RUN_BITS_PER_VALUE; ) {
			if (block_off + j >= (uint32_t)b.nbits)
				break;

			if (!BIT_IS_CLR(v, j)) {
				iter.block_off = (uint32_t)(block_off + j);

				/*
				 * The size index of this memory block cannot be
				 * retrieved at this time because the header
				 * might not be initialized in valgrind yet.
				 */
				iter.size_idx = 0;

				if (cb(&iter, arg) != 0)
					return 1;

				iter.size_idx = CALC_SIZE_IDX(
					run->hdr.block_size,
					iter.m_ops->get_real_size(&iter));
				j = (uint32_t)(j + iter.size_idx);
			} else {
				++j;
			}
		}
		block_start = 0;
	}

	return 0;
}
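Example #4 is the same traversal after a refactor: the bitmap geometry comes from run_get_bitmap() instead of the alloc_class, and the loop advances a local copy iter rather than mutating the caller's block. A rough sketch of the struct run_bitmap fields the loop relies on (the actual definition may carry more):

/* assumed shape, inferred from the fields used above */
struct run_bitmap {
	unsigned nvalues;  /* number of 64-bit words, was bitmap_nval */
	unsigned nbits;    /* number of usable bits, was bitmap_nallocs */
	uint64_t *values;  /* the bitmap words themselves */
};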
Example #5
/*
 * run_get_state -- returns whether a block from a run is allocated or not
 */
static enum memblock_state
run_get_state(const struct memory_block *m)
{
	struct run_bitmap b;
	run_get_bitmap(m, &b);

	unsigned v = m->block_off / RUN_BITS_PER_VALUE;
	uint64_t bitmap = b.values[v];
	unsigned bit = m->block_off % RUN_BITS_PER_VALUE;

	unsigned bit_last = bit + m->size_idx;
	ASSERT(bit_last <= RUN_BITS_PER_VALUE);

	for (unsigned i = bit; i < bit_last; ++i) {
		if (!BIT_IS_CLR(bitmap, i)) {
			return MEMBLOCK_ALLOCATED;
		}
	}

	return MEMBLOCK_FREE;
}
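Since ASSERT(bit_last <= RUN_BITS_PER_VALUE) confines the block to a single bitmap word, the whole check reduces to: is any bit in [bit, bit_last) of that word set? A standalone equivalent:

#include <stdbool.h>
#include <stdint.h>

/* true iff any bit in [first, last) of w is set, i.e. at least one
 * unit of the block is allocated */
static bool
any_bit_set(uint64_t w, unsigned first, unsigned last)
{
	for (unsigned i = first; i < last; ++i)
		if (w & (1ULL << i))
			return true;
	return false;
}

For last - first < 64 the loop could also be written branch-free as (w >> first) & ((1ULL << (last - first)) - 1), at some cost in readability.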
Example #6
/*
 * run_get_state -- returns whether a block from a run is allocated or not
 */
static enum memblock_state
run_get_state(struct memory_block *m, struct palloc_heap *heap)
{
	struct zone *z = ZID_TO_ZONE(heap->layout, m->zone_id);
	struct chunk_header *hdr = &z->chunk_headers[m->chunk_id];
	ASSERTeq(hdr->type, CHUNK_TYPE_RUN);

	struct chunk_run *r = (struct chunk_run *)&z->chunks[m->chunk_id];

	unsigned v = m->block_off / BITS_PER_VALUE;
	uint64_t bitmap = r->bitmap[v];
	unsigned b = m->block_off % BITS_PER_VALUE;

	unsigned b_last = b + m->size_idx;
	ASSERT(b_last <= BITS_PER_VALUE);

	for (unsigned i = b; i < b_last; ++i) {
		if (!BIT_IS_CLR(bitmap, i)) {
			return MEMBLOCK_ALLOCATED;
		}
	}

	return MEMBLOCK_FREE;
}
Example #7
/*
 * heap_chunk_init -- (internal) writes chunk header
 */
static void
heap_chunk_init(struct palloc_heap *heap, struct chunk_header *hdr,
	uint16_t type, uint32_t size_idx)
{
	struct chunk_header nhdr = {
		.type = type,
		.flags = 0,
		.size_idx = size_idx
	};
	VALGRIND_DO_MAKE_MEM_UNDEFINED(hdr, sizeof(*hdr));

	*hdr = nhdr; /* write the entire header (8 bytes) at once */
	pmemops_persist(&heap->p_ops, hdr, sizeof(*hdr));

	heap_chunk_write_footer(hdr, size_idx);
}

/*
 * heap_zone_init -- (internal) writes zone's first chunk and header
 */
static void
heap_zone_init(struct palloc_heap *heap, uint32_t zone_id)
{
	struct zone *z = ZID_TO_ZONE(heap->layout, zone_id);
	uint32_t size_idx = get_zone_size_idx(zone_id, heap->rt->max_zone,
			heap->size);

	heap_chunk_init(heap, &z->chunk_headers[0], CHUNK_TYPE_FREE, size_idx);

	struct zone_header nhdr = {
		.size_idx = size_idx,
		.magic = ZONE_HEADER_MAGIC,
	};
	z->header = nhdr;  /* write the entire header (8 bytes) at once */
	pmemops_persist(&heap->p_ops, &z->header, sizeof(z->header));
}

/*
 * heap_run_init -- (internal) creates a run based on a chunk
 */
static void
heap_run_init(struct palloc_heap *heap, struct bucket *b,
	const struct memory_block *m)
{
	struct alloc_class *c = b->aclass;
	ASSERTeq(c->type, CLASS_RUN);

	struct zone *z = ZID_TO_ZONE(heap->layout, m->zone_id);

	struct chunk_run *run = (struct chunk_run *)&z->chunks[m->chunk_id];
	ASSERTne(m->size_idx, 0);
	size_t runsize = SIZEOF_RUN(run, m->size_idx);

	VALGRIND_DO_MAKE_MEM_UNDEFINED(run, runsize);

	/* add/remove chunk_run and chunk_header to valgrind transaction */
	VALGRIND_ADD_TO_TX(run, runsize);
	run->block_size = c->unit_size;
	pmemops_persist(&heap->p_ops, &run->block_size,
			sizeof(run->block_size));

	/* set all the bits */
	memset(run->bitmap, 0xFF, sizeof(run->bitmap));

	unsigned nval = c->run.bitmap_nval;
	ASSERT(nval > 0);
	/* clear only the bits available for allocations from this bucket */
	memset(run->bitmap, 0, sizeof(uint64_t) * (nval - 1));
	run->bitmap[nval - 1] = c->run.bitmap_lastval;

	run->incarnation_claim = heap->run_id;
	VALGRIND_SET_CLEAN(&run->incarnation_claim,
		sizeof(run->incarnation_claim));

	VALGRIND_REMOVE_FROM_TX(run, runsize);

	pmemops_persist(&heap->p_ops, run->bitmap, sizeof(run->bitmap));

	struct chunk_header run_data_hdr;
	run_data_hdr.type = CHUNK_TYPE_RUN_DATA;
	run_data_hdr.flags = 0;

	struct chunk_header *data_hdr;
	for (unsigned i = 1; i < m->size_idx; ++i) {
		data_hdr = &z->chunk_headers[m->chunk_id + i];
		VALGRIND_DO_MAKE_MEM_UNDEFINED(data_hdr, sizeof(*data_hdr));
		VALGRIND_ADD_TO_TX(data_hdr, sizeof(*data_hdr));
		run_data_hdr.size_idx = i;
		*data_hdr = run_data_hdr;
		VALGRIND_REMOVE_FROM_TX(data_hdr, sizeof(*data_hdr));
	}
	pmemops_persist(&heap->p_ops,
		&z->chunk_headers[m->chunk_id + 1],
		sizeof(struct chunk_header) * (m->size_idx - 1));

	struct chunk_header *hdr = &z->chunk_headers[m->chunk_id];
	ASSERT(hdr->type == CHUNK_TYPE_FREE);

	VALGRIND_ADD_TO_TX(hdr, sizeof(*hdr));
	struct chunk_header run_hdr;
	run_hdr.size_idx = hdr->size_idx;
	run_hdr.type = CHUNK_TYPE_RUN;
	run_hdr.flags = header_type_to_flag[c->header_type];
	*hdr = run_hdr;
	VALGRIND_REMOVE_FROM_TX(hdr, sizeof(*hdr));

	pmemops_persist(&heap->p_ops, hdr, sizeof(*hdr));
}

/*
 * heap_run_insert -- (internal) inserts and splits a block of memory into a run
 */
static void
heap_run_insert(struct palloc_heap *heap, struct bucket *b,
	const struct memory_block *m, uint32_t size_idx, uint16_t block_off)
{
	struct alloc_class *c = b->aclass;
	ASSERTeq(c->type, CLASS_RUN);

	ASSERT(size_idx <= BITS_PER_VALUE);
	ASSERT(block_off + size_idx <= c->run.bitmap_nallocs);

	uint32_t unit_max = c->run.unit_max;
	struct memory_block nm = *m;
	nm.size_idx = unit_max - (block_off % unit_max);
	nm.block_off = block_off;
	if (nm.size_idx > size_idx)
		nm.size_idx = size_idx;

	do {
		bucket_insert_block(b, &nm);
		ASSERT(nm.size_idx <= UINT16_MAX);
		ASSERT(nm.block_off + nm.size_idx <= UINT16_MAX);
		nm.block_off = (uint16_t)(nm.block_off + (uint16_t)nm.size_idx);
		size_idx -= nm.size_idx;
		nm.size_idx = size_idx > unit_max ? unit_max : size_idx;
	} while (size_idx != 0);
}

/*
 * heap_process_run_metadata -- (internal) parses the run bitmap
 */
static uint32_t
heap_process_run_metadata(struct palloc_heap *heap, struct bucket *b,
	const struct memory_block *m)
{
	struct alloc_class *c = b->aclass;
	ASSERTeq(c->type, CLASS_RUN);

	uint16_t block_off = 0;
	uint16_t block_size_idx = 0;
	uint32_t inserted_blocks = 0;

	struct zone *z = ZID_TO_ZONE(heap->layout, m->zone_id);
	struct chunk_run *run = (struct chunk_run *)&z->chunks[m->chunk_id];

	for (unsigned i = 0; i < c->run.bitmap_nval; ++i) {
		ASSERT(i < MAX_BITMAP_VALUES);
		uint64_t v = run->bitmap[i];
		ASSERT(BITS_PER_VALUE * i <= UINT16_MAX);
		block_off = (uint16_t)(BITS_PER_VALUE * i);
		if (v == 0) {
			heap_run_insert(heap, b, m, BITS_PER_VALUE, block_off);
			inserted_blocks += BITS_PER_VALUE;
			continue;
		} else if (v == UINT64_MAX) {
			continue;
		}

		for (unsigned j = 0; j < BITS_PER_VALUE; ++j) {
			if (BIT_IS_CLR(v, j)) {
				block_size_idx++;
			} else if (block_size_idx != 0) {
				ASSERT(block_off >= block_size_idx);

				heap_run_insert(heap, b, m,
					block_size_idx,
					(uint16_t)(block_off - block_size_idx));
				inserted_blocks += block_size_idx;
				block_size_idx = 0;
			}

			if ((block_off++) == c->run.bitmap_nallocs) {
				i = MAX_BITMAP_VALUES;
				break;
			}
		}

		if (block_size_idx != 0) {
			ASSERT(block_off >= block_size_idx);

			heap_run_insert(heap, b, m,
					block_size_idx,
					(uint16_t)(block_off - block_size_idx));
			inserted_blocks += block_size_idx;
			block_size_idx = 0;
		}
	}

	return inserted_blocks;
}

/*
 * heap_create_run -- (internal) initializes a new run on an existing free chunk
 */
static void
heap_create_run(struct palloc_heap *heap, struct bucket *b,
	struct memory_block *m)
{
	heap_run_init(heap, b, m);
	memblock_rebuild_state(heap, m);
	heap_process_run_metadata(heap, b, m);
}
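Worth noting is the splitting arithmetic in heap_run_insert: the first piece is trimmed so that every subsequent piece starts on a unit_max boundary, and no piece exceeds unit_max units. A toy rendition that prints the pieces instead of inserting them into a bucket (split_blocks is a made-up name):

#include <stdio.h>

/* mirrors the do/while in heap_run_insert: trim the first piece to
 * the next unit_max boundary, then emit unit_max-sized pieces until
 * size_idx units are consumed */
static void
split_blocks(unsigned block_off, unsigned size_idx, unsigned unit_max)
{
	unsigned off = block_off;
	unsigned len = unit_max - (block_off % unit_max);

	if (len > size_idx)
		len = size_idx;

	do {
		printf("insert block: off %u len %u\n", off, len);
		off += len;
		size_idx -= len;
		len = size_idx > unit_max ? unit_max : size_idx;
	} while (size_idx != 0);
}

For instance, split_blocks(10, 20, 8) emits (10, 6), (16, 8) and (24, 6): after the trimmed first piece, every offset is a multiple of unit_max, which keeps free blocks from straddling the boundaries the run buckets are sized around.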