Example #1
0
/*
 * run_process_bitmap_value -- (internal) looks for runs of unset bits in the
 * value, creates a valid memory block out of each such run and invokes the
 * given callback on it.
 *
 * Returns the first non-zero callback result, or 0 once all
 * RUN_BITS_PER_VALUE bits of the value have been processed.
 */
static int
run_process_bitmap_value(const struct memory_block *m,
	uint64_t value, uint32_t base_offset, object_callback cb, void *arg)
{
	int ret = 0;

	uint64_t shift = 0; /* already processed bits */
	struct memory_block s = *m;
	do {
		/*
		 * Shift the value so that the next memory block starts on the
		 * least significant position:
		 *	..............0 (free block)
		 * or	..............1 (used block)
		 */
		uint64_t shifted = value >> shift;

		/* all clear or set bits indicate the end of traversal */
		if (shifted == 0) {
			/*
			 * Insert the remaining blocks as free. Remember that
			 * unsigned values are always zero-filled, so we must
			 * take the current shift into account.
			 */
			s.block_off = (uint32_t)(base_offset + shift);
			s.size_idx = (uint32_t)(RUN_BITS_PER_VALUE - shift);

			/*
			 * Rebuild the block's derived state before handing it
			 * to the callback, consistent with the partial-run
			 * path below -- block_off/size_idx were just changed.
			 */
			memblock_rebuild_state(m->heap, &s);
			if ((ret = cb(&s, arg)) != 0)
				return ret;

			break;
		} else if (shifted == UINT64_MAX) {
			break;
		}

		/*
		 * Offset and size of the next free block, either of these
		 * can be zero depending on where the free block is located
		 * in the value: exactly one of them is non-zero, depending
		 * on whether the least significant bit is set (used run to
		 * skip) or clear (free run to report).
		 */
		unsigned off = (unsigned)util_lssb_index64(~shifted);
		unsigned size = (unsigned)util_lssb_index64(shifted);

		shift += off + size;

		if (size != 0) { /* zero size means skip over a used run */
			s.block_off = (uint32_t)(base_offset + (shift - size));
			s.size_idx = (uint32_t)(size);

			memblock_rebuild_state(m->heap, &s);
			if ((ret = cb(&s, arg)) != 0)
				return ret;
		}
	} while (shift != RUN_BITS_PER_VALUE);

	return 0;
}
Example #2
0
/*
 * container_seglists_get_rm_block_bestfit -- (internal) removes and returns the
 *	best-fit memory block for size
 */
static int
container_seglists_get_rm_block_bestfit(struct block_container *bc,
	struct memory_block *m)
{
	struct block_container_seglists *c =
		(struct block_container_seglists *)bc;

	ASSERT(m->size_idx <= SEGLIST_BLOCK_LISTS);

	/*
	 * Lists serving sizes smaller than the request are masked out;
	 * what remains are the applicable non-empty lists.
	 */
	uint64_t too_small = (1ULL << (m->size_idx - 1)) - 1;
	uint64_t candidates = c->nonempty_lists & ~too_small;
	if (candidates == 0)
		return ENOMEM;

	/* the lowest set bit selects the smallest size that still fits */
	uint32_t list_idx = util_lssb_index64(candidates);

	uint32_t off = VECQ_DEQUEUE(&c->blocks[list_idx]);

	if (VECQ_SIZE(&c->blocks[list_idx]) == 0) /* list drained, mark empty */
		c->nonempty_lists &= ~(1ULL << (list_idx));

	*m = c->m;
	m->block_off = off;
	m->size_idx = list_idx + 1;

	return 0;
}