Exemple #1
0
/* Prepare a bucket for use: set up its backing slot slab and reset the
 * capacity/occupancy counters.  Always succeeds and returns 0. */
static int init_bucket(Bucket * const bucket)
{
    init_slab(&bucket->slab, sizeof(Slot), "slots");

    /* A freshly initialized bucket holds its full capacity and no busy slots. */
    bucket->busy_slots = (NbSlots) 0U;
    bucket->bucket_size = (NbSlots) BUCKET_SIZE;

    return 0;
}
Exemple #2
0
static struct page *make_new_slab(struct kmem_cache *cache)
{
    struct page *page = alloc_pages(cache->pages_per_slab);
    if (page)
    {
        init_slab(page, cache);
    }
    return page;
}
Exemple #3
0
/* Initialize a slab cache without any allocations */
/* Initialize a slab cache without any allocations.
 *
 * slab_cache  - cache descriptor to fill in
 * slab        - caller-provided SLAB_SIZE region used as the first (empty) slab
 * object_size - size in bytes of each object the cache will hand out
 * flags       - page flags stored for later slab allocations
 *
 * Finds the largest object count such that the objects plus their free-bitmap
 * (rounded up to a multiple of 4 bytes) fit in the slab after the header.
 * If not even one object fits, the function returns without initializing the
 * geometry fields.
 *
 * NOTE(review): `ceil(a, b)` is assumed to be a project macro performing
 * integer ceiling division (math.h ceil takes one double) — confirm.
 */
void slab_cache_init(slab_cache_t *slab_cache, void *slab, size_t object_size, int flags)
{
	/* Fill in the slab cache information */
	slab_cache->object_size = object_size;
	slab_cache->flags = flags;
	slab_cache->empty = (slab_header_t*) slab;
	slab_cache->partial = NULL;
	slab_cache->full = NULL;
	spinlock_recursive_init(&slab_cache->lock);

	/* Calculate the amount of available space in the slab */
	size_t available_space = SLAB_SIZE - sizeof(slab_header_t);

	/* Find the largest number of objects we can pack into the slab */
	size_t objs_per_slab = 0;
	while (true)
	{
		/* Calculate the space needed for one more object and its bitmap */
		size_t object_space = (objs_per_slab + 1) * object_size;
		size_t bitmap_space = ceil(objs_per_slab + 1, 8);

		/* Adjust the bitmap space to a multiple of 4, if needed */
		if (bitmap_space & 3)
		{
			bitmap_space = (bitmap_space & ~3) + 4;
		}

		/* If the needed space exceeds the available space, end the loop */
		if (object_space + bitmap_space > available_space)
		{
			/* We can't put any objects in the slab */
			if (objs_per_slab == 0) return;

			/* BUG FIX: bitmap_space above was sized for objs_per_slab + 1
			 * objects — the count that did NOT fit.  Recompute it for the
			 * count that actually fits; otherwise, when objs_per_slab is a
			 * multiple of 32, the header is 4 bytes too large and the
			 * objects can run past the end of the slab. */
			bitmap_space = ceil(objs_per_slab, 8);
			if (bitmap_space & 3)
			{
				bitmap_space = (bitmap_space & ~3) + 4;
			}

			/* Fill in the slab header size and number of objects per slab */
			slab_cache->slab_header_size = sizeof(slab_header_t) + bitmap_space;
			slab_cache->objs_per_slab = objs_per_slab;

			/* Set up the slab's data */
			init_slab(slab_cache, (slab_header_t*) slab, bitmap_space);

			return;
		}

		/* We can fit one more object than our original guess */
		objs_per_slab++;
	}
}
Exemple #4
0
/* Allocate an object from a slab cache */
void *slab_cache_alloc(slab_cache_t *slab_cache)
{
	/* Lock the entire slab cache */
	spinlock_recursive_acquire(&slab_cache->lock);

	/* Take a partial slab if possible, or an empty one if not */
find_slab: ;
	slab_header_t *slab_header = NULL;
	if (slab_cache->partial)
	{
		/* Use the first partial slab we see */
		slab_header = slab_cache->partial;

		/* No objects left after allocation */
		if (slab_header->num_free_objs == 1)
		{
			slab_header_t *old_full_head = slab_cache->full;
			slab_cache->partial = slab_header->next;
			slab_header->next = old_full_head;
			slab_cache->full = slab_header;
		}
	}
	/* Empty slab available */
	else if (slab_cache->empty)
	{
		/* Use the first empty slab we see */
		slab_header = slab_cache->empty;

		/* No objects left after allocation, so put the slab in full */
		if (slab_header->num_free_objs == 1)
		{
			slab_header_t *old_full_head = slab_cache->full;
			slab_cache->empty = slab_header->next;
			slab_header->next = old_full_head;
			slab_cache->full = slab_header;
		}
		/* Otherwise, put it in the partial list */
		else
		{
			slab_header_t *old_partial_head = slab_cache->partial;
			slab_cache->empty = slab_header->next;
			slab_header->next = old_partial_head;
			slab_cache->partial = slab_header;
		}
	}
	/* No empty or partial slabs available */
	else
	{
		/* Allocate a new slab and fill in its information */
		slab_header = (slab_header_t*) addrspace_alloc(ADDRSPACE_SYSTEM, SLAB_SIZE, SLAB_SIZE, slab_cache->flags | PAGE_GLOBAL);
		if (!slab_header)
		{
			spinlock_recursive_release(&slab_cache->lock);
			return NULL;
		}
		init_slab(slab_cache, slab_header, slab_cache->slab_header_size - sizeof(slab_header_t));

		/* Add it to the empty list and redo this */
		slab_header->next = slab_cache->empty;
		slab_cache->empty = slab_header;
		goto find_slab;
	}

	/* Now that we've found a slab, we can lock it and unlock the entire cache */
	spinlock_recursive_acquire(&slab_header->lock);
	spinlock_recursive_release(&slab_cache->lock);

	/* One less object in the slab */
	slab_header->num_free_objs--;

	/* Search the bitmap for an available free object */
	size_t bitmap_space = slab_cache->slab_header_size - sizeof(slab_header_t);
	for (size_t i = 0; i < bitmap_space; i++)
	{
		uint8_t byte = slab_header->free_bitmap[i];

		for (uint8_t j = 0; j < 8; j++)
		{
			if (byte == 0xFF)
			{
				break;
			}

			if (byte & 1)
			{
				byte >>= 1;
				continue;
			}

			slab_header->free_bitmap[i] |= (1 << j);

			size_t object_num = (i * 8) + j;
			void *object = ((void*) slab_header) + slab_cache->slab_header_size + (object_num * slab_cache->object_size);
			spinlock_recursive_release(&slab_header->lock);
			return object;
		}
	}