Example 1
/* Initialize an address space */
void addrspace_init(addrspace_t *addrspace, paddr_t address_space, vaddr_t free_start, vaddr_t free_length)
{
	/* System address space */
	if (addrspace == ADDRSPACE_SYSTEM)
	{
		/* Create the initial slab cache for VADs */
		for (size_t i = free_start; i < free_start + SLAB_SIZE; i += PAGE_SIZE)
		{
			int color = vaddr_cache_color(i, NUMA_DOMAIN_BEST, 0);
			vmm_map_page(address_space, i, pmm_alloc_page(0, NUMA_DOMAIN_BEST, color), PAGE_READ | PAGE_WRITE | PAGE_GLOBAL);
		}
		vad_cache = (slab_cache_t*) free_start;
		slab_cache_init(vad_cache, sizeof(vad_t), PAGE_READ | PAGE_WRITE);
		free_start += SLAB_SIZE;
		free_length -= SLAB_SIZE;

		/* Set up the pointer to the system address space */
		addrspace = &system_addrspace;
	}

	/* Fill in the information */
	addrspace->address_space = address_space;
	addrspace->numa_domain = NUMA_DOMAIN_CURRENT;
	spinlock_recursive_init(&addrspace->lock);

	/* Initialize the free VAD */
	addrspace->free.start = free_start;
	addrspace->free.length = free_length;
	addrspace->free.flags = 0;
	addrspace->free.prev = addrspace->free.next = NULL;

	/* Initialize the used VAD */
	addrspace->used_root = &addrspace->used;
	addrspace->used.height = 0;
	if (addrspace == &system_addrspace)
	{
		addrspace->used.start = KERNEL_ADDRSPACE_START;
		addrspace->used.length = free_start - KERNEL_ADDRSPACE_START;
		addrspace->used.flags = PAGE_READ | PAGE_WRITE | PAGE_EXECUTE | PAGE_PRIVATE;
		addrspace->used.left = addrspace->used.right = NULL;
	}
	else
	{
		addrspace->used.start = USER_ADDRSPACE_START;
		addrspace->used.length = free_start - USER_ADDRSPACE_START;
		addrspace->used.flags = PAGE_INVALID | PAGE_PRIVATE;
		addrspace->used.left = addrspace->used.right = NULL;
	}
}
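
Reconstructed for reference, a sketch of the structures addrspace_init() appears to operate on, inferred only from the field accesses in the example above. The stand-in typedefs, field types, and field order are assumptions; the project's real definitions may differ or contain additional members.

#include <stdint.h>

/* Stand-ins for the project's own basic types (assumed) */
typedef uintptr_t paddr_t;
typedef uintptr_t vaddr_t;
typedef struct { volatile int owner; volatile int depth; } spinlock_recursive_t; /* placeholder */

/* Virtual address descriptor (VAD), as implied by the list and tree fields used above */
typedef struct vad
{
	vaddr_t start;                /* First address covered by the region */
	vaddr_t length;               /* Length of the region in bytes */
	int flags;                    /* PAGE_* protection/attribute flags */
	struct vad *prev, *next;      /* Links for the free-region list */
	struct vad *left, *right;     /* Links for the used-region tree */
	int height;                   /* Balance information for the tree */
} vad_t;

/* Address space descriptor, as implied by addrspace_init() */
typedef struct addrspace
{
	paddr_t address_space;        /* Physical address of the top-level page table */
	int numa_domain;              /* Preferred NUMA domain */
	spinlock_recursive_t lock;    /* Protects the VAD structures */
	vad_t free;                   /* Head of the free-region list */
	vad_t used;                   /* First node of the used-region tree */
	vad_t *used_root;             /* Current root of the used-region tree */
} addrspace_t;
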
Example 2
/* Initialize a slab cache without any allocations */
void slab_cache_init(slab_cache_t *slab_cache, void *slab, size_t object_size, int flags)
{
	/* Fill in the slab cache information */
	slab_cache->object_size = object_size;
	slab_cache->flags = flags;
	slab_cache->empty = (slab_header_t*) slab;
	slab_cache->partial = NULL;
	slab_cache->full = NULL;
	spinlock_recursive_init(&slab_cache->lock);

	/* Calculate the amount of available space in the slab */
	size_t available_space = SLAB_SIZE - sizeof(slab_header_t);

	/* Find the largest number of objects we can pack into the slab */
	size_t objs_per_slab = 0;
	while (true)
	{
		/* Calculate the space needed for the objects and for the free bitmap
		   (one bit per object; ceil() here is presumably the project's
		   ceiling-division helper, not the math.h function) */
		size_t object_space = (objs_per_slab + 1) * object_size;
		size_t bitmap_space = ceil(objs_per_slab + 1, 8);

		/* Adjust the bitmap space to a multiple of 4, if needed */
		if (bitmap_space & 3)
		{
			bitmap_space = (bitmap_space & ~3) + 4;
		}

		/* If the needed space exceeds the available space, end the loop */
		if (object_space + bitmap_space > available_space)
		{
			/* We can't put any objects in the slab */
			if (objs_per_slab == 0) return;

			/* Fill in the slab header size and number of objects per slab */
			slab_cache->slab_header_size = sizeof(slab_header_t) + bitmap_space;
			slab_cache->objs_per_slab = objs_per_slab;

			/* Set up the slab's data */
			init_slab(slab_cache, (slab_header_t*) slab, bitmap_space);

			return;
		}

		/* We can fit one more object than our original guess */
		objs_per_slab++;
	}
}
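
A standalone sketch of the sizing loop above, for readers who want to see the arithmetic with concrete numbers. SLAB_SIZE, the header size, and CEIL_DIV are assumptions (4096-byte slabs, a 32-byte header, and a plain ceiling-division macro standing in for the project's ceil(x, 8) helper):

#include <stdio.h>
#include <stddef.h>

#define SLAB_SIZE        4096   /* assumed slab size */
#define SLAB_HEADER_SIZE 32     /* assumed sizeof(slab_header_t) */

/* Ceiling division, standing in for the two-argument ceil() used above */
#define CEIL_DIV(x, y) (((x) + (y) - 1) / (y))

static size_t objs_per_slab(size_t object_size)
{
	size_t available_space = SLAB_SIZE - SLAB_HEADER_SIZE;
	size_t count = 0;

	while (1)
	{
		/* Space needed for one more object plus its bitmap bit */
		size_t object_space = (count + 1) * object_size;
		size_t bitmap_space = CEIL_DIV(count + 1, 8);

		/* Round the bitmap up to a multiple of 4 bytes, as in the loop above */
		if (bitmap_space & 3)
			bitmap_space = (bitmap_space & ~(size_t)3) + 4;

		if (object_space + bitmap_space > available_space)
			return count;

		count++;
	}
}

int main(void)
{
	printf("64-byte objects per slab: %zu\n", objs_per_slab(64));   /* 63 */
	printf("256-byte objects per slab: %zu\n", objs_per_slab(256)); /* 15 */
	return 0;
}

Rounding the bitmap up to a multiple of 4 bytes presumably keeps the objects that follow it at least 4-byte aligned.
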
Example 3
/* Initialize a slab */
static void init_slab(slab_cache_t *slab_cache, slab_header_t *slab_header, size_t bitmap_space)
{
	/* Return if the slab is NULL */
	if (!slab_header) return;

	/* Set up the slab's data */
	size_t objs_per_slab = slab_cache->objs_per_slab;
	slab_header->num_free_objs = objs_per_slab;
	slab_header->next = NULL;
	spinlock_recursive_init(&slab_header->lock);

	/* Mark the entire bitmap as used */
	memset(slab_header->free_bitmap, 0xFF, bitmap_space);

	/* Clear the bit for each real object to mark it free; bits past objs_per_slab stay set so they are never handed out */
	for (size_t i = 0; i < objs_per_slab; i++)
	{
		size_t byte_start = i / 8;
		uint8_t bit_start = i % 8;
		slab_header->free_bitmap[byte_start] &= ~(1 << bit_start);
	}
}
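
For illustration only, a hypothetical companion to init_slab() that takes one object out of a slab. It relies on the convention visible above (a clear bit marks a free object, a set bit marks a used one) and assumes objects begin slab_header_size bytes into the slab; it is not the project's actual allocation routine, and it reuses the project's types, so it is meant to sit next to the code above.

/* Hypothetical: allocate one object from a slab prepared by init_slab().
   A real allocator would presumably take slab_header->lock first. */
static void *example_slab_take_object(slab_cache_t *slab_cache, slab_header_t *slab_header)
{
	/* Nothing left in this slab */
	if (slab_header->num_free_objs == 0) return NULL;

	/* Scan the bitmap for the first free (clear) bit */
	for (size_t i = 0; i < slab_cache->objs_per_slab; i++)
	{
		size_t byte_start = i / 8;
		uint8_t bit_start = i % 8;

		if (!(slab_header->free_bitmap[byte_start] & (1 << bit_start)))
		{
			/* Mark the object as used and return its address: objects are
			   assumed to start right after the header (struct + bitmap),
			   spaced object_size bytes apart */
			slab_header->free_bitmap[byte_start] |= (1 << bit_start);
			slab_header->num_free_objs--;
			return (uint8_t*) slab_header + slab_cache->slab_header_size + (i * slab_cache->object_size);
		}
	}

	return NULL;
}
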