Code example #1
File: addrspace.c  Project: dwizard/darkside-kernel
/* Allocate regions of a virtual address space */
void *addrspace_alloc(addrspace_t *addrspace, size_t size_reserved, size_t size_committed, int flags)
{
	/* Get the address space pointer */
	addrspace = resolve_addrspace(addrspace);

	/* Round up both the reserved and committed sizes to a page boundary */
	size_reserved = PAGE_ALIGN_UP(size_reserved);
	size_committed = PAGE_ALIGN_UP(size_committed);

	/* Make sure we don't commit more than we reserve */
	if (size_committed > size_reserved)
	{
		size_committed = size_reserved;
	}

	/* Search the address space for a free region of suitable size */
	spinlock_recursive_acquire(&addrspace->lock);
	vad_t *vad = &addrspace->free;
	while (vad)
	{
		/* Move on if it doesn't fit our allocation */
		if (vad->length < size_reserved)
		{
			vad = vad->next;
			continue;
		}

		/* Store the starting address of the allocation */
		vaddr_t address = vad->start;

		/* Create the guard page if requested */
		vaddr_t i = address;
		if (flags & GUARD_BOTTOM)
		{
			vmm_map_page(addrspace->address_space, i, 0, PAGE_INVALID);
			i += PAGE_SIZE;
		}

		/* Commit all the needed pages */
		for (; i < address + size_committed; i += PAGE_SIZE)
		{
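		/* Cache coloring: pick a physical page whose cache color matches the
		   one derived from this virtual address, reducing conflict misses
		   between consecutive pages of the region */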
			int color = vaddr_cache_color(i, addrspace->numa_domain, 0);
			vmm_map_page(addrspace->address_space, i, pmm_alloc_page(0, addrspace->numa_domain, color), flags);
		}

		/* Modify the free VAD or remove it entirely */
		if (size_reserved < vad->length)
		{
			vad->start += size_reserved;
			vad->length -= size_reserved;
		}
		else
		{
			/* Later VAD */
			if (vad != &addrspace->free)
			{
				/* Readjust the linked list */
				vad->prev->next = vad->next;
				if (vad->next)
				{
					vad->next->prev = vad->prev;
				}

				/* Free the VAD */
				slab_cache_free(vad_cache, vad);
			}
			/* Root VAD */
			else
			{
				/* Copy the next VAD into the root one */
				vad_t *vad_next = vad->next;
				memcpy(vad, vad_next, sizeof(vad_t));

				/* The copied node's successor must now point back at the root */
				if (vad->next)
				{
					vad->next->prev = vad;
				}

				/* Free the dynamically-allocated VAD */
				slab_cache_free(vad_cache, vad_next);
			}
		}

		/* Record metadata, unless told not to */
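	/* (presumably how the slab allocator avoids recursing forever: its own
	   slabs can be allocated with PAGE_PRIVATE, since this function calls
	   slab_cache_alloc below, and slab_cache_alloc calls back into here) */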
		if (!(flags & PAGE_PRIVATE))
		{
			/* Create a new VAD to represent the now-used region */
			vad = slab_cache_alloc(vad_cache);
			vad->start = address;
			vad->length = size_reserved;
			vad->flags = flags;
			vad->left = vad->right = NULL;
			vad->height = 0;

			/* Insert it into the tree */
			addrspace->used_root = vad_tree_insert(addrspace->used_root, vad);
		}

		/* Return the address of the allocated region */
		spinlock_recursive_release(&addrspace->lock);
		return (void*) address;
	}

	/* No free region of the address space available */
	spinlock_recursive_release(&addrspace->lock);
	return NULL;
}
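
The reserve/commit split and the GUARD_BOTTOM flag are easiest to see from a call site. Below is a minimal sketch of allocating a guarded kernel stack; the PAGE_WRITE flag name and the sizes are illustrative assumptions, not taken from the project:

/* Hypothetical caller: reserve 64 KiB of address space for a kernel stack,
   commit the first 16 KiB, and make the bottom page invalid so an overflow
   faults instead of silently corrupting the neighbouring region.
   PAGE_WRITE and the sizes are assumed names/values, not from this project. */
void *stack = addrspace_alloc(ADDRSPACE_SYSTEM, 0x10000, 0x4000, PAGE_WRITE | GUARD_BOTTOM);
if (!stack)
{
	/* No free region of the address space was large enough */
}

Note that, as written, a GUARD_BOTTOM allocation returns the address of the guard page itself, and the guard replaces the first committed page rather than being added on top: the commit loop starts one PAGE_SIZE above the returned address.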
Code example #2
File: slab.c  Project: darksideos/darkside-kernel
/* Allocate an object from a slab cache */
void *slab_cache_alloc(slab_cache_t *slab_cache)
{
	/* Lock the entire slab cache */
	spinlock_recursive_acquire(&slab_cache->lock);

	/* Take a partial slab if possible, or an empty one if not */
find_slab: ;
	slab_header_t *slab_header = NULL;
	if (slab_cache->partial)
	{
		/* Use the first partial slab we see */
		slab_header = slab_cache->partial;

		/* No objects left after allocation, so move the slab to the full list */
		if (slab_header->num_free_objs == 1)
		{
			slab_header_t *old_full_head = slab_cache->full;
			slab_cache->partial = slab_header->next;
			slab_header->next = old_full_head;
			slab_cache->full = slab_header;
		}
	}
	/* Empty slab available */
	else if (slab_cache->empty)
	{
		/* Use the first empty slab we see */
		slab_header = slab_cache->empty;

		/* No objects left after allocation, so put the slab in full */
		if (slab_header->num_free_objs == 1)
		{
			slab_header_t *old_full_head = slab_cache->full;
			slab_cache->empty = slab_header->next;
			slab_header->next = old_full_head;
			slab_cache->full = slab_header;
		}
		/* Otherwise, put it in the partial list */
		else
		{
			slab_header_t *old_partial_head = slab_cache->partial;
			slab_cache->empty = slab_header->next;
			slab_header->next = old_partial_head;
			slab_cache->partial = slab_header;
		}
	}
	/* No empty or partial slabs available */
	else
	{
		/* Allocate a new slab and fill in its information */
		slab_header = (slab_header_t*) addrspace_alloc(ADDRSPACE_SYSTEM, SLAB_SIZE, SLAB_SIZE, slab_cache->flags | PAGE_GLOBAL);
		if (!slab_header)
		{
			spinlock_recursive_release(&slab_cache->lock);
			return NULL;
		}
		init_slab(slab_cache, slab_header, slab_cache->slab_header_size - sizeof(slab_header_t));

		/* Add it to the empty list and redo this */
		slab_header->next = slab_cache->empty;
		slab_cache->empty = slab_header;
		goto find_slab;
	}

	/* Now that we've found a slab, we can lock it and unlock the entire cache */
	spinlock_recursive_acquire(&slab_header->lock);
	spinlock_recursive_release(&slab_cache->lock);

	/* One less object in the slab */
	slab_header->num_free_objs--;

	/* Search the bitmap for an available free object */
	size_t bitmap_space = slab_cache->slab_header_size - sizeof(slab_header_t);
	for (size_t i = 0; i < bitmap_space; i++)
	{
		uint8_t byte = slab_header->free_bitmap[i];

		/* Skip bytes whose objects are all taken */
		if (byte == 0xFF)
		{
			continue;
		}

		for (uint8_t j = 0; j < 8; j++)
		{
			/* Bit set means the object is taken, so try the next one */
			if (byte & 1)
			{
				byte >>= 1;
				continue;
			}

			slab_header->free_bitmap[i] |= (1 << j);

			size_t object_num = (i * 8) + j;
			void *object = ((void*) slab_header) + slab_cache->slab_header_size + (object_num * slab_cache->object_size);
			spinlock_recursive_release(&slab_header->lock);
			return object;
		}
	}

	/* Should be unreachable: num_free_objs claimed a free object existed.
	   Restore the count and fail if the bitmap disagrees. */
	slab_header->num_free_objs++;
	spinlock_recursive_release(&slab_header->lock);
	return NULL;
}
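
The two routines above are mutually dependent: addrspace_alloc allocates its bookkeeping VADs from a slab cache, and a slab cache grows by calling addrspace_alloc for a fresh SLAB_SIZE region. The listing does not include slab_cache_free, which addrspace_alloc uses to release VADs. As a reading of the bitmap layout above, a minimal sketch of the inverse operation could look like the following; the name, the extra slab_header parameter, and the omission of the full/partial/empty list transitions are all simplifications, not the project's actual implementation (the real function is called with just the cache and the object, so it must recover the slab header itself, e.g. from SLAB_SIZE alignment):

/* Hypothetical sketch only: release an object by clearing its bitmap bit.
   Takes the slab header explicitly to stay self-contained; omits moving the
   slab between the full, partial and empty lists. */
void slab_cache_free_sketch(slab_cache_t *slab_cache, slab_header_t *slab_header, void *object)
{
	spinlock_recursive_acquire(&slab_header->lock);

	/* Invert the arithmetic from slab_cache_alloc: address -> object number */
	size_t offset = (size_t) (object - ((void*) slab_header) - slab_cache->slab_header_size);
	size_t object_num = offset / slab_cache->object_size;

	/* Clear the object's bit and return it to the free count */
	slab_header->free_bitmap[object_num / 8] &= (uint8_t) ~(1 << (object_num % 8));
	slab_header->num_free_objs++;

	spinlock_recursive_release(&slab_header->lock);
}

A free that empties a slab would also need to migrate it from the full or partial list back toward the empty list, mirroring the transitions performed in slab_cache_alloc above.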