Example #1
void kfree(void *p) {
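  /* Step back to the hidden header word stored just before the user pointer. */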
  p = (void*) ((uintptr_t)p - sizeof(uintptr_t));
  uintptr_t *ptr = (uintptr_t*)p;

  /* The header packs the log2 of the block size in its low byte and a canary value in the remaining bits. */
  unsigned l2 = ptr[0] & 0xFF;
  unsigned canary = ptr[0] >> 8;

  assert(canary == KMALLOC_CANARY && "Heap corruption!");

  /* Small blocks go back to their slab cache; larger ones are returned directly to the kernel's virtual address space. */
  if (l2 <= MAX_CACHESZ_LOG2)
    slab_cache_free(&caches[l2-MIN_CACHESZ_LOG2], p);
  else
    vmspace_free(&kernel_vmspace, (1U << l2), (uintptr_t)p, 1);
}
Example #2
void thread_destroy(thread_t *t) {
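  /* Unlink the thread from the global thread list while holding the list lock. */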
  spinlock_acquire(&thread_list_lock);
  if (t->next)
    t->next->prev = t->prev;

  if (t->prev)
    t->prev->next = t->next;
  else
    thread_list_head = t->next;
  spinlock_release(&thread_list_lock);

  /* Free the thread's stack and TLS, then return the thread object to its slab cache. */
  free_stack_and_tls(t->stack);
  slab_cache_free(&thread_cache, (void*)t);
}
Example #3
/* Allocate regions of a virtual address space */
void *addrspace_alloc(addrspace_t *addrspace, size_t size_reserved, size_t size_committed, int flags)
{
	/* Get the address space pointer */
	addrspace = resolve_addrspace(addrspace);

	/* Round up both the reserved and committed sizes to a page boundary */
	size_reserved = PAGE_ALIGN_UP(size_reserved);
	size_committed = PAGE_ALIGN_UP(size_committed);

	/* Make sure we don't commit more than we reserve */
	if (size_committed > size_reserved)
	{
		size_committed = size_reserved;
	}

	/* Search the address space for a free region of suitable size */
	spinlock_recursive_acquire(&addrspace->lock);
	vad_t *vad = &addrspace->free;
	while (vad)
	{
		/* Move on if it doesn't fit our allocation */
		if (vad->length < size_reserved)
		{
			vad = vad->next;
			continue;
		}

		/* Store the starting address of the allocation */
		vaddr_t address = vad->start;

		/* Create the guard page if requested */
		vaddr_t i = address;
		if (flags & GUARD_BOTTOM)
		{
			vmm_map_page(addrspace->address_space, i, 0, PAGE_INVALID);
			i += PAGE_SIZE;
		}

		/* Commit all the needed pages */
		for (; i < address + size_committed; i += PAGE_SIZE)
		{
			int color = vaddr_cache_color(i, addrspace->numa_domain, 0);
			vmm_map_page(addrspace->address_space, i, pmm_alloc_page(0, addrspace->numa_domain, color), flags);
		}

		/* Modify the free VAD or remove it entirely */
		if (size_reserved < vad->length)
		{
			vad->start += size_reserved;
			vad->length -= size_reserved;
		}
		else
		{
			/* Later VAD */
			if (vad != &addrspace->free)
			{
				/* Readjust the linked list */
				vad->prev->next = vad->next;
				if (vad->next)
					vad->next->prev = vad->prev;

				/* Free the VAD */
				slab_cache_free(vad_cache, vad);
			}
			/* Root VAD */
			else
			{
				/* Copy the next VAD into the root one */
				vad_t *vad_next = vad->next;
				memcpy(vad, vad_next, sizeof(vad_t));

				/* Free the dynamically-allocated VAD */
				slab_cache_free(vad_cache, vad_next);
			}
		}

		/* Record metadata, unless told not to */
		if (!(flags & PAGE_PRIVATE))
		{
			/* Create a new VAD to represent the now-used region */
			vad = slab_cache_alloc(vad_cache);
			vad->start = address;
			vad->length = size_reserved;
			vad->flags = flags;
			vad->left = vad->right = NULL;
			vad->height = 0;

			/* Insert it into the tree */
			addrspace->used_root = vad_tree_insert(addrspace->used_root, vad);
		}

		/* Return the address of the allocated region */
		spinlock_recursive_release(&addrspace->lock);
		return (void*) address;
	}

	/* No free region of the address space available */
	spinlock_recursive_release(&addrspace->lock);
	return NULL;
}
Example #4
void
ObjFreePage(PagePool *pool, struct sBTPage *page)
{
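    /* Return the page to the bt_cache slab cache; the pool argument is not used here. */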
    sys$slab_cache_free(&bt_cache, page);
    return;
}
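All four examples hand fixed-size objects back to a slab cache. As a rough, hypothetical sketch only (none of the projects above define the interface this way; slab_cache_t, its fields, and the malloc fallback are assumptions made for illustration), the slab_cache_alloc/slab_cache_free pair they call can be pictured as a per-cache free list of fixed-size objects:

#include <stddef.h>
#include <stdlib.h>

/* Hypothetical cache descriptor: one fixed object size, one intrusive free list. */
typedef struct slab_free_node {
    struct slab_free_node *next;
} slab_free_node_t;

typedef struct slab_cache {
    size_t object_size;            /* must be >= sizeof(slab_free_node_t) */
    slab_free_node_t *free_list;   /* objects handed back by slab_cache_free */
} slab_cache_t;

void *slab_cache_alloc(slab_cache_t *cache) {
    if (cache->free_list) {
        /* Reuse the most recently freed object. */
        void *obj = cache->free_list;
        cache->free_list = cache->free_list->next;
        return obj;
    }
    /* Toy fallback: a real kernel would carve objects out of a slab page instead. */
    return malloc(cache->object_size);
}

void slab_cache_free(slab_cache_t *cache, void *obj) {
    /* Push the object onto the cache's free list for later reuse. */
    slab_free_node_t *node = (slab_free_node_t *)obj;
    node->next = cache->free_list;
    cache->free_list = node;
}

Example #1 layers a size/canary header on top of such an interface before freeing, while the other examples return whole objects (threads, VADs, B-tree pages) directly to their caches.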