Example #1
File: kmalloc.c Project: roscopeco/mink
void *kmalloc(unsigned sz) {
  /* We need to add a small header to the allocation to track which
     cache (if any) it came from. It must be a multiple of the pointer
     size in order that the address after it (which we will be returning)
     has natural alignment. */
  sz += sizeof(uintptr_t);

  uintptr_t *ptr;

  unsigned l2 = log2_roundup(sz);
  if (l2 < MIN_CACHESZ_LOG2) l2 = MIN_CACHESZ_LOG2;

  if (l2 <= MAX_CACHESZ_LOG2) {  /* l2 >= MIN_CACHESZ_LOG2 is guaranteed by the clamp above */
    ptr = (uintptr_t*)slab_cache_alloc(&caches[l2-MIN_CACHESZ_LOG2]);
  } else {
    /* Get the size as the smallest power of 2 >= sz */
    unsigned sz_p2 = 1U << l2;
    if (sz_p2 < get_page_size()) {
      sz_p2 = get_page_size();
      l2 = log2_roundup(sz_p2);
    }

    ptr = (uintptr_t*)vmspace_alloc(&kernel_vmspace, sz_p2, 1);
  }

  ptr[0] = (KMALLOC_CANARY << 8) | l2;
  return &ptr[1];
}
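The header word written at ptr[0] is what makes freeing possible: the low byte recovers the size class and the high bits carry the canary. A minimal sketch of the matching kfree, assuming counterparts slab_cache_free and vmspace_free (neither appears in this snippet, and the vmspace_free argument order is a guess):

void kfree(void *p) {
  if (!p) return;

  /* Step back over the header word kmalloc() prepended. */
  uintptr_t *ptr = (uintptr_t*)p - 1;
  unsigned l2 = ptr[0] & 0xFF;

  /* A foreign or corrupted pointer fails the canary check. */
  if ((ptr[0] >> 8) != KMALLOC_CANARY)
    return;

  if (l2 <= MAX_CACHESZ_LOG2) {
    /* Small allocation: hand it back to the slab cache it came from. */
    slab_cache_free(&caches[l2 - MIN_CACHESZ_LOG2], ptr);
  } else {
    /* Large allocation: release the whole mapping. */
    vmspace_free(&kernel_vmspace, 1U << l2, (uintptr_t)ptr, 1);
  }
}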
Example #2
/* Queue an APC to a thread */
void apc_queue(thread_t *thread, apc_handler_t handler, void *context, int type)
{
	/* Allocate an APC from the slab cache */
	apc_t *apc = (apc_t*) slab_cache_alloc(&internal_apc_cache);

	/* Initialize the APC with the given parameters */
	apc_init(apc, handler, context, type);

	/* Queue it to the specified thread */
	mkthread_queue_apc(&thread->mkthread, apc);
}
Example #3
/** Create a slab cache. */
slab_cache_t *slab_cache_create(size_t size,
				size_t align,
				int (*constructor)(void *obj, int kmflag),
				int (*destructor)(void *obj),
				int flags)
{
	slab_cache_t *cache;

	DBG("%s\n", __FUNCTION__);

	/* Allocate the cache descriptor itself. */
	cache = (slab_cache_t*)slab_cache_alloc();

	_slab_cache_create(cache, size, align, constructor, destructor, flags);

	return cache;
}
}
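As a usage sketch, a caller creates one cache per object type and then carves objects out of it; obj_t and obj_cache_init below are illustrative names, not from the project:

typedef struct {
	int id;
	char name[32];
} obj_t;

static slab_cache_t *obj_cache;

void obj_cache_init(void)
{
	/* Pointer-aligned objects, no constructor/destructor, default flags. */
	obj_cache = slab_cache_create(sizeof(obj_t), sizeof(void*),
				      NULL, NULL, 0);
}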
Example #4
File: device.c Project: karthick18/mir-os
int register_device(const char *name, int dev, device_type type, int blksize,
                    struct device_operations *device_operations) {
  int err = -1;
  struct device *device;
  device = slab_cache_alloc(dev_cache, 0);
  if (!device) {
    printf("Unable to allocate dev structure in Function %s\n", __FUNCTION__);
    goto out;
  }
  strncpy(device->name, name, sizeof(device->name) - 1);
  device->name[sizeof(device->name) - 1] = '\0'; /* strncpy may not NUL-terminate */
  device->blksize = blksize;
  device->dev = dev;
  device->type = type;
  device->device_operations = get_device_operations(type, device_operations);
  link_dev(device);
  err = 0;
 out:
  return err;
}
Example #5
static int slab_tests_run(int argc, char *argv[])
{
	// 1. Create slab cache
	srand(time(0));
	const unsigned pattern = 0xdeadbeef;
	slab_cache_t cache;
	int ret = slab_cache_init(&cache, sizeof(int));
	ok(ret == 0, "slab: created empty cache");

	// 2. Couple alloc/free
	bool valid_free = true;
	lives_ok({
		for (int i = 0; i < 100; ++i) {
			int *data = (int*)slab_cache_alloc(&cache);
			*data = pattern;
			slab_free(data);
			if (*data == pattern)
				valid_free = false;
		}
	}, "slab: couple alloc/free");
Example #6
File: thread.c Project: harveyzh/JMTK
static int threading_init() {
  static thread_t dummy_t = {
    .id = 0,
    .prev = NULL, .next = NULL,
    .scheduler_next = NULL,
    .semaphore_next = NULL,
    .stack = 0,
    .request_kill = 0,
    .state = 0,
    .priority = 0,
    .auto_free = 0
  };

  int r = slab_cache_create(&thread_cache, &kernel_vmspace, sizeof(thread_t), (void*)&dummy_t);
  assert(r == 0 && "slab_cache_create failed!");

  thread_t *t = (thread_t*)slab_cache_alloc(&thread_cache);
  t->stack = (uintptr_t)__builtin_frame_address(0) & ~(THREAD_STACK_SZ-1);

  *tls_slot(TLS_SLOT_TCB, t->stack) = (uintptr_t)t;
  *tls_slot(TLS_SLOT_CANARY, t->stack) = CANARY_VAL;

  assert(*tls_slot(TLS_SLOT_TCB, t->stack) == (uintptr_t)t);

  assert(*tls_slot(TLS_SLOT_CANARY, t->stack) == CANARY_VAL);

  thread_list_head = t;

  register_debugger_handler("threads", "List all thread states", &inspect_threads);

  return 0;
}

static prereq_t p[] = { {"kmalloc",NULL}, {"scheduler",NULL}, {NULL,NULL} };
static module_t x run_on_startup = {
  .name = "threading",
  .required = p,
  .load_after = NULL,
  .init = &threading_init,
  .fini = NULL
};
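Other subsystems hook the same startup machinery. A hypothetical module that must come up after threading would declare itself the same way (my_subsystem_init is an illustrative name, not a JMTK function):

static prereq_t deps[] = { {"threading", NULL}, {NULL, NULL} };

static module_t m run_on_startup = {
  .name = "my-subsystem",
  .required = deps,
  .load_after = NULL,
  .init = &my_subsystem_init,  /* int my_subsystem_init(void); assumed */
  .fini = NULL
};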
Example #7
File: thread.c Project: harveyzh/JMTK
thread_t *thread_spawn(void (*fn)(void*), void *p, uint8_t auto_free) {
  thread_t *t = (thread_t*)slab_cache_alloc(&thread_cache);

  memset(t, 0, sizeof(thread_t));

  t->auto_free = auto_free;
  t->stack = alloc_stack_and_tls();
 
  spinlock_acquire(&thread_list_lock);
  t->next = thread_list_head;
  t->next->prev = t;
  thread_list_head = t;
  spinlock_release(&thread_list_lock);
 
  /* TLS slot zero always contains the thread object. */
  *tls_slot(TLS_SLOT_TCB, t->stack) = (uintptr_t)t;

  /* Store the function and argument temporarily in TLS */
  *tls_slot(1, t->stack) = (uintptr_t)fn;
  *tls_slot(2, t->stack) = (uintptr_t)p;

  /* In the last valid TLS slot, store a canary. */
  *tls_slot(TLS_SLOT_CANARY, t->stack) = CANARY_VAL;

  if (setjmp(t->jmpbuf) == 0) {
    jmp_buf_set_stack(t->jmpbuf, t->stack + THREAD_STACK_SZ);

    scheduler_ready(t);

    return t;
  } else {
    /* Tail call to trampoline which is defined as noinline, to force the creation
       of a new stack frame as the previous stack frame is now invalid! */
    trampoline();
  }
}
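The function pointer and argument parked in TLS slots 1 and 2 are consumed on the far side of the setjmp. JMTK's real trampoline is not shown here; a plausible sketch, where thread_kill() as the exit path is an assumption:

/* Runs on the new thread's stack; noinline forces a fresh frame. */
static __attribute__((noinline)) void trampoline(void) {
  /* Recover the stack base the same way threading_init() does. */
  uintptr_t stack = (uintptr_t)__builtin_frame_address(0) & ~(THREAD_STACK_SZ-1);

  /* Pull out the entry point and argument stashed by thread_spawn(). */
  void (*fn)(void*) = (void (*)(void*)) *tls_slot(1, stack);
  void *p = (void*) *tls_slot(2, stack);

  fn(p);

  /* The entry function returned; take this thread off the CPU. */
  thread_kill();  /* assumed; never returns */
}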
Example #8
/* Allocate regions of a virtual address space */
void *addrspace_alloc(addrspace_t *addrspace, size_t size_reserved, size_t size_committed, int flags)
{
	/* Get the address space pointer */
	addrspace = resolve_addrspace(addrspace);

	/* Round up both the reserved and committed sizes to a page boundary */
	size_reserved = PAGE_ALIGN_UP(size_reserved);
	size_committed = PAGE_ALIGN_UP(size_committed);

	/* Make sure we don't commit more than we reserve */
	if (size_committed > size_reserved)
	{
		size_committed = size_reserved;
	}

	/* Search the address space for a free region of suitable size */
	spinlock_recursive_acquire(&addrspace->lock);
	vad_t *vad = &addrspace->free;
	while (vad)
	{
		/* Move on if it doesn't fit our allocation */
		if (vad->length < size_reserved)
		{
			vad = vad->next;
			continue;
		}

		/* Store the starting address of the allocation */
		vaddr_t address = vad->start;

		/* Create the guard page if requested */
		vaddr_t i = address;
		if (flags & GUARD_BOTTOM)
		{
			vmm_map_page(addrspace->address_space, i, 0, PAGE_INVALID);
			i += PAGE_SIZE;
		}

		/* Commit all the needed pages */
		for (; i < address + size_committed; i += PAGE_SIZE)
		{
			int color = vaddr_cache_color(i, addrspace->numa_domain, 0);
			vmm_map_page(addrspace->address_space, i, pmm_alloc_page(0, addrspace->numa_domain, color), flags);
		}

		/* Modify the free VAD or remove it entirely */
		if (size_reserved < vad->length)
		{
			vad->start += size_reserved;
			vad->length -= size_reserved;
		}
		else
		{
			/* Later VAD */
			if (vad != &addrspace->free)
			{
				/* Readjust the linked list */
				vad->prev->next = vad->next;
				if (vad->next)
					vad->next->prev = vad->prev;

				/* Free the VAD */
				slab_cache_free(vad_cache, vad);
			}
			/* Root VAD */
			else
			{
				/* Copy the next VAD into the root one */
				vad_t *vad_next = vad->next;
				memcpy(vad, vad_next, sizeof(vad_t));

				/* Free the dynamically-allocated VAD */
				slab_cache_free(vad_cache, vad_next);
			}
		}

		/* Record metadata, unless told not to */
		if (!(flags & PAGE_PRIVATE))
		{
			/* Create a new VAD to represent the now-used region */
			vad = slab_cache_alloc(vad_cache);
			vad->start = address;
			vad->length = size_reserved;
			vad->flags = flags;
			vad->left = vad->right = NULL;
			vad->height = 0;

			/* Insert it into the tree */
			addrspace->used_root = vad_tree_insert(addrspace->used_root, vad);
		}

		/* Return the address of the allocated region */
		spinlock_recursive_release(&addrspace->lock);
		return (void*) address;
	}

	/* No free region of the address space available */
	spinlock_recursive_release(&addrspace->lock);
	return NULL;
}
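A typical call site, as a minimal usage sketch; map_scratch_region and the PAGE_READWRITE flag are illustrative names, not from this project:

void *map_scratch_region(addrspace_t *as)
{
	/* Reserve 1 MiB but commit only the first 64 KiB up front,
	   with an inaccessible guard page below the region. */
	return addrspace_alloc(as, 0x100000, 0x10000,
	                       PAGE_READWRITE | GUARD_BOTTOM);
}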
Example #9
struct sBTPage *
ObjAllocPage(PagePool *pool)
{
    return ((struct sBTPage *) sys$slab_cache_alloc(&bt_cache));
}
Example #10
File: slab.c Project: dnstap/knot
int main(int argc, char *argv[])
{
	plan(4);

	// 1. Create slab cache
	srand(time(0));
	const unsigned pattern = 0xdeadbeef;
	slab_cache_t cache;
	int ret = slab_cache_init(&cache, sizeof(int));
	is_int(0, ret, "slab: created empty cache");

	// 2. Couple alloc/free
	bool valid_free = true;
	for(int i = 0; i < 100; ++i) {
		int* data = (int*)slab_cache_alloc(&cache);
		*data = pattern;
		slab_free(data);
		if (*data == pattern)
			valid_free = false;
	}

	// 3. Verify freed block
	ok(valid_free, "slab: freed memory is correctly invalidated");

	// 4. Reap memory
	slab_t* slab = cache.slabs_free;
	int free_count = 0;
	while (slab) {
		slab_t* next = slab->next;
		if (slab_isempty(slab)) {
			++free_count;
		}
		slab = next;
	}

	int reaped = slab_cache_reap(&cache);
	is_int(reaped, free_count, "slab: cache reaping works");

	// Stress cache
	int alloc_count = 73521;
	void** ptrs = alloca(alloc_count * sizeof(void*));
	int ptrs_i = 0;
	for(int i = 0; i < alloc_count; ++i) {
		double roll = rand() / (double) RAND_MAX;
		if ((ptrs_i == 0) || (roll < 0.6)) {
			int id = ptrs_i++;
			ptrs[id] = slab_cache_alloc(&cache);
			if (ptrs[id] == 0) {
				ptrs_i--;
			} else {
				int* data = (int*)ptrs[id];
				*data = pattern;
			}
		} else {
			slab_free(ptrs[--ptrs_i]);
		}
	}

	// 5. Delete cache
	slab_cache_destroy(&cache);
	is_int(0, cache.bufsize, "slab: freed cache");

	return 0;
}