Example #1
void *kmalloc(unsigned sz) {
  /* We need to add a small header to the allocation to track which
     cache (if any) it came from. It must be a multiple of the pointer
     size in order that the address after it (which we will be returning)
     has natural alignment. */
  sz += sizeof(uintptr_t);

  uintptr_t *ptr;

  unsigned l2 = log2_roundup(sz);
  if (l2 < MIN_CACHESZ_LOG2) l2 = MIN_CACHESZ_LOG2;

  if (l2 >= MIN_CACHESZ_LOG2 && l2 <= MAX_CACHESZ_LOG2) {
    ptr = (uintptr_t*)slab_cache_alloc(&caches[l2-MIN_CACHESZ_LOG2]);
  } else {
    /* Get the size as the smallest power of 2 >= sz */
    unsigned sz_p2 = 1U << l2;
    if (sz_p2 < get_page_size()) {
      sz_p2 = get_page_size();
      l2 = log2_roundup(sz_p2);
    }

    ptr = (uintptr_t*)vmspace_alloc(&kernel_vmspace, sz_p2, 1);
  }

  ptr[0] = (KMALLOC_CANARY << 8) | l2;
  return &ptr[1];
}
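
The header written into ptr[0] above is what the matching free path would decode to find the owning cache. The free routine itself is not shown here; the sketch below is hypothetical, reusing KMALLOC_CANARY, caches[] and the MIN/MAX_CACHESZ_LOG2 bounds from this example plus the vmspace_free(vms, size, addr, free_phys) signature seen in Example #4. slab_cache_free() and panic() are assumed counterparts.

void kfree(void *p) {
  if (!p) return;

  /* Step back over the header that kmalloc() prepended to the allocation. */
  uintptr_t *ptr = (uintptr_t*)p - 1;

  /* Recover the size class and sanity-check the canary. */
  unsigned l2 = ptr[0] & 0xFF;
  if ((ptr[0] >> 8) != KMALLOC_CANARY)
    panic("kfree: corrupt or foreign pointer");  /* panic() assumed */

  if (l2 <= MAX_CACHESZ_LOG2)
    slab_cache_free(&caches[l2 - MIN_CACHESZ_LOG2], ptr);
  else
    /* Large allocations were rounded up to 1 << l2 bytes of vmspace. */
    vmspace_free(&kernel_vmspace, (uintptr_t)1 << l2, (uintptr_t)ptr, 1);
}
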
Example #2
static uintptr_t alloc_stack_and_tls() {
  unsigned pagesz = get_page_size();

  /* Reserve a virtual range for the stack, then back every page of it
     with a freshly allocated physical frame. */
  uintptr_t addr = vmspace_alloc(&kernel_vmspace, THREAD_STACK_SZ, 0);

  for (unsigned i = 0; i < THREAD_STACK_SZ; i += pagesz)
    map(addr+i, alloc_page(PAGE_REQ_NONE), 1, PAGE_WRITE);

  return addr;
}
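
The matching teardown is not part of this example. A plausible counterpart is sketched below; it assumes a free_page() routine paired with alloc_page(), and reuses the unmap()/get_mapping() helpers from Example #7 and the vmspace_free() signature from Example #4.

static void free_stack_and_tls(uintptr_t addr) {
  unsigned pagesz = get_page_size();

  for (unsigned i = 0; i < THREAD_STACK_SZ; i += pagesz) {
    /* Look up the backing frame, unmap the page, then release the frame.
       free_page() is assumed as the counterpart of alloc_page(). */
    unsigned flags;
    uint64_t phys = get_mapping(addr + i, &flags);
    unmap(addr + i, 1);
    free_page(phys);
  }

  /* Finally hand the virtual range back to the kernel vmspace. */
  vmspace_free(&kernel_vmspace, THREAD_STACK_SZ, addr, 0);
}
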
Example #3
static slab_footer_t *create(slab_cache_t *c) {
  uintptr_t addr = vmspace_alloc(c->vms, SLAB_SIZE, /*alloc_phys=*/PAGE_WRITE);

  slab_footer_t *f = FOOTER_FOR_PTR(addr);
  f->next = NULL;
  
  /* Initialise the used/free bitmap. */
  uintptr_t bm = (uintptr_t)f - bitmap_sz(c->size);
  memset((uint8_t*)bm, 0, bitmap_sz(c->size));

  return f;
}
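
bitmap_sz() is used above but not defined in the snippet. One plausible minimal definition, assuming one used/free bit per object of size objsz in a SLAB_SIZE slab (the real routine may additionally account for the footer and the bitmap itself):

static unsigned bitmap_sz(unsigned objsz) {
  /* One bit per object, rounded up to whole bytes. */
  unsigned nobjs = SLAB_SIZE / objsz;
  return (nobjs + 7) / 8;
}
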
Example #4
int f() {
    vmspace_t vms;
    // CHECK: init: 0
    kprintf("init: %d\n", vmspace_init(&vms, 0xC1000000, 0x1C000000));

    // CHECK: alloc1: dcfea000
    kprintf("alloc1: %x\n", vmspace_alloc(&vms, 0x1000, 0));
    // CHECK: alloc2: dcfe8000
    kprintf("alloc2: %x\n", vmspace_alloc(&vms, 0x1000, 0)); 
    // CHECK: alloc3: dcfe9000
    kprintf("alloc3: %x\n", vmspace_alloc(&vms, 0x1000, 0)); 
    // CHECK: alloc4: dcfe0000
    kprintf("alloc4: %x\n", vmspace_alloc(&vms, 0x1000, 0)); 
    // CHECK: alloc5: dcfe1000
    kprintf("alloc5: %x\n", vmspace_alloc(&vms, 0x1000, 0)); 
    // CHECK: alloc6: dcfe2000
    kprintf("alloc6: %x\n", vmspace_alloc(&vms, 0x1000, 0)); 
    // CHECK: alloc7: dcfc0000
    kprintf("alloc7: %x\n", vmspace_alloc(&vms, 0x10000, 0)); 

    vmspace_free(&vms, 0x1000, 0xdcfe2000, 0);
    // CHECK: alloc8: dcfe2000
    kprintf("alloc8: %x\n", vmspace_alloc(&vms, 0x1000, 0)); 

    // If we free everything we just allocated and then allocate again,
    // we can check that buddies were correctly merged by observing that
    // the allocations return the same values in the same order.
    vmspace_free(&vms, 0x1000, 0xdcfea000, 0);
    vmspace_free(&vms, 0x1000, 0xdcfe8000, 0);
    vmspace_free(&vms, 0x1000, 0xdcfe9000, 0);
    vmspace_free(&vms, 0x1000, 0xdcfe0000, 0);
    vmspace_free(&vms, 0x1000, 0xdcfe1000, 0);
    vmspace_free(&vms, 0x1000, 0xdcfe2000, 0);
    vmspace_free(&vms, 0x10000, 0xdcfc0000, 0);

    // CHECK: alloc1: dcfea000
    kprintf("alloc1: %x\n", vmspace_alloc(&vms, 0x1000, 0));
    // CHECK: alloc2: dcfe8000
    kprintf("alloc2: %x\n", vmspace_alloc(&vms, 0x1000, 0)); 
    // CHECK: alloc3: dcfe9000
    kprintf("alloc3: %x\n", vmspace_alloc(&vms, 0x1000, 0)); 
    // CHECK: alloc4: dcfe0000
    kprintf("alloc4: %x\n", vmspace_alloc(&vms, 0x1000, 0)); 
    // CHECK: alloc5: dcfe1000
    kprintf("alloc5: %x\n", vmspace_alloc(&vms, 0x1000, 0)); 
    // CHECK: alloc6: dcfe2000
    kprintf("alloc6: %x\n", vmspace_alloc(&vms, 0x1000, 0)); 
    // CHECK: alloc7: dcfc0000
    kprintf("alloc7: %x\n", vmspace_alloc(&vms, 0x10000, 0)); 

    // CHECK-NOT: Page fault
    uintptr_t *addr = (uintptr_t*)vmspace_alloc(&vms, 0x1000, 1);
    *addr = 0x42;

    return 0;
}
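
The "buddies were correctly merged" check above relies on the standard buddy relation: two equally sized, size-aligned blocks are buddies when their offsets from the region base differ only in the size bit. The expected addresses can be verified against that relation in isolation; the snippet below is plain C, independent of the kernel code (the region base 0xC1000000 is aligned far above these block sizes, so flipping the size bit directly is equivalent to computing the offset from the base first).

#include <assert.h>
#include <stdint.h>

static uintptr_t buddy_of(uintptr_t addr, uintptr_t sz) {
  return addr ^ sz;  /* flip the bit corresponding to the block size */
}

int main(void) {
  /* alloc2/alloc3 (0xdcfe8000, 0xdcfe9000) are 0x1000-sized buddies, so
     freeing both lets them coalesce into one 0x2000 block; likewise
     alloc4/alloc5 (0xdcfe0000, 0xdcfe1000). */
  assert(buddy_of(0xdcfe8000u, 0x1000) == 0xdcfe9000u);
  assert(buddy_of(0xdcfe0000u, 0x1000) == 0xdcfe1000u);
  return 0;
}
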
Example #5
struct vmspace *
ept_vmspace_alloc(vm_offset_t min, vm_offset_t max)
{

	return (vmspace_alloc(min, max, ept_pinit));
}
Example #6
struct vmspace *
svm_npt_alloc(vm_offset_t min, vm_offset_t max)
{
	
	return (vmspace_alloc(min, max, npt_pinit));
}
Example #7
int test() {
  // Create a new block cache group.
  disk_cache_group_t *grp = disk_cache_group_new();

  // And several new block caches.
  disk_cache_t *cache1 = disk_cache_new(grp, &dev);
  disk_cache_t *cache2 = disk_cache_new(grp, &dev);

  // Create some scratch vmspace.
  void *scratch1 = (void*)vmspace_alloc(&kernel_vmspace, 0x1000, 0);
  void *scratch2 = (void*)vmspace_alloc(&kernel_vmspace, 0x1000, 0);
  void *scratch3 = (void*)vmspace_alloc(&kernel_vmspace, 0x1000, 0);

  // Test caching of data.
  // CHECK: b1 = 1
  kprintf("b1 = %d\n", disk_cache_get(cache1, 0x0, scratch1));
  // CHECK: b2 = 1
  kprintf("b2 = %d\n", disk_cache_get(cache2, 0x0, scratch2));
  // CHECK: b3 = 1
  kprintf("b3 = %d\n", disk_cache_get(cache1, 0x1000, scratch3));

  // CHECK: c1[0] = 0x0 c1[1] = 0x4
  // CHECK: c2[0] = 0x0 c2[1] = 0x4
  // CHECK: c3[0] = 0x1000 c3[1] = 0x1004
  kprintf("c1[0] = %#x c1[1] = %#x\n",
          ((uint32_t*)scratch1)[0], ((uint32_t*)scratch1)[1]);
  kprintf("c2[0] = %#x c2[1] = %#x\n",
          ((uint32_t*)scratch2)[0], ((uint32_t*)scratch2)[1]);
  kprintf("c3[0] = %#x c3[1] = %#x\n",
          ((uint32_t*)scratch3)[0], ((uint32_t*)scratch3)[1]);

  // Attempt to evict a page. It should fail because all allocated
  // pages have handles.
  // CHECK: evict = 0
  kprintf("evict = %d\n", disk_cache_group_evict(grp, 0x1000));

  // Now release one handle, and check it is still cached (and that there
  // are no handles left).
  unmap((uintptr_t)scratch2, 1);
  disk_cache_release(cache2, 0x0);
  // CHECK: released: iscached 1 n_handles 0
  kprintf("released: iscached %d n_handles %d\n",
          disk_cache_is_cached(cache2, 0x0),
          disk_cache_get_n_handles(cache2, 0x0));

  // Try eviction again. It should succeed.
  // CHECK: evict = 1
  kprintf("evict = %d\n", disk_cache_group_evict(grp, 0x1000));

  // And now the address should not be cached.
  // CHECK: released: iscached 0 n_handles 0
  kprintf("released: iscached %d n_handles %d\n",
          disk_cache_is_cached(cache2, 0x0),
          disk_cache_get_n_handles(cache2, 0x0));

  // Get another handle to one address, and check the #handles increases
  // and they map to the same phys address.
  // CHECK: b4 = 1
  kprintf("b4 = %d\n", disk_cache_get(cache1, 0x1000, scratch2));

  // CHECK: nhandles = 2
  kprintf("nhandles = %d\n", disk_cache_get_n_handles(cache1, 0x1000));

  // CHECK: m1 = [[addr:0x[a-f0-9]*]]
  // CHECK: m2 = [[addr]]
  unsigned flags;
  kprintf("m1 = %#x\nm2 = %#x\n",
          (uint32_t)get_mapping((uintptr_t)scratch3, &flags),
          (uint32_t)get_mapping((uintptr_t)scratch2, &flags));
  
  return 0;
}