void *kmalloc(unsigned sz) { /* We need to add a small header to the allocation to track which cache (if any) it came from. It must be a multiple of the pointer size in order that the address after it (which we will be returning) has natural alignment. */ sz += sizeof(uintptr_t); uintptr_t *ptr; unsigned l2 = log2_roundup(sz); if (l2 < MIN_CACHESZ_LOG2) l2 = MIN_CACHESZ_LOG2; if (l2 >= MIN_CACHESZ_LOG2 && l2 <= MAX_CACHESZ_LOG2) { ptr = (uintptr_t*)slab_cache_alloc(&caches[l2-MIN_CACHESZ_LOG2]); } else { /* Get the size as the smallest power of 2 >= sz */ unsigned sz_p2 = 1U << l2; if (sz_p2 < get_page_size()) { sz_p2 = get_page_size(); l2 = log2_roundup(sz_p2); } ptr = (uintptr_t*)vmspace_alloc(&kernel_vmspace, sz_p2, 1); } ptr[0] = (KMALLOC_CANARY << 8) | l2; return &ptr[1]; }
/* Return the block [addr, addr+sz) to the buddy allocator, coalescing
   free buddy pairs upward for as long as possible.  Only addresses and
   sizes previously handed out by buddy_alloc() are valid here. */
void buddy_free(buddy_t *bd, uint64_t addr, unsigned sz) {
  unsigned log_sz = log2_roundup(sz);
  uint64_t offs = addr - bd->start;
  unsigned idx = offs >> log_sz;

  /* Each iteration frees one node; the post-expression advances to the
     merged parent one order up. */
  for (; log_sz >= MIN_BUDDY_SZ_LOG2; ++log_sz, idx = DEC_ORDER(idx)) {
    int order = log_sz - MIN_BUDDY_SZ_LOG2;

    /* This node is now free. */
    bitmap_set(&bd->orders[order], idx);

    /* Top of the tree - there is no parent to merge into. */
    if (log_sz == MAX_BUDDY_SZ_LOG2)
      return;

    /* We can only coalesce if our buddy is free as well. */
    if (bitmap_isset(&bd->orders[order], BUDDY(idx)) == 0)
      return;

    /* Both halves are consumed by the merged parent. */
    bitmap_clear(&bd->orders[order], idx);
    bitmap_clear(&bd->orders[order], BUDDY(idx));
  }
}
/** Freeing is easier than allocating: blocks never need to be split.
    Only correctly aligned addresses and sizes may be passed in, so it is
    only really safe to call this with values returned by buddy_alloc().

    The incoming block is marked free; then, while we are below the top
    order of the tree, we check whether its buddy is free too.  If so,
    both are marked unavailable and we continue one order up with the
    merged parent. { */
void buddy_free(buddy_t *bd, uint64_t addr, unsigned sz) {
  unsigned order_log = log2_roundup(sz);
  unsigned node = (addr - bd->start) >> order_log;

  for (;;) {
    if (order_log < MIN_BUDDY_SZ_LOG2)
      return;
    int oi = order_log - MIN_BUDDY_SZ_LOG2;

    /* This node becomes free. */
    bitmap_set(&bd->orders[oi], node);

    /* Nothing above the top order to coalesce into. */
    if (order_log == MAX_BUDDY_SZ_LOG2)
      return;

    /* The buddy must also be free before the pair can merge. */
    if (bitmap_isset(&bd->orders[oi], BUDDY(node)) == 0)
      return;

    /* FIXME: Ensure max(this, buddy) wouldn't go over the max extent of
       the region. */

    /* The merged parent owns both children now: mark both non-free and
       move up an order. */
    bitmap_clear(&bd->orders[oi], node);
    bitmap_clear(&bd->orders[oi], BUDDY(node));
    node = DEC_ORDER(node);
    ++order_log;
  }
}
/** Allocate a block of at least 'sz' bytes from the buddy region,
    returning its address, or ~0ULL if no free block is available.

    Firstly we find the smallest power of 2 that will hold the allocation
    request, and take the log base 2 of it. { */
uint64_t buddy_alloc(buddy_t *bd, unsigned sz) {
  unsigned log_sz = log2_roundup(sz);
  if (log_sz > MAX_BUDDY_SZ_LOG2)
    panic("buddy_alloc had request that was too large to handle!");
  /* BUG FIX: a request smaller than the minimum order would otherwise
     compute a negative subscript into bd->orders[] below.  The allocator
     cannot hand out less than the minimum block size anyway. */
  if (log_sz < MIN_BUDDY_SZ_LOG2)
    log_sz = MIN_BUDDY_SZ_LOG2;

  unsigned orig_log_sz = log_sz;

  /** Then we try and find a free block of this size.  This involves
      searching the right bitmap for a set bit.  If there are no set bits,
      we increase the size of the block we're searching for. { */
  /* BUG FIX: idx was declared uninitialized and only assigned inside the
     loop; initialize it so the -1 check below is always well-defined. */
  int64_t idx = -1;
  while (log_sz <= MAX_BUDDY_SZ_LOG2) {
    idx = bitmap_first_set(&bd->orders[log_sz - MIN_BUDDY_SZ_LOG2]);
    if (idx != -1)
      /* Block found! */
      break;
    ++log_sz;
  }

  if (idx == -1)
    /* No free blocks :( */
    return ~0ULL;

  /** Now, if we couldn't get a block of the size we wanted, we'll have to
      split it down to the right size. { */
  for (; log_sz != orig_log_sz; --log_sz) {
    int order_idx = log_sz - MIN_BUDDY_SZ_LOG2;
    /* We're splitting a block, so deallocate it first... */
    bitmap_clear(&bd->orders[order_idx], idx);
    /* Then set both its children as free in the next order. */
    idx = INC_ORDER(idx);
    bitmap_set(&bd->orders[order_idx-1], idx);
    bitmap_set(&bd->orders[order_idx-1], idx+1);
  }

  /** By this point we have a block that is free.  We should now mark it
      as allocated then calculate the address that actually equates to. { */
  int order_idx = log_sz - MIN_BUDDY_SZ_LOG2;
  bitmap_clear(&bd->orders[order_idx], idx);

  return bd->start + ((uint64_t)idx << log_sz);
}
// Allocate a block of at least `sz` bytes from the buddy region.
// Returns the block address, or ~0ULL if nothing is free.
uint64_t buddy_alloc(buddy_t *bd, unsigned sz) {
  unsigned log_sz = log2_roundup(sz);
  if (log_sz > MAX_BUDDY_SZ_LOG2) {
    panic("buddy_alloc had request that was too large to handle");
  }
  // BUG FIX: a request below the minimum order would otherwise index
  // bd->orders[] with a negative subscript; round it up to the smallest
  // block the allocator can hand out.
  if (log_sz < MIN_BUDDY_SZ_LOG2) {
    log_sz = MIN_BUDDY_SZ_LOG2;
  }
  unsigned orig_log_sz = log_sz;

  // Search for a free block -- we may need to increase the size of the
  // block to find a free one.
  // BUG FIX: idx was uninitialized; initialize so the -1 test below is
  // always well-defined.
  int64_t idx = -1;
  while (log_sz <= MAX_BUDDY_SZ_LOG2) {
    idx = bitmap_first_set(&bd->orders[log_sz - MIN_BUDDY_SZ_LOG2]);
    if (idx != -1) {
      // we found a block
      break;
    }
    log_sz++;
  }
  if (idx == -1) {
    // No free blocks
    return ~0ULL;
  }

  // Split blocks to get a block of minimum size
  for (; log_sz != orig_log_sz; log_sz--) {
    int order_idx = log_sz - MIN_BUDDY_SZ_LOG2;
    // splitting a block, so deallocate it first.
    bitmap_clear(&bd->orders[order_idx], idx);
    // Set both children as free.
    idx = INC_ORDER(idx);
    bitmap_set(&bd->orders[order_idx - 1], idx);
    bitmap_set(&bd->orders[order_idx - 1], idx + 1);
  }

  // Claim the block and compute the address it corresponds to.
  int order_idx = log_sz - MIN_BUDDY_SZ_LOG2;
  bitmap_clear(&bd->orders[order_idx], idx);
  uint64_t addr = bd->start + ((uint64_t)idx << log_sz);
  return addr;
}