/** Initialize allocated memory as a slab cache.
 *
 * Sets up object size, flags, the slab/magazine lists and the slab order
 * (number of frames per slab) for a preallocated cache structure, then
 * registers the cache on the global cache list.
 *
 * NOTE(review): name assignment, object alignment enforcement, locking and
 * magazine-cache setup are intentionally disabled in this variant; the
 * @p align, @p constructor and @p destructor parameters are kept only for
 * interface compatibility and are currently unused.
 *
 * @param cache       Preallocated cache structure to initialize.
 * @param size        Size of one object in bytes.
 * @param align       Requested object alignment (currently ignored).
 * @param constructor Object constructor callback (currently unused).
 * @param destructor  Object destructor callback (currently unused).
 * @param flags       SLAB_CACHE_* behavior flags.
 */
static void _slab_cache_create(slab_cache_t *cache, size_t size,
    size_t align, int (*constructor)(void *obj, int kmflag),
    int (*destructor)(void *obj), int flags)
{
	int pages;

	/* Explicitly mark intentionally-unused parameters. */
	(void) align;
	(void) constructor;
	(void) destructor;

	cache->size = size;
	cache->flags = flags;

	list_initialize(&cache->full_slabs);
	list_initialize(&cache->partial_slabs);
	list_initialize(&cache->magazines);

	/* Compute slab sizes, object counts in slabs etc. */

	/* Minimum slab order: smallest order with 2^order >= pages. */
	pages = SIZE2FRAMES(cache->size);
	if (pages <= 1)
		cache->order = 0;
	else
		cache->order = fnzb(pages - 1) + 1;

	/* Grow the slab until internal fragmentation is acceptable. */
	while (badness(cache) > SLAB_MAX_BADNESS(cache))
		cache->order += 1;

	cache->objects = comp_objects(cache);

	/* Add cache to the global cache list. */
	list_append(&cache->link, &slab_cache_list);
}
/** Allocate memory with specified alignment.
 *
 * @param align Alignment in bytes (must be non-zero).
 * @param size  Number of bytes to allocate.
 *
 * @return Allocated memory or NULL on failure / zero alignment.
 */
void *memalign(const size_t align, const size_t size)
{
	if (align == 0)
		return NULL;

	/*
	 * Round the effective alignment up to a power of two that is
	 * at least as large as a pointer.
	 */
	size_t base = max(sizeof(void *), align);
	size_t palign = 1 << (fnzb(base - 1) + 1);

	futex_down(&malloc_futex);
	void *block = malloc_internal(size, palign);
	futex_up(&malloc_futex);

	return block;
}
static uintptr_t km_map_aligned(uintptr_t paddr, size_t size, unsigned int flags) { uintptr_t vaddr; size_t align; uintptr_t offs; ASSERT(ALIGN_DOWN(paddr, FRAME_SIZE) == paddr); ASSERT(ALIGN_UP(size, FRAME_SIZE) == size); /* Enforce natural or at least PAGE_SIZE alignment. */ align = ispwr2(size) ? size : (1U << (fnzb(size) + 1)); vaddr = km_page_alloc(size, max(PAGE_SIZE, align)); page_table_lock(AS_KERNEL, true); for (offs = 0; offs < size; offs += PAGE_SIZE) { page_mapping_insert(AS_KERNEL, vaddr + offs, paddr + offs, flags); } page_table_unlock(AS_KERNEL, true); return vaddr; }