/** Initialize the IPC subsystem.
 *
 * Creates the slab caches backing call_t and answerbox_t allocations.
 * No constructor/destructor callbacks and no cache flags are used.
 */
void ipc_init(void)
{
	ipc_call_slab = slab_cache_create("call_t", sizeof(call_t), 0,
	    NULL, NULL, 0);
	ipc_answerbox_slab = slab_cache_create("answerbox_t",
	    sizeof(answerbox_t), 0, NULL, NULL, 0);
}
int main() { srand(time(0)); struct slab_arena arena; struct slab_cache cache; slab_arena_create(&arena, 0, UINT_MAX, 4000000, MAP_PRIVATE); slab_cache_create(&cache, &arena, 0); int i = 0; while (i < ITERATIONS) { int run = random() % NRUNS; int size = random() % MAX_ALLOC; if (runs[run]) { slab_put(&cache, runs[run]); } runs[run] = slab_get(&cache, size); fail_unless(runs[run]); slab_cache_check(&cache); i++; } slab_cache_destroy(&cache); }
/* Create the slab cache used to allocate thread descriptors (thr_t).
 * Objects are 16-byte aligned; SLAB_CACHE_MAGDEFERRED defers magazine
 * setup, matching the other cache-creation sites in this codebase.
 *
 * Fix: removed the stray ';' after the function body — a semicolon at
 * file scope after a definition is not valid ISO C and trips -pedantic. */
void init_threads()
{
	DBG("%s\n", __FUNCTION__);
	thr_slab = slab_cache_create(sizeof(thr_t), 16, NULL, NULL,
	    SLAB_CACHE_MAGDEFERRED);
}
void init_core_dll() { PIMAGE_DOS_HEADER dos; PIMAGE_NT_HEADERS32 nt; PIMAGE_EXPORT_DIRECTORY exp; dos = (PIMAGE_DOS_HEADER)LOAD_BASE; nt = MakePtr( PIMAGE_NT_HEADERS32, dos, dos->e_lfanew); exp = MakePtr(PIMAGE_EXPORT_DIRECTORY,LOAD_BASE, nt->OptionalHeader.DataDirectory[0].VirtualAddress); list_initialize(&core_dll.link); core_dll.img_base = LOAD_BASE; core_dll.img_size = nt->OptionalHeader.SizeOfImage; core_dll.img_md = NULL; core_dll.img_hdr = nt; core_dll.img_sec = MakePtr(PIMAGE_SECTION_HEADER,nt, sizeof(IMAGE_NT_HEADERS32)); core_dll.img_exp = MakePtr(PIMAGE_EXPORT_DIRECTORY,LOAD_BASE, nt->OptionalHeader.DataDirectory[0].VirtualAddress); core_dll.img_name = strupr(MakePtr(char*, LOAD_BASE, exp->Name)); dll_slab = slab_cache_create(sizeof(dll_t), 16,NULL,NULL,SLAB_CACHE_MAGDEFERRED); DBG("%s base %x size %x sections %d exports %x\n", core_dll.img_name, core_dll.img_base, core_dll.img_size, nt->FileHeader.NumberOfSections, core_dll.img_exp ); };
/** Initialize sysinfo subsystem
 *
 * Create SLAB cache for sysinfo items, wired to the item
 * constructor/destructor callbacks, with deferred magazines
 * (SLAB_CACHE_MAGDEFERRED), and initialize the sysinfo lock.
 *
 */
void sysinfo_init(void)
{
	sysinfo_item_slab = slab_cache_create("sysinfo_item_t",
	    sizeof(sysinfo_item_t), 0, sysinfo_item_constructor,
	    sysinfo_item_destructor, SLAB_CACHE_MAGDEFERRED);

	/* NOTE(review): MUTEX_ACTIVE presumably selects an active (spinning)
	 * mutex variant — confirm against the mutex API. */
	mutex_initialize(&sysinfo_lock, MUTEX_ACTIVE);
}
int main() { quota_init("a, UINT_MAX); slab_arena_create(&arena, "a, 0, 4000000, MAP_PRIVATE); slab_cache_create(&cache, &arena); region_basic(); region_test_truncate(); slab_cache_destroy(&cache); }
/* Bootstrap the slab allocator itself. The cache-of-caches must be set up
 * via the internal _slab_cache_create() (no magazines, slab metadata kept
 * inside the slab: SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE) before
 * slab_cache_create() can be used for anything else.
 *
 * Fix: removed the stray ';' after the function body — a semicolon at
 * file scope after a definition is not valid ISO C. */
void slab_cache_init(void)
{
	DBG("%s\n", __FUNCTION__);

	_slab_cache_create(&slab_cache_cache, sizeof(slab_cache_t),
	    sizeof(void *), NULL, NULL,
	    SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);

	/* Initialize external slab cache */
	slab_cache = slab_cache_create(sizeof(slab_t), 0, NULL, NULL,
	    SLAB_CACHE_MAGDEFERRED);
}
/* Bring the tss subsystem to a clean initial state: reference table,
 * options, zeroed stats, LSN counters, and a region allocator backed by
 * a private slab cache.
 *
 * Returns 0 on success, -1 if the reference table cannot be initialized
 * (in which case nothing else is touched). */
static int ts_init(void)
{
	if (ts_reftable_init(&tss.rt) == -1)
		return -1;

	ts_options_init(&tss.opts);
	memset(&tss.s, 0, sizeof(tss.s));

	tss.last_snap_lsn = 0;
	tss.last_xlog_lsn = 0;

	slab_cache_create(&tss.sc);
	region_create(&tss.ra, &tss.sc);

	return 0;
}
int main() { seed = time(0); srand(seed); quota_init("a, UINT_MAX); slab_arena_create(&arena, "a, 0, 4000000, MAP_PRIVATE); slab_cache_create(&cache, &arena); small_alloc_basic(); slab_cache_destroy(&cache); }
/* mempool regression driver: seed the PRNG, pick a random object size
 * clamped into [OBJSIZE_MIN, OBJSIZE_MAX), set up arena and cache, run
 * the basic test, tear down. */
int main()
{
	seed = time(0);
	srand(seed);

	/* Clamp the randomly chosen object size from below. */
	objsize = rand() % OBJSIZE_MAX;
	if (objsize < OBJSIZE_MIN)
		objsize = OBJSIZE_MIN;

	slab_arena_create(&arena, 0, UINT_MAX, 4000000, MAP_PRIVATE);
	slab_cache_create(&cache, &arena, 0);

	mempool_basic();

	slab_cache_destroy(&cache);
}
static int kmalloc_init() { /* FIXME: Make vmspace_init deal with addresses that aren't initially maximally aligned so we can give it 0xC0400000 as the starting address (first address after first 4MB identity map (x86)) */ if (vmspace_init(&kernel_vmspace, MMAP_KERNEL_VMSPACE_START, MMAP_KERNEL_VMSPACE_END-MMAP_KERNEL_VMSPACE_START) == -1) { assert(0 && "kernel_vmspace init failed!"); return -1; } int r = 0; for (unsigned i = 0; i <= MAX_CACHESZ_LOG2-MIN_CACHESZ_LOG2; ++i) { r |= slab_cache_create(&caches[i], &kernel_vmspace, 1U<<(i+MIN_CACHESZ_LOG2), NULL); } assert(r == 0 && "slab cache creation failed!"); return 1; }
/* obuf regression driver: seed the PRNG, set up a quota-backed arena and
 * slab cache on the stack, run the basic obuf test, tear down.
 *
 * Fix: the quota arguments had been garbled into an unterminated string
 * literal ("a) — restored to &quota, the local declared just above. The
 * original could not compile. */
int main()
{
	struct slab_cache cache;
	struct slab_arena arena;
	struct quota quota;

	seed = time(0);
	srand(seed);

	quota_init(&quota, UINT_MAX);
	slab_arena_create(&arena, &quota, 0, 4000000, MAP_PRIVATE);
	slab_cache_create(&cache, &arena);

	obuf_basic(&cache);

	slab_cache_destroy(&cache);
}
/* Create the thread slab cache, then retrofit a thread_t descriptor for
 * the already-running boot thread so the thread list has a valid entry
 * from the start. Returns 0 on success. */
static int threading_init()
{
	/* Zeroed template passed to slab_cache_create. NOTE(review):
	 * presumably used by the slab layer to initialize fresh objects —
	 * confirm against this project's slab API. */
	static thread_t dummy_t = { .id = 0, .prev = NULL, .next = NULL,
		.scheduler_next = NULL, .semaphore_next = NULL, .stack = 0,
		.request_kill = 0, .state = 0, .priority = 0, .auto_free = 0 };

	int r = slab_cache_create(&thread_cache, &kernel_vmspace,
		sizeof(thread_t), (void*)&dummy_t);
	assert(r == 0 && "slab_cache_create failed!");

	/* Allocate a descriptor for the currently executing (boot) thread. */
	thread_t *t = (thread_t*)slab_cache_alloc(&thread_cache);

	/* Recover the stack base by masking the current frame address down
	 * to a THREAD_STACK_SZ boundary — assumes stacks are
	 * THREAD_STACK_SZ-aligned. */
	t->stack = (uintptr_t)__builtin_frame_address(0) & ~(THREAD_STACK_SZ-1);

	/* Stash the TCB pointer and the canary in the stack's TLS slots,
	 * then read them back to verify the slot arithmetic. */
	*tls_slot(TLS_SLOT_TCB, t->stack) = (uintptr_t)t;
	*tls_slot(TLS_SLOT_CANARY, t->stack) = CANARY_VAL;
	assert(*tls_slot(TLS_SLOT_TCB, t->stack) == (uintptr_t)t);
	assert(*tls_slot(TLS_SLOT_CANARY, t->stack) == CANARY_VAL);

	thread_list_head = t;

	register_debugger_handler("threads", "List all thread states",
		&inspect_threads);
	return 0;
}

/* Module registration: threading requires kmalloc and scheduler. */
static prereq_t p[] = { {"kmalloc",NULL}, {"scheduler",NULL}, {NULL,NULL} };
static module_t x run_on_startup = { .name = "threading", .required = p,
	.load_after = NULL, .init = &threading_init, .fini = NULL };
/* Allow kernel-mode code to generate its own APCs */
void apcs_init()
{
	/* Create the slab cache for internally-created APCs.
	 * NOTE(review): the second argument passes page-protection flags
	 * (PAGE_READ | PAGE_WRITE), which differs from the other
	 * slab_cache_create call shapes in this file — confirm this matches
	 * this project's slab_cache_create() signature. */
	internal_apc_cache = slab_cache_create(sizeof(apc_t),
	    PAGE_READ | PAGE_WRITE);
}