/**
 * \brief Initializer that does not allocate any space
 *
 * #slot_alloc_init duplicates some of the code below,
 * modify it if making changes here.
 *
 * XXX: top_buf, head_buf and reserve_buf each point to a separate buffer of
 * size bufsize bytes which can be used for backing storage. bufsize evidently
 * needs to be >= sizeof(struct cnode_meta) * nslots / 2. Don't ask me why!
 * -AB
 */
errval_t multi_slot_alloc_init_raw(struct multi_slot_allocator *ret,
                                   cslot_t nslots, struct capref top_cap,
                                   struct cnoderef top_cnode,
                                   void *top_buf, void *head_buf,
                                   void *reserve_buf, size_t bufsize)
{
    errval_t err;
    struct capref cap;
    struct cnoderef cnode;

    /* Generic part */
    ret->a.alloc  = multi_alloc;
    ret->a.free   = multi_free;
    ret->a.space  = nslots;
    ret->a.nslots = nslots;
    thread_mutex_init(&ret->a.mutex);

    ret->head->next    = NULL;
    ret->reserve->next = NULL;

    /* Top */
    err = single_slot_alloc_init_raw((struct single_slot_allocator*)ret->top,
                                     top_cap, top_cnode, nslots, top_buf,
                                     bufsize);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_SINGLE_SLOT_ALLOC_INIT);
    }

    /* Head */
    err = ret->top->alloc(ret->top, &cap);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_SLOT_ALLOC);
    }
    err = cnode_create_raw(cap, &cnode, nslots, NULL);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_CNODE_CREATE);
    }
    err = single_slot_alloc_init_raw(&ret->head->a, cap, cnode, nslots,
                                     head_buf, bufsize);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_SINGLE_SLOT_ALLOC_INIT);
    }

    /* Reserve */
    err = ret->top->alloc(ret->top, &cap);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_SLOT_ALLOC);
    }
    err = cnode_create_raw(cap, &cnode, nslots, NULL);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_CNODE_CREATE);
    }
    err = single_slot_alloc_init_raw(&ret->reserve->a, cap, cnode, nslots,
                                     reserve_buf, bufsize);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_SINGLE_SLOT_ALLOC_INIT);
    }

    /* Slab */
    size_t allocation_unit = sizeof(struct slot_allocator_list)
                             + SINGLE_SLOT_ALLOC_BUFLEN(nslots);
    slab_init(&ret->slab, allocation_unit, NULL);

    return SYS_ERR_OK;
}
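/*
 * Usage sketch (illustrative only, not part of this allocator): how a caller
 * might size the three backing buffers and invoke multi_slot_alloc_init_raw.
 * The helper name `init_multi_alloc_example`, the static sub-allocator
 * storage, and the single malloc'd region are assumptions; note that
 * init_raw dereferences ret->head and ret->reserve, so those pointers must
 * be valid before the call.
 */
#if 0 /* example, not compiled */
static struct single_slot_allocator top_sa;
static struct slot_allocator_list head_sa, reserve_sa;

static errval_t init_multi_alloc_example(struct multi_slot_allocator *ma,
                                         cslot_t nslots,
                                         struct capref top_cap,
                                         struct cnoderef top_cnode)
{
    // each sub-allocator needs bufsize bytes of backing storage; see the
    // XXX note above for the lower bound this macro is assumed to satisfy
    size_t bufsize = SINGLE_SLOT_ALLOC_BUFLEN(nslots);
    char *bufs = malloc(3 * bufsize);
    if (bufs == NULL) {
        return LIB_ERR_MALLOC_FAIL;
    }

    // point the sub-allocator pointers at valid storage before init_raw
    ma->top     = (struct slot_allocator *)&top_sa;
    ma->head    = &head_sa;
    ma->reserve = &reserve_sa;

    return multi_slot_alloc_init_raw(ma, nslots, top_cap, top_cnode,
                                     bufs,                 /* top */
                                     bufs + bufsize,       /* head */
                                     bufs + 2 * bufsize,   /* reserve */
                                     bufsize);
}
#endif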
/**
 * \brief Allocate a slot from the multi slot allocator
 *
 * \param ca  Instance of the allocator
 * \param ret Pointer to return the allocated slot
 */
errval_t multi_alloc(struct slot_allocator *ca, struct capref *ret)
{
    errval_t err = SYS_ERR_OK;
    struct multi_slot_allocator *mca = (struct multi_slot_allocator*)ca;

    thread_mutex_lock(&ca->mutex);
    assert(ca->space != 0);
    ca->space--;

    /* Try allocating from the list of single slot allocators */
    struct slot_allocator_list *walk = mca->head;
    //struct slot_allocator_list *prev = NULL;
    while (walk != NULL) {
        err = walk->a.a.alloc(&walk->a.a, ret);
        if (err_no(err) != LIB_ERR_SLOT_ALLOC_NO_SPACE) {
            break;
        }
        //prev = walk;
        walk = walk->next;
    }
    if (err_is_fail(err)) {
        thread_mutex_unlock(&ca->mutex);
        return err_push(err, LIB_ERR_SINGLE_SLOT_ALLOC);
    }

    /* If no more slots left, grow */
    if (ca->space == 0) {
        ca->space = ca->nslots;

        /* Pull in the reserve */
        mca->reserve->next = mca->head;
        mca->head = mca->reserve;

        /* Set up a new reserve */
        // Cnode
        struct capref cap;
        struct cnoderef cnode;
        err = mca->top->alloc(mca->top, &cap);
        if (err_is_fail(err)) {
            thread_mutex_unlock(&ca->mutex);
            return err_push(err, LIB_ERR_SLOT_ALLOC);
        }

        // cnode_create_raw uses ram_alloc, which may call slot_alloc
        thread_mutex_unlock(&ca->mutex);
        err = cnode_create_raw(cap, &cnode, ca->nslots, NULL);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_CNODE_CREATE);
        }
        thread_mutex_lock(&ca->mutex);

        // Buffers
        void *buf = slab_alloc(&mca->slab);
        if (!buf) { /* Grow slab */
            // Allocate slot out of the list
            mca->a.space--;
            struct capref frame;
            err = mca->head->a.a.alloc(&mca->head->a.a, &frame);
            if (err_is_fail(err)) {
                thread_mutex_unlock(&ca->mutex);
                return err_push(err, LIB_ERR_SLOT_ALLOC);
            }

            // following functions may call slot_alloc
            thread_mutex_unlock(&ca->mutex);
            void *slab_buf;
            size_t size;
            err = vspace_mmu_aware_map(&mca->mmu_state, frame,
                                       mca->slab.blocksize, &slab_buf, &size);
            if (err_is_fail(err)) {
                return err_push(err, LIB_ERR_VSPACE_MMU_AWARE_MAP);
            }
            thread_mutex_lock(&ca->mutex);

            // Grow slab
            slab_grow(&mca->slab, slab_buf, size);

            // Try allocating again
            buf = slab_alloc(&mca->slab);
            if (buf == NULL) {
                thread_mutex_unlock(&ca->mutex);
                return LIB_ERR_SLAB_ALLOC_FAIL;
            }
        }

        mca->reserve = buf;
        buf = (char *)buf + sizeof(struct slot_allocator_list);
        size_t bufsize = mca->slab.blocksize - sizeof(struct slot_allocator_list);

        // Allocator
        err = single_slot_alloc_init_raw(&mca->reserve->a, cap, cnode,
                                         mca->a.nslots, buf, bufsize);
        if (err_is_fail(err)) {
            thread_mutex_unlock(&ca->mutex);
            return err_push(err, LIB_ERR_SINGLE_SLOT_ALLOC_INIT_RAW);
        }
    }

    thread_mutex_unlock(&ca->mutex);
    return SYS_ERR_OK;
}
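/*
 * Caller's view (illustrative sketch, not part of this file): slots come
 * through the generic function-pointer interface, so callers need not know
 * whether growth happens underneath. `ma` is assumed to be a
 * multi_slot_allocator initialized as above; `grab_slot_example` is a
 * hypothetical helper.
 */
#if 0 /* example, not compiled */
static errval_t grab_slot_example(struct multi_slot_allocator *ma,
                                  struct capref *slot)
{
    // multi_alloc drops ma->a.mutex around cnode_create_raw and
    // vspace_mmu_aware_map because both may recurse into slot_alloc
    errval_t err = ma->a.alloc(&ma->a, slot);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_SLOT_ALLOC);
    }
    return SYS_ERR_OK;
}
#endif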
/**
 * \brief Set up an initial cspace
 *
 * Create an initial cspace layout
 */
static errval_t spawn_setup_cspace(struct spawninfo *si)
{
    errval_t err;
    struct capref t1;

    /* Create root CNode */
    err = cnode_create(&si->rootcn_cap, &si->rootcn, DEFAULT_CNODE_SLOTS, NULL);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_CREATE_ROOTCN);
    }

    /* Create taskcn */
    err = cnode_create(&si->taskcn_cap, &si->taskcn, DEFAULT_CNODE_SLOTS, NULL);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_CREATE_TASKCN);
    }

    // Mint into rootcn, setting the guard
    t1.cnode = si->rootcn;
    t1.slot  = ROOTCN_SLOT_TASKCN;
    err = cap_mint(t1, si->taskcn_cap, 0,
                   GUARD_REMAINDER(2 * DEFAULT_CNODE_BITS));
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_MINT_TASKCN);
    }

    /* Create slot_alloc_cnode */
    t1.cnode = si->rootcn;
    t1.slot  = ROOTCN_SLOT_SLOT_ALLOC0;
    err = cnode_create_raw(t1, NULL, (1 << SLOT_ALLOC_CNODE_BITS), NULL);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_CREATE_SLOTALLOC_CNODE);
    }
    t1.cnode = si->rootcn;
    t1.slot  = ROOTCN_SLOT_SLOT_ALLOC1;
    err = cnode_create_raw(t1, NULL, (1 << SLOT_ALLOC_CNODE_BITS), NULL);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_CREATE_SLOTALLOC_CNODE);
    }
    t1.cnode = si->rootcn;
    t1.slot  = ROOTCN_SLOT_SLOT_ALLOC2;
    err = cnode_create_raw(t1, NULL, (1 << SLOT_ALLOC_CNODE_BITS), NULL);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_CREATE_SLOTALLOC_CNODE);
    }

    // Create DCB
    si->dcb.cnode = si->taskcn;
    si->dcb.slot  = TASKCN_SLOT_DISPATCHER;
    err = dispatcher_create(si->dcb);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_CREATE_DISPATCHER);
    }

    // Give domain endpoint to itself (in taskcn)
    struct capref selfep = {
        .cnode = si->taskcn,
        .slot  = TASKCN_SLOT_SELFEP,
    };
    err = cap_retype(selfep, si->dcb, ObjType_EndPoint, 0);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_CREATE_SELFEP);
    }

    // Map root CNode (in taskcn)
    t1.cnode = si->taskcn;
    t1.slot  = TASKCN_SLOT_ROOTCN;
    err = cap_mint(t1, si->rootcn_cap, 0, 0);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_MINT_ROOTCN);
    }

#ifdef TRACING_EXISTS
    // Set up tracing for the child
    err = trace_setup_child(si->taskcn, si->handle);
    if (err_is_fail(err)) {
        printf("Warning: error setting up tracing for child domain\n");
        // SYS_DEBUG(err, ...);
    }
#endif

    // XXX: copy over argspg?
    memset(&si->argspg, 0, sizeof(si->argspg));

    /* Fill up basecn */
    struct capref basecn_cap;
    struct cnoderef basecn;

    // Create basecn in rootcn
    basecn_cap.cnode = si->rootcn;
    basecn_cap.slot  = ROOTCN_SLOT_BASE_PAGE_CN;
    err = cnode_create_raw(basecn_cap, &basecn, DEFAULT_CNODE_SLOTS, NULL);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_CNODE_CREATE);
    }

    // Place the RAM caps
    for (uint8_t i = 0; i < DEFAULT_CNODE_SLOTS; i++) {
        struct capref base = {
            .cnode = basecn,
            .slot  = i
        };
        struct capref ram;
        err = ram_alloc(&ram, BASE_PAGE_BITS);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_RAM_ALLOC);
        }
        err = cap_copy(base, ram);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_CAP_COPY);
        }
        err = cap_destroy(ram);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_CAP_DESTROY);
        }
    }

    return SYS_ERR_OK;
}

static errval_t spawn_setup_vspace(struct spawninfo *si)
{
    errval_t err;

    /* Create pagecn */
    si->pagecn_cap = (struct capref){
        .cnode = si->rootcn,
        .slot  = ROOTCN_SLOT_PAGECN
    };
    err = cnode_create_raw(si->pagecn_cap, &si->pagecn, PAGE_CNODE_SLOTS, NULL);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_CREATE_PAGECN);
    }

    /* Init pagecn's slot allocator */
    // XXX: satisfy a peculiarity of the single_slot_alloc_init_raw API
    size_t bufsize = SINGLE_SLOT_ALLOC_BUFLEN(PAGE_CNODE_SLOTS);
    void *buf = malloc(bufsize);
    assert(buf != NULL);

    err = single_slot_alloc_init_raw(&si->pagecn_slot_alloc, si->pagecn_cap,
                                     si->pagecn, PAGE_CNODE_SLOTS,
                                     buf, bufsize);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_SINGLE_SLOT_ALLOC_INIT_RAW);
    }

    // Create root of pagetable
    err = si->pagecn_slot_alloc.a.alloc(&si->pagecn_slot_alloc.a, &si->vtree);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_SLOT_ALLOC);
    }

    // top-level table should always live in slot 0 of pagecn
    assert(si->vtree.slot == 0);

    switch (si->cpu_type) {
    case CPU_X86_64:
    case CPU_K1OM:
        err = vnode_create(si->vtree, ObjType_VNode_x86_64_pml4);
        break;

    case CPU_X86_32:
    case CPU_SCC:
#ifdef CONFIG_PAE
        err = vnode_create(si->vtree, ObjType_VNode_x86_32_pdpt);
#else
        err = vnode_create(si->vtree, ObjType_VNode_x86_32_pdir);
#endif
        break;

    case CPU_ARM5:
    case CPU_ARM7:
        err = vnode_create(si->vtree, ObjType_VNode_ARM_l1);
        break;

    default:
        assert(!"Other architecture");
        return err_push(err, SPAWN_ERR_UNKNOWN_TARGET_ARCH);
    }
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_CREATE_VNODE);
    }

    err = spawn_vspace_init(si, si->vtree, si->cpu_type);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_VSPACE_INIT);
    }

    return SYS_ERR_OK;
}

#if 0
/**
 * \brief Lookup and map an image
 */
static errval_t spawn_map(const char *name, struct bootinfo *bi,
                          lvaddr_t *binary, size_t *binary_size)
{
    errval_t err;

    /* Get the module from the multiboot */
    struct mem_region *module = multiboot_find_module(bi, name);
    if (module == NULL) {
        return SPAWN_ERR_FIND_MODULE;
    }

    /* Map the image */
    err = spawn_map_module(module, binary_size, binary, NULL);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_MAP_MODULE);
    }

    return SYS_ERR_OK;
}
#endif
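/*
 * Illustrative follow-on to spawn_setup_vspace (sketch only; the helper
 * name `alloc_child_vnode_example` is hypothetical): further page tables
 * for the child come from the same pagecn slot allocator, so they occupy
 * consecutive pagecn slots after the root table in slot 0.
 */
#if 0 /* example, not compiled */
static errval_t alloc_child_vnode_example(struct spawninfo *si,
                                          enum objtype type,
                                          struct capref *vnode)
{
    // take the next free slot in pagecn
    errval_t err = si->pagecn_slot_alloc.a.alloc(&si->pagecn_slot_alloc.a,
                                                 vnode);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_SLOT_ALLOC);
    }
    // create the page-table object in that slot
    err = vnode_create(*vnode, type);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_CREATE_VNODE);
    }
    return SYS_ERR_OK;
}
#endif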