/**
 * \brief Initializer that uses memory
 */
errval_t multi_slot_alloc_init(struct multi_slot_allocator *ret,
                               cslot_t nslots, cslot_t *retslots)
{
    errval_t err;
    nslots = ROUND_UP(nslots, DEFAULT_CNODE_SLOTS);
    size_t bufsize = SINGLE_SLOT_ALLOC_BUFLEN(nslots); // XXX?

    ret->top = malloc(sizeof(struct single_slot_allocator));
    if (!ret->top) {
        return LIB_ERR_MALLOC_FAIL;
    }
    void *top_buf = malloc(bufsize);
    if (!top_buf) {
        return LIB_ERR_MALLOC_FAIL;
    }

    ret->head = malloc(sizeof(struct slot_allocator_list));
    if (!ret->head) {
        return LIB_ERR_MALLOC_FAIL;
    }
    ret->head->next = NULL;
    void *head_buf = malloc(bufsize);
    if (!head_buf) {
        return LIB_ERR_MALLOC_FAIL;
    }

    ret->reserve = malloc(sizeof(struct single_slot_allocator));
    if (!ret->reserve) {
        return LIB_ERR_MALLOC_FAIL;
    }
    void *reserve_buf = malloc(bufsize);
    if (!reserve_buf) {
        return LIB_ERR_MALLOC_FAIL;
    }

    size_t allocation_unit = sizeof(struct slot_allocator_list) + bufsize;
    err = vspace_mmu_aware_init(&ret->mmu_state, allocation_unit * nslots * 2);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_VSPACE_MMU_AWARE_INIT);
    }

    struct capref cap;
    struct cnoderef cnode;
    err = cnode_create(&cap, &cnode, nslots, NULL);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_CNODE_CREATE);
    }

    err = multi_slot_alloc_init_raw(ret, nslots, cap, cnode, top_buf,
                                    head_buf, reserve_buf, bufsize);
    if (err_is_fail(err)) {
        return err;
    }

    if (retslots) {
        *retslots = nslots;
    }
    return SYS_ERR_OK;
}
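/* A minimal usage sketch (not part of the original sources): slots are drawn
 * from the initialized allocator through the generic struct slot_allocator it
 * embeds, the same vtable calling pattern used for pagecn_slot_alloc further
 * down in this section. The embedded field being named `a` and the free()
 * entry point are assumptions here. */
static errval_t example_use_multi_slot_alloc(void)
{
    struct multi_slot_allocator msa;
    cslot_t nslots;
    errval_t err = multi_slot_alloc_init(&msa, DEFAULT_CNODE_SLOTS, &nslots);
    if (err_is_fail(err)) {
        return err;
    }

    // grab one capability slot ...
    struct capref slot;
    err = msa.a.alloc(&msa.a, &slot);
    if (err_is_fail(err)) {
        return err;
    }

    // ... and hand it back when done with it
    return msa.a.free(&msa.a, slot);
}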
static errval_t cow_init(size_t bufsize, size_t granularity,
                         struct cnoderef *cow_cn, size_t *frame_count)
{
    assert(cow_cn);
    assert(frame_count);

    errval_t err;
    struct capref frame, cncap;
    struct cnoderef cnode;

    // get RAM cap
    bufsize = (bufsize / granularity + 1) * granularity;
    err = slot_alloc(&frame);
    assert(err_is_ok(err));
    size_t rambits = log2floor(bufsize);
    debug_printf("bits = %zu\n", rambits);
    err = ram_alloc(&frame, rambits);
    assert(err_is_ok(err));

    // calculate #slots
    cslot_t cap_count = bufsize / granularity;
    cslot_t slots;

    // get CNode
    err = cnode_create(&cncap, &cnode, cap_count, &slots);
    assert(err_is_ok(err));
    assert(slots >= cap_count);

    // retype RAM into Frames
    struct capref first_frame = (struct capref) { .cnode = cnode, .slot = 0 };
    err = cap_retype(first_frame, frame, ObjType_Frame, log2floor(granularity));
    assert(err_is_ok(err));
    err = cap_destroy(frame);
    assert(err_is_ok(err));

    *frame_count = slots;
    *cow_cn = cnode;

    return SYS_ERR_OK;
}

// create cow-enabled vregion & backing
// Can copy-on-write in granularity-sized chunks
static errval_t vspace_map_one_frame_cow(void **buf, size_t size,
                                         struct capref frame,
                                         vregion_flags_t flags,
                                         struct memobj **memobj,
                                         struct vregion **vregion,
                                         size_t granularity)
{
    errval_t err;
    if (!memobj) {
        memobj = malloc(sizeof(*memobj));
    }
    assert(memobj);
    if (!vregion) {
        vregion = malloc(sizeof(*vregion));
    }
    assert(vregion);

    err = vspace_map_anon_attr(buf, memobj, vregion, size, &size, flags);
    assert(err_is_ok(err));

    size_t chunks = size / granularity;
    cslot_t slots;
    struct capref cncap;
    struct cnoderef cnode;
    err = cnode_create(&cncap, &cnode, chunks, &slots);
    assert(err_is_ok(err));
    assert(slots >= chunks);

    struct capref fc = (struct capref) { .cnode = cnode, .slot = 0 };
    for (int i = 0; i < chunks; i++) {
        err = cap_copy(fc, frame);
        assert(err_is_ok(err));
        err = (*memobj)->f.fill_foff(*memobj, i * granularity, fc, granularity,
                                     i * granularity);
        assert(err_is_ok(err));
        err = (*memobj)->f.pagefault(*memobj, *vregion, i * granularity, 0);
        assert(err_is_ok(err));
        fc.slot++;
    }

    return SYS_ERR_OK;
}

int main(int argc, char *argv[])
{
    errval_t err;
    struct capref frame;
    size_t retsize;
    void *vbuf;
    struct vregion *vregion;
    uint8_t *buf;

    debug_printf("%s:%d\n", __FUNCTION__, __LINE__);

    err = frame_alloc(&frame, BUFSIZE, &retsize);
    if (err_is_fail(err)) {
        debug_printf("frame_alloc: %s\n", err_getstring(err));
        return 1;
    }
    assert(retsize >= BUFSIZE);
    debug_printf("%s:%d: %zu\n", __FUNCTION__, __LINE__, retsize);

    // setup region
    err = vspace_map_one_frame_attr(&vbuf, retsize, frame,
                                    VREGION_FLAGS_READ_WRITE, NULL, &vregion);
    if (err_is_fail(err)) {
        debug_printf("vspace_map: %s\n", err_getstring(err));
        return 1;
    }
    debug_printf("vaddr: %p\n", vbuf);

    // write stuff to region
    buf = vbuf;
    debug_printf("%s:%d: %p, %lu pages\n", __FUNCTION__, __LINE__, buf,
                 BUFSIZE / BASE_PAGE_SIZE);
    memset(buf, 0xAA, BUFSIZE);

    debug_printf("%s:%d\n", __FUNCTION__, __LINE__);

    // create cow copy
    // setup exception handler
    err = thread_set_exception_handler(handler, NULL, ex_stack,
                                       ex_stack + EX_STACK_SIZE, NULL, NULL);
    assert(err_is_ok(err));

    debug_printf("%s:%d\n", __FUNCTION__, __LINE__);
    err = cow_init(BUFSIZE, BASE_PAGE_SIZE, &cow_frames, &cow_frame_count);
    assert(err_is_ok(err));

    // create r/o copy of region and tell exception handler bounds
    debug_printf("%s:%d\n", __FUNCTION__, __LINE__);
    err = vspace_map_one_frame_cow(&cow_vbuf, retsize, frame,
                                   VREGION_FLAGS_READ, NULL, &cow_vregion,
                                   BASE_PAGE_SIZE);
    if (err_is_fail(err)) {
        debug_printf("vspace_map: %s\n", err_getstring(err));
        return 1;
    }
    debug_printf("cow_vaddr: %p\n", cow_vbuf);

    // do stuff to the cow copy
    uint8_t *cbuf = cow_vbuf;
    for (int i = 0; i < BUFSIZE / BASE_PAGE_SIZE; i += 2) {
        cbuf[i * BASE_PAGE_SIZE + 1] = 0x55;
    }

    // verify results
    for (int i = 0; i < BUFSIZE / BASE_PAGE_SIZE; i++) {
        printf("page %d\n", i);
        printf("buf[0] = %d; cbuf[0] = %d\n",
               buf[i * BASE_PAGE_SIZE], cbuf[i * BASE_PAGE_SIZE]);
        printf("buf[1] = %d; cbuf[1] = %d\n",
               buf[i * BASE_PAGE_SIZE + 1], cbuf[i * BASE_PAGE_SIZE + 1]);
    }

    debug_dump_hw_ptables();
    return EXIT_SUCCESS;
}
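/* The write loop in main() only works because a page-fault handler (installed
 * via thread_set_exception_handler() but not shown in this excerpt) performs
 * the actual copy. The skeleton below is an orientation sketch, not the
 * original test's handler: it assumes the usual exception_handler_fn
 * signature, and the three numbered steps are only described in comments. */
static void cow_handler_sketch(enum exception_type type, int subtype, void *addr,
                               arch_registers_state_t *regs,
                               arch_registers_fpu_state_t *fpuregs)
{
    // only write faults on the read-only CoW mapping are of interest here
    assert(type == EXCEPT_PAGEFAULT);

    // which BASE_PAGE_SIZE-sized chunk of the CoW region was written?
    size_t offset = ((uintptr_t)addr - (uintptr_t)cow_vbuf) & ~(BASE_PAGE_SIZE - 1);
    debug_printf("cow fault at %p (chunk offset %zu)\n", addr, offset);

    // 1. take the next unused frame from the cow_frames CNode filled by cow_init()
    // 2. copy the original page contents into that private frame
    // 3. replace the read-only mapping of this chunk with a writable mapping of
    //    the private frame, so the faulting store can simply be retried
}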
/**
 * \brief Setup an initial cspace
 *
 * Create an initial cspace layout
 */
static errval_t spawn_setup_cspace(struct spawninfo *si)
{
    errval_t err;
    struct capref t1;

    /* Create root CNode */
    err = cnode_create(&si->rootcn_cap, &si->rootcn, DEFAULT_CNODE_SLOTS, NULL);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_CREATE_ROOTCN);
    }

    /* Create taskcn */
    err = cnode_create(&si->taskcn_cap, &si->taskcn, DEFAULT_CNODE_SLOTS, NULL);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_CREATE_TASKCN);
    }

    // Mint into rootcn setting the guard
    t1.cnode = si->rootcn;
    t1.slot  = ROOTCN_SLOT_TASKCN;
    err = cap_mint(t1, si->taskcn_cap, 0, GUARD_REMAINDER(2 * DEFAULT_CNODE_BITS));
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_MINT_TASKCN);
    }

    /* Create slot_alloc_cnode */
    t1.cnode = si->rootcn;
    t1.slot  = ROOTCN_SLOT_SLOT_ALLOC0;
    err = cnode_create_raw(t1, NULL, (1 << SLOT_ALLOC_CNODE_BITS), NULL);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_CREATE_SLOTALLOC_CNODE);
    }
    t1.cnode = si->rootcn;
    t1.slot  = ROOTCN_SLOT_SLOT_ALLOC1;
    err = cnode_create_raw(t1, NULL, (1 << SLOT_ALLOC_CNODE_BITS), NULL);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_CREATE_SLOTALLOC_CNODE);
    }
    t1.cnode = si->rootcn;
    t1.slot  = ROOTCN_SLOT_SLOT_ALLOC2;
    err = cnode_create_raw(t1, NULL, (1 << SLOT_ALLOC_CNODE_BITS), NULL);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_CREATE_SLOTALLOC_CNODE);
    }

    // Create DCB
    si->dcb.cnode = si->taskcn;
    si->dcb.slot  = TASKCN_SLOT_DISPATCHER;
    err = dispatcher_create(si->dcb);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_CREATE_DISPATCHER);
    }

    // Give domain endpoint to itself (in taskcn)
    struct capref selfep = {
        .cnode = si->taskcn,
        .slot  = TASKCN_SLOT_SELFEP,
    };
    err = cap_retype(selfep, si->dcb, ObjType_EndPoint, 0);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_CREATE_SELFEP);
    }

    // Map root CNode (in taskcn)
    t1.cnode = si->taskcn;
    t1.slot  = TASKCN_SLOT_ROOTCN;
    err = cap_mint(t1, si->rootcn_cap, 0, 0);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_MINT_ROOTCN);
    }

#ifdef TRACING_EXISTS
    // Set up tracing for the child
    err = trace_setup_child(si->taskcn, si->handle);
    if (err_is_fail(err)) {
        printf("Warning: error setting up tracing for child domain\n");
        // SYS_DEBUG(err, ...);
    }
#endif

    // XXX: copy over argspg?
    memset(&si->argspg, 0, sizeof(si->argspg));

    /* Fill up basecn */
    struct capref basecn_cap;
    struct cnoderef basecn;

    // Create basecn in rootcn
    basecn_cap.cnode = si->rootcn;
    basecn_cap.slot  = ROOTCN_SLOT_BASE_PAGE_CN;
    err = cnode_create_raw(basecn_cap, &basecn, DEFAULT_CNODE_SLOTS, NULL);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_CNODE_CREATE);
    }

    // Place the ram caps
    for (uint8_t i = 0; i < DEFAULT_CNODE_SLOTS; i++) {
        struct capref base = {
            .cnode = basecn,
            .slot  = i
        };
        struct capref ram;
        err = ram_alloc(&ram, BASE_PAGE_BITS);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_RAM_ALLOC);
        }
        err = cap_copy(base, ram);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_CAP_COPY);
        }
        err = cap_destroy(ram);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_CAP_DESTROY);
        }
    }

    return SYS_ERR_OK;
}

static errval_t spawn_setup_vspace(struct spawninfo *si)
{
    errval_t err;

    /* Create pagecn */
    si->pagecn_cap = (struct capref){.cnode = si->rootcn, .slot = ROOTCN_SLOT_PAGECN};
    err = cnode_create_raw(si->pagecn_cap, &si->pagecn, PAGE_CNODE_SLOTS, NULL);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_CREATE_PAGECN);
    }

    /* Init pagecn's slot allocator */
    // XXX: satisfy a peculiarity of the single_slot_alloc_init_raw API
    size_t bufsize = SINGLE_SLOT_ALLOC_BUFLEN(PAGE_CNODE_SLOTS);
    void *buf = malloc(bufsize);
    assert(buf != NULL);

    err = single_slot_alloc_init_raw(&si->pagecn_slot_alloc, si->pagecn_cap,
                                     si->pagecn, PAGE_CNODE_SLOTS, buf, bufsize);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_SINGLE_SLOT_ALLOC_INIT_RAW);
    }

    // Create root of pagetable
    err = si->pagecn_slot_alloc.a.alloc(&si->pagecn_slot_alloc.a, &si->vtree);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_SLOT_ALLOC);
    }

    // top-level table should always live in slot 0 of pagecn
    assert(si->vtree.slot == 0);

    switch (si->cpu_type) {
    case CPU_X86_64:
    case CPU_K1OM:
        err = vnode_create(si->vtree, ObjType_VNode_x86_64_pml4);
        break;

    case CPU_X86_32:
    case CPU_SCC:
#ifdef CONFIG_PAE
        err = vnode_create(si->vtree, ObjType_VNode_x86_32_pdpt);
#else
        err = vnode_create(si->vtree, ObjType_VNode_x86_32_pdir);
#endif
        break;

    case CPU_ARM5:
    case CPU_ARM7:
        err = vnode_create(si->vtree, ObjType_VNode_ARM_l1);
        break;

    default:
        assert(!"Other architecture");
        return err_push(err, SPAWN_ERR_UNKNOWN_TARGET_ARCH);
    }

    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_CREATE_VNODE);
    }

    err = spawn_vspace_init(si, si->vtree, si->cpu_type);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_VSPACE_INIT);
    }

    return SYS_ERR_OK;
}

#if 0
/**
 * \brief Lookup and map an image
 */
static errval_t spawn_map(const char *name, struct bootinfo *bi,
                          lvaddr_t *binary, size_t *binary_size)
{
    errval_t err;

    /* Get the module from the multiboot */
    struct mem_region *module = multiboot_find_module(bi, name);
    if (module == NULL) {
        return SPAWN_ERR_FIND_MODULE;
    }

    /* Map the image */
    err = spawn_map_module(module, binary_size, binary, NULL);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_MAP_MODULE);
    }

    return SYS_ERR_OK;
}
static errval_t init_allocators(void)
{
    errval_t err, msgerr;

    struct monitor_blocking_rpc_client *cl = get_monitor_blocking_rpc_client();
    assert(cl != NULL);

    // Get the bootinfo and map it in.
    struct capref bootinfo_frame;
    size_t bootinfo_size;
    struct bootinfo *bootinfo;
    msgerr = cl->vtbl.get_bootinfo(cl, &err, &bootinfo_frame, &bootinfo_size);
    if (err_is_fail(msgerr) || err_is_fail(err)) {
        USER_PANIC_ERR(err_is_fail(msgerr) ? msgerr : err,
                       "failed in get_bootinfo");
    }
    err = vspace_map_one_frame((void **)&bootinfo, bootinfo_size,
                               bootinfo_frame, NULL, NULL);
    assert(err_is_ok(err));

    /* Initialize the memory allocator to handle PhysAddr caps */
    static struct range_slot_allocator devframes_allocator;
    err = range_slot_alloc_init(&devframes_allocator, PCI_CNODE_SLOTS, NULL);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_SLOT_ALLOC_INIT);
    }

    err = mm_init(&pci_mm_physaddr, ObjType_DevFrame, 0, 48,
                  /* This next parameter is important. It specifies the maximum
                   * amount that a cap may be "chunked" (i.e. broken up) at each
                   * level in the allocator. Setting it higher than 1 reduces the
                   * memory overhead of keeping all the intermediate caps around,
                   * but leads to problems if you chunk up a cap too small to be
                   * able to allocate a large subregion. This caused problems
                   * for me with a large framebuffer... -AB 20110810 */
                  1, /* was DEFAULT_CNODE_BITS */
                  slab_default_refill, slot_alloc_dynamic,
                  &devframes_allocator, false);
    if (err_is_fail(err)) {
        return err_push(err, MM_ERR_MM_INIT);
    }

    // Request I/O Cap
    struct capref requested_caps;
    errval_t error_code;
    err = cl->vtbl.get_io_cap(cl, &requested_caps, &error_code);
    assert(err_is_ok(err) && err_is_ok(error_code));

    // Copy into correct slot
    struct capref caps_io = {
        .cnode = cnode_task,
        .slot  = TASKCN_SLOT_IO
    };
    err = cap_copy(caps_io, requested_caps);

    // XXX: The code below is confused about gen/l/paddrs.
    // Caps should be managed in genpaddr, while the bus mgmt must be in lpaddr.
    err = cl->vtbl.get_phyaddr_cap(cl, &requested_caps, &error_code);
    assert(err_is_ok(err) && err_is_ok(error_code));
    physical_caps = requested_caps;

    // Build the capref for the first physical address capability
    struct capref phys_cap;
    phys_cap.cnode = build_cnoderef(requested_caps, PHYSADDRCN_BITS);
    phys_cap.slot  = 0;

    struct cnoderef devcnode;
    err = slot_alloc(&my_devframes_cnode);
    assert(err_is_ok(err));
    cslot_t slots;
    err = cnode_create(&my_devframes_cnode, &devcnode, 255, &slots);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "cnode create");
    }
    struct capref devframe;
    devframe.cnode = devcnode;
    devframe.slot  = 0;

    for (int i = 0; i < bootinfo->regions_length; i++) {
        struct mem_region *mrp = &bootinfo->regions[i];
        if (mrp->mr_type == RegionType_Module) {
            skb_add_fact("memory_region(16'%" PRIxGENPADDR ",%u,%zu,%u,%tu).",
                         mrp->mr_base, 0, mrp->mrmod_size, mrp->mr_type,
                         mrp->mrmod_data);
        } else {
            skb_add_fact("memory_region(16'%" PRIxGENPADDR ",%u,%zu,%u,%tu).",
                         mrp->mr_base, mrp->mr_bits,
                         ((size_t)1) << mrp->mr_bits, mrp->mr_type,
                         mrp->mrmod_data);
        }
        if (mrp->mr_type == RegionType_PhyAddr ||
            mrp->mr_type == RegionType_PlatformData) {
            ACPI_DEBUG("Region %d: %" PRIxGENPADDR " - %" PRIxGENPADDR " %s\n",
                       i, mrp->mr_base,
                       mrp->mr_base + (((size_t)1) << mrp->mr_bits),
                       mrp->mr_type == RegionType_PhyAddr ?
                       "physical address" : "platform data");

            err = cap_retype(devframe, phys_cap, ObjType_DevFrame, mrp->mr_bits);
            if (err_no(err) == SYS_ERR_REVOKE_FIRST) {
                printf("cannot retype region %d: need to revoke first; ignoring it\n", i);
            } else {
                assert(err_is_ok(err));
                err = mm_add(&pci_mm_physaddr, devframe, mrp->mr_bits,
                             mrp->mr_base);
                if (err_is_fail(err)) {
                    USER_PANIC_ERR(err, "adding region %d FAILED\n", i);
                }
            }

            phys_cap.slot++;
            devframe.slot++;
        }
    }

    return SYS_ERR_OK;
}