// Get the bootinfo and map it in. static errval_t map_bootinfo(struct bootinfo **bootinfo) { errval_t err, msgerr; struct monitor_blocking_rpc_client *cl = get_monitor_blocking_rpc_client(); assert(cl != NULL); struct capref bootinfo_frame; size_t bootinfo_size; msgerr = cl->vtbl.get_bootinfo(cl, &err, &bootinfo_frame, &bootinfo_size); if (err_is_fail(msgerr)) { err = msgerr; } if (err_is_fail(err)) { USER_PANIC_ERR(err, "failed in get_bootinfo"); return err; } err = vspace_map_one_frame((void**)bootinfo, bootinfo_size, bootinfo_frame, NULL, NULL); assert(err_is_ok(err)); return err; }
/* Route the given IRQ to endpoint 'ep' via the monitor (ARM variant). */
static errval_t arm_allocirq(struct capref ep, uint32_t irq)
{
    struct monitor_blocking_rpc_client *mon = get_monitor_blocking_rpc_client();

    errval_t remote_err;
    errval_t transport_err = mon->vtbl.arm_irq_handle(mon, ep, irq,
                                                      &remote_err);

    // A transport failure takes precedence; otherwise report the
    // monitor's own result (which is SYS_ERR_OK on success).
    return err_is_fail(transport_err) ? transport_err : remote_err;
}
/* Allocate an interrupt vector from the local monitor and bind it to
 * endpoint 'ep'. On success the vector is stored in *retvector. */
static errval_t allocirq(struct capref ep, uint32_t *retvector)
{
    struct monitor_blocking_rpc_client *mon = get_monitor_blocking_rpc_client();

    uint32_t vector = 0;
    errval_t remote_err;

    errval_t err = mon->vtbl.irq_handle(mon, ep, &remote_err, &vector);
    if (err_is_fail(err)) {
        // Transport failure: the reply values are not valid.
        return err;
    }
    if (err_is_fail(remote_err)) {
        // The monitor rejected the request.
        return remote_err;
    }

    *retvector = vector;
    return remote_err;   // SYS_ERR_OK here
}
// Boot an x86-32 application processor: copy the real-mode trampoline to
// low memory, patch its entry pointer and global variable, then send the
// INIT/SIPI sequence and wait for the new core to signal start-up.
// Returns 0 on success, -1 if the core never came up.
int start_aps_x86_32_start(uint8_t core_id, genvaddr_t entry)
{
    DEBUG("%s:%d: start_aps_x86_32_start\n", __FILE__, __LINE__);

    // Copy the startup code to the real-mode address
    uint8_t *real_src = (uint8_t *) &x86_32_start_ap;
    uint8_t *real_end = (uint8_t *) &x86_32_start_ap_end;

    struct capref bootcap;
    struct acpi_rpc_client* acl = get_acpi_rpc_client();
    errval_t error_code;
    errval_t err = acl->vtbl.mm_realloc_range_proxy(acl, 16, 0x0,
                                                    &bootcap, &error_code);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "mm_alloc_range_proxy failed.");
    }
    if (err_is_fail(error_code)) {
        USER_PANIC_ERR(error_code, "mm_alloc_range_proxy return failed.");
    }

    void* real_base;
    err = vspace_map_one_frame(&real_base, 1<<16, bootcap, NULL, NULL);
    // BUGFIX: the mapping result was previously ignored, so a failed
    // mapping left 'real_base' uninitialized and it was dereferenced below.
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "vspace_map_one_frame failed.");
    }
    uint8_t* real_dest = (uint8_t*)real_base + X86_32_REAL_MODE_LINEAR_OFFSET;

    memcpy(real_dest, real_src, real_end - real_src);

    /* Pointer to the entry point called from init_ap.S */
    volatile uint64_t *absolute_entry_ptr = (volatile uint64_t *)
        (((lpaddr_t) &x86_32_init_ap_absolute_entry -
          (lpaddr_t) &x86_32_start_ap) + real_dest);
    // copy the address of the function start (in boot.S) to the long-mode
    // assembler code to be able to perform an absolute jump
    *absolute_entry_ptr = entry;

    // pointer to the shared global variable amongst all kernels
    volatile uint64_t *ap_global = (volatile uint64_t *)
        (((lpaddr_t) &x86_32_init_ap_global -
          (lpaddr_t) &x86_32_start_ap) + real_dest);

    genpaddr_t global;
    struct monitor_blocking_rpc_client *mc = get_monitor_blocking_rpc_client();
    err = mc->vtbl.get_global_paddr(mc, &global);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "invoke spawn core");
        return err_push(err, MON_ERR_SPAWN_CORE);
    }
    *ap_global = (uint64_t)(genpaddr_t)global;

    // pointer to the pseudo-lock used to detect boot up of new core
    volatile uint32_t *ap_wait = (volatile uint32_t *)
        ((lpaddr_t) &x86_32_init_ap_wait -
         ((lpaddr_t) &x86_32_start_ap) + real_dest);

    // Pointer to the lock variable in the realmode code
    volatile uint8_t *ap_lock = (volatile uint8_t *)
        ((lpaddr_t) &x86_32_init_ap_lock -
         ((lpaddr_t) &x86_32_start_ap) + real_dest);

    *ap_wait = AP_STARTING_UP;
    end = bench_tsc();

    // INIT followed by SIPI: the standard MP start-up sequence.
    err = invoke_send_init_ipi(ipi_cap, core_id);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "invoke send init ipi");
        return err;
    }

    err = invoke_send_start_ipi(ipi_cap, core_id, entry);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "invoke sipi");
        return err;
    }

    // give the new core a bit of time to start up and set the lock
    for (uint64_t i = 0; i < STARTUP_TIMEOUT; i++) {
        if (*ap_lock != 0) {
            break;
        }
    }

    // If the lock is set, the core has been started, otherwise assume that
    // a core with this APIC ID doesn't exist.
    if (*ap_lock != 0) {
        // Spin until the trampoline reports the core fully started.
        while (*ap_wait != AP_STARTED);
        trace_event(TRACE_SUBSYS_KERNEL,
                    TRACE_EVENT_KERNEL_CORE_START_REQUEST_ACK, core_id);
        *ap_lock = 0;
        return 0;
    }

    assert(!"badness");
    return -1;
}
static errval_t spawn(char *path, char *const argv[], char *argbuf, size_t argbytes, char *const envp[], struct capref inheritcn_cap, struct capref argcn_cap, domainid_t *domainid) { errval_t err, msgerr; /* read file into memory */ vfs_handle_t fh; err = vfs_open(path, &fh); if (err_is_fail(err)) { return err_push(err, SPAWN_ERR_LOAD); } struct vfs_fileinfo info; err = vfs_stat(fh, &info); if (err_is_fail(err)) { vfs_close(fh); return err_push(err, SPAWN_ERR_LOAD); } assert(info.type == VFS_FILE); uint8_t *image = malloc(info.size); if (image == NULL) { vfs_close(fh); return err_push(err, SPAWN_ERR_LOAD); } size_t pos = 0, readlen; do { err = vfs_read(fh, &image[pos], info.size - pos, &readlen); if (err_is_fail(err)) { vfs_close(fh); free(image); return err_push(err, SPAWN_ERR_LOAD); } else if (readlen == 0) { vfs_close(fh); free(image); return SPAWN_ERR_LOAD; // XXX } else { pos += readlen; } } while (err_is_ok(err) && readlen > 0 && pos < info.size); err = vfs_close(fh); if (err_is_fail(err)) { DEBUG_ERR(err, "failed to close file %s", path); } // find short name (last part of path) char *name = strrchr(path, VFS_PATH_SEP); if (name == NULL) { name = path; } else { name++; } /* spawn the image */ struct spawninfo si; err = spawn_load_image(&si, (lvaddr_t)image, info.size, CURRENT_CPU_TYPE, name, my_core_id, argv, envp, inheritcn_cap, argcn_cap); if (err_is_fail(err)) { free(image); return err; } free(image); /* request connection from monitor */ struct monitor_blocking_rpc_client *mrpc = get_monitor_blocking_rpc_client(); struct capref monep; err = mrpc->vtbl.alloc_monitor_ep(mrpc, &msgerr, &monep); if (err_is_fail(err)) { return err_push(err, SPAWN_ERR_MONITOR_CLIENT); } else if (err_is_fail(msgerr)) { return msgerr; } /* copy connection into the new domain */ struct capref destep = { .cnode = si.rootcn, .slot = ROOTCN_SLOT_MONITOREP, }; err = cap_copy(destep, monep); if (err_is_fail(err)) { spawn_free(&si); cap_destroy(monep); return err_push(err, 
SPAWN_ERR_MONITOR_CLIENT); } err = cap_destroy(monep); if (err_is_fail(err)) { return err_push(err, SPAWN_ERR_MONITOR_CLIENT); } debug_printf("spawning %s on core %u\n", path, my_core_id); /* give the perfmon capability */ struct capref dest, src; dest.cnode = si.taskcn; dest.slot = TASKCN_SLOT_PERF_MON; src.cnode = cnode_task; src.slot = TASKCN_SLOT_PERF_MON; err = cap_copy(dest, src); if (err_is_fail(err)) { return err_push(err, INIT_ERR_COPY_PERF_MON); } /* run the domain */ err = spawn_run(&si); if (err_is_fail(err)) { spawn_free(&si); return err_push(err, SPAWN_ERR_RUN); } // Allocate domain id struct ps_entry *pe = malloc(sizeof(struct ps_entry)); assert(pe != NULL); memset(pe, 0, sizeof(struct ps_entry)); memcpy(pe->argv, argv, MAX_CMDLINE_ARGS*sizeof(*argv)); pe->argbuf = argbuf; pe->argbytes = argbytes; /* * NB: It's important to keep a copy of the DCB *and* the root * CNode around. We need to revoke both (in the right order, see * kill_domain() below), so that we ensure no one else is * referring to the domain's CSpace anymore. Especially the loop * created by placing rootcn into its own address space becomes a * problem here. 
*/ err = slot_alloc(&pe->rootcn_cap); assert(err_is_ok(err)); err = cap_copy(pe->rootcn_cap, si.rootcn_cap); pe->rootcn = si.rootcn; assert(err_is_ok(err)); err = slot_alloc(&pe->dcb); assert(err_is_ok(err)); err = cap_copy(pe->dcb, si.dcb); assert(err_is_ok(err)); pe->status = PS_STATUS_RUNNING; err = ps_allocate(pe, domainid); if(err_is_fail(err)) { free(pe); } // Store in target dispatcher frame struct dispatcher_generic *dg = get_dispatcher_generic(si.handle); dg->domain_id = *domainid; /* cleanup */ err = spawn_free(&si); if (err_is_fail(err)) { return err_push(err, SPAWN_ERR_FREE); } return SYS_ERR_OK; } static void retry_use_local_memserv_response(void *a) { errval_t err; struct spawn_binding *b = (struct spawn_binding*)a; err = b->tx_vtbl.use_local_memserv_response(b, NOP_CONT); if (err_no(err) == FLOUNDER_ERR_TX_BUSY) { // try again err = b->register_send(b, get_default_waitset(), MKCONT(retry_use_local_memserv_response,a)); } if (err_is_fail(err)) { DEBUG_ERR(err, "error sending use_local_memserv reply\n"); } }
// One-time setup of this domain's allocators and capabilities:
// maps the bootinfo, initializes the devframe slot/memory allocators,
// fetches the I/O and physical-address capabilities from the monitor,
// and retypes each PhysAddr/PlatformData region into DevFrames managed
// by pci_mm_physaddr.
static errval_t init_allocators(void)
{
    errval_t err, msgerr;

    struct monitor_blocking_rpc_client *cl = get_monitor_blocking_rpc_client();
    assert(cl != NULL);

    // Get the bootinfo and map it in.
    struct capref bootinfo_frame;
    size_t bootinfo_size;
    struct bootinfo *bootinfo;
    msgerr = cl->vtbl.get_bootinfo(cl, &err, &bootinfo_frame, &bootinfo_size);
    if (err_is_fail(msgerr) || err_is_fail(err)) {
        // Panic on either a transport failure (msgerr) or a remote
        // failure (err); prefer reporting the transport error.
        USER_PANIC_ERR(err_is_fail(msgerr) ? msgerr : err,
                       "failed in get_bootinfo");
    }

    err = vspace_map_one_frame((void**)&bootinfo, bootinfo_size,
                               bootinfo_frame, NULL, NULL);
    assert(err_is_ok(err));

    /* Initialize the memory allocator to handle PhysAddr caps */
    // Static: must outlive this function, since pci_mm_physaddr keeps
    // using it for slot allocation.
    static struct range_slot_allocator devframes_allocator;
    err = range_slot_alloc_init(&devframes_allocator, PCI_CNODE_SLOTS, NULL);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_SLOT_ALLOC_INIT);
    }

    // Manage DevFrame caps covering the full 48-bit physical range.
    err = mm_init(&pci_mm_physaddr, ObjType_DevFrame, 0, 48,
                  /* This next parameter is important. It specifies the maximum
                   * amount that a cap may be "chunked" (i.e. broken up) at each
                   * level in the allocator. Setting it higher than 1 reduces the
                   * memory overhead of keeping all the intermediate caps around,
                   * but leads to problems if you chunk up a cap too small to be
                   * able to allocate a large subregion. This caused problems
                   * for me with a large framebuffer... -AB 20110810 */
                  1, /*was DEFAULT_CNODE_BITS,*/
                  slab_default_refill, slot_alloc_dynamic,
                  &devframes_allocator, false);
    if (err_is_fail(err)) {
        return err_push(err, MM_ERR_MM_INIT);
    }

    // Request I/O Cap
    struct capref requested_caps;
    errval_t error_code;
    err = cl->vtbl.get_io_cap(cl, &requested_caps, &error_code);
    assert(err_is_ok(err) && err_is_ok(error_code));
    // Copy into correct slot
    struct capref caps_io = {
        .cnode = cnode_task,
        .slot  = TASKCN_SLOT_IO
    };
    // NOTE(review): this cap_copy result is not checked — presumably it
    // cannot fail here, but worth confirming.
    err = cap_copy(caps_io, requested_caps);

    // XXX: The code below is confused about gen/l/paddrs.
    // Caps should be managed in genpaddr, while the bus mgmt must be in lpaddr.
    err = cl->vtbl.get_phyaddr_cap(cl, &requested_caps, &error_code);
    assert(err_is_ok(err) && err_is_ok(error_code));
    physical_caps = requested_caps;

    // Build the capref for the first physical address capability
    struct capref phys_cap;
    phys_cap.cnode = build_cnoderef(requested_caps, PHYSADDRCN_BITS);
    phys_cap.slot = 0;

    // Create a CNode to hold the retyped DevFrame caps (255-slot cnode).
    struct cnoderef devcnode;
    err = slot_alloc(&my_devframes_cnode);
    assert(err_is_ok(err));
    cslot_t slots;
    err = cnode_create(&my_devframes_cnode, &devcnode, 255, &slots);
    if (err_is_fail(err)) { USER_PANIC_ERR(err, "cnode create"); }
    struct capref devframe;
    devframe.cnode = devcnode;
    devframe.slot = 0;

    // Walk all bootinfo regions: record each one as an SKB fact, and
    // retype PhysAddr/PlatformData regions into DevFrames added to
    // pci_mm_physaddr. phys_cap.slot tracks the source cap per region.
    for (int i = 0; i < bootinfo->regions_length; i++) {
        struct mem_region *mrp = &bootinfo->regions[i];
        if (mrp->mr_type == RegionType_Module) {
            skb_add_fact("memory_region(16'%" PRIxGENPADDR ",%u,%zu,%u,%tu).",
                         mrp->mr_base, 0, mrp->mrmod_size, mrp->mr_type,
                         mrp->mrmod_data);
        }
        else {
            skb_add_fact("memory_region(16'%" PRIxGENPADDR ",%u,%zu,%u,%tu).",
                         mrp->mr_base, mrp->mr_bits,
                         ((size_t)1) << mrp->mr_bits, mrp->mr_type,
                         mrp->mrmod_data);
        }
        if (mrp->mr_type == RegionType_PhyAddr ||
            mrp->mr_type == RegionType_PlatformData) {
            ACPI_DEBUG("Region %d: %"PRIxGENPADDR" - %"PRIxGENPADDR" %s\n",
                       i, mrp->mr_base,
                       mrp->mr_base + (((size_t)1)<<mrp->mr_bits),
                       mrp->mr_type == RegionType_PhyAddr ?
                       "physical address" : "platform data");
            err = cap_retype(devframe, phys_cap, ObjType_DevFrame,
                             mrp->mr_bits);
            if (err_no(err) == SYS_ERR_REVOKE_FIRST) {
                // Someone already holds descendants of this cap; skip it.
                printf("cannot retype region %d: need to revoke first; "
                       "ignoring it\n", i);
            }
            else {
                assert(err_is_ok(err));
                err = mm_add(&pci_mm_physaddr, devframe, mrp->mr_bits,
                             mrp->mr_base);
                if (err_is_fail(err)) {
                    USER_PANIC_ERR(err, "adding region %d FAILED\n", i);
                }
            }
            phys_cap.slot++;
            devframe.slot++;
        }
    }

    return SYS_ERR_OK;
}