void minios_do_halt(int reason) { minios_printk("minios: halting, reason=%d\n", reason); for( ;; ) { struct sched_shutdown sched_shutdown = { .reason = (reason == MINIOS_HALT_POWEROFF) ? SHUTDOWN_poweroff : SHUTDOWN_crash }; HYPERVISOR_sched_op(SCHEDOP_shutdown, &sched_shutdown); } } /* * do_exit: This is called whenever an IRET fails in entry.S. * This will generally be because an application has got itself into * a really bad state (probably a bad CS or SS). It must be killed. * Of course, minimal OS doesn't have applications :-) */ void minios_do_exit(void) { minios_printk("Do_exit called!\n"); stack_walk(); for( ;; ) { struct sched_shutdown sched_shutdown = { .reason = SHUTDOWN_crash }; HYPERVISOR_sched_op(SCHEDOP_shutdown, &sched_shutdown); } }
void unbind_all_ports(void) { int i; int cpu = 0; shared_info_t *s = HYPERVISOR_shared_info; vcpu_info_t *vcpu_info = &s->vcpu_info[cpu]; int rc; for ( i = 0; i < NR_EVS; i++ ) { if ( i == start_info.console.domU.evtchn || i == start_info.store_evtchn) continue; if ( test_and_clear_bit(i, bound_ports) ) { struct evtchn_close close; minios_printk("port %d still bound!\n", i); minios_mask_evtchn(i); close.port = i; rc = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close); if ( rc ) minios_printk("WARN: close_port %s failed rc=%d. ignored\n", i, rc); minios_clear_evtchn(i); } } vcpu_info->evtchn_upcall_pending = 0; vcpu_info->evtchn_pending_sel = 0; }
/* Bring up the console driver and mark the console as usable. */
void init_console(void)
{
    minios_printk("Initialising console ... ");
    xencons_ring_init();
    console_initialised = 1;
    /* This is also required to notify the daemon */
    minios_printk("done.\n");
}
void blkfront_shutdown(struct blkfront_dev *dev) { char* err = NULL; XenbusState state; char path[strlen(dev->backend) + 1 + 5 + 1]; char nodename[strlen(dev->nodename) + 1 + 5 + 1]; blkfront_sync(dev); minios_printk("blkfront detached: node=%s\n", dev->nodename); snprintf(path, sizeof(path), "%s/state", dev->backend); snprintf(nodename, sizeof(nodename), "%s/state", dev->nodename); if ((err = xenbus_switch_state(XBT_NIL, nodename, XenbusStateClosing)) != NULL) { minios_printk("shutdown_blkfront: error changing state to %d: %s\n", XenbusStateClosing, err); goto close; } state = xenbus_read_integer(path); while (err == NULL && state < XenbusStateClosing) err = xenbus_wait_for_state_change(path, &state, &dev->events); if (err) free(err); if ((err = xenbus_switch_state(XBT_NIL, nodename, XenbusStateClosed)) != NULL) { minios_printk("shutdown_blkfront: error changing state to %d: %s\n", XenbusStateClosed, err); goto close; } state = xenbus_read_integer(path); while (state < XenbusStateClosed) { err = xenbus_wait_for_state_change(path, &state, &dev->events); if (err) free(err); } if ((err = xenbus_switch_state(XBT_NIL, nodename, XenbusStateInitialising)) != NULL) { minios_printk("shutdown_blkfront: error changing state to %d: %s\n", XenbusStateInitialising, err); goto close; } err = NULL; state = xenbus_read_integer(path); while (err == NULL && (state < XenbusStateInitWait || state >= XenbusStateClosed)) err = xenbus_wait_for_state_change(path, &state, &dev->events); close: if (err) free(err); xenbus_unwatch_path_token(XBT_NIL, path, path); snprintf(path, sizeof(path), "%s/ring-ref", nodename); xenbus_rm(XBT_NIL, path); snprintf(path, sizeof(path), "%s/event-channel", nodename); xenbus_rm(XBT_NIL, path); if (!err) free_blkfront(dev); }
/*
 * Console input handler.  This minimal OS has no real input consumer, so
 * received bytes are simply echoed back to the console.
 * NOTE(review): writes a terminator at buf[len] — assumes the caller's
 * buffer has at least len+1 bytes of room; TODO confirm against the ring
 * consumer that calls this.
 */
void xencons_rx(char *buf, unsigned len, struct pt_regs *regs)
{
    if(len > 0) {
        /* Just repeat what's written */
        buf[len] = '\0';
        minios_printk("%s", buf);
        /* On carriage return, remind the user there is no input handler. */
        if(buf[len-1] == '\r')
            minios_printk("\nNo console input handler.\n");
    }
}
/*
 * INITIAL C ENTRY POINT.
 *
 * Boot sequence: early printf/core/arch/trap/scheduler setup, then event
 * delivery, memory management, time, console, grant tables and xenbus,
 * and finally hand over to the application main.  The ordering below is
 * deliberate — later subsystems depend on earlier ones.
 */
void _minios_start_kernel(start_info_t *si)
{
    /* Early bring-up: printf sink, core allocator, arch state, traps,
     * scheduler structures. */
    bmk_printf_init(minios_putc, NULL);
    bmk_core_init(STACK_SIZE_PAGE_ORDER, PAGE_SHIFT);
    arch_init(si);
    trap_init();
    bmk_sched_init();

    /* print out some useful information */
    minios_printk(" start_info: %p(VA)\n", si);
    minios_printk(" nr_pages: 0x%lx\n", si->nr_pages);
    minios_printk(" shared_inf: 0x%08lx(MA)\n", si->shared_info);
    minios_printk(" pt_base: %p(VA)\n", (void *)si->pt_base);
    minios_printk("nr_pt_frames: 0x%lx\n", si->nr_pt_frames);
    minios_printk(" mfn_list: %p(VA)\n", (void *)si->mfn_list);
    minios_printk(" mod_start: 0x%lx(VA)\n", si->mod_start);
    minios_printk(" mod_len: %lu\n", si->mod_len);
    minios_printk(" flags: 0x%x\n", (unsigned int)si->flags);
    minios_printk(" cmd_line: %s\n",
                  si->cmd_line ? (const char *)si->cmd_line : "NULL");

    /* Set up events. */
    init_events();

    /* ENABLE EVENT DELIVERY. This is disabled at start of day. */
    __sti();

    arch_print_info();

    setup_xen_features();

    /* Init memory management. */
    init_mm();

    /* Init time and timers. */
    init_time();

    /* Init the console driver. */
    init_console();

    /* Init grant tables */
    init_gnttab();

    /* Init XenBus */
    init_xenbus();

    /* Init scheduler.  Does not return on the boot path. */
    bmk_sched_startmain(_app_main, &start_info);

    bmk_platform_halt("unreachable");
}
/*
 * Set up the shared console ring advertised in start_info and bind its
 * event channel.  Returns the device handle, NULL on bind failure, or
 * 0 (NULL) when no console event channel was provided.
 */
struct consfront_dev *xencons_ring_init(void)
{
    int err;
    struct consfront_dev *dev;

    /* No console channel from the toolstack: nothing to initialise. */
    if (!start_info.console.domU.evtchn)
        return 0;

    dev = bmk_memcalloc(1, sizeof(struct consfront_dev));
    dev->nodename = "device/console";
    dev->dom = 0;
    dev->backend = 0;
    dev->ring_ref = 0;

    dev->evtchn = start_info.console.domU.evtchn;
    /* Map the console page's machine frame (from start_info) to a VA. */
    dev->ring = (struct xencons_interface *)
        mfn_to_virt(start_info.console.domU.mfn);

    /* NOTE(review): minios_bind_evtchn appears to just return the port,
     * so err <= 0 presumably only triggers for port 0 — confirm intent. */
    err = minios_bind_evtchn(dev->evtchn, console_handle_input, dev);
    if (err <= 0) {
        minios_printk("XEN console request chn bind failed %i\n", err);
        bmk_memfree(dev);
        return NULL;
    }
    minios_unmask_evtchn(dev->evtchn);

    /* In case we have in-flight data after save/restore... */
    notify_daemon(dev);

    return dev;
}
void bmk_platform_halt(const char *panicstring) { if (panicstring) minios_printk("PANIC: %s\n", panicstring); minios_stop_kernel(); minios_do_halt(MINIOS_HALT_POWEROFF); }
void xenbus_wait_for_watch(struct xenbus_event_queue *queue) { char **ret; if (!queue) queue = &xenbus_default_watch_queue; ret = xenbus_wait_for_watch_return(queue); if (ret) bmk_memfree(ret, BMK_MEMWHO_WIREDBMK); else minios_printk("unexpected path returned by watch\n"); }
/*
 * Tear down the binding for 'port': restore the default handler, mask and
 * ack the channel, and ask the hypervisor to close it.  Close failures
 * are logged and ignored.
 */
void minios_unbind_evtchn(evtchn_port_t port )
{
    struct evtchn_close close;
    int rc;

    if ( ev_actions[port].handler == default_handler )
        minios_printk("WARN: No handler for port %d when unbinding\n", port);
    minios_mask_evtchn(port);
    minios_clear_evtchn(port);

    /* Reset the handler before clearing its data, so a racing event sees
     * the default handler rather than stale data. */
    ev_actions[port].handler = default_handler;
    wmb();
    ev_actions[port].data = NULL;
    clear_bit(port, bound_ports);

    close.port = port;
    rc = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
    if ( rc )
        /* BUGFIX: 'port' is an integer; the format used %s, which is
         * undefined behaviour — changed to %d. */
        minios_printk("WARN: close_port %d failed rc=%d. ignored\n",
                      port, rc);
}
/*
 * Map the hypervisor's shared info page (machine address 'pa') at the
 * fixed virtual address _minios_shared_info.  Fatal on failure.
 * NOTE(review): the "| 7" sets the low PTE flag bits (presumably
 * present|writable|user on x86) — confirm against the arch page headers.
 */
static shared_info_t *map_shared_info(unsigned long pa)
{
    int rc;

    if ( (rc = HYPERVISOR_update_va_mapping(
              (unsigned long)_minios_shared_info, __pte(pa | 7), UVMF_INVLPG)) )
    {
        minios_printk("Failed to map shared_info!! rc=%d\n", rc);
        minios_do_exit();
    }
    return (shared_info_t *)_minios_shared_info;
}
/*
 * Register 'handler' (with opaque 'data') for event channel 'port' and
 * mark the port bound.  An existing non-default handler is replaced with
 * a warning.  Returns the port.
 */
evtchn_port_t minios_bind_evtchn(evtchn_port_t port, evtchn_handler_t handler,
                                 void *data)
{
    if ( ev_actions[port].handler != default_handler )
        minios_printk("WARN: Handler for port %d already registered, replacing\n",
                      port);

    ev_actions[port].data = data;
    /* Publish data before the handler becomes visible to do_event(). */
    wmb();
    ev_actions[port].handler = handler;
    set_bit(port, bound_ports);

    return port;
}
/*
 * Bind virtual IRQ 'virq' to a fresh event channel on the current VCPU
 * and install 'handler' for it.  Returns the port, or -1 on failure.
 */
evtchn_port_t minios_bind_virq(uint32_t virq, evtchn_handler_t handler,
                               void *data)
{
    evtchn_bind_virq_t op;
    int err;

    /* Try to bind the virq to a port */
    op.virq = virq;
    op.vcpu = smp_processor_id();

    err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, &op);
    if (err != 0) {
        minios_printk("Failed to bind virtual IRQ %d with rc=%d\n", virq, err);
        return -1;
    }
    minios_bind_evtchn(op.port, handler, data);
    return op.port;
}
/*
 * Bind physical IRQ 'pirq' to a fresh event channel and install 'handler'
 * for it.  'will_share' selects shared binding.  Returns the port, or -1
 * on failure.
 */
evtchn_port_t minios_bind_pirq(uint32_t pirq, int will_share,
                               evtchn_handler_t handler, void *data)
{
    evtchn_bind_pirq_t op;
    int err;

    /* Try to bind the pirq to a port */
    op.pirq = pirq;
    op.flags = will_share ? BIND_PIRQ__WILL_SHARE : 0;

    err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &op);
    if (err != 0) {
        minios_printk("Failed to bind physical IRQ %d with rc=%d\n", pirq, err);
        return -1;
    }
    minios_bind_evtchn(op.port, handler, data);
    return op.port;
}
/*
 * Demux events to different handlers.
 * Acks the channel first so a re-raised event is not lost, then dispatches
 * to the registered handler.  Always returns 1.
 */
int do_event(evtchn_port_t port, struct pt_regs *regs)
{
    ev_action_t *act;

    minios_clear_evtchn(port);

    /* Guard the ev_actions[] lookup below. */
    if (port >= NR_EVS) {
        minios_printk("WARN: do_event(): Port number too large: %d\n", port);
        return 1;
    }

    act = &ev_actions[port];
    act->count++;

    /* call the handler */
    act->handler(port, regs, act->data);

    return 1;
}
/* * Make pt_pfn a new 'level' page table frame and hook it into the page * table at offset in previous level MFN (pref_l_mfn). pt_pfn is a guest * PFN. */ static void new_pt_frame(unsigned long *pt_pfn, unsigned long prev_l_mfn, unsigned long offset, unsigned long level) { pgentry_t *tab = (pgentry_t *)start_info.pt_base; unsigned long pt_page = (unsigned long)pfn_to_virt(*pt_pfn); pgentry_t prot_e, prot_t; mmu_update_t mmu_updates[1]; int rc; prot_e = prot_t = 0; DEBUG("Allocating new L%d pt frame for pfn=%lx, " "prev_l_mfn=%lx, offset=%lx", level, *pt_pfn, prev_l_mfn, offset); /* We need to clear the page, otherwise we might fail to map it as a page table page */ bmk_memset((void*) pt_page, 0, PAGE_SIZE); switch ( level ) { case L1_FRAME: prot_e = L1_PROT; prot_t = L2_PROT; break; case L2_FRAME: prot_e = L2_PROT; prot_t = L3_PROT; break; #if defined(__x86_64__) case L3_FRAME: prot_e = L3_PROT; prot_t = L4_PROT; break; #endif default: minios_printk("new_pt_frame() called with invalid level number %d\n", level); minios_do_exit(); break; } /* Make PFN a page table page */ #if defined(__x86_64__) tab = pte_to_virt(tab[l4_table_offset(pt_page)]); #endif tab = pte_to_virt(tab[l3_table_offset(pt_page)]); mmu_updates[0].ptr = (tab[l2_table_offset(pt_page)] & PAGE_MASK) + sizeof(pgentry_t) * l1_table_offset(pt_page); mmu_updates[0].val = (pgentry_t)pfn_to_mfn(*pt_pfn) << PAGE_SHIFT | (prot_e & ~_PAGE_RW); if ( (rc = HYPERVISOR_mmu_update(mmu_updates, 1, NULL, DOMID_SELF)) < 0 ) { minios_printk("ERROR: PTE for new page table page could not be updated\n"); minios_printk(" mmu_update failed with rc=%d\n", rc); minios_do_exit(); } /* Hook the new page table page into the hierarchy */ mmu_updates[0].ptr = ((pgentry_t)prev_l_mfn << PAGE_SHIFT) + sizeof(pgentry_t) * offset; mmu_updates[0].val = (pgentry_t)pfn_to_mfn(*pt_pfn) << PAGE_SHIFT | prot_t; if ( (rc = HYPERVISOR_mmu_update(mmu_updates, 1, NULL, DOMID_SELF)) < 0 ) { minios_printk("ERROR: mmu_update failed with rc=%d\n", 
rc); minios_do_exit(); } *pt_pfn += 1; }
/* Print the kernel stack's address range. */
void arch_print_info(void)
{
    void *stack_lo = _minios_stack;
    void *stack_hi = _minios_stack + sizeof(_minios_stack);

    minios_printk(" stack: %p-%p\n", stack_lo, stack_hi);
}
struct blkfront_dev *blkfront_init(char *_nodename, struct blkfront_info *info) { xenbus_transaction_t xbt; char* err; char* message=NULL; struct blkif_sring *s; int retry=0; char* msg = NULL; char* c; char* nodename = _nodename ? _nodename : "device/vbd/768"; struct blkfront_dev *dev; char path[strlen(nodename) + 1 + 10 + 1]; dev = malloc(sizeof(*dev)); memset(dev, 0, sizeof(*dev)); dev->nodename = strdup(nodename); snprintf(path, sizeof(path), "%s/backend-id", nodename); dev->dom = xenbus_read_integer(path); minios_evtchn_alloc_unbound(dev->dom, blkfront_handler, dev, &dev->evtchn); s = (struct blkif_sring*) minios_alloc_page(); memset(s,0,PAGE_SIZE); SHARED_RING_INIT(s); FRONT_RING_INIT(&dev->ring, s, PAGE_SIZE); dev->ring_ref = gnttab_grant_access(dev->dom,virt_to_mfn(s),0); xenbus_event_queue_init(&dev->events); again: err = xenbus_transaction_start(&xbt); if (err) { minios_printk("starting transaction\n"); free(err); } err = xenbus_printf(xbt, nodename, "ring-ref","%u", dev->ring_ref); if (err) { message = "writing ring-ref"; goto abort_transaction; } err = xenbus_printf(xbt, nodename, "event-channel", "%u", dev->evtchn); if (err) { message = "writing event-channel"; goto abort_transaction; } err = xenbus_printf(xbt, nodename, "protocol", "%s", XEN_IO_PROTO_ABI_NATIVE); if (err) { message = "writing protocol"; goto abort_transaction; } snprintf(path, sizeof(path), "%s/state", nodename); err = xenbus_switch_state(xbt, path, XenbusStateConnected); if (err) { message = "switching state"; goto abort_transaction; } err = xenbus_transaction_end(xbt, 0, &retry); if (err) free(err); if (retry) { goto again; minios_printk("completing transaction\n"); } goto done; abort_transaction: free(err); err = xenbus_transaction_end(xbt, 1, &retry); minios_printk("Abort transaction %s\n", message); goto error; done: snprintf(path, sizeof(path), "%s/backend", nodename); msg = xenbus_read(XBT_NIL, path, &dev->backend); if (msg) { minios_printk("Error %s when reading the backend 
path %s\n", msg, path); goto error; } minios_printk("blkfront: node=%s backend=%s\n", nodename, dev->backend); dev->handle = strtoul(strrchr(nodename, '/')+1, NULL, 10); { XenbusState state; char path[strlen(dev->backend) + 1 + 19 + 1]; snprintf(path, sizeof(path), "%s/mode", dev->backend); msg = xenbus_read(XBT_NIL, path, &c); if (msg) { minios_printk("Error %s when reading the mode\n", msg); goto error; } if (*c == 'w') dev->info.mode = O_RDWR; else dev->info.mode = O_RDONLY; free(c); snprintf(path, sizeof(path), "%s/state", dev->backend); xenbus_watch_path_token(XBT_NIL, path, path, &dev->events); msg = NULL; state = xenbus_read_integer(path); while (msg == NULL && state < XenbusStateConnected) msg = xenbus_wait_for_state_change(path, &state, &dev->events); if (msg != NULL || state != XenbusStateConnected) { minios_printk("backend not available, state=%d\n", state); xenbus_unwatch_path_token(XBT_NIL, path, path); goto error; } snprintf(path, sizeof(path), "%s/info", dev->backend); dev->info.info = xenbus_read_integer(path); snprintf(path, sizeof(path), "%s/sectors", dev->backend); // FIXME: read_integer returns an int, so disk size limited to 1TB for now dev->info.sectors = xenbus_read_integer(path); snprintf(path, sizeof(path), "%s/sector-size", dev->backend); dev->info.sector_size = xenbus_read_integer(path); snprintf(path, sizeof(path), "%s/feature-barrier", dev->backend); dev->info.barrier = xenbus_read_integer(path); snprintf(path, sizeof(path), "%s/feature-flush-cache", dev->backend); dev->info.flush = xenbus_read_integer(path); *info = dev->info; } minios_unmask_evtchn(dev->evtchn); minios_printk("blkfront: %u sectors\n", dev->info.sectors); return dev; error: free(msg); free(err); free_blkfront(dev); return NULL; }
/*
 * Drain completed responses from the block ring and invoke the per-request
 * callbacks.  Returns the number of responses consumed on this call.
 * Reentrancy: a callback may call back into this function; the rsp_cons
 * comparison after the callback detects that and bails out of the loop.
 */
int blkfront_aio_poll(struct blkfront_dev *dev)
{
    RING_IDX rp, cons;
    struct blkif_response *rsp;
    int more;
    int nr_consumed;

moretodo:
    rp = dev->ring.sring->rsp_prod;
    rmb(); /* Ensure we see queued responses up to 'rp'. */
    cons = dev->ring.rsp_cons;

    nr_consumed = 0;
    while ((cons != rp))
    {
        struct blkfront_aiocb *aiocbp;
        int status;

        rsp = RING_GET_RESPONSE(&dev->ring, cons);
        nr_consumed++;

        /* The request id round-trips the aiocb pointer through the ring. */
        aiocbp = (void*) (uintptr_t) rsp->id;
        status = rsp->status;

        if (status != BLKIF_RSP_OKAY)
            minios_printk("block error %d for op %d\n", status, rsp->operation);

        switch (rsp->operation) {
        case BLKIF_OP_READ:
        case BLKIF_OP_WRITE:
        {
            int j;

            /* Release the grant references used for the data pages. */
            for (j = 0; j < aiocbp->n; j++)
                gnttab_end_access(aiocbp->gref[j]);

            break;
        }

        case BLKIF_OP_WRITE_BARRIER:
        case BLKIF_OP_FLUSH_DISKCACHE:
            break;

        default:
            minios_printk("unrecognized block operation %d response\n",
                          rsp->operation);
        }

        /* Advance rsp_cons before the callback so a reentrant poll sees
         * this response as consumed. */
        dev->ring.rsp_cons = ++cons;
        /* Nota: callback frees aiocbp itself */
        if (aiocbp && aiocbp->aio_cb)
            aiocbp->aio_cb(aiocbp, status ? -EIO : 0);
        if (dev->ring.rsp_cons != cons)
            /* We reentered, we must not continue here */
            break;
    }

    RING_FINAL_CHECK_FOR_RESPONSES(&dev->ring, more);
    if (more) goto moretodo;

    return nr_consumed;
}
/*
 * Xenbus reader thread.  Sleeps until the xenstore response ring has data,
 * then drains it: watch events are matched against registered watches and
 * queued to the watch's event queue; ordinary replies are copied out and
 * queued to the requester identified by msg.req_id.
 * NOTE(review): the visible chunk ends before this function's closing
 * brace — the remainder lies outside this view.
 */
static void xenbus_thread_func(void *ign)
{
    struct xsd_sockmsg msg;
    unsigned prod = xenstore_buf->rsp_prod;

    for (;;)
    {
        /* Sleep until the backend has produced something new. */
        minios_wait_event(xb_waitq, prod != xenstore_buf->rsp_prod);
        while (1)
        {
            prod = xenstore_buf->rsp_prod;
            DEBUG("Rsp_cons %d, rsp_prod %d.\n", xenstore_buf->rsp_cons,
                  xenstore_buf->rsp_prod);
            /* Not even a full header available yet: kick the other end
             * and go back to sleep. */
            if (xenstore_buf->rsp_prod - xenstore_buf->rsp_cons < sizeof(msg))
            {
                minios_notify_remote_via_evtchn(start_info.store_evtchn);
                break;
            }

            rmb(); /* read the header only after seeing the producer index */
            memcpy_from_ring(xenstore_buf->rsp, &msg,
                             MASK_XENSTORE_IDX(xenstore_buf->rsp_cons),
                             sizeof(msg));
            DEBUG("Msg len %d, %d avail, id %d.\n", msg.len + sizeof(msg),
                  xenstore_buf->rsp_prod - xenstore_buf->rsp_cons,
                  msg.req_id);
            /* Header read but the body isn't fully in the ring yet. */
            if (xenstore_buf->rsp_prod - xenstore_buf->rsp_cons <
                sizeof(msg) + msg.len)
            {
                minios_notify_remote_via_evtchn(start_info.store_evtchn);
                break;
            }

            DEBUG("Message is good.\n");

            if(msg.type == XS_WATCH_EVENT)
            {
                /* Event payload is "path\0token\0", stored after the
                 * struct in one allocation. */
                struct xenbus_event *event =
                    bmk_xmalloc_bmk(sizeof(*event) + msg.len);
                struct xenbus_event_queue *events = NULL;
                char *data = (char*)event + sizeof(*event);
                struct xenbus_watch *watch;

                memcpy_from_ring(xenstore_buf->rsp, data,
                                 MASK_XENSTORE_IDX(xenstore_buf->rsp_cons
                                                   + sizeof(msg)),
                                 msg.len);

                event->path = data;
                event->token = event->path + bmk_strlen(event->path) + 1;

                /* Consume the ring bytes before dispatching. */
                mb();
                xenstore_buf->rsp_cons += msg.len + sizeof(msg);

                spin_lock(&xenbus_req_lock);

                /* Find the watch this token belongs to. */
                MINIOS_LIST_FOREACH(watch, &watches, entry)
                    if (!bmk_strcmp(watch->token, event->token))
                    {
                        event->watch = watch;
                        events = watch->events;
                        break;
                    }

                if (events)
                {
                    queue_event(events, event);
                }
                else
                {
                    minios_printk("unexpected watch token %s\n", event->token);
                    bmk_memfree(event, BMK_MEMWHO_WIREDBMK);
                }

                spin_unlock(&xenbus_req_lock);
            }
            else
            {
                /* Ordinary reply: copy header+body out and hand it to the
                 * waiting requester keyed by req_id. */
                req_info[msg.req_id].for_queue->reply =
                    bmk_xmalloc_bmk(sizeof(msg) + msg.len);
                memcpy_from_ring(xenstore_buf->rsp,
                                 req_info[msg.req_id].for_queue->reply,
                                 MASK_XENSTORE_IDX(xenstore_buf->rsp_cons),
                                 msg.len + sizeof(msg));
                mb();
                xenstore_buf->rsp_cons += msg.len + sizeof(msg);
                spin_lock(&xenbus_req_lock);
                queue_event(req_info[msg.req_id].reply_queue,
                            req_info[msg.req_id].for_queue);
                spin_unlock(&xenbus_req_lock);
            }

            /* Tell the backend we made room in the ring. */
            wmb();
            minios_notify_remote_via_evtchn(start_info.store_evtchn);
        }
    }