/*
 * pmf resume hook for the Xen console: rebind the console interrupt
 * source and re-enable its event channel.
 */
static bool
xencons_resume(device_t dev, const pmf_qual_t *qual)
{
	int channel = -1;

	if (!xendomain_is_dom0()) {
		/* domU: rebind the para-virtual console event channel. */
		channel = xen_start_info.console_evtchn;
		event_set_handler(channel, xencons_handler,
		    xencons_console_device, IPL_TTY, "xencons");
	} else if (cold) {
		/* dom0 console resume is required only during first start-up */
		channel = bind_virq_to_evtch(VIRQ_CONSOLE);
		event_set_handler(channel, xencons_intr,
		    xencons_console_device, IPL_TTY, "xencons");
	}

	if (channel == -1)
		return true;

	aprint_verbose_dev(dev, "using event channel %d\n", channel);
	hypervisor_enable_event(channel);
	return true;
}
/*
 * Polled console output (cnputc hook).  dom0 writes through the
 * privileged console-I/O hypercall; domU puts the character into the
 * shared console ring and notifies the backend via the console event
 * channel.
 */
void
xenconscn_putc(dev_t dev, int c)
{
	int s = spltty();
	XENCONS_RING_IDX cons, prod;

	if (xendomain_is_dom0()) {
		u_char buf[1];

		buf[0] = c;
		(void)HYPERVISOR_console_io(CONSOLEIO_write, 1, buf);
	} else {
		XENPRINTK(("xenconscn_putc(%c)\n", c));

		/* Busy-wait while the output ring is full. */
		cons = xencons_interface->out_cons;
		prod = xencons_interface->out_prod;
		xen_rmb();
		while (prod == cons + sizeof(xencons_interface->out)) {
			cons = xencons_interface->out_cons;
			prod = xencons_interface->out_prod;
			xen_rmb();
		}

		xencons_interface->out[MASK_XENCONS_IDX(
		    xencons_interface->out_prod, xencons_interface->out)] = c;
		/*
		 * Write barrier: the stored character must be visible to
		 * the backend before the producer index advances.  The
		 * original used xen_rmb() here, which does not order the
		 * two stores.
		 */
		xen_wmb();
		xencons_interface->out_prod++;
		xen_wmb();
		hypervisor_notify_via_evtchn(
		    xen_start_info.console.domU.evtchn);
	}
	/*
	 * Restore the IPL on both paths.  The original called splx() only
	 * in the domU branch, leaving dom0 permanently raised to IPL_TTY
	 * after every character.
	 */
	splx(s);
}
static int wake_waiting(void *arg) { if (__predict_false(xenstored_ready == 0 && xendomain_is_dom0())) { xenstored_ready = 1; wakeup(&xenstored_ready); } wakeup(&xenstore_interface); return 1; }
/*
 * Polled console input (cngetc hook).  dom0 reads through the
 * privileged console-I/O hypercall; domU pulls a character from the
 * shared input ring, spinning (with yields to the hypervisor) until
 * one is available.  Returns the character read, or 0 when the driver
 * is not in polling mode.
 */
int
xenconscn_getc(dev_t dev)
{
	char c;
	int s = spltty();
	XENCONS_RING_IDX cons, prod;

	if (xencons_console_device && xencons_console_device->polling == 0) {
		printf("xenconscn_getc() but not polling\n");
		splx(s);
		return 0;
	}
	if (xendomain_is_dom0()) {
		/* Spin until the hypercall returns a byte. */
		while (HYPERVISOR_console_io(CONSOLEIO_read, 1, &c) == 0)
			;
		cn_check_magic(dev, c, xencons_cnm_state);
		splx(s);
		return c;
	}
	if (xencons_console_device == NULL) {
		printf("xenconscn_getc(): not console\n");
		while (1)
			;	/* loop here instead of in ddb */
		/* NOTREACHED */
	}
	/*
	 * NOTE: the original repeated the "not polling" check here; it is
	 * unreachable (the identical check at the top already returned for
	 * a non-NULL device, and a NULL device never gets past the loop
	 * above), so it has been removed.
	 */
	cons = xencons_interface->in_cons;
	prod = xencons_interface->in_prod;
	xen_rmb();
	while (cons == prod) {
		HYPERVISOR_yield();
		prod = xencons_interface->in_prod;
		/*
		 * Read barrier inside the loop: order the fresh load of
		 * in_prod before re-testing (the original only issued the
		 * barrier after the loop exited).
		 */
		xen_rmb();
	}
	c = xencons_interface->in[MASK_XENCONS_IDX(xencons_interface->in_cons,
	    xencons_interface->in)];
	xen_rmb();
	xencons_interface->in_cons = cons + 1;
	cn_check_magic(dev, c, xencons_cnm_state);
	splx(s);
	return c;
}
/*
 * pmf suspend hook for the Xen console: mask the domU console event
 * channel and detach its interrupt handler.  dom0's console is left
 * untouched.
 */
static bool
xencons_suspend(device_t dev, const pmf_qual_t *qual)
{
	int channel;

	/* dom0 console should not be suspended */
	if (xendomain_is_dom0())
		return true;

	channel = xen_start_info.console_evtchn;
	hypervisor_mask_event(channel);
	if (event_remove_handler(channel, xencons_handler,
	    xencons_console_device) != 0) {
		aprint_error_dev(dev,
		    "can't remove handler: xencons_handler\n");
	}
	aprint_verbose_dev(dev, "removed event channel %d\n", channel);

	return true;
}
/*
 * tty output start routine.  Drains the tty output queue either through
 * the dom0 console-I/O hypercall or into the shared domU console ring,
 * then lets the tty layer schedule a restart if more output is pending.
 */
void
xencons_start(struct tty *tp)
{
	struct clist *cl;
	int s;

	s = spltty();
	/* Nothing to do if output is already in progress or stopped. */
	if (tp->t_state & (TS_TIMEOUT | TS_BUSY | TS_TTSTOP))
		goto out;
	tp->t_state |= TS_BUSY;
	splx(s);

	/*
	 * We need to do this outside spl since it could be fairly
	 * expensive and we don't want our serial ports to overflow.
	 */
	cl = &tp->t_outq;
	if (xendomain_is_dom0()) {
		int len, r;
		u_char buf[XENCONS_BURST+1];

		/* Pull up to one burst from the queue, then push it out. */
		len = q_to_b(cl, buf, XENCONS_BURST);
		while (len > 0) {
			r = HYPERVISOR_console_io(CONSOLEIO_write, len, buf);
			if (r <= 0)
				break;
			/*
			 * NOTE(review): on a partial write only the count is
			 * decremented; buf is not advanced, so the same
			 * prefix would be resent — presumably the hypercall
			 * consumes everything in practice.  Verify.
			 */
			len -= r;
		}
	} else {
		XENCONS_RING_IDX cons, prod, len;

#define XNC_OUT (xencons_interface->out)
		/* Snapshot the ring indexes before filling. */
		cons = xencons_interface->out_cons;
		prod = xencons_interface->out_prod;
		xen_rmb();
		/* Loop until the ring is full or the queue is empty. */
		while (prod != cons + sizeof(xencons_interface->out)) {
			/*
			 * Copy only up to the ring wrap point (or up to the
			 * consumer index) in one contiguous chunk.
			 */
			if (MASK_XENCONS_IDX(prod, XNC_OUT) <
			    MASK_XENCONS_IDX(cons, XNC_OUT)) {
				len = MASK_XENCONS_IDX(cons, XNC_OUT) -
				    MASK_XENCONS_IDX(prod, XNC_OUT);
			} else {
				len = sizeof(XNC_OUT) -
				    MASK_XENCONS_IDX(prod, XNC_OUT);
			}
			len = q_to_b(cl, __UNVOLATILE(
			    &XNC_OUT[MASK_XENCONS_IDX(prod, XNC_OUT)]), len);
			if (len == 0)
				break;
			prod = prod + len;
		}
		/* Data must be visible before the producer index update. */
		xen_wmb();
		xencons_interface->out_prod = prod;
		xen_wmb();
		hypervisor_notify_via_evtchn(
		    xen_start_info.console.domU.evtchn);
#undef XNC_OUT
	}

	s = spltty();
	tp->t_state &= ~TS_BUSY;
	/* If more output is queued, ask the tty layer to restart us. */
	if (ttypull(tp)) {
		tp->t_state |= TS_TIMEOUT;
		callout_schedule(&tp->t_rstrt_ch, 1);
	}
out:
	splx(s);
}
/*
 * Replace the page tables Xen handed us at boot with our own.  Computes
 * how much VA must be mapped (kernel text/data, the tables themselves,
 * UAREA, dummy PGD, shared info, early_zerop, optional ISA I/O space),
 * builds a temporary set of tables past the Xen bootstrap ones, then
 * builds the final tables in place.  Returns the first free VA after
 * the final tables.
 */
vaddr_t
xen_pmap_bootstrap(void)
{
	int count, oldcount;
	long mapsize;
	vaddr_t bootstrap_tables, init_tables;

	memset(xpq_idx_array, 0, sizeof xpq_idx_array);

	xpmap_phys_to_machine_mapping =
	    (unsigned long *)xen_start_info.mfn_list;
	init_tables = xen_start_info.pt_base;
	__PRINTK(("xen_arch_pmap_bootstrap init_tables=0x%lx\n", init_tables));

	/* Space after the Xen bootstrap tables should be free. */
	bootstrap_tables = xen_start_info.pt_base +
	    (xen_start_info.nr_pt_frames * PAGE_SIZE);

	/*
	 * Calculate how much space we need:
	 * first, everything mapped before the Xen bootstrap tables.
	 */
	mapsize = init_tables - KERNTEXTOFF;
	/* after the tables we'll have:
	 *  - UAREA
	 *  - dummy user PGD (x86_64)
	 *  - HYPERVISOR_shared_info
	 *  - early_zerop
	 *  - ISA I/O mem (if needed)
	 */
	mapsize += UPAGES * NBPG;
#ifdef __x86_64__
	mapsize += NBPG;	/* dummy user PGD */
#endif
	mapsize += NBPG;	/* HYPERVISOR_shared_info */
	mapsize += NBPG;	/* early_zerop */

#ifdef DOM0OPS
	if (xendomain_is_dom0()) {
		/* space for ISA I/O mem */
		mapsize += IOM_SIZE;
	}
#endif
	/* At this point mapsize doesn't include the table size. */

#ifdef __x86_64__
	count = TABLE_L2_ENTRIES;
#else
	count = (mapsize + (NBPD_L2 -1)) >> L2_SHIFT;
#endif /* __x86_64__ */

	/*
	 * Now compute how many L2 pages we need exactly: grow count until
	 * the L2 coverage also spans the tables themselves.
	 */
	XENPRINTK(("bootstrap_final mapsize 0x%lx count %d\n", mapsize, count));
	while (mapsize + (count + l2_4_count) * PAGE_SIZE + KERNTEXTOFF >
	    ((long)count << L2_SHIFT) + KERNBASE) {
		count++;
	}
#ifndef __x86_64__
	/*
	 * One more L2 page: we'll allocate several pages after kva_start
	 * in pmap_bootstrap() before pmap_growkernel(), which have not been
	 * counted here.  It's not a big issue to allocate one more L2 as
	 * pmap_growkernel() will be called anyway.
	 */
	count++;
	nkptp[1] = count;
#endif

	/*
	 * Install bootstrap pages.  We may need more L2 pages than will
	 * have the final table here, as it's installed after the final
	 * table.
	 */
	oldcount = count;

bootstrap_again:
	XENPRINTK(("bootstrap_again oldcount %d\n", oldcount));
	/*
	 * Xen space we'll reclaim may not be enough for our new page tables,
	 * move bootstrap tables if necessary.
	 */
	if (bootstrap_tables < init_tables + ((count + l2_4_count) * PAGE_SIZE))
		bootstrap_tables = init_tables + ((count + l2_4_count) *
		    PAGE_SIZE);
	/* Make sure we have enough to map the bootstrap_tables. */
	if (bootstrap_tables + ((oldcount + l2_4_count) * PAGE_SIZE) >
	    ((long)oldcount << L2_SHIFT) + KERNBASE) {
		oldcount++;
		goto bootstrap_again;
	}

	/* Create temporary tables */
	xen_bootstrap_tables(xen_start_info.pt_base, bootstrap_tables,
	    xen_start_info.nr_pt_frames, oldcount, 0);

	/* Create final tables */
	xen_bootstrap_tables(bootstrap_tables, init_tables,
	    oldcount + l2_4_count, count, 1);

	/* Zero out free space after tables. */
	memset((void *)(init_tables + ((count + l2_4_count) * PAGE_SIZE)), 0,
	    (UPAGES + 1) * NBPG);

	/* Finally, flush TLB. */
	xpq_queue_tlb_flush();

	return (init_tables + ((count + l2_4_count) * PAGE_SIZE));
}
/*
 * Attach the mainbus: probe and attach the platform child buses
 * (CPUs, PCI/ACPI/MPBIOS in dom0, IPMI, and the Xen hypervisor bus),
 * depending on compile-time options and the domain we run in.
 */
void
mainbus_attach(device_t parent, device_t self, void *aux)
{
	union mainbus_attach_args mba;
#if defined(DOM0OPS) && defined(XEN3)
	int numcpus = 0;
#ifdef MPBIOS
	int mpbios_present = 0;
#endif
#if NACPI > 0 || defined(MPBIOS)
	int numioapics = 0;
#endif
#endif /* defined(DOM0OPS) && defined(XEN3) */

	aprint_naive("\n");
	aprint_normal("\n");

#ifndef XEN3
	/* Pre-XEN3: always attach a single uniprocessor CPU bus. */
	memset(&mba.mba_caa, 0, sizeof(mba.mba_caa));
	mba.mba_caa.cpu_number = 0;
	mba.mba_caa.cpu_role = CPU_ROLE_SP;
	mba.mba_caa.cpu_func = 0;
	config_found_ia(self, "cpubus", &mba.mba_caa, mainbus_print);
#else /* XEN3 */
#ifdef DOM0OPS
	if (xendomain_is_dom0()) {
#ifdef MPBIOS
		mpbios_present = mpbios_probe(self);
#endif
#if NPCI > 0
		/* ACPI needs to be able to access PCI configuration space. */
		pci_mode = pci_mode_detect();
#ifdef PCI_BUS_FIXUP
		pci_maxbus = pci_bus_fixup(NULL, 0);
		aprint_debug_dev(self, "PCI bus max, after pci_bus_fixup: %i\n",
		    pci_maxbus);
#ifdef PCI_ADDR_FIXUP
		pciaddr.extent_port = NULL;
		pciaddr.extent_mem = NULL;
		pci_addr_fixup(NULL, pci_maxbus);
#endif /* PCI_ADDR_FIXUP */
#endif /* PCI_BUS_FIXUP */
#if NACPI > 0
		/* Prefer ACPI for CPU/IOAPIC enumeration when available. */
		acpi_present = acpi_probe();
		if (acpi_present)
			mpacpi_active = mpacpi_scan_apics(self, &numcpus,
			    &numioapics);
		if (!mpacpi_active)
#endif
		{
			/* Fall back to MPBIOS, then to a single CPU. */
#ifdef MPBIOS
			if (mpbios_present)
				mpbios_scan(self, &numcpus, &numioapics);
			else
#endif
			if (numcpus == 0) {
				memset(&mba.mba_caa, 0, sizeof(mba.mba_caa));
				mba.mba_caa.cpu_number = 0;
				mba.mba_caa.cpu_role = CPU_ROLE_SP;
				mba.mba_caa.cpu_func = 0;
				config_found_ia(self, "cpubus",
				    &mba.mba_caa, mainbus_print);
			}
		}
#if NIOAPIC > 0
		ioapic_enable();
#endif
#endif /* NPCI */
	}
#endif /* DOM0OPS */
#endif /* XEN3 */

#if NIPMI > 0
	memset(&mba.mba_ipmi, 0, sizeof(mba.mba_ipmi));
	mba.mba_ipmi.iaa_iot = X86_BUS_SPACE_IO;
	mba.mba_ipmi.iaa_memt = X86_BUS_SPACE_MEM;
	if (ipmi_probe(&mba.mba_ipmi))
		config_found_ia(self, "ipmibus", &mba.mba_ipmi, 0);
#endif

#if NHYPERVISOR > 0
	/* Always attach the Xen hypervisor pseudo-bus. */
	mba.mba_haa.haa_busname = "hypervisor";
	config_found_ia(self, "hypervisorbus", &mba.mba_haa, mainbus_print);
#endif
}
/*
 * Xen locore: get rid of the Xen bootstrap tables.  Build and switch to
 * new page tables.
 *
 * Sizes everything that must be mapped (kernel image, the tables
 * themselves, UAREA, dummy PGD, shared info page, early_zerop, optional
 * ISA I/O space), builds a temporary table set after the Xen bootstrap
 * tables, then rebuilds the final tables in place and switches to them.
 * Returns the first free VA after the final tables.
 */
vaddr_t
xen_locore(void)
{
	size_t count, oldcount, mapsize;
	vaddr_t bootstrap_tables, init_tables;

	xen_init_features();

	memset(xpq_idx_array, 0, sizeof(xpq_idx_array));

	xpmap_phys_to_machine_mapping =
	    (unsigned long *)xen_start_info.mfn_list;

	/* Space after Xen bootstrap tables should be free */
	init_tables = xen_start_info.pt_base;
	bootstrap_tables = init_tables +
	    (xen_start_info.nr_pt_frames * PAGE_SIZE);

	/*
	 * Calculate how much space we need. First, everything mapped before
	 * the Xen bootstrap tables.
	 */
	mapsize = init_tables - KERNTEXTOFF;

	/* after the tables we'll have:
	 *  - UAREA
	 *  - dummy user PGD (x86_64)
	 *  - HYPERVISOR_shared_info
	 *  - early_zerop
	 *  - ISA I/O mem (if needed)
	 */
	mapsize += UPAGES * PAGE_SIZE;
#ifdef __x86_64__
	mapsize += PAGE_SIZE;	/* dummy user PGD */
#endif
	mapsize += PAGE_SIZE;	/* HYPERVISOR_shared_info */
	mapsize += PAGE_SIZE;	/* early_zerop */

#ifdef DOM0OPS
	if (xendomain_is_dom0()) {
		/* space for ISA I/O mem */
		mapsize += IOM_SIZE;
	}
#endif

	/*
	 * At this point, mapsize doesn't include the table size.
	 */
#ifdef __x86_64__
	count = TABLE_L2_ENTRIES;
#else
	count = (mapsize + (NBPD_L2 - 1)) >> L2_SHIFT;
#endif

	/*
	 * Now compute how many L2 pages we need exactly. This is useful only
	 * on i386, since the initial count for amd64 is already enough.
	 */
	while (KERNTEXTOFF + mapsize + (count + l2_4_count) * PAGE_SIZE >
	    KERNBASE + (count << L2_SHIFT)) {
		count++;
	}

#ifndef __x86_64__
	/*
	 * One more L2 page: we'll allocate several pages after kva_start
	 * in pmap_bootstrap() before pmap_growkernel(), which have not been
	 * counted here. It's not a big issue to allocate one more L2 as
	 * pmap_growkernel() will be called anyway.
	 */
	count++;
	nkptp[1] = count;
#endif

	/*
	 * Install bootstrap pages. We may need more L2 pages than will
	 * have the final table here, as it's installed after the final table.
	 */
	oldcount = count;

bootstrap_again:

	/*
	 * Xen space we'll reclaim may not be enough for our new page tables,
	 * move bootstrap tables if necessary.
	 */
	if (bootstrap_tables < init_tables + ((count + l2_4_count) * PAGE_SIZE))
		bootstrap_tables = init_tables +
		    ((count + l2_4_count) * PAGE_SIZE);

	/*
	 * Make sure the number of L2 pages we have is enough to map everything
	 * from KERNBASE to the bootstrap tables themselves.
	 */
	if (bootstrap_tables + ((oldcount + l2_4_count) * PAGE_SIZE) >
	    KERNBASE + (oldcount << L2_SHIFT)) {
		oldcount++;
		goto bootstrap_again;
	}

	/* Create temporary tables */
	xen_bootstrap_tables(init_tables, bootstrap_tables,
	    xen_start_info.nr_pt_frames, oldcount, false);

	/* Create final tables */
	xen_bootstrap_tables(bootstrap_tables, init_tables,
	    oldcount + l2_4_count, count, true);

	/* Zero out free space after tables */
	memset((void *)(init_tables + ((count + l2_4_count) * PAGE_SIZE)), 0,
	    (UPAGES + 1) * PAGE_SIZE);

	/* Finally, flush TLB. */
	xpq_queue_tlb_flush();

	return (init_tables + ((count + l2_4_count) * PAGE_SIZE));
}