static int gnttab_setup(void)
{
	int rc;
	unsigned int max_nr_gframes;

	max_nr_gframes = gnttab_max_grant_frames();
	if (max_nr_gframes < nr_grant_frames)
		return -ENOSYS;

	/* PVH: the grant frames must be mapped via foreign-page setup first. */
	if (xen_pv_domain() && xen_feature(XENFEAT_auto_translated_physmap) &&
	    !gnttab_shared.addr) {
		rc = xlated_setup_gnttab_pages((unsigned long)max_nr_gframes,
					       &gnttab_shared.addr);
		if (rc != 0)
			return rc;
	}

	if (xen_pv_domain())
		return gnttab_map(0, nr_grant_frames - 1);

	if (gnttab_shared.addr == NULL) {
		gnttab_shared.addr = xen_remap(xen_hvm_resume_frames,
					       PAGE_SIZE * max_nr_gframes);
		if (gnttab_shared.addr == NULL) {
			printk(KERN_WARNING
			       "Failed to ioremap gnttab share frames!\n");
			return -ENOMEM;
		}
	}

	gnttab_map(0, nr_grant_frames - 1);

	return 0;
}
static void
xen_hvm_disable_emulated_devices(void)
{
	u_short disable_devs = 0;

	if (xen_pv_domain()) {
		/*
		 * No emulated devices in the PV case, so no need to unplug
		 * anything.
		 */
		if (xen_disable_pv_disks != 0 || xen_disable_pv_nics != 0)
			printf("PV devices cannot be disabled in PV guests\n");
		return;
	}

	/* Check for the unplug-protocol magic before poking the port. */
	if (inw(XEN_MAGIC_IOPORT) != XMI_MAGIC)
		return;

	if (xen_disable_pv_disks == 0) {
		if (bootverbose)
			printf("XEN: disabling emulated disks\n");
		disable_devs |= XMI_UNPLUG_IDE_DISKS;
	}
	if (xen_disable_pv_nics == 0) {
		if (bootverbose)
			printf("XEN: disabling emulated nics\n");
		disable_devs |= XMI_UNPLUG_NICS;
	}

	if (disable_devs != 0)
		outw(XEN_MAGIC_IOPORT, disable_devs);
}
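For reference, a minimal sketch of the unplug-protocol constants the function above relies on, paraphrased from Xen's emulated-device unplug specification (docs/misc/hvm-emulated-unplug); the exact macro values here are assumptions, not copied from the FreeBSD headers:

/* Assumed values: QEMU watches I/O port 0x10; a guest that reads back
 * the protocol magic may write unplug bits to ask for emulated devices
 * to be removed before the PV drivers attach. */
#define XEN_MAGIC_IOPORT	0x10
#define XMI_MAGIC		0x49d2
#define XMI_UNPLUG_IDE_DISKS	0x01	/* unplug all emulated IDE disks */
#define XMI_UNPLUG_NICS		0x02	/* unplug all emulated NICs */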
static void
xen_hvm_init_shared_info_page(void)
{
	struct xen_add_to_physmap xatp;

	if (xen_pv_domain()) {
		/*
		 * Already set up in the PV case; shared_info is passed inside
		 * the start_info struct at start of day.
		 */
		return;
	}

	if (HYPERVISOR_shared_info == NULL) {
		HYPERVISOR_shared_info = malloc(PAGE_SIZE, M_XENHVM, M_NOWAIT);
		if (HYPERVISOR_shared_info == NULL)
			panic("Unable to allocate Xen shared info page");
	}

	xatp.domid = DOMID_SELF;
	xatp.idx = 0;
	xatp.space = XENMAPSPACE_shared_info;
	xatp.gpfn = vtophys(HYPERVISOR_shared_info) >> PAGE_SHIFT;
	if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
		panic("HYPERVISOR_memory_op failed");
}
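The hypercall argument above comes from the public Xen headers; a paraphrased sketch of its shape (field layout varies across interface versions, so treat this as illustrative only):

/* Paraphrased from xen/include/public/memory.h; not authoritative. */
struct xen_add_to_physmap {
	domid_t domid;		/* DOMID_SELF in the code above */
	unsigned int space;	/* XENMAPSPACE_* source space */
	xen_ulong_t idx;	/* index into that space */
	xen_pfn_t gpfn;		/* guest PFN to map it at */
};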
static void __init xen_hvm_guest_init(void)
{
	if (xen_pv_domain())
		return;

	init_hvm_pv_info();

	xen_hvm_init_shared_info();

	xen_panic_handler_init();

	if (xen_feature(XENFEAT_hvm_callback_vector))
		xen_have_vector_callback = 1;

	xen_hvm_smp_init();
	WARN_ON(xen_cpuhp_setup(xen_cpu_up_prepare_hvm, xen_cpu_dead_hvm));
	xen_unplug_emulated_devices();
	x86_init.irqs.intr_init = xen_init_IRQ;
	xen_hvm_init_time_ops();
	xen_hvm_init_mmu_ops();

	if (xen_pvh_domain())
		machine_ops.emergency_restart = xen_emergency_restart;
#ifdef CONFIG_KEXEC_CORE
	machine_ops.shutdown = xen_hvm_shutdown;
	machine_ops.crash_shutdown = xen_hvm_crash_shutdown;
#endif
}
/*
 * pci_xen_swiotlb_detect - set xen_swiotlb to 1 if necessary
 *
 * This returns non-zero if we are forced to use xen_swiotlb (by the boot
 * option).
 */
int __init pci_xen_swiotlb_detect(void)
{
	if (!xen_pv_domain())
		return 0;

	/* If running as PV guest, either iommu=soft or swiotlb=force will
	 * activate this IOMMU. If running as PV privileged, activate it
	 * regardless.
	 */
	if (xen_initial_domain() || swiotlb || swiotlb_force)
		xen_swiotlb = 1;

	/* If we are running under Xen, we MUST disable the native SWIOTLB.
	 * Don't worry about swiotlb_force flag activating the native, as
	 * the 'swiotlb' flag is the only one turning it on. */
	swiotlb = 0;

#ifdef CONFIG_X86_64
	/* pci_swiotlb_detect_4gb turns on native SWIOTLB if no_iommu == 0
	 * (so no iommu=X command line override applies).
	 * Considering that PV guests do not want the *native SWIOTLB* but
	 * only Xen SWIOTLB, it is not useful to us, so set no_iommu=1 here. */
	if (max_pfn > MAX_DMA32_PFN)
		no_iommu = 1;
#endif
	return xen_swiotlb;
}
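For example, booting a PV domU with iommu=soft (or swiotlb=force) on the kernel command line takes the first branch and turns the Xen SWIOTLB on, while dom0 gets it unconditionally.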
int gnttab_resume(void)
{
	unsigned int max_nr_gframes;

	max_nr_gframes = gnttab_max_grant_frames();
	if (max_nr_gframes < nr_grant_frames)
		return -ENOSYS;

	if (xen_pv_domain())
		return gnttab_map(0, nr_grant_frames - 1);

	if (!shared) {
		shared = ioremap(xen_hvm_resume_frames,
				 PAGE_SIZE * max_nr_gframes);
		if (shared == NULL) {
			printk(KERN_WARNING
			       "Failed to ioremap gnttab share frames!\n");
			return -ENOMEM;
		}
	}

	gnttab_map(0, nr_grant_frames - 1);

	return 0;
}
bool xen_has_pv_devices(void)
{
	if (!xen_domain())
		return false;

	/* PV domains always have them. */
	if (xen_pv_domain())
		return true;

	/* And user has xen_platform_pci=0 set in guest config as
	 * driver did not modify the value. */
	if (xen_platform_pci_unplug == 0)
		return false;

	if (xen_platform_pci_unplug & XEN_UNPLUG_NEVER)
		return false;

	if (xen_platform_pci_unplug & XEN_UNPLUG_ALL)
		return true;

	/* This is an odd one - we are going to run legacy
	 * and PV drivers at the same time. */
	if (xen_platform_pci_unplug & XEN_UNPLUG_UNNECESSARY)
		return true;

	/* And the caller has to follow with xen_pv_{disk,nic}_devices
	 * to be certain which driver can load. */
	return false;
}
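A typical caller is a frontend's module init; a minimal sketch with a hypothetical mydrv_driver (not from the original code):

/* Hypothetical frontend init: bail out early when no PV devices will
 * ever appear, otherwise register on the xenbus. */
static struct xenbus_driver mydrv_driver;	/* assumed to exist */

static int __init mydrv_init(void)
{
	if (!xen_has_pv_devices())
		return -ENODEV;

	return xenbus_register_frontend(&mydrv_driver);
}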
static int setup_cpu_watcher(struct notifier_block *notifier,
			     unsigned long event, void *data)
{
	int cpu;
	static struct xenbus_watch cpu_watch = {
		.node = "cpu",
		.callback = handle_vcpu_hotplug_event };

	(void)register_xenbus_watch(&cpu_watch);

	/* Bring boot-time CPU state in line with what xenstore reports. */
	for_each_possible_cpu(cpu) {
		if (vcpu_online(cpu) == 0) {
			(void)cpu_down(cpu);
			set_cpu_present(cpu, false);
		}
	}

	return NOTIFY_DONE;
}

static int __init setup_vcpu_hotplug_event(void)
{
	static struct notifier_block xsn_cpu = {
		.notifier_call = setup_cpu_watcher };

	if (!xen_pv_domain())
		return -ENODEV;

	register_xenstore_notifier(&xsn_cpu);

	return 0;
}

arch_initcall(setup_vcpu_hotplug_event);
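The vcpu_online() helper used above reads per-CPU availability from xenstore; a sketch paraphrased from the usual cpu_hotplug implementation (details may differ by kernel version):

/* Paraphrased sketch: returns 1 for "online", 0 for "offline",
 * negative on error. */
static int vcpu_online(unsigned int cpu)
{
	char dir[16], state[16];
	int err;

	sprintf(dir, "cpu/%u", cpu);
	err = xenbus_scanf(XBT_NIL, dir, "availability", "%15s", state);
	if (err != 1)
		return err;

	if (strcmp(state, "online") == 0)
		return 1;
	else if (strcmp(state, "offline") == 0)
		return 0;

	return -EINVAL;
}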
int gnttab_resume(void)
{
	unsigned int max_nr_gframes;

	gnttab_request_version();
	max_nr_gframes = gnttab_max_grant_frames();
	if (max_nr_gframes < nr_grant_frames)
		return -ENOSYS;

	if (xen_pv_domain())
		return gnttab_map(0, nr_grant_frames - 1);

	if (gnttab_shared.addr == NULL) {
		gnttab_shared.addr = ioremap(xen_hvm_resume_frames,
					     PAGE_SIZE * max_nr_gframes);
		if (gnttab_shared.addr == NULL) {
			printk(KERN_WARNING
			       "Failed to ioremap gnttab share frames!\n");
			return -ENOMEM;
		}
	}

	gnttab_map(0, nr_grant_frames - 1);

	return 0;
}
static uint32_t __init xen_platform_hvm(void)
{
	if (xen_pv_domain() || xen_nopv)
		return 0;

	return xen_cpuid_base();
}
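This detect hook is normally wired into the x86 hypervisor-probe table; roughly as below (the member set varies across kernel versions, so this is a sketch only):

const struct hypervisor_x86 x86_hyper_xen_hvm = {
	.name	= "Xen HVM",
	.detect	= xen_platform_hvm,
	/* .init_platform and friends omitted */
};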
static int xen_register_gsi(u32 gsi, int gsi_override, int triggering,
			    int polarity)
{
	int rc, irq;
	struct physdev_setup_gsi setup_gsi;

	if (!xen_pv_domain())
		return -1;

	printk(KERN_DEBUG "xen: registering gsi %u triggering %d polarity %d\n",
	       gsi, triggering, polarity);

	irq = xen_register_pirq(gsi, gsi_override, triggering, true);

	setup_gsi.gsi = gsi;
	setup_gsi.triggering = (triggering == ACPI_EDGE_SENSITIVE ? 0 : 1);
	setup_gsi.polarity = (polarity == ACPI_ACTIVE_HIGH ? 0 : 1);

	rc = HYPERVISOR_physdev_op(PHYSDEVOP_setup_gsi, &setup_gsi);
	if (rc == -EEXIST)
		printk(KERN_INFO "Already setup the GSI :%d\n", gsi);
	else if (rc) {
		printk(KERN_ERR "Failed to setup GSI :%d, err_code:%d\n",
		       gsi, rc);
	}

	return irq;
}
static int xen_apic_probe_pv(void)
{
	if (xen_pv_domain())
		return 1;

	return 0;
}
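The probe above is registered through the usual APIC driver machinery; a sketch (the real struct apic carries many more callbacks):

static struct apic xen_pv_apic = {
	.name	= "Xen PV (fake)",
	.probe	= xen_apic_probe_pv,
	/* remaining callbacks elided */
};

apic_driver(xen_pv_apic);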
void xen_arch_post_suspend(int cancelled)
{
	if (xen_pv_domain())
		xen_pv_post_suspend(cancelled);
	else
		xen_hvm_post_suspend(cancelled);
}
static int __init xen_hvc_init(void)
{
	struct hvc_struct *hp;
	struct hv_ops *ops;

	if (!xen_pv_domain())
		return -ENODEV;

	if (xen_initial_domain()) {
		ops = &dom0_hvc_ops;
		xencons_irq = bind_virq_to_irq(VIRQ_CONSOLE, 0);
	} else {
		if (!xen_start_info->console.domU.evtchn)
			return -ENODEV;

		ops = &domU_hvc_ops;
		xencons_irq = bind_evtchn_to_irq(xen_start_info->console.domU.evtchn);
	}
	if (xencons_irq < 0)
		xencons_irq = 0; /* NO_IRQ */
	else
		irq_set_noprobe(xencons_irq);

	hp = hvc_alloc(HVC_COOKIE, xencons_irq, ops, 256);
	if (IS_ERR(hp))
		return PTR_ERR(hp);

	hvc = hp;

	console_pfn = mfn_to_pfn(xen_start_info->console.domU.mfn);

	return 0;
}
int arch_gnttab_init(unsigned long nr_shared)
{
	if (!xen_pv_domain())
		return 0;

	return arch_gnttab_valloc(&gnttab_shared_vm_area, nr_shared);
}
static int setup_cpu_watcher(struct notifier_block *notifier,
			     unsigned long event, void *data)
{
	static struct xenbus_watch cpu_watch = {
		.node = "cpu",
		.callback = handle_vcpu_hotplug_event };

	(void)register_xenbus_watch(&cpu_watch);

	return NOTIFY_DONE;
}

static int __init setup_vcpu_hotplug_event(void)
{
	static struct notifier_block xsn_cpu = {
		.notifier_call = setup_cpu_watcher };

	if (!xen_pv_domain())
		return -ENODEV;

	register_xenstore_notifier(&xsn_cpu);

	return 0;
}

arch_initcall(setup_vcpu_hotplug_event);
static int xen_cons_init(void)
{
	if (!xen_pv_domain())
		return 0;

	hvc_instantiate(HVC_COOKIE, 0, &hvc_ops);
	return 0;
}
static void xen_cpu_die(unsigned int cpu)
{
	while (xen_pv_domain() &&
	       HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) {
		current->state = TASK_UNINTERRUPTIBLE;
		schedule_timeout(HZ/10);
	}
	unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
	unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
	unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
	unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL);
	xen_uninit_lock_cpu(cpu);
	if (xen_pv_domain())
		xen_teardown_timer(cpu);

	if (num_online_cpus() == 1)
		alternatives_smp_switch(0);
}
const char __init *
acpi_get_sysname(void)
{
	unsigned long rsdp_phys;
	struct acpi_table_rsdp *rsdp;
	struct acpi_table_xsdt *xsdt;
	struct acpi_table_header *hdr;
#ifdef CONFIG_INTEL_IOMMU
	u64 i, nentries;
#endif

	rsdp_phys = acpi_find_rsdp();
	if (!rsdp_phys) {
		printk(KERN_ERR
		       "ACPI 2.0 RSDP not found, default to \"dig\"\n");
		return "dig";
	}

	rsdp = (struct acpi_table_rsdp *)__va(rsdp_phys);
	if (strncmp(rsdp->signature, ACPI_SIG_RSDP, sizeof(ACPI_SIG_RSDP) - 1)) {
		printk(KERN_ERR
		       "ACPI 2.0 RSDP signature incorrect, default to \"dig\"\n");
		return "dig";
	}

	xsdt = (struct acpi_table_xsdt *)__va(rsdp->xsdt_physical_address);
	hdr = &xsdt->header;
	if (strncmp(hdr->signature, ACPI_SIG_XSDT, sizeof(ACPI_SIG_XSDT) - 1)) {
		printk(KERN_ERR
		       "ACPI 2.0 XSDT signature incorrect, default to \"dig\"\n");
		return "dig";
	}

	if (!strcmp(hdr->oem_id, "HP")) {
		return "hpzx1";
	} else if (!strcmp(hdr->oem_id, "SGI")) {
		if (!strcmp(hdr->oem_table_id + 4, "UV"))
			return "uv";
		else
			return "sn2";
	} else if (xen_pv_domain() && !strcmp(hdr->oem_id, "XEN")) {
		return "xen";
	}

#ifdef CONFIG_INTEL_IOMMU
	/* Look for Intel IOMMU */
	nentries = (hdr->length - sizeof(*hdr)) /
	    sizeof(xsdt->table_offset_entry[0]);
	for (i = 0; i < nentries; i++) {
		hdr = __va(xsdt->table_offset_entry[i]);
		if (strncmp(hdr->signature, ACPI_SIG_DMAR,
			    sizeof(ACPI_SIG_DMAR) - 1) == 0)
			return "dig_vtd";
	}
#endif

	return "dig";
}
static void
xc_cnprobe(struct consdev *cp)
{
	if (!xen_pv_domain())
		return;

	cp->cn_pri = CN_REMOTE;
	sprintf(cp->cn_name, "%s0", driver_name);
}
/*
 * Xen nexus(4) driver.
 */
static int
nexus_xen_probe(device_t dev)
{
	if (!xen_pv_domain())
		return (ENXIO);

	return (BUS_PROBE_SPECIFIC);
}
/*
 * pci_xen_swiotlb_detect - set xen_swiotlb to 1 if necessary
 *
 * This returns non-zero if we are forced to use xen_swiotlb (by the boot
 * option).
 */
int __init pci_xen_swiotlb_detect(void)
{
	/* If running as PV guest, either iommu=soft or swiotlb=force will
	 * activate this IOMMU. If running as PV privileged, activate it
	 * regardless.
	 */
	if ((xen_initial_domain() || swiotlb || swiotlb_force) &&
	    xen_pv_domain())
		xen_swiotlb = 1;

	/* If we are running under Xen, we MUST disable the native SWIOTLB.
	 * Don't worry about swiotlb_force flag activating the native, as
	 * the 'swiotlb' flag is the only one turning it on. */
	if (xen_pv_domain())
		swiotlb = 0;

	return xen_swiotlb;
}
static int __init xenkbd_init(void)
{
	if (!xen_pv_domain())
		return -ENODEV;

	/* Nothing to do if running in dom0. */
	if (xen_initial_domain())
		return -ENODEV;

	return xenbus_register_frontend(&xenkbd_driver);
}
static int __gnttab_init(void)
{
	/* Delay grant-table initialization in the PV on HVM case */
	if (xen_hvm_domain())
		return 0;

	if (!xen_pv_domain())
		return -ENODEV;

	return gnttab_init();
}
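In the kernel tree this initializer is hooked up as an early initcall, roughly:

core_initcall(__gnttab_init);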
static int
xen_pci_probe(device_t dev)
{
	if (!xen_pv_domain())
		return (ENXIO);

	device_set_desc(dev, "Xen PCI bus");

	return (BUS_PROBE_DEFAULT);
}
bool xen_hvm_need_lapic(void)
{
	if (xen_nopv)
		return false;
	if (xen_pv_domain())
		return false;
	if (!xen_hvm_domain())
		return false;
	if (xen_feature(XENFEAT_hvm_pirqs) && xen_have_vector_callback)
		return false;
	return true;
}
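Read bottom-up: an HVM guest can do without the emulated local APIC only when it has both PV event-channel interrupt delivery (the vector callback) and PV pirqs to replace it; otherwise the LAPIC is still needed.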
static void
xc_identify(driver_t *driver, device_t parent)
{
	device_t child;

	if (!xen_pv_domain())
		return;

	child = BUS_ADD_CHILD(parent, 0, driver_name, 0);
	device_set_driver(child, driver);
	device_set_desc(child, "Xen Console");
}
/*
 * This one is odd - it determines whether you want to run PV _and_
 * legacy (IDE) drivers together. This combination is only possible
 * under HVM.
 */
bool xen_has_pv_and_legacy_disk_devices(void)
{
	if (!xen_domain())
		return false;

	/* N.B. This is only ever used in HVM mode */
	if (xen_pv_domain())
		return false;

	if (xen_platform_pci_unplug & XEN_UNPLUG_UNNECESSARY)
		return true;

	return false;
}
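A plausible use is a block frontend deciding which virtual-device numbers to claim when emulated IDE stays plugged in; a hypothetical helper sketch (not the real xen-blkfront logic):

/* Hypothetical: when legacy IDE emulation remains active, only claim
 * extended vbd numbers so the same disk is not driven twice. */
static bool should_skip_vbd(bool vdev_is_extended)
{
	return xen_has_pv_and_legacy_disk_devices() && !vdev_is_extended;
}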
static int xen_cons_init(void)
{
	struct hv_ops *ops;

	if (!xen_pv_domain())
		return 0;

	if (xen_initial_domain())
		ops = &dom0_hvc_ops;
	else
		ops = &domU_hvc_ops;

	hvc_instantiate(HVC_COOKIE, 0, ops);
	return 0;
}
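Hooks like this run from console_initcall so a PV console exists well before the full hvc driver loads; in hvc_xen.c the registration is, roughly:

console_initcall(xen_cons_init);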
static void xen_cpu_die(unsigned int cpu)
{
	while (xen_pv_domain() &&
	       HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) {
		current->state = TASK_UNINTERRUPTIBLE;
		schedule_timeout(HZ/10);
	}
	unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
	unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
	unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
	unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL);
	if (!xen_hvm_domain())
		unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL);
	xen_uninit_lock_cpu(cpu);
	xen_teardown_timer(cpu);
}