/**
 * intel_gvt_init_host - Load MPT modules and detect if we're running in host
 *
 * This function is called at the driver loading stage. If it fails to find a
 * loadable MPT module, or detects that we are currently running in a VM,
 * GVT-g will be disabled.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_gvt_init_host(void)
{
        if (intel_gvt_host.initialized)
                return 0;

        /* Xen DOM U */
        if (xen_domain() && !xen_initial_domain())
                return -ENODEV;

        /* Try to load MPT modules for hypervisors */
        if (xen_initial_domain()) {
                /* In Xen dom0 */
                intel_gvt_host.mpt = try_then_request_module(
                                symbol_get(xengt_mpt), "xengt");
                intel_gvt_host.hypervisor_type = INTEL_GVT_HYPERVISOR_XEN;
        } else {
#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
                /* not in Xen. Try KVMGT */
                intel_gvt_host.mpt = try_then_request_module(
                                symbol_get(kvmgt_mpt), "kvmgt");
                intel_gvt_host.hypervisor_type = INTEL_GVT_HYPERVISOR_KVM;
#endif
        }

        /* Failed to load an MPT module - bail out */
        if (!intel_gvt_host.mpt)
                return -EINVAL;

        gvt_dbg_core("Running with hypervisor %s in host mode\n",
                        supported_hypervisors[intel_gvt_host.hypervisor_type]);

        intel_gvt_host.initialized = true;
        return 0;
}
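/*
 * A minimal, self-contained sketch of the dispatch pattern above: probe a
 * Xen-specific backend first and fall back to a KVM one. The mpt_ops,
 * hyp_type, and probe_mpt() names are hypothetical stand-ins for the real
 * try_then_request_module()/symbol_get() machinery; this compiles and runs
 * as plain userspace C.
 */
#include <stdio.h>

enum hyp_type { HYP_NONE, HYP_XEN, HYP_KVM };

struct mpt_ops { const char *name; };

static struct mpt_ops xengt_ops = { "xengt" };
static struct mpt_ops kvmgt_ops = { "kvmgt" };

/* Pretend detection result; the kernel derives this from xen_domain()
 * and xen_initial_domain(). */
static int running_as_xen_dom0;

static const struct mpt_ops *probe_mpt(enum hyp_type *type)
{
        if (running_as_xen_dom0) {
                *type = HYP_XEN;
                return &xengt_ops;      /* would be the "xengt" module */
        }
        *type = HYP_KVM;
        return &kvmgt_ops;              /* would be the "kvmgt" module */
}

int main(void)
{
        enum hyp_type type = HYP_NONE;
        const struct mpt_ops *mpt = probe_mpt(&type);

        if (!mpt)
                return 1;       /* mirrors the -EINVAL bail-out above */
        printf("Running with hypervisor %s in host mode\n", mpt->name);
        return 0;
}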
static int
nexus_xen_attach(device_t dev)
{
        int error;
        device_t acpi_dev = NULL;

        nexus_init_resources();
        bus_generic_probe(dev);

        if (xen_initial_domain()) {
                /* Disable some ACPI devices that are not usable by Dom0 */
                acpi_cpu_disabled = true;
                acpi_hpet_disabled = true;
                acpi_timer_disabled = true;

                acpi_dev = BUS_ADD_CHILD(dev, 10, "acpi", 0);
                if (acpi_dev == NULL)
                        panic("Unable to add ACPI bus to Xen Dom0");
        }

        error = bus_generic_attach(dev);
        if (xen_initial_domain() && (error == 0))
                acpi_install_wakeup_handler(device_get_softc(acpi_dev));

        return (error);
}
static void __init xen_init_cpuid_mask(void)
{
        unsigned int ax, bx, cx, dx;
        unsigned int xsave_mask;

        cpuid_leaf1_edx_mask =
                ~((1 << X86_FEATURE_MCE)  |  /* disable MCE */
                  (1 << X86_FEATURE_MCA)  |  /* disable MCA */
                  (1 << X86_FEATURE_MTRR) |  /* disable MTRR */
                  (1 << X86_FEATURE_ACC));   /* thermal monitoring */

        if (!xen_initial_domain())
                cpuid_leaf1_edx_mask &=
                        ~((1 << X86_FEATURE_APIC) |  /* disable local APIC */
                          (1 << X86_FEATURE_ACPI));  /* disable ACPI */

        ax = 1;
        xen_cpuid(&ax, &bx, &cx, &dx);

        xsave_mask =
                (1 << (X86_FEATURE_XSAVE % 32)) |
                (1 << (X86_FEATURE_OSXSAVE % 32));

        /* Xen will set CR4.OSXSAVE if supported and not disabled by force */
        if ((cx & xsave_mask) != xsave_mask)
                cpuid_leaf1_ecx_mask &= ~xsave_mask; /* disable XSAVE & OSXSAVE */
}
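/*
 * Standalone illustration of the '% 32' feature-bit arithmetic above. The
 * kernel encodes each feature as word * 32 + bit; taking the value modulo 32
 * strips the word index and leaves the in-word bit position. The two
 * X86_FEATURE_* values below are assumed to match the upstream leaf-1 ECX
 * definitions (XSAVE = bit 26, OSXSAVE = bit 27).
 */
#include <stdio.h>

#define X86_FEATURE_XSAVE   (4 * 32 + 26)       /* CPUID.1:ECX bit 26 */
#define X86_FEATURE_OSXSAVE (4 * 32 + 27)       /* CPUID.1:ECX bit 27 */

int main(void)
{
        unsigned int xsave_mask = (1 << (X86_FEATURE_XSAVE % 32)) |
                                  (1 << (X86_FEATURE_OSXSAVE % 32));

        printf("xsave_mask = 0x%08x\n", xsave_mask);    /* 0x0c000000 */
        return 0;
}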
static efi_system_table_t __init *xen_efi_probe(void)
{
        struct xen_platform_op op = {
                .cmd = XENPF_firmware_info,
                .u.firmware_info = {
                        .type = XEN_FW_EFI_INFO,
                        .index = XEN_FW_EFI_CONFIG_TABLE
                }
        };
        union xenpf_efi_info *info = &op.u.firmware_info.u.efi_info;

        if (!xen_initial_domain() || HYPERVISOR_platform_op(&op) < 0)
                return NULL;

        /* Here we know that Xen runs on EFI platform. */

        efi.get_time = xen_efi_get_time;
        efi.set_time = xen_efi_set_time;
        efi.get_wakeup_time = xen_efi_get_wakeup_time;
        efi.set_wakeup_time = xen_efi_set_wakeup_time;
        efi.get_variable = xen_efi_get_variable;
        efi.get_next_variable = xen_efi_get_next_variable;
        efi.set_variable = xen_efi_set_variable;
        efi.query_variable_info = xen_efi_query_variable_info;
        efi.update_capsule = xen_efi_update_capsule;
        efi.query_capsule_caps = xen_efi_query_capsule_caps;
        efi.get_next_high_mono_count = xen_efi_get_next_high_mono_count;
        efi.reset_system = xen_efi_reset_system;

        efi_systab_xen.tables = info->cfg.addr;
        efi_systab_xen.nr_tables = info->cfg.nent;

        op.cmd = XENPF_firmware_info;
        op.u.firmware_info.type = XEN_FW_EFI_INFO;
        op.u.firmware_info.index = XEN_FW_EFI_VENDOR;
        info->vendor.bufsz = sizeof(vendor);
        set_xen_guest_handle(info->vendor.name, vendor);

        if (HYPERVISOR_platform_op(&op) == 0) {
                efi_systab_xen.fw_vendor = __pa_symbol(vendor);
                efi_systab_xen.fw_revision = info->vendor.revision;
        } else
                efi_systab_xen.fw_vendor = __pa_symbol(L"UNKNOWN");

        op.cmd = XENPF_firmware_info;
        op.u.firmware_info.type = XEN_FW_EFI_INFO;
        op.u.firmware_info.index = XEN_FW_EFI_VERSION;

        if (HYPERVISOR_platform_op(&op) == 0)
                efi_systab_xen.hdr.revision = info->version;

        op.cmd = XENPF_firmware_info;
        op.u.firmware_info.type = XEN_FW_EFI_INFO;
        op.u.firmware_info.index = XEN_FW_EFI_RT_VERSION;

        if (HYPERVISOR_platform_op(&op) == 0)
                efi.runtime_version = info->version;

        return &efi_systab_xen;
}
static __init void xen_init_cpuid_mask(void)
{
        unsigned int ax, bx, cx, dx;

        cpuid_leaf1_edx_mask =
                ~((1 << X86_FEATURE_MCE) |  /* disable MCE */
                  (1 << X86_FEATURE_MCA) |  /* disable MCA */
                  (1 << X86_FEATURE_ACC));  /* thermal monitoring */

        if (!xen_initial_domain())
                cpuid_leaf1_edx_mask &=
                        ~((1 << X86_FEATURE_APIC) |  /* disable local APIC */
                          (1 << X86_FEATURE_ACPI));  /* disable ACPI */

        ax = 1;
        cx = 0;
        xen_cpuid(&ax, &bx, &cx, &dx);

        /* cpuid claims we support xsave; try enabling it to see what happens */
        if (cx & (1 << (X86_FEATURE_XSAVE % 32))) {
                unsigned long cr4;

                set_in_cr4(X86_CR4_OSXSAVE);

                cr4 = read_cr4();

                if ((cr4 & X86_CR4_OSXSAVE) == 0)
                        cpuid_leaf1_ecx_mask &= ~(1 << (X86_FEATURE_XSAVE % 32));

                clear_in_cr4(X86_CR4_OSXSAVE);
        }
}
static void __init xen_filter_cpu_maps(void)
{
        int i, rc;
        unsigned int subtract = 0;

        if (!xen_initial_domain())
                return;

        num_processors = 0;
        disabled_cpus = 0;
        for (i = 0; i < nr_cpu_ids; i++) {
                rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
                if (rc >= 0) {
                        num_processors++;
                        set_cpu_possible(i, true);
                } else {
                        set_cpu_possible(i, false);
                        set_cpu_present(i, false);
                        subtract++;
                }
        }
#ifdef CONFIG_HOTPLUG_CPU
        /* This is akin to using 'nr_cpus' on the Linux command line, which
         * is OK because with 'dom0_max_vcpus=X' we can only have up to X
         * VCPUs, while nr_cpu_ids may be greater than X. That is normally
         * not a problem, except when CPU hotplugging is involved and there
         * might be more than X CPUs in the guest - which will not work, as
         * there is no hypercall to expand the max number of VCPUs an
         * already running guest has. So cap it up to X. */
        if (subtract)
                nr_cpu_ids = nr_cpu_ids - subtract;
#endif
}
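/*
 * Self-contained sketch of the capping logic in the #ifdef block above:
 * every VCPU that the hypervisor reports as absent shrinks nr_cpu_ids by
 * one. fake_vcpu_is_up() is a made-up stand-in for
 * HYPERVISOR_vcpu_op(VCPUOP_is_up, ...), pretending dom0_max_vcpus=4.
 */
#include <stdio.h>

/* >= 0 means VCPU i exists and is up, < 0 means it does not. */
static int fake_vcpu_is_up(int i)
{
        return (i < 4) ? 0 : -1;
}

int main(void)
{
        int nr_cpu_ids = 8;     /* compile-time maximum, like NR_CPUS */
        int num_processors = 0;
        unsigned int subtract = 0;

        for (int i = 0; i < nr_cpu_ids; i++) {
                if (fake_vcpu_is_up(i) >= 0)
                        num_processors++;
                else
                        subtract++;
        }
        if (subtract)
                nr_cpu_ids -= subtract; /* the cap described above */

        printf("online=%d, capped nr_cpu_ids=%d\n", num_processors, nr_cpu_ids);
        return 0;
}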
static int
xc_attach(device_t dev)
{
        int error;

        xencons_dev = dev;
        xccons = tty_alloc(&xc_ttydevsw, NULL);

        tty_makedev(xccons, NULL, "xc%r", 0);

        callout_init(&xc_callout, 0);

        xencons_ring_init();

        cnsl_evt_reg = true;
        callout_reset(&xc_callout, XC_POLLTIME, xc_timeout, xccons);

        if (xen_initial_domain()) {
                error = xen_intr_bind_virq(dev, VIRQ_CONSOLE, 0, NULL,
                    xencons_priv_interrupt, NULL,
                    INTR_TYPE_TTY, &xen_intr_handle);
                KASSERT(error >= 0, ("can't register console interrupt"));
        }

        /* register handler to flush console on shutdown */
        if ((EVENTHANDLER_REGISTER(shutdown_post_sync, xc_shutdown, NULL,
            SHUTDOWN_PRI_DEFAULT)) == NULL)
                printf("xencons: shutdown event registration failed!\n");

        return (0);
}
static u32 xen_apic_read(u32 reg)
{
        struct xen_platform_op op = {
                .cmd = XENPF_get_cpuinfo,
                .interface_version = XENPF_INTERFACE_VERSION,
                .u.pcpu_info.xen_cpuid = 0,
        };
        int ret = 0;

        /* Shouldn't need this as APIC is turned off for PV, and we only
         * get called on the bootup processor. But just in case. */
        if (!xen_initial_domain() || smp_processor_id())
                return 0;

        if (reg == APIC_LVR)
                return 0x10;

        if (reg != APIC_ID)
                return 0;

        ret = HYPERVISOR_dom0_op(&op);
        if (ret)
                return 0;

        return op.u.pcpu_info.apic_id << 24;
}

static void xen_apic_write(u32 reg, u32 val)
{
        /* Warn to see if there's any stray references */
        WARN_ON(1);
}
static unsigned long __init xen_get_max_pages(void)
{
        unsigned long max_pages, limit;
        domid_t domid = DOMID_SELF;
        long ret;

        limit = xen_get_pages_limit();
        max_pages = limit;

        /*
         * For the initial domain we use the maximum reservation as
         * the maximum page.
         *
         * For guest domains the current maximum reservation reflects
         * the current maximum rather than the static maximum. In this
         * case the e820 map provided to us will cover the static
         * maximum region.
         */
        if (xen_initial_domain()) {
                ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
                if (ret > 0)
                        max_pages = ret;
        }

        return min(max_pages, limit);
}
/*
 * pci_xen_swiotlb_detect - set xen_swiotlb to 1 if necessary
 *
 * This returns non-zero if we are forced to use xen_swiotlb (by the boot
 * option).
 */
int __init pci_xen_swiotlb_detect(void)
{
        if (!xen_pv_domain())
                return 0;

        /* If running as a PV guest, either iommu=soft or swiotlb=force will
         * activate this IOMMU. If running as PV privileged, activate it
         * regardless.
         */
        if ((xen_initial_domain() || swiotlb || swiotlb_force))
                xen_swiotlb = 1;

        /* If we are running under Xen, we MUST disable the native SWIOTLB.
         * Don't worry about swiotlb_force flag activating the native, as
         * the 'swiotlb' flag is the only one turning it on. */
        swiotlb = 0;

#ifdef CONFIG_X86_64
        /* pci_swiotlb_detect_4gb turns on native SWIOTLB if no_iommu == 0
         * (so no iommu=X command line override). Considering that PV
         * guests do not want the *native SWIOTLB* but only the Xen SWIOTLB,
         * it is not useful to us, so set no_iommu=1 here. */
        if (max_pfn > MAX_DMA32_PFN)
                no_iommu = 1;
#endif
        return xen_swiotlb;
}
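/*
 * The enable condition above reduces to a three-input predicate. A hedged,
 * standalone model: the parameter names mirror xen_initial_domain(), the
 * 'iommu=soft' swiotlb flag, and swiotlb_force, but here they are plain
 * ints fed in by the caller.
 */
#include <stdio.h>

static int xen_swiotlb_needed(int initial_domain, int swiotlb_soft,
                              int swiotlb_force)
{
        return initial_domain || swiotlb_soft || swiotlb_force;
}

int main(void)
{
        /* Dom0 always gets the Xen SWIOTLB; a plain PV domU only if
         * iommu=soft or swiotlb=force was given on the command line. */
        printf("dom0:            %d\n", xen_swiotlb_needed(1, 0, 0));
        printf("domU:            %d\n", xen_swiotlb_needed(0, 0, 0));
        printf("domU iommu=soft: %d\n", xen_swiotlb_needed(0, 1, 0));
        return 0;
}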
static int __init xen_hvc_init(void)
{
        struct hvc_struct *hp;
        struct hv_ops *ops;

        if (!xen_pv_domain())
                return -ENODEV;

        if (xen_initial_domain()) {
                ops = &dom0_hvc_ops;
                xencons_irq = bind_virq_to_irq(VIRQ_CONSOLE, 0);
        } else {
                if (!xen_start_info->console.domU.evtchn)
                        return -ENODEV;

                ops = &domU_hvc_ops;
                xencons_irq = bind_evtchn_to_irq(xen_start_info->console.domU.evtchn);
        }

        if (xencons_irq < 0)
                xencons_irq = 0; /* NO_IRQ */
        else
                irq_set_noprobe(xencons_irq);

        hp = hvc_alloc(HVC_COOKIE, xencons_irq, ops, 256);
        if (IS_ERR(hp))
                return PTR_ERR(hp);

        hvc = hp;

        console_pfn = mfn_to_pfn(xen_start_info->console.domU.mfn);

        return 0;
}
static int __init register_xen_amba_notifier(void)
{
        if (!xen_initial_domain() || acpi_disabled)
                return 0;

        return bus_register_notifier(&amba_bustype, &amba_device_nb);
}
static int __init register_xen_platform_notifier(void)
{
        if (!xen_initial_domain() || acpi_disabled)
                return 0;

        return bus_register_notifier(&platform_bus_type, &platform_device_nb);
}
int xen_stub_processor_init(void)
{
        if (!xen_initial_domain())
                return -ENODEV;

        /* just reserve space for Xen, to block the native driver from loading */
        return acpi_bus_register_driver(&xen_stub_processor_driver);
}
void omx_xenfront_exit(void)
{
        /* The frontend is never registered in dom0; nothing to unregister */
        if (xen_initial_domain())
                return;

        xenbus_unregister_driver(&omx_xenfront_driver);
        printk_inf("exit\n");
}
static void
xc_cnputc(struct consdev *dev, int c)
{
        if (xen_initial_domain())
                xc_cnputc_dom0(dev, c);
        else
                xc_cnputc_domu(dev, c);
}
void __init xen_init_apic(void)
{
        x86_io_apic_ops.read = xen_io_apic_read;

        /* On PV guests the APIC CPUID bit is disabled so none of the
         * routines end up executing. */
        if (!xen_initial_domain())
                apic = &xen_pv_apic;

        x86_platform.apic_post_init = xen_apic_check;
}
static ssize_t capabilities_read(struct file *file, char __user *buf,
                                 size_t size, loff_t *off)
{
        char *tmp = "";

        if (xen_initial_domain())
                tmp = "control_d\n";

        return simple_read_from_buffer(buf, size, off, tmp, strlen(tmp));
}
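/*
 * Userspace analog of the simple_read_from_buffer() semantics relied on
 * above: copy up to 'size' bytes from a source buffer starting at *off,
 * advancing *off, so repeated reads walk through the string and finally
 * return 0. memcpy stands in for copy_to_user(); read_from_buffer() is a
 * local illustration, not the kernel helper itself.
 */
#include <stdio.h>
#include <string.h>

static long read_from_buffer(char *buf, size_t size, long *off,
                             const char *from, size_t available)
{
        size_t pos = (size_t)*off, n;

        if (pos >= available)
                return 0;       /* EOF */
        n = available - pos;
        if (n > size)
                n = size;
        memcpy(buf, from + pos, n);
        *off += n;
        return (long)n;
}

int main(void)
{
        const char *caps = "control_d\n";       /* what dom0 reports */
        char buf[32] = "";
        long off = 0;
        long n = read_from_buffer(buf, sizeof(buf) - 1, &off,
                                  caps, strlen(caps));

        printf("read %ld bytes: %s", n, buf);
        return 0;
}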
static int __init xenkbd_init(void)
{
        if (!xen_domain())
                return -ENODEV;

        /* Nothing to do if running in dom0. */
        if (xen_initial_domain())
                return -ENODEV;

        return xenbus_register_frontend(&xenkbd_driver);
}
static int __init xenbus_probe_initcall(void)
{
        if (!xen_domain())
                return -ENODEV;

        if (xen_initial_domain() || xen_hvm_domain())
                return 0;

        xenbus_probe(NULL);

        return 0;
}
static int __init xenbus_backend_init(void)
{
        int err;

        if (!xen_initial_domain())
                return -ENODEV;

        err = misc_register(&xenbus_backend_dev);
        if (err)
                printk(KERN_ERR "Could not register xenbus backend device\n");

        return err;
}
static int __init xen_late_init_mcelog(void)
{
        /* Only DOM0 is responsible for MCE logging */
        if (xen_initial_domain()) {
                /* register character device /dev/mcelog for xen mcelog */
                if (misc_register(&xen_mce_chrdev_device))
                        return -ENODEV;
                return bind_virq_for_mce();
        }

        return -ENODEV;
}
static unsigned long __init xen_get_pages_limit(void)
{
        unsigned long limit;

#ifdef CONFIG_X86_32
        limit = GB(64) / PAGE_SIZE;
#else
        limit = MAXMEM / PAGE_SIZE;
        if (!xen_initial_domain() && xen_512gb_limit)
                limit = GB(512) / PAGE_SIZE;
#endif
        return limit;
}
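/*
 * Quick standalone check of the arithmetic above, assuming the usual 4 KiB
 * page size and a GB() macro that shifts by 30, which is what the kernel's
 * definitions amount to: 64 GB is 16,777,216 pages and the 512 GB domU cap
 * is 134,217,728 pages.
 */
#include <stdio.h>

#define GB(x)     ((unsigned long long)(x) << 30)
#define PAGE_SIZE 4096ULL

int main(void)
{
        printf("GB(64)/PAGE_SIZE  = %llu pages\n", GB(64) / PAGE_SIZE);
        printf("GB(512)/PAGE_SIZE = %llu pages\n", GB(512) / PAGE_SIZE);
        return 0;
}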
static u32 xen_apic_read(u32 reg)
{
        struct xen_platform_op op = {
                .cmd = XENPF_get_cpuinfo,
                .interface_version = XENPF_INTERFACE_VERSION,
                .u.pcpu_info.xen_cpuid = 0,
        };
        int ret = 0;

        /* Shouldn't need this as APIC is turned off for PV, and we only
         * get called on the bootup processor. But just in case. */
        if (!xen_initial_domain() || smp_processor_id())
                return 0;

        if (reg == APIC_LVR)
                return 0x10;
#ifdef CONFIG_X86_32
        if (reg == APIC_LDR)
                return SET_APIC_LOGICAL_ID(1UL << smp_processor_id());
#endif
        if (reg != APIC_ID)
                return 0;

        ret = HYPERVISOR_dom0_op(&op);
        if (ret)
                op.u.pcpu_info.apic_id = BAD_APICID;

        return op.u.pcpu_info.apic_id << 24;
}

static void xen_apic_write(u32 reg, u32 val)
{
        if (reg == APIC_LVTPC) {
                (void)pmu_apic_update(reg);
                return;
        }

        /* Warn to see if there's any stray references */
        WARN(1, "register: %x, value: %x\n", reg, val);
}

static u64 xen_apic_icr_read(void)
{
        return 0;
}

static void xen_apic_icr_write(u32 low, u32 id)
{
        /* Warn to see if there's any stray references */
        WARN_ON(1);
}
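/*
 * Why 'apic_id << 24': in xAPIC mode the APIC ID occupies bits 31:24 of the
 * APIC ID register, so the physical ID from the hypervisor must be shifted
 * into place. A minimal round-trip demonstration; pack/unpack are local
 * helper names, not kernel APIs.
 */
#include <stdio.h>

static unsigned int pack_apic_id(unsigned int apic_id)
{
        return apic_id << 24;           /* place ID in bits 31:24 */
}

static unsigned int unpack_apic_id(unsigned int reg)
{
        return (reg >> 24) & 0xff;      /* recover the 8-bit ID */
}

int main(void)
{
        unsigned int reg = pack_apic_id(3);

        printf("reg=0x%08x id=%u\n", reg, unpack_apic_id(reg)); /* id=3 */
        return 0;
}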
static void __init xen_fill_possible_map(void)
{
        int i, rc;

        if (xen_initial_domain())
                return;

        for (i = 0; i < nr_cpu_ids; i++) {
                rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
                if (rc >= 0) {
                        num_processors++;
                        set_cpu_possible(i, true);
                }
        }
}
static int xen_cons_init(void)
{
        struct hv_ops *ops;

        if (!xen_pv_domain())
                return 0;

        if (xen_initial_domain())
                ops = &dom0_hvc_ops;
        else
                ops = &domU_hvc_ops;

        hvc_instantiate(HVC_COOKIE, 0, ops);

        return 0;
}
static void __exit xen_acpi_processor_exit(void)
{
        if (!xen_initial_domain())
                return;

        acpi_processor_uninstall_hotplug_notify();

        acpi_bus_unregister_driver(&xen_acpi_processor_driver);

        /*
         * stub reserve space again to prevent any chance of native
         * driver loading.
         */
        xen_stub_processor_init();
}