/*
 * Tell the hypervisor how to contact us for event channel callbacks.
 *
 * Preference order:
 *  1. A per-vCPU upcall vector (set_percpu_callback), advertised to the
 *     toolstack via a fake HVM_PARAM_CALLBACK_IRQ value of 1.
 *  2. A global callback vector (HVM_CALLBACK_VECTOR).
 *  3. An emulated PCI/GSI device interrupt, derived from `dev`.
 *
 * Side effects: sets the globals xen_vector_callback_enabled and
 * xen_evtchn_needs_ack; panics if a required hypercall fails.
 */
void
xen_hvm_set_callback(device_t dev)
{
	struct xen_hvm_param xhp;
	int irq;

	/* Nothing to do if a vector callback is already registered. */
	if (xen_vector_callback_enabled)
		return;

	xhp.domid = DOMID_SELF;
	xhp.index = HVM_PARAM_CALLBACK_IRQ;
	if (xen_feature(XENFEAT_hvm_callback_vector) != 0) {
		int error;

		/* Try the per-vCPU upcall vector on vCPU 0 first. */
		error = set_percpu_callback(0);
		if (error == 0) {
			/*
			 * Per-vCPU delivery requires explicit acking of
			 * upcalls; remember that for the event channel code.
			 */
			xen_evtchn_needs_ack = true;
			/* Trick toolstack to think we are enlightened */
			xhp.value = 1;
		} else
			xhp.value = HVM_CALLBACK_VECTOR(IDT_EVTCHN);
		error = HYPERVISOR_hvm_op(HVMOP_set_param, &xhp);
		if (error == 0) {
			xen_vector_callback_enabled = 1;
			return;
		} else if (xen_evtchn_needs_ack)
			/*
			 * The per-vCPU callback was already installed, so
			 * there is no sane way to fall back; give up.
			 */
			panic("Unable to setup fake HVM param: %d", error);
		printf("Xen HVM callback vector registration failed (%d). "
		    "Falling back to emulated device interrupt\n", error);
	}
	xen_vector_callback_enabled = 0;
	if (dev == NULL) {
		/*
		 * Called from early boot or resume.
		 * xenpci will invoke us again later.
		 */
		return;
	}
	irq = pci_get_irq(dev);
	if (irq < 16) {
		/* Legacy IRQ: route the callback through the GSI. */
		xhp.value = HVM_CALLBACK_GSI(irq);
	} else {
		u_int slot;
		u_int pin;

		/* Otherwise identify the interrupt by PCI slot and INTx pin. */
		slot = pci_get_slot(dev);
		pin = pci_get_intpin(dev) - 1;
		xhp.value = HVM_CALLBACK_PCI_INTX(slot, pin);
	}
	if (HYPERVISOR_hvm_op(HVMOP_set_param, &xhp) != 0)
		panic("Can't set evtchn callback");
}
static void xen_hvm_exit_mmap(struct mm_struct *mm) { struct xen_hvm_pagetable_dying a; int rc; a.domid = DOMID_SELF; a.gpa = __pa(mm->pgd); rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a); WARN_ON_ONCE(rc < 0); }
static int hvm_get_param(int param_id, uint64_t *val) { struct xen_hvm_param xhp; xhp.domid = DOMID_SELF; xhp.index = param_id; if ((HYPERVISOR_hvm_op(HVMOP_get_param, &xhp) < 0)) return (-1); *val = xhp.value; return (0); }
static int set_percpu_callback(unsigned int vcpu) { struct xen_hvm_evtchn_upcall_vector vec; int error; vec.vcpu = vcpu; vec.vector = IDT_EVTCHN; error = HYPERVISOR_hvm_op(HVMOP_set_evtchn_upcall_vector, &vec); return (error != 0 ? xen_translate_error(error) : 0); }
static int is_pagetable_dying_supported(void) { struct xen_hvm_pagetable_dying a; int rc = 0; a.domid = DOMID_SELF; a.gpa = 0x00; rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a); if (rc < 0) { printk(KERN_DEBUG "HVMOP_pagetable_dying not supported\n"); return 0; } return 1; }
static int privcmd_HYPERVISOR_hvm_op(int cmd, void *arg) { int error; int size = 0; import_export_t arg_ie; uint32_t flags = IE_IMPORT; switch (cmd) { case HVMOP_set_param: case HVMOP_get_param: size = sizeof (struct xen_hvm_param); flags = IE_IMPEXP; break; case HVMOP_set_pci_intx_level: size = sizeof (struct xen_hvm_set_pci_intx_level); break; case HVMOP_set_isa_irq_level: size = sizeof (struct xen_hvm_set_isa_irq_level); break; case HVMOP_set_pci_link_route: size = sizeof (struct xen_hvm_set_pci_link_route); break; case HVMOP_track_dirty_vram: size = sizeof (struct xen_hvm_track_dirty_vram); break; case HVMOP_modified_memory: size = sizeof (struct xen_hvm_modified_memory); break; case HVMOP_set_mem_type: size = sizeof (struct xen_hvm_set_mem_type); break; default: #ifdef DEBUG printf("unrecognized HVM op 0x%x\n", cmd); #endif return (-X_EINVAL); } error = import_buffer(&arg_ie, arg, NULL, size, flags); if (error == 0) error = HYPERVISOR_hvm_op(cmd, arg_ie.ie_kaddr); export_buffer(&arg_ie, &error); return (error); }
int hvm_get_parameter(int idx, uint64_t *value) { int r; struct xen_hvm_param xhv; xhv.domid = DOMID_SELF; xhv.index = idx; r = HYPERVISOR_hvm_op(HVMOP_get_param, &xhv); if (r < 0) { printk("Cannot get hvm parameter %d: %d!\n", idx, r); } else { *value = xhv.value; } return r; }
/*
 * Tell the hypervisor how to contact us for event channel callbacks.
 */
static void
xenpci_set_callback(device_t dev)
{
	struct xen_hvm_param xhp;
	uint64_t cb;
	int line = pci_get_irq(dev);

	if (line < 16) {
		/* Legacy GSI: pass the IRQ number through unchanged. */
		cb = line;
	} else {
		/*
		 * PCI INTx encoding: pin in bits 0-1, slot starting at
		 * bit 11, and the INTx type flag in bit 56.
		 */
		cb = (pci_get_intpin(dev) - 1) & 3;
		cb |= pci_get_slot(dev) << 11;
		cb |= 1ull << 56;
	}

	xhp.domid = DOMID_SELF;
	xhp.index = HVM_PARAM_CALLBACK_IRQ;
	xhp.value = cb;
	if (HYPERVISOR_hvm_op(HVMOP_set_param, &xhp))
		panic("Can't set evtchn callback");
}
/* * This function is used in two contexts: * - the kdump kernel has to check whether a pfn of the crashed kernel * was a ballooned page. vmcore is using this function to decide * whether to access a pfn of the crashed kernel. * - the kexec kernel has to check whether a pfn was ballooned by the * previous kernel. If the pfn is ballooned, handle it properly. * Returns 0 if the pfn is not backed by a RAM page, the caller may * handle the pfn special in this case. */ static int xen_oldmem_pfn_is_ram(unsigned long pfn) { struct xen_hvm_get_mem_type a = { .domid = DOMID_SELF, .pfn = pfn, }; int ram; if (HYPERVISOR_hvm_op(HVMOP_get_mem_type, &a)) return -ENXIO; switch (a.mem_type) { case HVMMEM_mmio_dm: ram = 0; break; case HVMMEM_ram_rw: case HVMMEM_ram_ro: default: ram = 1; break; } return ram; }