static void gnttab_request_version(void)
{
	int rc;
	struct gnttab_set_version gsv;

	if (xen_hvm_domain())
		gsv.version = 1;
	else
		gsv.version = 2;
	rc = HYPERVISOR_grant_table_op(GNTTABOP_set_version, &gsv, 1);
	if (rc == 0 && gsv.version == 2) {
		grant_table_version = 2;
		gnttab_interface = &gnttab_v2_ops;
	} else if (grant_table_version == 2) {
		/*
		 * If we've already used version 2 features,
		 * but then suddenly discover that they're not
		 * available (e.g. migrating to an older
		 * version of Xen), almost unbounded badness
		 * can happen.
		 */
		panic("we need grant tables version 2, but only version 1 is available");
	} else {
		grant_table_version = 1;
		gnttab_interface = &gnttab_v1_ops;
	}
	printk(KERN_INFO "Grant tables using version %d layout.\n",
	       grant_table_version);
}
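/*
 * Illustrative sketch (not part of the source): the gnttab_interface pointer
 * selected above presumably refers to an ops table with one instance per
 * grant-table ABI version. Only map_frames is confirmed by the gnttab_map()
 * snippet further below; the other fields here are assumptions.
 */
struct gnttab_ops_sketch {
	int (*map_frames)(xen_pfn_t *frames, unsigned int nr_gframes);
	void (*unmap_frames)(void);
	void (*update_entry)(grant_ref_t ref, domid_t domid,
			     unsigned long frame, unsigned flags);
};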
static bool __xen_has_pv_device(int state)
{
	/* HVM domains might or might not have PV devices, depending on unplug state. */
	if (xen_hvm_domain() && (xen_platform_pci_unplug & state))
		return true;

	return xen_has_pv_devices();
}
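/*
 * Hedged usage sketch: callers presumably wrap this helper with the unplug
 * mask for one device class, along the lines of the following. The
 * XEN_UNPLUG_* flag names are assumptions here, not taken from the source.
 */
bool xen_has_pv_disk_devices(void)
{
	return __xen_has_pv_device(XEN_UNPLUG_ALL_IDE_DISKS | XEN_UNPLUG_ALL);
}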
void xen_vcpu_setup(int cpu)
{
	struct vcpu_register_vcpu_info info;
	int err;
	struct vcpu_info *vcpup;

	BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);

	/*
	 * This path is called twice on PVHVM - first during bootup via
	 * smp_init -> xen_hvm_cpu_notify, and then if the VCPU is being
	 * hotplugged: cpu_up -> xen_hvm_cpu_notify.
	 * As we can only do the VCPUOP_register_vcpu_info once, let's
	 * not overwrite its result.
	 *
	 * For PV it is called during restore (xen_vcpu_restore) and bootup
	 * (xen_setup_vcpu_info_placement). The hotplug mechanism does not
	 * use this function.
	 */
	if (xen_hvm_domain()) {
		if (per_cpu(xen_vcpu, cpu) == &per_cpu(xen_vcpu_info, cpu))
			return;
	}
	if (xen_vcpu_nr(cpu) < MAX_VIRT_CPUS)
		per_cpu(xen_vcpu, cpu) =
			&HYPERVISOR_shared_info->vcpu_info[xen_vcpu_nr(cpu)];

	if (!xen_have_vcpu_info_placement) {
		if (cpu >= MAX_VIRT_CPUS)
			clamp_max_cpus();
		return;
	}

	vcpup = &per_cpu(xen_vcpu_info, cpu);
	info.mfn = arbitrary_virt_to_mfn(vcpup);
	info.offset = offset_in_page(vcpup);

	/*
	 * Check to see if the hypervisor will put the vcpu_info
	 * structure where we want it, which allows direct access via
	 * a percpu variable.
	 *
	 * N.B. This hypercall can _only_ be called once per CPU.
	 * Subsequent calls will error out with -EINVAL. This is due to
	 * the fact that the hypervisor has no unregister variant and
	 * this hypercall does not allow overwriting info.mfn and
	 * info.offset.
	 */
	err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, xen_vcpu_nr(cpu),
				 &info);

	if (err) {
		printk(KERN_DEBUG "register_vcpu_info failed: err=%d\n", err);
		xen_have_vcpu_info_placement = 0;
		clamp_max_cpus();
	} else {
		/*
		 * This cpu is using the registered vcpu info, even if
		 * later ones fail to.
		 */
		per_cpu(xen_vcpu, cpu) = vcpup;
	}
}
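/*
 * Hedged sketch of the clamp_max_cpus() helper used above; its definition
 * is not included in the source. Presumably it caps further CPU bring-up
 * once vcpu_info placement is unavailable beyond the shared_info array:
 */
static void clamp_max_cpus(void)
{
#ifdef CONFIG_SMP
	if (setup_max_cpus > MAX_VIRT_CPUS)
		setup_max_cpus = MAX_VIRT_CPUS;
#endif
}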
static int acpi_register_gsi_xen_hvm(struct device *dev, u32 gsi,
				     int trigger, int polarity)
{
	if (!xen_hvm_domain())
		return -1;

	return xen_register_pirq(gsi, -1 /* no GSI override */, trigger,
				 false /* no mapping of GSI to PIRQ */);
}
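/*
 * Hedged sketch of how this override is presumably installed on a Xen HVM
 * guest: the x86 __acpi_register_gsi hook is pointed at the function above.
 * The init-function name and guard condition are assumptions.
 */
static int __init pci_xen_hvm_init(void)
{
	if (!xen_feature(XENFEAT_hvm_pirqs))
		return 0;
#ifdef CONFIG_ACPI
	/* Change only how GSIs get registered, not the ACPI delivery model. */
	__acpi_register_gsi = acpi_register_gsi_xen_hvm;
#endif
	return 0;
}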
static int __init boot_wait_for_devices(void)
{
	if (xen_hvm_domain() && !xen_platform_pci_unplug)
		return -ENODEV;

	ready_to_wait_for_devices = 1;
	wait_for_devices(NULL);
	return 0;
}
static int __init xenkbd_init(void)
{
	if (!xen_domain() || xen_hvm_domain())
		return -ENODEV;

	/* Nothing to do if running in dom0. */
	if (xen_initial_domain())
		return -ENODEV;

	return xenbus_register_frontend(&xenkbd_driver);
}
static int __init xenbus_probe_initcall(void)
{
	if (!xen_domain())
		return -ENODEV;

	if (xen_initial_domain() || xen_hvm_domain())
		return 0;

	xenbus_probe(NULL);
	return 0;
}
static int __gnttab_init(void)
{
	/* Delay grant-table initialization in the PV on HVM case */
	if (xen_hvm_domain())
		return 0;

	if (!xen_pv_domain())
		return -ENODEV;

	return gnttab_init();
}
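/*
 * Hedged note: a function of this shape is typically registered as an early
 * initcall, e.g. (assumed, not shown in the source):
 *
 *	core_initcall(__gnttab_init);
 *
 * which is why the PV-on-HVM case simply returns 0 here, deferring the real
 * gnttab_init() until the grant frames have been located later in boot.
 */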
bool xen_hvm_need_lapic(void)
{
	if (xen_nopv)
		return false;
	if (xen_pv_domain())
		return false;
	if (!xen_hvm_domain())
		return false;
	if (xen_feature(XENFEAT_hvm_pirqs) && xen_have_vector_callback)
		return false;
	return true;
}
static void
xendebug_identify(driver_t *driver, device_t parent)
{
	KASSERT(xen_domain(),
	    ("Trying to add Xen debug device to non-xen guest"));

	if (xen_hvm_domain() && !xen_vector_callback_enabled)
		return;

	if (BUS_ADD_CHILD(parent, 0, "debug", 0) == NULL)
		panic("Unable to add Xen debug device.");
}
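/*
 * Hedged sketch of the FreeBSD newbus glue that would hook the identify
 * routine above into a driver; the method-table and driver names are
 * assumptions, and the real driver would also declare its other methods
 * and a DRIVER_MODULE() registration.
 */
static device_method_t xendebug_methods[] = {
	DEVMETHOD(device_identify, xendebug_identify),
	DEVMETHOD_END
};

static driver_t xendebug_driver = {
	"debug",
	xendebug_methods,
	0,
};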
static int __init xenbus_init(void)
{
	int err = 0;

	if (!xen_domain())
		return -ENODEV;

	if (xen_hvm_domain()) {
		uint64_t v = 0;

		err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v);
		if (err)
			goto out_error;
		xen_store_evtchn = (int)v;

		err = hvm_get_parameter(HVM_PARAM_STORE_PFN, &v);
		if (err)
			goto out_error;
		xen_store_mfn = (unsigned long)v;
		xen_store_interface =
			ioremap(xen_store_mfn << PAGE_SHIFT, PAGE_SIZE);
	} else {
		xen_store_evtchn = xen_start_info->store_evtchn;
		xen_store_mfn = xen_start_info->store_mfn;
		if (xen_store_evtchn)
			xenstored_ready = 1;
		else {
			err = xenstored_local_init();
			if (err)
				goto out_error;
		}
		xen_store_interface = mfn_to_virt(xen_store_mfn);
	}

	/* Initialize the interface to xenstore. */
	err = xs_init();
	if (err) {
		printk(KERN_WARNING
		       "XENBUS: Error initializing xenstore comms: %i\n", err);
		goto out_error;
	}

#ifdef CONFIG_XEN_COMPAT_XENFS
	/*
	 * Create xenfs mountpoint in /proc for compatibility with
	 * utilities that expect to find "xenbus" under "/proc/xen".
	 */
	proc_mkdir("xen", NULL);
#endif

out_error:
	return err;
}
static void xen_cpu_die(unsigned int cpu)
{
	while (xen_pv_domain() && HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) {
		current->state = TASK_UNINTERRUPTIBLE;
		schedule_timeout(HZ/10);
	}
	unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
	unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
	unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
	unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL);
	if (!xen_hvm_domain())
		unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL);
	xen_uninit_lock_cpu(cpu);
	xen_teardown_timer(cpu);
}
static int xenbus_resume_cb(struct notifier_block *nb,
			    unsigned long action, void *data)
{
	int err = 0;

	if (xen_hvm_domain()) {
		uint64_t v = 0;

		err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v);
		if (!err && v)
			xen_store_evtchn = v;
		else
			pr_warn("Cannot update xenstore event channel: %d\n",
				err);
	} else
		xen_store_evtchn = xen_start_info->store_evtchn;

	return err;
}
static int increase_reservation(unsigned long nr_pages)
{
	unsigned long pfn, i, flags;
	struct page *page;
	long rc;
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = 0,
		.domid = DOMID_SELF
	};

	if (nr_pages > ARRAY_SIZE(frame_list))
		nr_pages = ARRAY_SIZE(frame_list);

	spin_lock_irqsave(&balloon_lock, flags);

	page = balloon_first_page();
	for (i = 0; i < nr_pages; i++) {
		BUG_ON(page == NULL);
		frame_list[i] = page_to_pfn(page);
		page = balloon_next_page(page);
	}

	set_xen_guest_handle(reservation.extent_start, frame_list);
	reservation.nr_extents = nr_pages;
	rc = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation);
	if (rc < 0)
		goto out;

	for (i = 0; i < rc; i++) {
		page = balloon_retrieve();
		BUG_ON(page == NULL);

		pfn = page_to_pfn(page);
		BUG_ON(!xen_feature(XENFEAT_auto_translated_physmap) &&
		       phys_to_machine_mapping_valid(pfn));

		set_phys_to_machine(pfn, frame_list[i]);

		/* Link back into the page tables if not highmem. */
#ifdef CONFIG_PVM
		if (!xen_hvm_domain() && pfn < max_low_pfn) {
			int ret;
			ret = HYPERVISOR_update_va_mapping(
				(unsigned long)__va(pfn << PAGE_SHIFT),
				mfn_pte(frame_list[i], PAGE_KERNEL),
				0);
			BUG_ON(ret);
		}
#endif

		/* Relinquish the page back to the allocator. */
		ClearPageReserved(page);
		init_page_count(page);
		__free_page(page);
	}

	balloon_stats.current_pages += rc;
	if (old_totalram_pages + rc < totalram_pages) {
		printk(KERN_INFO "old_totalram=%luKB, totalram_pages=%luKB\n",
		       old_totalram_pages*4, totalram_pages*4);
		balloon_stats.current_pages = totalram_pages + totalram_bias;
		printk(KERN_INFO "when ballooning, the mem online! totalram=%luKB, current=%luKB\n",
		       totalram_pages*4, balloon_stats.current_pages*4);
	}
	old_totalram_pages = totalram_pages;

out:
	spin_unlock_irqrestore(&balloon_lock, flags);
	return rc < 0 ? rc : rc != nr_pages;
}

static int decrease_reservation(unsigned long nr_pages)
{
	unsigned long pfn, i, flags;
	struct page *page;
	int need_sleep = 0;
	int ret;
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = 0,
		.domid = DOMID_SELF
	};

	if (nr_pages > ARRAY_SIZE(frame_list))
		nr_pages = ARRAY_SIZE(frame_list);

	for (i = 0; i < nr_pages; i++) {
		page = alloc_page(GFP_BALLOON);
		if (page == NULL) {
			nr_pages = i;
			need_sleep = 1;
			break;
		}

		pfn = page_to_pfn(page);
		frame_list[i] = pfn_to_mfn(pfn);

		scrub_page(page);

		if (!xen_hvm_domain() && !PageHighMem(page)) {
			ret = HYPERVISOR_update_va_mapping(
				(unsigned long)__va(pfn << PAGE_SHIFT),
				__pte_ma(0), 0);
			BUG_ON(ret);
		}
	}

	/* Ensure that ballooned highmem pages don't have kmaps. */
#ifdef CONFIG_PVM
	kmap_flush_unused();
	flush_tlb_all();
#endif

	spin_lock_irqsave(&balloon_lock, flags);

	/* No more mappings: invalidate P2M and add to balloon. */
	for (i = 0; i < nr_pages; i++) {
		pfn = mfn_to_pfn(frame_list[i]);
		set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
		balloon_append(pfn_to_page(pfn));
	}

	set_xen_guest_handle(reservation.extent_start, frame_list);
	reservation.nr_extents = nr_pages;
	ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
	BUG_ON(ret != nr_pages);

	balloon_stats.current_pages -= nr_pages;
	if (old_totalram_pages < totalram_pages + nr_pages) {
		printk(KERN_INFO "old_totalram=%luKB, totalram_pages=%luKB\n",
		       old_totalram_pages*4, totalram_pages*4);
		balloon_stats.current_pages = totalram_pages + totalram_bias;
		printk(KERN_INFO "when ballooning, the mem online! totalram=%luKB, current=%luKB\n",
		       totalram_pages*4, balloon_stats.current_pages*4);
	}
	old_totalram_pages = totalram_pages;

	spin_unlock_irqrestore(&balloon_lock, flags);

	return need_sleep;
}

/*
 * We avoid multiple worker processes conflicting via the balloon mutex.
 * We may of course race updates of the target counts (which are protected
 * by the balloon lock), or with changes to the Xen hard limit, but we will
 * recover from these in time.
 */
static void balloon_process(struct work_struct *work)
{
	int need_sleep = 0;
	long credit;
	long total_increase = 0;
	char buffer[16];

	mutex_lock(&balloon_mutex);
	printk(KERN_INFO "totalram_pages=%luKB, current_pages=%luKB, totalram_bias=%luKB\n",
	       totalram_pages*4, balloon_stats.current_pages*4, totalram_bias*4);

	if (totalram_pages > old_totalram_pages) {
		/* TODO: we only know that totalram_pages will increase. */
		total_increase = (totalram_pages - old_totalram_pages) % GB2PAGE;
		if (totalram_bias > total_increase)
			totalram_bias = totalram_bias - total_increase;
		balloon_stats.current_pages = totalram_pages + totalram_bias;
		old_totalram_pages = totalram_pages;
	}

	printk(KERN_INFO "totalram_pages=%luKB, current_pages=%luKB, totalram_bias=%luKB, total_increase=%ld\n",
	       totalram_pages*4, balloon_stats.current_pages*4,
	       totalram_bias*4, total_increase*4);
	xenbus_write(XBT_NIL, "control/uvp", "Balloon_flag", "1");

	do {
		credit = current_target() - balloon_stats.current_pages;
		if (credit > 0)
			need_sleep = (increase_reservation(credit) != 0);
		if (credit < 0)
			need_sleep = (decrease_reservation(-credit) != 0);

#ifndef CONFIG_PREEMPT
		if (need_resched())
			schedule();
#endif
	} while ((credit != 0) && !need_sleep);

	/* Schedule more work if there is some still to be done. */
	if (current_target() != balloon_stats.current_pages) {
		mod_timer(&balloon_timer, jiffies + HZ);
		sprintf(buffer, "%lu",
			balloon_stats.current_pages << (PAGE_SHIFT - 10));
		xenbus_write(XBT_NIL, "memory", "target", buffer);
	}
	xenbus_write(XBT_NIL, "control/uvp", "Balloon_flag", "0");

	mutex_unlock(&balloon_mutex);
}
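/*
 * Hedged sketch of the current_target() helper that balloon_process() polls;
 * it is not part of the source. Presumably it clamps the user-requested
 * target to the pages this domain can actually reach (current plus the
 * ballooned-out low/high pools):
 */
static unsigned long current_target(void)
{
	unsigned long target = balloon_stats.target_pages;

	target = min(target,
		     balloon_stats.current_pages +
		     balloon_stats.balloon_low +
		     balloon_stats.balloon_high);

	return target;
}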
static void do_suspend(void)
{
	int err;
	struct suspend_info si;

	shutting_down = SHUTDOWN_SUSPEND;

#ifdef CONFIG_PREEMPT
	/*
	 * If the kernel is preemptible, we need to freeze all the
	 * processes to prevent them from being in the middle of a
	 * pagetable update during suspend.
	 */
	err = freeze_processes();
	if (err) {
		printk(KERN_ERR "xen suspend: freeze failed %d\n", err);
		goto out;
	}
#endif

	err = dpm_suspend_start(PMSG_FREEZE);
	if (err) {
		printk(KERN_ERR "xen suspend: dpm_suspend_start %d\n", err);
		goto out_thaw;
	}

	printk(KERN_DEBUG "suspending xenstore...\n");
	xs_suspend();

	err = dpm_suspend_noirq(PMSG_FREEZE);
	if (err) {
		printk(KERN_ERR "dpm_suspend_noirq failed: %d\n", err);
		goto out_resume;
	}

	si.cancelled = 1;

	if (xen_hvm_domain()) {
		si.arg = 0UL;
		si.pre = NULL;
		si.post = &xen_hvm_post_suspend;
	} else {
		si.arg = virt_to_mfn(xen_start_info);
		si.pre = &xen_pre_suspend;
		si.post = &xen_post_suspend;
	}

	err = stop_machine(xen_suspend, &si, cpumask_of(0));

	dpm_resume_noirq(si.cancelled ? PMSG_THAW : PMSG_RESTORE);

	if (err) {
		printk(KERN_ERR "failed to start xen_suspend: %d\n", err);
		si.cancelled = 1;
	}

out_resume:
	if (!si.cancelled) {
		xen_arch_resume();
		xs_resume();
	} else
		xs_suspend_cancel();

	dpm_resume_end(si.cancelled ? PMSG_THAW : PMSG_RESTORE);

	/* Make sure timer events get retriggered on all CPUs */
	clock_was_set();

out_thaw:
#ifdef CONFIG_PREEMPT
	thaw_processes();
out:
#endif
	shutting_down = SHUTDOWN_INVALID;
}
static void do_suspend(void)
{
	int err;
	struct suspend_info si;

	shutting_down = SHUTDOWN_SUSPEND;

	err = freeze_processes();
	if (err) {
		pr_err("%s: freeze failed %d\n", __func__, err);
		goto out;
	}

	err = dpm_suspend_start(PMSG_FREEZE);
	if (err) {
		pr_err("%s: dpm_suspend_start %d\n", __func__, err);
		goto out_thaw;
	}

	printk(KERN_DEBUG "suspending xenstore...\n");
	xs_suspend();

	err = dpm_suspend_end(PMSG_FREEZE);
	if (err) {
		pr_err("dpm_suspend_end failed: %d\n", err);
		si.cancelled = 0;
		goto out_resume;
	}

	si.cancelled = 1;

	if (xen_hvm_domain()) {
		si.arg = 0UL;
		si.pre = NULL;
		si.post = &xen_hvm_post_suspend;
	} else {
		si.arg = virt_to_mfn(xen_start_info);
		si.pre = &xen_pre_suspend;
		si.post = &xen_post_suspend;
	}

	err = stop_machine(xen_suspend, &si, cpumask_of(0));

	dpm_resume_start(si.cancelled ? PMSG_THAW : PMSG_RESTORE);

	if (err) {
		pr_err("failed to start xen_suspend: %d\n", err);
		si.cancelled = 1;
	}

out_resume:
	if (!si.cancelled) {
		xen_arch_resume();
		xs_resume();
	} else
		xs_suspend_cancel();

	dpm_resume_end(si.cancelled ? PMSG_THAW : PMSG_RESTORE);

out_thaw:
	thaw_processes();
out:
	shutting_down = SHUTDOWN_INVALID;
}
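/*
 * Hedged sketch of the suspend_info structure both do_suspend() variants
 * fill in; its definition is not part of the source. The field types are
 * inferred from how si.cancelled, si.arg, si.pre and si.post are used.
 */
struct suspend_info {
	int cancelled;
	unsigned long arg;		/* extra hypercall argument (start_info MFN for PV) */
	void (*pre)(void);		/* executed just before suspending */
	void (*post)(int cancelled);	/* executed after suspend/resume */
};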
static int __init xenbus_init(void)
{
	int err = 0;
	uint64_t v = 0;

	xen_store_domain_type = XS_UNKNOWN;

	if (!xen_domain())
		return -ENODEV;

	xenbus_ring_ops_init();

	if (xen_pv_domain())
		xen_store_domain_type = XS_PV;
	if (xen_hvm_domain())
		xen_store_domain_type = XS_HVM;
	if (xen_hvm_domain() && xen_initial_domain())
		xen_store_domain_type = XS_LOCAL;
	if (xen_pv_domain() && !xen_start_info->store_evtchn)
		xen_store_domain_type = XS_LOCAL;
	if (xen_pv_domain() && xen_start_info->store_evtchn)
		xenstored_ready = 1;

	switch (xen_store_domain_type) {
	case XS_LOCAL:
		err = xenstored_local_init();
		if (err)
			goto out_error;
		xen_store_interface = gfn_to_virt(xen_store_gfn);
		break;
	case XS_PV:
		xen_store_evtchn = xen_start_info->store_evtchn;
		xen_store_gfn = xen_start_info->store_mfn;
		xen_store_interface = gfn_to_virt(xen_store_gfn);
		break;
	case XS_HVM:
		err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v);
		if (err)
			goto out_error;
		xen_store_evtchn = (int)v;
		err = hvm_get_parameter(HVM_PARAM_STORE_PFN, &v);
		if (err)
			goto out_error;
		xen_store_gfn = (unsigned long)v;
		xen_store_interface =
			xen_remap(xen_store_gfn << XEN_PAGE_SHIFT,
				  XEN_PAGE_SIZE);
		break;
	default:
		pr_warn("Xenstore state unknown\n");
		break;
	}

	/* Initialize the interface to xenstore. */
	err = xs_init();
	if (err) {
		pr_warn("Error initializing xenstore comms: %i\n", err);
		goto out_error;
	}

	if ((xen_store_domain_type != XS_LOCAL) &&
	    (xen_store_domain_type != XS_UNKNOWN))
		xen_resume_notifier_register(&xenbus_resume_nb);

#ifdef CONFIG_XEN_COMPAT_XENFS
	/*
	 * Create xenfs mountpoint in /proc for compatibility with
	 * utilities that expect to find "xenbus" under "/proc/xen".
	 */
	proc_mkdir("xen", NULL);
#endif

out_error:
	return err;
}
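/*
 * Hedged sketch of the xenstore domain-type enumeration used above; the
 * definition is not included in the source, so the exact values are
 * assumptions inferred from the switch cases.
 */
enum xenstore_init {
	XS_UNKNOWN = 0,
	XS_PV,
	XS_HVM,
	XS_LOCAL,
};
static enum xenstore_init xen_store_domain_type;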
static int __init xenbus_init(void)
{
	int err = 0;

	DPRINTK("");

	err = -ENODEV;
	if (!xen_domain())
		goto out_error;

	/* Register ourselves with the kernel bus subsystem */
	err = bus_register(&xenbus_frontend.bus);
	if (err)
		goto out_error;

	err = xenbus_backend_bus_register();
	if (err)
		goto out_unreg_front;

	/*
	 * Domain0 doesn't have a store_evtchn or store_mfn yet.
	 */
	if (xen_initial_domain()) {
		/* dom0 not yet supported */
	} else {
		if (xen_hvm_domain()) {
			uint64_t v = 0;

			err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v);
			if (err)
				goto out_unreg_back;
			xen_store_evtchn = (int)v;

			err = hvm_get_parameter(HVM_PARAM_STORE_PFN, &v);
			if (err)
				goto out_unreg_back;
			xen_store_mfn = (unsigned long)v;
			xen_store_interface =
				ioremap(xen_store_mfn << PAGE_SHIFT,
					PAGE_SIZE);
		} else {
			xen_store_evtchn = xen_start_info->store_evtchn;
			xen_store_mfn = xen_start_info->store_mfn;
			xen_store_interface = mfn_to_virt(xen_store_mfn);
			xenstored_ready = 1;
		}
	}

	/* Initialize the interface to xenstore. */
	err = xs_init();
	if (err) {
		printk(KERN_WARNING
		       "XENBUS: Error initializing xenstore comms: %i\n", err);
		goto out_unreg_back;
	}

#ifdef CONFIG_XEN_COMPAT_XENFS
	/*
	 * Create xenfs mountpoint in /proc for compatibility with
	 * utilities that expect to find "xenbus" under "/proc/xen".
	 */
	proc_mkdir("xen", NULL);
#endif

	return 0;

out_unreg_back:
	xenbus_backend_bus_unregister();

out_unreg_front:
	bus_unregister(&xenbus_frontend.bus);

out_error:
	return err;
}
static int __init xenbus_init(void)
{
	int err = 0;
	enum xenstore_init usage = UNKNOWN;
	uint64_t v = 0;

	if (!xen_domain())
		return -ENODEV;

	xenbus_ring_ops_init();

	if (xen_pv_domain())
		usage = PV;
	if (xen_hvm_domain())
		usage = HVM;
	if (xen_hvm_domain() && xen_initial_domain())
		usage = LOCAL;
	if (xen_pv_domain() && !xen_start_info->store_evtchn)
		usage = LOCAL;
	if (xen_pv_domain() && xen_start_info->store_evtchn)
		xenstored_ready = 1;

	switch (usage) {
	case LOCAL:
		err = xenstored_local_init();
		if (err)
			goto out_error;
		xen_store_interface = mfn_to_virt(xen_store_mfn);
		break;
	case PV:
		xen_store_evtchn = xen_start_info->store_evtchn;
		xen_store_mfn = xen_start_info->store_mfn;
		xen_store_interface = mfn_to_virt(xen_store_mfn);
		break;
	case HVM:
		err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v);
		if (err)
			goto out_error;
		xen_store_evtchn = (int)v;
		err = hvm_get_parameter(HVM_PARAM_STORE_PFN, &v);
		if (err)
			goto out_error;
		xen_store_mfn = (unsigned long)v;
		xen_store_interface =
			xen_remap(xen_store_mfn << PAGE_SHIFT, PAGE_SIZE);
		break;
	default:
		pr_warn("Xenstore state unknown\n");
		break;
	}

	/* Initialize the interface to xenstore. */
	err = xs_init();
	if (err) {
		printk(KERN_WARNING
		       "XENBUS: Error initializing xenstore comms: %i\n", err);
		goto out_error;
	}

#ifdef CONFIG_XEN_COMPAT_XENFS
	/*
	 * Create xenfs mountpoint in /proc for compatibility with
	 * utilities that expect to find "xenbus" under "/proc/xen".
	 */
	proc_mkdir("xen", NULL);
#endif

out_error:
	return err;
}
static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
{
	struct gnttab_setup_table setup;
	unsigned long start_gpfn = 0;
	xen_pfn_t *frames;
	unsigned int nr_gframes = end_idx + 1;
	int rc;

	if (xen_hvm_domain() || xen_feature(XENFEAT_auto_translated_physmap)) {
		struct xen_add_to_physmap xatp;
		unsigned int i = end_idx;

		rc = 0;
		if (xen_hvm_domain())
			start_gpfn = xen_hvm_resume_frames >> PAGE_SHIFT;
		/*
		 * Loop backwards, so that the first hypercall has the largest
		 * index, ensuring that the table will grow only once.
		 */
		do {
			xatp.domid = DOMID_SELF;
			xatp.idx = i;
			xatp.space = XENMAPSPACE_grant_table;
			if (xen_hvm_domain())
				xatp.gpfn = start_gpfn + i;
			else
				xatp.gpfn = pvh_get_grant_pfn(i);
			rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp);
			if (rc != 0) {
				printk(KERN_WARNING
				       "grant table add_to_physmap failed, err=%d\n",
				       rc);
				break;
			}
		} while (i-- > start_idx);

		return rc;
	}

	/*
	 * No need for kzalloc as the array is fully initialized by the
	 * following GNTTABOP_setup_table hypercall.
	 */
	frames = kmalloc(nr_gframes * sizeof(*frames), GFP_ATOMIC);
	if (!frames)
		return -ENOMEM;

	setup.dom = DOMID_SELF;
	setup.nr_frames = nr_gframes;
	set_xen_guest_handle(setup.frame_list, frames);

	rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1);
	if (rc == -ENOSYS) {
		kfree(frames);
		return -ENOSYS;
	}

	BUG_ON(rc || setup.status);

	rc = gnttab_interface->map_frames(frames, nr_gframes);

	kfree(frames);

	return rc;
}
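/*
 * Hedged usage note: resume/init paths would presumably call gnttab_map()
 * over the whole currently configured window, along the lines of
 *
 *	return gnttab_map(0, nr_grant_frames - 1);
 *
 * while an expansion path would map only the newly added tail, e.g.
 * gnttab_map(nr_grant_frames, nr_grant_frames + extra - 1). Neither caller
 * is shown in the source; both are assumptions.
 */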
static int xen_smp_intr_init(unsigned int cpu)
{
	int rc;
	const char *resched_name, *callfunc_name, *debug_name;

	resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
				    cpu,
				    xen_reschedule_interrupt,
				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				    resched_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_resched_irq, cpu) = rc;

	callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
				    cpu,
				    xen_call_function_interrupt,
				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_callfunc_irq, cpu) = rc;

	debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
	rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt,
				     IRQF_DISABLED | IRQF_PERCPU | IRQF_NOBALANCING,
				     debug_name, NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_debug_irq, cpu) = rc;

	callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
				    cpu,
				    xen_call_function_single_interrupt,
				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_callfuncsingle_irq, cpu) = rc;

	/*
	 * The IRQ worker on PVHVM goes through the native path and uses the
	 * IPI mechanism.
	 */
	if (xen_hvm_domain())
		return 0;

	callfunc_name = kasprintf(GFP_KERNEL, "irqwork%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_IRQ_WORK_VECTOR,
				    cpu,
				    xen_irq_work_interrupt,
				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_irq_work, cpu) = rc;

	return 0;

fail:
	if (per_cpu(xen_resched_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
	if (per_cpu(xen_callfunc_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
	if (per_cpu(xen_debug_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
	if (per_cpu(xen_callfuncsingle_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu),
				       NULL);
	if (xen_hvm_domain())
		return rc;
	if (per_cpu(xen_irq_work, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL);

	return rc;
}
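/*
 * Hedged note: the "per_cpu(...) >= 0" guards in the fail path above only
 * work if the per-cpu IRQ numbers start out negative. Definitions along
 * these lines are assumed elsewhere in the file (not shown in the source):
 */
static DEFINE_PER_CPU(int, xen_resched_irq) = -1;
static DEFINE_PER_CPU(int, xen_callfunc_irq) = -1;
static DEFINE_PER_CPU(int, xen_callfuncsingle_irq) = -1;
static DEFINE_PER_CPU(int, xen_debug_irq) = -1;
static DEFINE_PER_CPU(int, xen_irq_work) = -1;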