static int xen_smp_intr_init(unsigned int cpu)
{
	int rc;
	const char *resched_name, *callfunc_name, *debug_name;

	resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
				    cpu,
				    xen_reschedule_interrupt,
				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				    resched_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(resched_irq, cpu) = rc;

	callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
				    cpu,
				    xen_call_function_interrupt,
				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(callfunc_irq, cpu) = rc;

	debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
	rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt,
				     IRQF_DISABLED | IRQF_PERCPU | IRQF_NOBALANCING,
				     debug_name, NULL);
	if (rc < 0)
		goto fail;
	per_cpu(debug_irq, cpu) = rc;

	callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
				    cpu,
				    xen_call_function_single_interrupt,
				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(callfuncsingle_irq, cpu) = rc;

	return 0;

fail:
	if (per_cpu(resched_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
	if (per_cpu(callfunc_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
	if (per_cpu(debug_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(debug_irq, cpu), NULL);
	if (per_cpu(callfuncsingle_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(callfuncsingle_irq, cpu), NULL);
	return rc;
}
static void xen_hvm_cpu_die(unsigned int cpu)
{
	unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
	unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
	unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
	unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL);
	native_cpu_die(cpu);
}
static void xen_cpu_die(unsigned int cpu)
{
	while (HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) {
		current->state = TASK_UNINTERRUPTIBLE;
		schedule_timeout(HZ/10);
	}
	unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
	unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
	unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
	unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL);
	unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL);
	xen_uninit_lock_cpu(cpu);
	xen_teardown_timer(cpu);
}
void xen_teardown_timer(int cpu)
{
	struct clock_event_device *evt;

	BUG_ON(cpu == 0);
	evt = &per_cpu(xen_clock_events, cpu);
	unbind_from_irqhandler(evt->irq, NULL);
}
void xen_smp_intr_free_pv(unsigned int cpu)
{
	if (per_cpu(xen_irq_work, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_irq_work, cpu).irq, NULL);
		per_cpu(xen_irq_work, cpu).irq = -1;
		kfree(per_cpu(xen_irq_work, cpu).name);
		per_cpu(xen_irq_work, cpu).name = NULL;
	}

	if (per_cpu(xen_pmu_irq, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_pmu_irq, cpu).irq, NULL);
		per_cpu(xen_pmu_irq, cpu).irq = -1;
		kfree(per_cpu(xen_pmu_irq, cpu).name);
		per_cpu(xen_pmu_irq, cpu).name = NULL;
	}
}
static void xen_pcibk_disconnect(struct xen_pcibk_device *pdev)
{
	spin_lock(&pdev->dev_lock);
	/* Ensure the guest can't trigger our handler before removing devices */
	if (pdev->evtchn_irq != INVALID_EVTCHN_IRQ) {
		unbind_from_irqhandler(pdev->evtchn_irq, pdev);
		pdev->evtchn_irq = INVALID_EVTCHN_IRQ;
	}
	spin_unlock(&pdev->dev_lock);

	/* If the driver domain started an op, make sure we complete it
	 * before releasing the shared memory */

	/* Note, the workqueue does not use spinlocks at all. */
	flush_workqueue(xen_pcibk_wq);

	spin_lock(&pdev->dev_lock);
	if (pdev->sh_info != NULL) {
		xenbus_unmap_ring_vfree(pdev->xdev, pdev->sh_info);
		pdev->sh_info = NULL;
	}
	spin_unlock(&pdev->dev_lock);
}
static void xen_cpu_die(unsigned int cpu)
{
	while (HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) {
		current->state = TASK_UNINTERRUPTIBLE;
		schedule_timeout(HZ/10);
	}
	unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
	unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
	unbind_from_irqhandler(per_cpu(debug_irq, cpu), NULL);
	unbind_from_irqhandler(per_cpu(callfuncsingle_irq, cpu), NULL);
	xen_uninit_lock_cpu(cpu);
	xen_teardown_timer(cpu);

	if (num_online_cpus() == 1)
		alternatives_smp_switch(0);
}
int xencons_ring_init(void)
{
	int irq;

	if (xencons_irq)
		unbind_from_irqhandler(xencons_irq, NULL);
	xencons_irq = 0;

	if (!is_running_on_xen() ||
	    is_initial_xendomain() ||
	    !xen_start_info->console.domU.evtchn)
		return -ENODEV;

	irq = bind_caller_port_to_irqhandler(
		xen_start_info->console.domU.evtchn,
		handle_input, 0, "xencons", NULL);
	if (irq < 0) {
		printk(KERN_ERR "XEN console request irq failed %i\n", irq);
		return irq;
	}

	xencons_irq = irq;

	/* In case we have in-flight data after save/restore... */
	notify_daemon();

	return 0;
}
static void evtchn_unbind_from_user(struct per_user_data *u, int port)
{
	int irq = irq_from_evtchn(port);

	unbind_from_irqhandler(irq, (void *)(unsigned long)port);

	set_port_user(port, NULL);
}
static int xen_smp_intr_init(unsigned int cpu)
{
	int rc;
	unsigned int irq;

	per_cpu(resched_irq, cpu) = per_cpu(callfunc_irq, cpu) = -1;

	sprintf(resched_name[cpu], "resched%u", cpu);
	rc = bind_ipi_to_irqhandler(RESCHEDULE_VECTOR,
				    cpu,
				    resched_name[cpu],
				    smp_reschedule_interrupt,
				    INTR_TYPE_TTY, &irq);

	printf("[XEN] IPI cpu=%d irq=%d vector=RESCHEDULE_VECTOR (%d)\n",
	       cpu, irq, RESCHEDULE_VECTOR);

	per_cpu(resched_irq, cpu) = irq;

	sprintf(callfunc_name[cpu], "callfunc%u", cpu);
	rc = bind_ipi_to_irqhandler(CALL_FUNCTION_VECTOR,
				    cpu,
				    callfunc_name[cpu],
				    smp_call_function_interrupt,
				    INTR_TYPE_TTY, &irq);
	if (rc < 0)
		goto fail;
	per_cpu(callfunc_irq, cpu) = irq;

	printf("[XEN] IPI cpu=%d irq=%d vector=CALL_FUNCTION_VECTOR (%d)\n",
	       cpu, irq, CALL_FUNCTION_VECTOR);

	if ((cpu != 0) && ((rc = ap_cpu_initclocks(cpu)) != 0))
		goto fail;

	return 0;

fail:
	if (per_cpu(resched_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(resched_irq, cpu));
	if (per_cpu(callfunc_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(callfunc_irq, cpu));
	return rc;
}
void xencons_suspend(void)
{
	if (!xen_start_info->console_evtchn)
		return;

	unbind_from_irqhandler(console_irq);
}
static void xenkbd_disconnect_backend(struct xenkbd_info *info)
{
	if (info->irq >= 0)
		unbind_from_irqhandler(info->irq, info);
	info->irq = -1;
	if (info->gref >= 0)
		gnttab_end_foreign_access_ref(info->gref, 0);
	info->gref = -1;
}
void xen_uninit_lock_cpu(int cpu)
{
	if (!xen_pvspin)
		return;

	unbind_from_irqhandler(per_cpu(lock_kicker_irq, cpu), NULL);
	per_cpu(lock_kicker_irq, cpu) = -1;
	kfree(per_cpu(irq_name, cpu));
	per_cpu(irq_name, cpu) = NULL;
}
static void unbind_virq(void)
{
	int i;

	for_each_cpu(i) {
		if (ovf_irq[i] >= 0) {
			unbind_from_irqhandler(ovf_irq[i], NULL);
			ovf_irq[i] = -1;
		}
	}
}
static void evtchn_unbind_from_user(struct per_user_data *u, int port)
{
	int irq = irq_from_evtchn(port);

	unbind_from_irqhandler(irq, (void *)(unsigned long)port);

	/* make sure we unbind the irq handler before clearing the port */
	barrier();
	port_user[port] = NULL;
}
/**
 * Handles a backend disconnect.
 *
 * @param info The information structure for the device to be disconnected.
 */
static void oxtkbd_disconnect_backend(struct openxt_kbd_info *info)
{
	// If we had an input IRQ registered, tear it down.
	if (info->irq >= 0)
		unbind_from_irqhandler(info->irq, info);
	info->irq = -1;

	// ... and if we have a shared page for our ring, tear it down.
	if (info->gref >= 0)
		gnttab_end_foreign_access(info->gref, 0, 0UL);
	info->gref = -1;
}
static void cleanup_vnic(struct netback_accel *bend)
{
	struct xenbus_device *dev;

	dev = (struct xenbus_device *)bend->hdev_data;

	DPRINTK("%s: bend %p dev %p\n", __FUNCTION__, bend, dev);

	DPRINTK("%s: Remove %p's mac from fwd table...\n",
		__FUNCTION__, bend);
	netback_accel_fwd_remove(bend->mac, bend->fwd_priv);

	/* Free buffer table allocations */
	netback_accel_remove_buffers(bend);

	DPRINTK("%s: Release hardware resources...\n", __FUNCTION__);
	if (bend->accel_shutdown)
		bend->accel_shutdown(bend);

	if (bend->net_channel_irq) {
		unbind_from_irqhandler(bend->net_channel_irq, dev);
		bend->net_channel_irq = 0;
	}

	if (bend->msg_channel_irq) {
		unbind_from_irqhandler(bend->msg_channel_irq, dev);
		bend->msg_channel_irq = 0;
	}

	if (bend->sh_pages_unmap) {
		DPRINTK("%s: Unmap grants %p\n", __FUNCTION__,
			bend->sh_pages_unmap);
		net_accel_unmap_grants_contig(dev, bend->sh_pages_unmap);
		bend->sh_pages_unmap = NULL;
		bend->shared_page = NULL;
	}
}
static void free_blkif(void *arg)
{
	blkif_t *blkif = (blkif_t *)arg;

	if (blkif->irq)
		unbind_from_irqhandler(blkif->irq, blkif);

	if (blkif->blk_ring.sring) {
		unmap_frontend_page(blkif);
		free_vm_area(blkif->blk_ring_area);
		blkif->blk_ring.sring = NULL;
	}

	kmem_cache_free(blkif_cachep, blkif);
}
static void ring_free(struct tpm_private *priv)
{
	if (!priv)
		return;

	if (priv->ring_ref)
		gnttab_end_foreign_access(priv->ring_ref, 0,
					  (unsigned long)priv->shr);
	else
		free_page((unsigned long)priv->shr);

	if (priv->irq)
		unbind_from_irqhandler(priv->irq, priv);

	kfree(priv);
}
static void ixp_free(struct ixpfront_info *info, int suspend)
{
	/* Prevent new requests being issued until we fix things up. */
	info->connected = suspend ?
		IXP_STATE_SUSPENDED : IXP_STATE_DISCONNECTED;

	/* Free resources associated with old device channel. */
	if (info->ring_ref != GRANT_INVALID_REF) {
		gnttab_end_foreign_access(info->ring_ref, 0,
					  (unsigned long)info->ring.sring);
		info->ring_ref = GRANT_INVALID_REF;
		info->ring.sring = NULL;
	}

	if (info->irq)
		unbind_from_irqhandler(info->irq, info);
	info->evtchn = info->irq = 0;
}
static void netif_free(netif_t *netif)
{
	atomic_dec(&netif->refcnt);
	wait_event(netif->waiting_to_free, atomic_read(&netif->refcnt) == 0);

	if (netif->irq)
		unbind_from_irqhandler(netif->irq, netif);

	unregister_netdev(netif->dev);

	if (netif->tx.sring) {
		unmap_frontend_pages(netif);
		free_vm_area(netif->tx_comms_area);
		free_vm_area(netif->rx_comms_area);
	}

	free_netdev(netif->dev);
}
/* Set up interrupt handler off store event channel. */
int xb_init_comms(void)
{
	int err;

	if (xenbus_irq)
		unbind_from_irqhandler(xenbus_irq, &xb_waitq);

	err = bind_evtchn_to_irqhandler(
		xen_store_evtchn, wake_waiting,
		0, "xenbus", &xb_waitq);
	if (err <= 0) {
		printk(KERN_ERR "XENBUS request irq failed %i\n", err);
		return err;
	}

	xenbus_irq = err;

	return 0;
}
static void destroy_rings(struct usbfront_info *info)
{
	if (info->irq)
		unbind_from_irqhandler(info->irq, info);
	info->irq = 0;

	if (info->urb_ring_ref != GRANT_INVALID_REF) {
		gnttab_end_foreign_access(info->urb_ring_ref,
					  (unsigned long)info->urb_ring.sring);
		info->urb_ring_ref = GRANT_INVALID_REF;
	}
	info->urb_ring.sring = NULL;

	if (info->conn_ring_ref != GRANT_INVALID_REF) {
		gnttab_end_foreign_access(info->conn_ring_ref,
					  (unsigned long)info->conn_ring.sring);
		info->conn_ring_ref = GRANT_INVALID_REF;
	}
	info->conn_ring.sring = NULL;
}
static void free_pdev(struct pcifront_device *pdev)
{
	dev_dbg(&pdev->xdev->dev, "freeing pdev @ 0x%p\n", pdev);

	pcifront_free_roots(pdev);

	/* For PCIE_AER error handling job */
	flush_scheduled_work();

	unbind_from_irqhandler(pdev->evtchn, pdev);

	if (pdev->evtchn != INVALID_EVTCHN)
		xenbus_free_evtchn(pdev->xdev, pdev->evtchn);

	if (pdev->gnt_ref != INVALID_GRANT_REF)
		gnttab_end_foreign_access(pdev->gnt_ref,
					  (unsigned long)pdev->sh_info);

	dev_set_drvdata(&pdev->xdev->dev, NULL);

	kfree(pdev);
}
static int setup_suspend_evtchn(void)
{
	static int irq;
	int port;
	char portstr[16];

	if (irq > 0)
		unbind_from_irqhandler(irq, NULL);

	irq = bind_listening_port_to_irqhandler(0, suspend_int, 0, "suspend",
						NULL);
	if (irq <= 0)
		return -1;

	port = irq_to_evtchn_port(irq);
	printk(KERN_INFO "suspend: event channel %d\n", port);
	(void)snprintf(portstr, sizeof(portstr), "%d", port);
	xenbus_write(XBT_NIL, "device/suspend", "event-channel", portstr);

	return 0;
}
void scsiback_disconnect(struct vscsibk_info *info)
{
	if (info->kthread) {
		kthread_stop(info->kthread);
		info->kthread = NULL;
	}

	wait_event(info->waiting_to_free,
		   atomic_read(&info->nr_unreplied_reqs) == 0);

	if (info->irq) {
		unbind_from_irqhandler(info->irq, info);
		info->irq = 0;
	}

	if (info->ring.sring) {
		unmap_frontend_page(info);
		free_vm_area(info->ring_area);
		info->ring.sring = NULL;
	}
}
static void xen_blkif_disconnect(struct xen_blkif *blkif)
{
	if (blkif->xenblkd) {
		kthread_stop(blkif->xenblkd);
		blkif->xenblkd = NULL;
	}

	atomic_dec(&blkif->refcnt);
	wait_event(blkif->waiting_to_free, atomic_read(&blkif->refcnt) == 0);
	atomic_inc(&blkif->refcnt);

	if (blkif->irq) {
		unbind_from_irqhandler(blkif->irq, blkif);
		blkif->irq = 0;
	}

	if (blkif->blk_rings.common.sring) {
		xenbus_unmap_ring_vfree(blkif->be->dev, blkif->blk_ring);
		blkif->blk_rings.common.sring = NULL;
	}
}
/**
 * cancel connection
 * unbind irq
 * unmap ring
 **/
static void xen_chrif_disconnect(struct xen_chrif *chrif)
{
	unmap_frontend_pages(chrif);
	xen_chrbk_unmap(chrif);

	if (chrif->xenchrd) {
		kthread_stop(chrif->xenchrd);
		chrif->xenchrd = NULL;
	}

	if (chrif->irq) {
		unbind_from_irqhandler(chrif->irq, chrif);
		chrif->irq = 0;
	}

	if (chrif->chr_ring.sring) {
		xenbus_unmap_ring_vfree(chrif->be->dev, &chrif->chr_ring);
		chrif->chr_ring.sring = NULL;
	}

	printk("\nxen: dom0: xen chrif disconnect finished");
}
static int __devinit unbind_evtchn_callback(struct notifier_block *nfb,
					    unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	if (action == CPU_DEAD) {
		/* Unregister evtchn. */
		if (per_cpu(xen_cpep_irq, cpu) >= 0) {
			unbind_from_irqhandler(per_cpu(xen_cpep_irq, cpu),
					       NULL);
			per_cpu(xen_cpep_irq, cpu) = -1;
		}
		if (per_cpu(xen_cmcp_irq, cpu) >= 0) {
			unbind_from_irqhandler(per_cpu(xen_cmcp_irq, cpu),
					       NULL);
			per_cpu(xen_cmcp_irq, cpu) = -1;
		}
		if (per_cpu(xen_cmc_irq, cpu) >= 0) {
			unbind_from_irqhandler(per_cpu(xen_cmc_irq, cpu), NULL);
			per_cpu(xen_cmc_irq, cpu) = -1;
		}
		if (per_cpu(xen_ipi_irq, cpu) >= 0) {
			unbind_from_irqhandler(per_cpu(xen_ipi_irq, cpu), NULL);
			per_cpu(xen_ipi_irq, cpu) = -1;
		}
		if (per_cpu(xen_resched_irq, cpu) >= 0) {
			unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu),
					       NULL);
			per_cpu(xen_resched_irq, cpu) = -1;
		}
		if (per_cpu(xen_timer_irq, cpu) >= 0) {
			unbind_from_irqhandler(per_cpu(xen_timer_irq, cpu),
					       NULL);
			per_cpu(xen_timer_irq, cpu) = -1;
		}
	}
	return NOTIFY_OK;
}
static void xenkbd_disconnect_backend(struct xenkbd_info *info)
{
	if (info->irq >= 0)
		unbind_from_irqhandler(info->irq, info);
	info->irq = -1;
}
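/*
 * A minimal sketch (not taken from any of the drivers above) of the
 * bind/unbind pairing that the snippets in this section share on Linux:
 * a bind_*_to_irqhandler() call returns a Linux IRQ number on success
 * (or a negative errno), the caller records it, and teardown passes the
 * same irq/dev_id pair back to unbind_from_irqhandler(). The example_*
 * names below are hypothetical; the Xen APIs and headers are real.
 */
#include <linux/interrupt.h>
#include <xen/events.h>
#include <xen/interface/xen.h>

static int example_irq = -1;	/* hypothetical storage for the bound IRQ */

static irqreturn_t example_interrupt(int irq, void *dev_id)
{
	/* Handler body omitted in this sketch. */
	return IRQ_HANDLED;
}

static int example_bind(unsigned int cpu)
{
	int rc;

	rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, example_interrupt,
				     IRQF_PERCPU | IRQF_NOBALANCING,
				     "example", NULL);
	if (rc < 0)
		return rc;	/* negative errno: nothing was bound */

	example_irq = rc;	/* rc is the Linux IRQ number on success */
	return 0;
}

static void example_unbind(void)
{
	if (example_irq >= 0) {
		/* dev_id must match the one passed at bind time (NULL here) */
		unbind_from_irqhandler(example_irq, NULL);
		example_irq = -1;
	}
}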