static int xen_smp_intr_init(unsigned int cpu)
{
	int rc;
	const char *resched_name, *callfunc_name, *debug_name;

	resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
				    cpu,
				    xen_reschedule_interrupt,
				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				    resched_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(resched_irq, cpu) = rc;

	callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
				    cpu,
				    xen_call_function_interrupt,
				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(callfunc_irq, cpu) = rc;

	debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
	rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt,
				     IRQF_DISABLED | IRQF_PERCPU | IRQF_NOBALANCING,
				     debug_name, NULL);
	if (rc < 0)
		goto fail;
	per_cpu(debug_irq, cpu) = rc;

	callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
				    cpu,
				    xen_call_function_single_interrupt,
				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(callfuncsingle_irq, cpu) = rc;

	return 0;

 fail:
	if (per_cpu(resched_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
	if (per_cpu(callfunc_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
	if (per_cpu(debug_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(debug_irq, cpu), NULL);
	if (per_cpu(callfuncsingle_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(callfuncsingle_irq, cpu),
				       NULL);

	return rc;
}
int xen_smp_intr_init_pv(unsigned int cpu)
{
	int rc;
	char *callfunc_name, *pmu_name;

	callfunc_name = kasprintf(GFP_KERNEL, "irqwork%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_IRQ_WORK_VECTOR,
				    cpu,
				    xen_irq_work_interrupt,
				    IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_irq_work, cpu).irq = rc;
	per_cpu(xen_irq_work, cpu).name = callfunc_name;

	if (is_xen_pmu(cpu)) {
		pmu_name = kasprintf(GFP_KERNEL, "pmu%d", cpu);
		rc = bind_virq_to_irqhandler(VIRQ_XENPMU, cpu,
					     xen_pmu_irq_handler,
					     IRQF_PERCPU|IRQF_NOBALANCING,
					     pmu_name, NULL);
		if (rc < 0)
			goto fail;
		per_cpu(xen_pmu_irq, cpu).irq = rc;
		per_cpu(xen_pmu_irq, cpu).name = pmu_name;
	}

	return 0;

 fail:
	xen_smp_intr_free_pv(cpu);
	return rc;
}
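/*
 * The fail path above hands cleanup to xen_smp_intr_free_pv(), whose body
 * is not shown here. A minimal sketch of what such a teardown helper has
 * to do, assuming the same per-CPU slots are initialized to .irq = -1 and
 * that the kasprintf'd names are owned by those slots:
 */
void xen_smp_intr_free_pv(unsigned int cpu)
{
	if (per_cpu(xen_irq_work, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_irq_work, cpu).irq, NULL);
		per_cpu(xen_irq_work, cpu).irq = -1;
		kfree(per_cpu(xen_irq_work, cpu).name);
		per_cpu(xen_irq_work, cpu).name = NULL;
	}

	if (per_cpu(xen_pmu_irq, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_pmu_irq, cpu).irq, NULL);
		per_cpu(xen_pmu_irq, cpu).irq = -1;
		kfree(per_cpu(xen_pmu_irq, cpu).name);
		per_cpu(xen_pmu_irq, cpu).name = NULL;
	}
}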
int __init bind_virq_for_mce(void)
{
	int ret;
	xen_mc_t mc_op;

	g_mi = kmalloc(sizeof(*g_mi), GFP_KERNEL);
	if (!g_mi)
		return -ENOMEM;

	/* fetch physical CPU count */
	mc_op.cmd = XEN_MC_physcpuinfo;
	mc_op.interface_version = XEN_MCA_INTERFACE_VERSION;
	set_xen_guest_handle(mc_op.u.mc_physcpuinfo.info, NULL);
	ret = HYPERVISOR_mca(&mc_op);
	if (ret) {
		printk(KERN_ERR "MCE: Failed to get physical CPU count\n");
		kfree(g_mi);
		return ret;
	}

	/* fetch CPU physical info for later reference */
	ncpus = mc_op.u.mc_physcpuinfo.ncpus;
	g_physinfo = kmalloc(sizeof(*g_physinfo) * ncpus, GFP_KERNEL);
	if (!g_physinfo) {
		kfree(g_mi);
		return -ENOMEM;
	}
	set_xen_guest_handle(mc_op.u.mc_physcpuinfo.info, g_physinfo);
	ret = HYPERVISOR_mca(&mc_op);
	if (ret) {
		printk(KERN_ERR "MCE: Failed to get physical CPUs' info\n");
		kfree(g_mi);
		kfree(g_physinfo);
		return ret;
	}

	ret = bind_virq_to_irqhandler(VIRQ_MCA, 0, mce_dom0_interrupt,
				      0, "mce", NULL);
	if (ret < 0) {
		printk(KERN_ERR "MCE: Failed to bind vIRQ for Dom0\n");
		kfree(g_mi);
		kfree(g_physinfo);
		return ret;
	}

	/* Log the machine checks left over from the previous reset. */
	mce_dom0_interrupt(VIRQ_MCA, NULL);

	return 0;
}
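/*
 * For reference, a sketch of the globals the function above fills in.
 * The exact declarations are assumptions inferred from the Xen MCA
 * interface types used here:
 */
static struct mc_info *g_mi;			/* scratch buffer for machine-check info */
static struct mcinfo_logical_cpu *g_physinfo;	/* per-physical-CPU info array */
static uint32_t ncpus;				/* number of physical CPUs */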
static int bind_virq_for_mce(void)
{
	int ret;
	struct xen_mc mc_op;

	memset(&mc_op, 0, sizeof(struct xen_mc));

	/* Fetch physical CPU numbers */
	mc_op.cmd = XEN_MC_physcpuinfo;
	mc_op.interface_version = XEN_MCA_INTERFACE_VERSION;
	set_xen_guest_handle(mc_op.u.mc_physcpuinfo.info, g_physinfo);
	ret = HYPERVISOR_mca(&mc_op);
	if (ret) {
		pr_err("Failed to get CPU numbers\n");
		return ret;
	}

	/* Fetch each CPU's physical info for later reference */
	ncpus = mc_op.u.mc_physcpuinfo.ncpus;
	g_physinfo = kcalloc(ncpus, sizeof(struct mcinfo_logical_cpu),
			     GFP_KERNEL);
	if (!g_physinfo)
		return -ENOMEM;
	set_xen_guest_handle(mc_op.u.mc_physcpuinfo.info, g_physinfo);
	ret = HYPERVISOR_mca(&mc_op);
	if (ret) {
		pr_err("Failed to get CPU info\n");
		kfree(g_physinfo);
		return ret;
	}

	ret = bind_virq_to_irqhandler(VIRQ_MCA, 0,
				      xen_mce_interrupt, 0, "mce", NULL);
	if (ret < 0) {
		pr_err("Failed to bind virq\n");
		kfree(g_physinfo);
		return ret;
	}

	return 0;
}
static int bind_virq(void)
{
	int i, result;

	for_each_cpu(i) {
		result = bind_virq_to_irqhandler(VIRQ_XENOPROF,
						 i,
						 xenoprof_ovf_interrupt,
						 SA_INTERRUPT,
						 "xenoprof",
						 NULL);
		if (result < 0) {
			unbind_virq();
			return result;
		}

		ovf_irq[i] = result;
	}

	return 0;
}
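/*
 * unbind_virq() is referenced above but not shown. A minimal sketch of
 * the matching teardown, assuming the ovf_irq[] slots start out as -1:
 */
static void unbind_virq(void)
{
	int i;

	for_each_cpu(i) {
		if (ovf_irq[i] >= 0) {
			unbind_from_irqhandler(ovf_irq[i], NULL);
			ovf_irq[i] = -1;
		}
	}
}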
void xen_setup_timer(int cpu)
{
	const char *name;
	struct clock_event_device *evt;
	int irq;

	printk(KERN_INFO "installing Xen timer for CPU %d\n", cpu);

	name = kasprintf(GFP_KERNEL, "timer%d", cpu);
	if (!name)
		name = "<timer kasprintf failed>";

	irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt,
				      IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING|IRQF_TIMER,
				      name, NULL);

	evt = &per_cpu(xen_clock_events, cpu);
	memcpy(evt, xen_clockevent, sizeof(*evt));

	evt->cpumask = cpumask_of(cpu);
	evt->irq = irq;
}
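/*
 * A minimal sketch of the matching teardown; kernels of this era carry
 * a xen_teardown_timer() along these lines (illustrative, details vary
 * by version):
 */
void xen_teardown_timer(int cpu)
{
	struct clock_event_device *evt;

	BUG_ON(cpu == 0);	/* CPU0's timer is never torn down */
	evt = &per_cpu(xen_clock_events, cpu);
	unbind_from_irqhandler(evt->irq, NULL);
}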
/*
 * This is the Xen version of percpu irq registration, which needs to
 * bind to the Xen-specific evtchn subsystem. One trick here is that
 * the Xen evtchn binding interface depends on kmalloc, because the
 * related port needs to be freed at device/cpu teardown. So we cache
 * registrations made on the BSP before the slab allocator is ready
 * and deal with them later. Any instance registered after the slab is
 * ready is hooked to the Xen evtchn immediately.
 *
 * FIXME: MCA is not supported so far, and thus the "nomca" boot param
 * is required.
 */
static void
__xen_register_percpu_irq(unsigned int cpu, unsigned int vec,
			  struct irqaction *action, int save)
{
	struct irq_desc *desc;
	int irq = 0;

	if (xen_slab_ready) {
		switch (vec) {
		case IA64_TIMER_VECTOR:
			snprintf(per_cpu(xen_timer_name, cpu),
				 sizeof(per_cpu(xen_timer_name, cpu)),
				 "%s%d", action->name, cpu);
			irq = bind_virq_to_irqhandler(VIRQ_ITC, cpu,
				action->handler, action->flags,
				per_cpu(xen_timer_name, cpu),
				action->dev_id);
			per_cpu(xen_timer_irq, cpu) = irq;
			break;
		case IA64_IPI_RESCHEDULE:
			snprintf(per_cpu(xen_resched_name, cpu),
				 sizeof(per_cpu(xen_resched_name, cpu)),
				 "%s%d", action->name, cpu);
			irq = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
				cpu, action->handler, action->flags,
				per_cpu(xen_resched_name, cpu),
				action->dev_id);
			per_cpu(xen_resched_irq, cpu) = irq;
			break;
		case IA64_IPI_VECTOR:
			snprintf(per_cpu(xen_ipi_name, cpu),
				 sizeof(per_cpu(xen_ipi_name, cpu)),
				 "%s%d", action->name, cpu);
			irq = bind_ipi_to_irqhandler(XEN_IPI_VECTOR, cpu,
				action->handler, action->flags,
				per_cpu(xen_ipi_name, cpu),
				action->dev_id);
			per_cpu(xen_ipi_irq, cpu) = irq;
			break;
		case IA64_CMC_VECTOR:
			snprintf(per_cpu(xen_cmc_name, cpu),
				 sizeof(per_cpu(xen_cmc_name, cpu)),
				 "%s%d", action->name, cpu);
			irq = bind_virq_to_irqhandler(VIRQ_MCA_CMC, cpu,
				action->handler, action->flags,
				per_cpu(xen_cmc_name, cpu),
				action->dev_id);
			per_cpu(xen_cmc_irq, cpu) = irq;
			break;
		case IA64_CMCP_VECTOR:
			snprintf(per_cpu(xen_cmcp_name, cpu),
				 sizeof(per_cpu(xen_cmcp_name, cpu)),
				 "%s%d", action->name, cpu);
			irq = bind_ipi_to_irqhandler(XEN_CMCP_VECTOR, cpu,
				action->handler, action->flags,
				per_cpu(xen_cmcp_name, cpu),
				action->dev_id);
			per_cpu(xen_cmcp_irq, cpu) = irq;
			break;
		case IA64_CPEP_VECTOR:
			snprintf(per_cpu(xen_cpep_name, cpu),
				 sizeof(per_cpu(xen_cpep_name, cpu)),
				 "%s%d", action->name, cpu);
			irq = bind_ipi_to_irqhandler(XEN_CPEP_VECTOR, cpu,
				action->handler, action->flags,
				per_cpu(xen_cpep_name, cpu),
				action->dev_id);
			per_cpu(xen_cpep_irq, cpu) = irq;
			break;
		case IA64_CPE_VECTOR:
		case IA64_MCA_RENDEZ_VECTOR:
		case IA64_PERFMON_VECTOR:
		case IA64_MCA_WAKEUP_VECTOR:
		case IA64_SPURIOUS_INT_VECTOR:
			/* No need to complain, these aren't supported. */
			break;
		default:
			printk(KERN_WARNING "Percpu irq %d is unsupported "
			       "by xen!\n", vec);
			break;
		}
		BUG_ON(irq < 0);

		if (irq > 0) {
			/*
			 * Mark percpu. Without this, migrate_irqs() will
			 * mark the interrupt for migrations and trigger it
			 * on cpu hotplug.
			 */
			desc = irq_desc + irq;
			desc->status |= IRQ_PER_CPU;
		}
	}

	/*
	 * For the BSP, we cache registered percpu irqs, and then re-walk
	 * them when initializing APs.
	 */
	if (!cpu && save) {
		BUG_ON(saved_irq_cnt == MAX_LATE_IRQ);
		saved_percpu_irqs[saved_irq_cnt].irq = vec;
		saved_percpu_irqs[saved_irq_cnt].action = action;
		saved_irq_cnt++;
		if (!xen_slab_ready)
			late_irq_cnt++;
	}
}
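/*
 * A minimal sketch of the re-walk described above: once the slab
 * allocator is up, the BSP replays the late-bound registrations, and
 * each AP replays the whole saved list at bringup. The function names
 * follow the ia64 Xen code of this era, but the bodies here are
 * illustrative:
 */
static void xen_bind_early_percpu_irq(void)
{
	int i;

	xen_slab_ready = 1;
	/* Only entries cached before the slab was ready need replaying. */
	for (i = 0; i < late_irq_cnt; i++)
		__xen_register_percpu_irq(smp_processor_id(),
					  saved_percpu_irqs[i].irq,
					  saved_percpu_irqs[i].action, 0);
}

void xen_smp_intr_init(void)
{
	unsigned int cpu = smp_processor_id();
	int i;

	if (cpu == 0)
		return;

	/* APs replay every cached percpu irq registration. */
	for (i = 0; i < saved_irq_cnt; i++)
		__xen_register_percpu_irq(cpu, saved_percpu_irqs[i].irq,
					  saved_percpu_irqs[i].action, 0);
}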
static void
__xen_register_percpu_irq(unsigned int cpu, unsigned int vec,
			  struct irqaction *action, int save)
{
	int irq = 0;

	if (xen_slab_ready) {
		switch (vec) {
		case IA64_TIMER_VECTOR:
			snprintf(per_cpu(xen_timer_name, cpu),
				 sizeof(per_cpu(xen_timer_name, cpu)),
				 "%s%d", action->name, cpu);
			irq = bind_virq_to_irqhandler(VIRQ_ITC, cpu,
				action->handler, action->flags,
				per_cpu(xen_timer_name, cpu),
				action->dev_id);
			per_cpu(xen_timer_irq, cpu) = irq;
			break;
		case IA64_IPI_RESCHEDULE:
			snprintf(per_cpu(xen_resched_name, cpu),
				 sizeof(per_cpu(xen_resched_name, cpu)),
				 "%s%d", action->name, cpu);
			irq = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
				cpu, action->handler, action->flags,
				per_cpu(xen_resched_name, cpu),
				action->dev_id);
			per_cpu(xen_resched_irq, cpu) = irq;
			break;
		case IA64_IPI_VECTOR:
			snprintf(per_cpu(xen_ipi_name, cpu),
				 sizeof(per_cpu(xen_ipi_name, cpu)),
				 "%s%d", action->name, cpu);
			irq = bind_ipi_to_irqhandler(XEN_IPI_VECTOR, cpu,
				action->handler, action->flags,
				per_cpu(xen_ipi_name, cpu),
				action->dev_id);
			per_cpu(xen_ipi_irq, cpu) = irq;
			break;
		case IA64_CMC_VECTOR:
			snprintf(per_cpu(xen_cmc_name, cpu),
				 sizeof(per_cpu(xen_cmc_name, cpu)),
				 "%s%d", action->name, cpu);
			irq = bind_virq_to_irqhandler(VIRQ_MCA_CMC, cpu,
				action->handler, action->flags,
				per_cpu(xen_cmc_name, cpu),
				action->dev_id);
			per_cpu(xen_cmc_irq, cpu) = irq;
			break;
		case IA64_CMCP_VECTOR:
			snprintf(per_cpu(xen_cmcp_name, cpu),
				 sizeof(per_cpu(xen_cmcp_name, cpu)),
				 "%s%d", action->name, cpu);
			irq = bind_ipi_to_irqhandler(XEN_CMCP_VECTOR, cpu,
				action->handler, action->flags,
				per_cpu(xen_cmcp_name, cpu),
				action->dev_id);
			per_cpu(xen_cmcp_irq, cpu) = irq;
			break;
		case IA64_CPEP_VECTOR:
			snprintf(per_cpu(xen_cpep_name, cpu),
				 sizeof(per_cpu(xen_cpep_name, cpu)),
				 "%s%d", action->name, cpu);
			irq = bind_ipi_to_irqhandler(XEN_CPEP_VECTOR, cpu,
				action->handler, action->flags,
				per_cpu(xen_cpep_name, cpu),
				action->dev_id);
			per_cpu(xen_cpep_irq, cpu) = irq;
			break;
		case IA64_CPE_VECTOR:
		case IA64_MCA_RENDEZ_VECTOR:
		case IA64_PERFMON_VECTOR:
		case IA64_MCA_WAKEUP_VECTOR:
		case IA64_SPURIOUS_INT_VECTOR:
			/* No need to complain, these aren't supported. */
			break;
		default:
			printk(KERN_WARNING "Percpu irq %d is unsupported "
			       "by xen!\n", vec);
			break;
		}
		BUG_ON(irq < 0);

		if (irq > 0) {
			/*
			 * Mark percpu. Without this, migrate_irqs() will
			 * mark the interrupt for migrations and trigger it
			 * on cpu hotplug.
			 */
			irq_set_status_flags(irq, IRQ_PER_CPU);
		}
	}

	/*
	 * For the BSP, we cache registered percpu irqs, and then re-walk
	 * them when initializing APs.
	 */
	if (!cpu && save) {
		BUG_ON(saved_irq_cnt == MAX_LATE_IRQ);
		saved_percpu_irqs[saved_irq_cnt].irq = vec;
		saved_percpu_irqs[saved_irq_cnt].action = action;
		saved_irq_cnt++;
		if (!xen_slab_ready)
			late_irq_cnt++;
	}
}
static int __init xencons_init(void)
{
	int rc;

	if (!is_running_on_xen())
		return -ENODEV;

	if (xc_mode == XC_OFF)
		return 0;

	if (!is_initial_xendomain()) {
		rc = xencons_ring_init();
		if (rc)
			return rc;
	}

	xencons_driver = alloc_tty_driver((xc_mode == XC_TTY) ?
					  MAX_NR_CONSOLES : 1);
	if (xencons_driver == NULL)
		return -ENOMEM;

	DRV(xencons_driver)->name            = "xencons";
	DRV(xencons_driver)->major           = TTY_MAJOR;
	DRV(xencons_driver)->type            = TTY_DRIVER_TYPE_SERIAL;
	DRV(xencons_driver)->subtype         = SERIAL_TYPE_NORMAL;
	DRV(xencons_driver)->init_termios    = tty_std_termios;
	DRV(xencons_driver)->flags           =
		TTY_DRIVER_REAL_RAW |
		TTY_DRIVER_RESET_TERMIOS |
		TTY_DRIVER_NO_DEVFS;
	DRV(xencons_driver)->termios         = xencons_termios;
	DRV(xencons_driver)->termios_locked  = xencons_termios_locked;

	switch (xc_mode) {
	case XC_XVC:
		DRV(xencons_driver)->name        = "xvc";
		DRV(xencons_driver)->major       = XEN_XVC_MAJOR;
		DRV(xencons_driver)->minor_start = XEN_XVC_MINOR;
		DRV(xencons_driver)->name_base   = xc_num;
		break;
	case XC_SERIAL:
		DRV(xencons_driver)->name        = "ttyS";
		DRV(xencons_driver)->minor_start = 64 + xc_num;
		DRV(xencons_driver)->name_base   = xc_num;
		break;
	default:
		DRV(xencons_driver)->name        = "tty";
		DRV(xencons_driver)->minor_start = 1;
		DRV(xencons_driver)->name_base   = 1;
		break;
	}

	tty_set_operations(xencons_driver, &xencons_ops);

	if ((rc = tty_register_driver(DRV(xencons_driver))) != 0) {
		printk("WARNING: Failed to register Xen virtual "
		       "console driver as '%s%d'\n",
		       DRV(xencons_driver)->name,
		       DRV(xencons_driver)->name_base);
		put_tty_driver(xencons_driver);
		xencons_driver = NULL;
		return rc;
	}

	tty_register_device(xencons_driver, 0, NULL);

	if (is_initial_xendomain()) {
		xencons_priv_irq = bind_virq_to_irqhandler(
			VIRQ_CONSOLE,
			0,
			xencons_priv_interrupt,
			0,
			"console",
			NULL);
		BUG_ON(xencons_priv_irq < 0);
	}

	printk("Xen virtual console successfully installed as %s%d\n",
	       DRV(xencons_driver)->name, xc_num);

	/* Check whether a framebuffer would mess up the console. */
	if (!is_initial_xendomain() &&
	    !xenbus_exists(XBT_NIL, "device", "vfb")) {
		/* FIXME: this is ugly */
		unregister_console(&kcons_info);
		kcons_info.flags |= CON_CONSDEV;
		register_console(&kcons_info);
	}

	return 0;
}
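/*
 * DRV() above is a small compatibility wrapper carried by this driver
 * across kernel generations; on 2.6 kernels it is, as far as we can
 * tell, just the identity (shown here as an assumption for readability):
 */
#define DRV(_d) (_d)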
static int xen_smp_intr_init(unsigned int cpu)
{
	int rc;
	const char *resched_name, *callfunc_name, *debug_name;

	resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
				    cpu,
				    xen_reschedule_interrupt,
				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				    resched_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_resched_irq, cpu) = rc;

	callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
				    cpu,
				    xen_call_function_interrupt,
				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_callfunc_irq, cpu) = rc;

	debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
	rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt,
				     IRQF_DISABLED | IRQF_PERCPU | IRQF_NOBALANCING,
				     debug_name, NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_debug_irq, cpu) = rc;

	callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
				    cpu,
				    xen_call_function_single_interrupt,
				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_callfuncsingle_irq, cpu) = rc;

	/*
	 * The IRQ worker on PVHVM goes through the native path and uses the
	 * IPI mechanism.
	 */
	if (xen_hvm_domain())
		return 0;

	callfunc_name = kasprintf(GFP_KERNEL, "irqwork%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_IRQ_WORK_VECTOR,
				    cpu,
				    xen_irq_work_interrupt,
				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_irq_work, cpu) = rc;

	return 0;

 fail:
	if (per_cpu(xen_resched_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
	if (per_cpu(xen_callfunc_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
	if (per_cpu(xen_debug_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
	if (per_cpu(xen_callfuncsingle_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu),
				       NULL);
	if (xen_hvm_domain())
		return rc;
	if (per_cpu(xen_irq_work, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL);

	return rc;
}
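/*
 * The ">= 0" checks in the fail path above rely on the per-CPU IRQ
 * slots starting out negative. A minimal sketch of that precondition;
 * the initializer style is assumed here, and the exact definitions
 * vary across kernel versions:
 */
static DEFINE_PER_CPU(int, xen_resched_irq) = -1;
static DEFINE_PER_CPU(int, xen_callfunc_irq) = -1;
static DEFINE_PER_CPU(int, xen_debug_irq) = -1;
static DEFINE_PER_CPU(int, xen_callfuncsingle_irq) = -1;
static DEFINE_PER_CPU(int, xen_irq_work) = -1;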