/*
 * do_cio_interrupt() handles all normal I/O device IRQ's
 */
static irqreturn_t do_cio_interrupt(int irq, void *dummy)
{
	struct tpi_info *tpi_info;
	struct subchannel *sch;
	struct irb *irb;

	set_cpu_flag(CIF_NOHZ_DELAY);
	tpi_info = (struct tpi_info *) &get_irq_regs()->int_code;
	trace_s390_cio_interrupt(tpi_info);
	irb = this_cpu_ptr(&cio_irb);
	sch = (struct subchannel *)(unsigned long) tpi_info->intparm;
	if (!sch) {
		/* Clear pending interrupt condition. */
		inc_irq_stat(IRQIO_CIO);
		tsch(tpi_info->schid, irb);
		return IRQ_HANDLED;
	}
	spin_lock(sch->lock);
	/* Store interrupt response block to lowcore. */
	if (tsch(tpi_info->schid, irb) == 0) {
		/* Keep subchannel information word up to date. */
		memcpy(&sch->schib.scsw, &irb->scsw, sizeof(irb->scsw));
		/* Call interrupt handler if there is one. */
		if (sch->driver && sch->driver->irq)
			sch->driver->irq(sch);
		else
			inc_irq_stat(IRQIO_CIO);
	} else
		inc_irq_stat(IRQIO_CIO);
	spin_unlock(sch->lock);

	return IRQ_HANDLED;
}
void __irq_entry do_IRQ(struct pt_regs *regs)
{
	struct tpi_info *tpi_info;
	struct subchannel *sch;
	struct irb *irb;
	struct pt_regs *old_regs;

	old_regs = set_irq_regs(regs);
	s390_idle_check(regs, S390_lowcore.int_clock,
			S390_lowcore.async_enter_timer);
	irq_enter();
	__get_cpu_var(s390_idle).nohz_delay = 1;
	if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
		/* Serve timer interrupts first. */
		clock_comparator_work();
	/*
	 * Get interrupt information from lowcore
	 */
	tpi_info = (struct tpi_info *)&S390_lowcore.subchannel_id;
	irb = (struct irb *)&S390_lowcore.irb;
	do {
		kstat_cpu(smp_processor_id()).irqs[IO_INTERRUPT]++;
		/*
		 * Non I/O-subchannel thin interrupts are processed differently
		 */
		if (tpi_info->adapter_IO == 1 &&
		    tpi_info->int_type == IO_INTERRUPT_TYPE) {
			do_adapter_IO(tpi_info->isc);
			continue;
		}
		sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
		if (!sch) {
			/* Clear pending interrupt condition. */
			tsch(tpi_info->schid, irb);
			continue;
		}
		spin_lock(sch->lock);
		/* Store interrupt response block to lowcore. */
		if (tsch(tpi_info->schid, irb) == 0) {
			/* Keep subchannel information word up to date. */
			memcpy(&sch->schib.scsw, &irb->scsw, sizeof(irb->scsw));
			/* Call interrupt handler if there is one. */
			if (sch->driver && sch->driver->irq)
				sch->driver->irq(sch);
		}
		spin_unlock(sch->lock);
		/*
		 * Are more interrupts pending?
		 * If so, the tpi instruction will update the lowcore
		 * to hold the info for the next interrupt.
		 * We don't do this for VM because a tpi drops the cpu
		 * out of the sie which costs more cycles than it saves.
		 */
	} while (MACHINE_IS_LPAR && tpi(NULL) != 0);
	irq_exit();
	set_irq_regs(old_regs);
}
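Every version of the handler recovers the interrupt source by casting the lowcore interruption code to struct tpi_info. For orientation, here is a minimal sketch of that layout; the field names match the accesses in the code above, but the exact bit widths and reserved gaps are assumptions for illustration, not a verbatim copy of the kernel header:

/*
 * Sketch of the I/O interruption code as consumed by the handlers
 * above. Bit widths and padding are illustrative assumptions; older
 * versions of the code name the schid field "irq" instead.
 */
struct tpi_info {
	struct subchannel_id schid;	/* subchannel that raised the interrupt */
	u32 intparm;			/* interruption parameter; cio stores the
					 * struct subchannel pointer here */
	u32 adapter_IO:1;		/* adapter (thin) interrupt, no subchannel */
	u32 :1;
	u32 isc:3;			/* interruption subclass */
	u32 :12;
	u32 int_type:3;			/* interruption type (IO_INTERRUPT_TYPE) */
	u32 :12;
} __packed __aligned(4);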
/*
 * Use cio_tsch to update the subchannel status and call the interrupt handler
 * if status had been pending. Called with the subchannel's lock held.
 */
void cio_tsch(struct subchannel *sch)
{
	struct irb *irb;
	int irq_context;

	irb = this_cpu_ptr(&cio_irb);
	/* Store interrupt response block to lowcore. */
	if (tsch(sch->schid, irb) != 0)
		/* Not status pending or not operational. */
		return;
	memcpy(&sch->schib.scsw, &irb->scsw, sizeof(union scsw));
	/* Call interrupt handler with updated status. */
	irq_context = in_interrupt();
	if (!irq_context) {
		local_bh_disable();
		irq_enter();
	}
	kstat_incr_irq_this_cpu(IO_INTERRUPT);
	if (sch->driver && sch->driver->irq)
		sch->driver->irq(sch);
	else
		inc_irq_stat(IRQIO_CIO);
	if (!irq_context) {
		irq_exit();
		_local_bh_enable();
	}
}
/*
 * Use cio_tsch to update the subchannel status and call the interrupt handler
 * if status had been pending. Called with the console_subchannel lock held.
 */
static void cio_tsch(struct subchannel *sch)
{
	struct irb *irb;
	int irq_context;

	irb = (struct irb *)&S390_lowcore.irb;
	/* Store interrupt response block to lowcore. */
	if (tsch(sch->schid, irb) != 0)
		/* Not status pending or not operational. */
		return;
	memcpy(&sch->schib.scsw, &irb->scsw, sizeof(union scsw));
	/* Call interrupt handler with updated status. */
	irq_context = in_interrupt();
	if (!irq_context) {
		local_bh_disable();
		irq_enter();
	}
	if (sch->driver && sch->driver->irq)
		sch->driver->irq(sch);
	else
		kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
	if (!irq_context) {
		irq_exit();
		_local_bh_enable();
	}
}
/**
 * cio_disable_subchannel - disable a subchannel.
 * @sch: subchannel to disable
 */
int cio_disable_subchannel(struct subchannel *sch)
{
	int retry;
	int ret;

	CIO_TRACE_EVENT(2, "dissch");
	CIO_TRACE_EVENT(2, dev_name(&sch->dev));

	if (sch_is_pseudo_sch(sch))
		return 0;
	if (cio_update_schib(sch))
		return -ENODEV;

	sch->config.ena = 0;
	for (retry = 0; retry < 3; retry++) {
		ret = cio_commit_config(sch);
		if (ret == -EBUSY) {
			struct irb irb;

			if (tsch(sch->schid, &irb) != 0)
				break;
		} else
			break;
	}
	CIO_HEX_EVENT(2, &ret, sizeof(ret));
	return ret;
}
/**
 * cio_enable_subchannel - enable a subchannel.
 * @sch: subchannel to be enabled
 * @intparm: interruption parameter to set
 */
int cio_enable_subchannel(struct subchannel *sch, u32 intparm)
{
	int retry;
	int ret;

	CIO_TRACE_EVENT(2, "ensch");
	CIO_TRACE_EVENT(2, dev_name(&sch->dev));

	if (sch_is_pseudo_sch(sch))
		return -EINVAL;
	if (cio_update_schib(sch))
		return -ENODEV;

	sch->config.ena = 1;
	sch->config.isc = sch->isc;
	sch->config.intparm = intparm;
	for (retry = 0; retry < 3; retry++) {
		ret = cio_commit_config(sch);
		if (ret == -EIO) {
			/*
			 * Got a program check in msch. Try without
			 * the concurrent sense bit the next time.
			 */
			sch->config.csense = 0;
		} else if (ret == -EBUSY) {
			struct irb irb;

			if (tsch(sch->schid, &irb) != 0)
				break;
		} else
			break;
	}
	CIO_HEX_EVENT(2, &ret, sizeof(ret));
	return ret;
}
/*
 * Use cio_tpi to get a pending interrupt and call the interrupt handler.
 * Return non-zero if an interrupt was processed, zero otherwise.
 */
static int cio_tpi(void)
{
	struct tpi_info *tpi_info;
	struct subchannel *sch;
	struct irb *irb;
	int irq_context;

	tpi_info = (struct tpi_info *)&S390_lowcore.subchannel_id;
	if (tpi(NULL) != 1)
		return 0;
	irb = (struct irb *)&S390_lowcore.irb;
	/* Store interrupt response block to lowcore. */
	if (tsch(tpi_info->schid, irb) != 0)
		/* Not status pending or not operational. */
		return 1;
	sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
	if (!sch)
		return 1;
	irq_context = in_interrupt();
	if (!irq_context)
		local_bh_disable();
	irq_enter();
	spin_lock(sch->lock);
	memcpy(&sch->schib.scsw, &irb->scsw, sizeof(union scsw));
	if (sch->driver && sch->driver->irq)
		sch->driver->irq(sch);
	spin_unlock(sch->lock);
	irq_exit();
	if (!irq_context)
		_local_bh_enable();
	return 1;
}
/*
 * Use tpi to get a pending interrupt and call the interrupt handler.
 * Return non-zero if an interrupt was processed, zero otherwise.
 */
static inline int cio_tpi(void)
{
	struct tpi_info *tpi_info;
	struct subchannel *sch;
	struct irb *irb;

	tpi_info = (struct tpi_info *) __LC_SUBCHANNEL_ID;
	if (tpi(NULL) != 1)
		return 0;
	irb = (struct irb *) __LC_IRB;
	/* Store interrupt response block to lowcore. */
	if (tsch(tpi_info->irq, irb) != 0)
		/* Not status pending or not operational. */
		return 1;
	sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
	if (!sch)
		return 1;
	local_bh_disable();
	irq_enter();
	spin_lock(&sch->lock);
	memcpy(&sch->schib.scsw, &irb->scsw, sizeof(struct scsw));
	if (sch->driver && sch->driver->irq)
		sch->driver->irq(&sch->dev);
	spin_unlock(&sch->lock);
	irq_exit();
	__local_bh_enable();
	return 1;
}
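All of the cio_tpi() variants branch on the raw condition codes of the tpi and tsch instructions (tpi(NULL) != 1, tsch(...) != 0). Here is a hedged sketch of what such wrappers look like, assuming the usual ipm/srl pattern for extracting the condition code; the asm constraints are illustrative, not lifted verbatim from ioasm.h:

/*
 * Illustrative wrappers: issue the instruction, then extract the
 * condition code (0-3) from the program mask. Sketch only; the real
 * ioasm.h versions differ in constraint details across kernel versions.
 */
static inline int tpi(struct tpi_info *addr)
{
	int ccode;

	/* With a NULL address the interruption code is stored in the lowcore. */
	asm volatile(
		"	tpi	0(%2)\n"
		"	ipm	%0\n"
		"	srl	%0,28"
		: "=d" (ccode), "=m" (*addr)
		: "a" (addr)
		: "cc");
	return ccode;	/* 0: no interrupt pending, 1: interrupt info stored */
}

static inline int tsch(struct subchannel_id schid, struct irb *addr)
{
	register struct subchannel_id reg1 asm ("1") = schid;
	int ccode;

	asm volatile(
		"	tsch	0(%3)\n"
		"	ipm	%0\n"
		"	srl	%0,28"
		: "=d" (ccode), "=m" (*addr)
		: "d" (reg1), "a" (addr)
		: "cc");
	return ccode;	/* 0: status was pending, IRB stored;
			 * 1: not status pending; 3: not operational */
}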
/*
 * do_IRQ() handles all normal I/O device IRQ's.
 */
void __irq_entry do_IRQ(struct pt_regs *regs)
{
	struct tpi_info *tpi_info;
	struct subchannel *sch;
	struct irb *irb;
	struct pt_regs *old_regs;

	old_regs = set_irq_regs(regs);
	irq_enter();
	__this_cpu_write(s390_idle.nohz_delay, 1);
	if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
		/* Serve timer interrupts first. */
		clock_comparator_work();
	/* Get interrupt information from lowcore. */
	tpi_info = (struct tpi_info *)&S390_lowcore.subchannel_id;
	irb = (struct irb *)&S390_lowcore.irb;
	do {
		kstat_cpu(smp_processor_id()).irqs[IO_INTERRUPT]++;
		/* Adapter (thin) interrupts are processed differently. */
		if (tpi_info->adapter_IO) {
			do_adapter_IO(tpi_info->isc);
			continue;
		}
		sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
		if (!sch) {
			/* Clear pending interrupt condition. */
			kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
			tsch(tpi_info->schid, irb);
			continue;
		}
		spin_lock(sch->lock);
		/* Store interrupt response block to lowcore. */
		if (tsch(tpi_info->schid, irb) == 0) {
			/* Keep subchannel information word up to date. */
			memcpy(&sch->schib.scsw, &irb->scsw, sizeof(irb->scsw));
			/* Call interrupt handler if there is one. */
			if (sch->driver && sch->driver->irq)
				sch->driver->irq(sch);
			else
				kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
		} else
			kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
		spin_unlock(sch->lock);
	} while (MACHINE_IS_LPAR && tpi(NULL) != 0);
	irq_exit();
	set_irq_regs(old_regs);
}
/*
 * do_IRQ() handles all normal I/O device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
void do_IRQ(struct pt_regs *regs)
{
	struct tpi_info *tpi_info;
	struct subchannel *sch;
	struct irb *irb;

	irq_enter();
	asm volatile ("mc 0,0");
	if (S390_lowcore.int_clock >= S390_lowcore.jiffy_timer)
		/*
		 * Make sure that the i/o interrupt did not "overtake"
		 * the last HZ timer interrupt.
		 */
		account_ticks(regs);
	/*
	 * Get interrupt information from lowcore
	 */
	tpi_info = (struct tpi_info *) __LC_SUBCHANNEL_ID;
	irb = (struct irb *) __LC_IRB;
	do {
		kstat_cpu(smp_processor_id()).irqs[IO_INTERRUPT]++;
		/*
		 * Non I/O-subchannel thin interrupts are processed differently
		 */
		if (tpi_info->adapter_IO == 1 &&
		    tpi_info->int_type == IO_INTERRUPT_TYPE) {
			do_adapter_IO();
			continue;
		}
		sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
		if (sch)
			spin_lock(&sch->lock);
		/* Store interrupt response block to lowcore. */
		if (tsch(tpi_info->irq, irb) == 0 && sch) {
			/* Keep subchannel information word up to date. */
			memcpy(&sch->schib.scsw, &irb->scsw, sizeof(irb->scsw));
			/* Call interrupt handler if there is one. */
			if (sch->driver && sch->driver->irq)
				sch->driver->irq(&sch->dev);
		}
		if (sch)
			spin_unlock(&sch->lock);
		/*
		 * Are more interrupts pending?
		 * If so, the tpi instruction will update the lowcore
		 * to hold the info for the next interrupt.
		 * We don't do this for VM because a tpi drops the cpu
		 * out of the sie which costs more cycles than it saves.
		 */
	} while (!MACHINE_IS_VM && tpi(NULL) != 0);
	irq_exit();
}
/*
 * Clear the subchannel, then poll briefly with tpi for the resulting
 * interrupt and consume it with tsch.
 */
static inline int __clear_subchannel_easy(unsigned int schid)
{
	int retry;

	if (csch(schid))
		return -ENODEV;
	for (retry = 0; retry < 20; retry++) {
		struct tpi_info ti;

		if (tpi(&ti)) {
			/* Clear pending interrupt condition. */
			tsch(schid, (struct irb *)__LC_IRB);
			return 0;
		}
		udelay(100);
	}
	return -EBUSY;
}
/**
 * cio_enable_subchannel - enable a subchannel.
 * @sch: subchannel to be enabled
 * @intparm: interruption parameter to set
 */
int cio_enable_subchannel(struct subchannel *sch, u32 intparm)
{
	char dbf_txt[15];
	int ccode;
	int retry;
	int ret;

	CIO_TRACE_EVENT(2, "ensch");
	CIO_TRACE_EVENT(2, dev_name(&sch->dev));

	if (sch_is_pseudo_sch(sch))
		return -EINVAL;
	ccode = stsch(sch->schid, &sch->schib);
	if (ccode)
		return -ENODEV;

	for (retry = 5, ret = 0; retry > 0; retry--) {
		sch->schib.pmcw.ena = 1;
		sch->schib.pmcw.isc = sch->isc;
		sch->schib.pmcw.intparm = intparm;
		ret = cio_modify(sch);
		if (ret == -ENODEV)
			break;
		if (ret == -EIO)
			/*
			 * Got a program check in cio_modify. Try without
			 * the concurrent sense bit the next time.
			 */
			sch->schib.pmcw.csense = 0;
		if (ret == 0) {
			stsch(sch->schid, &sch->schib);
			if (sch->schib.pmcw.ena)
				break;
		}
		if (ret == -EBUSY) {
			struct irb irb;

			if (tsch(sch->schid, &irb) != 0)
				break;
		}
	}
	sprintf(dbf_txt, "ret:%d", ret);
	CIO_TRACE_EVENT(2, dbf_txt);
	return ret;
}
/*
 * Clear the I/O subchannel, then poll with tpi until the interrupt for
 * this subchannel arrives; interrupts for other subchannels are
 * consumed and ignored along the way.
 */
static int __clear_io_subchannel_easy(struct subchannel_id schid)
{
	int retry;

	if (csch(schid))
		return -ENODEV;
	for (retry = 0; retry < 20; retry++) {
		struct tpi_info ti;

		if (tpi(&ti)) {
			tsch(ti.schid, this_cpu_ptr(&cio_irb));
			if (schid_equal(&ti.schid, &schid))
				return 0;
		}
		udelay_simple(100);
	}
	return -EBUSY;
}
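The schid_equal() test is what lets the loop keep draining unrelated interrupts until the one for the cleared subchannel shows up. A minimal sketch of such a helper, assuming subchannel ids compare bitwise (which matches how the cio code treats them):

/* Sketch: two subchannel ids are equal iff their bits match. */
static inline int schid_equal(struct subchannel_id *schid1,
			      struct subchannel_id *schid2)
{
	return !memcmp(schid1, schid2, sizeof(struct subchannel_id));
}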
/*
 * Use cio_tpi to get a pending interrupt and call the interrupt handler.
 * Return non-zero if an interrupt was processed, zero otherwise.
 */
static int cio_tpi(void)
{
	struct tpi_info *tpi_info;
	struct subchannel *sch;
	struct irb *irb;
	int irq_context;

	tpi_info = (struct tpi_info *)&S390_lowcore.subchannel_id;
	if (tpi(NULL) != 1)
		return 0;
	kstat_cpu(smp_processor_id()).irqs[IO_INTERRUPT]++;
	if (tpi_info->adapter_IO) {
		do_adapter_IO(tpi_info->isc);
		return 1;
	}
	irb = (struct irb *)&S390_lowcore.irb;
	/* Store interrupt response block to lowcore. */
	if (tsch(tpi_info->schid, irb) != 0) {
		/* Not status pending or not operational. */
		kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
		return 1;
	}
	sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
	if (!sch) {
		kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
		return 1;
	}
	irq_context = in_interrupt();
	if (!irq_context)
		local_bh_disable();
	irq_enter();
	spin_lock(sch->lock);
	memcpy(&sch->schib.scsw, &irb->scsw, sizeof(union scsw));
	if (sch->driver && sch->driver->irq)
		sch->driver->irq(sch);
	else
		kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
	spin_unlock(sch->lock);
	irq_exit();
	if (!irq_context)
		_local_bh_enable();
	return 1;
}
/*
 * Enable subchannel.
 */
int cio_enable_subchannel(struct subchannel *sch, unsigned int isc)
{
	char dbf_txt[15];
	int ccode;
	int retry;
	int ret;

	CIO_TRACE_EVENT(2, "ensch");
	CIO_TRACE_EVENT(2, sch->dev.bus_id);

	ccode = stsch(sch->irq, &sch->schib);
	if (ccode)
		return -ENODEV;

	for (retry = 5, ret = 0; retry > 0; retry--) {
		sch->schib.pmcw.ena = 1;
		sch->schib.pmcw.isc = isc;
		sch->schib.pmcw.intparm = (__u32)(unsigned long)sch;
		ret = cio_modify(sch);
		if (ret == -ENODEV)
			break;
		if (ret == -EIO)
			/*
			 * Got a program check in cio_modify. Try without
			 * the concurrent sense bit the next time.
			 */
			sch->schib.pmcw.csense = 0;
		if (ret == 0) {
			stsch(sch->irq, &sch->schib);
			if (sch->schib.pmcw.ena)
				break;
		}
		if (ret == -EBUSY) {
			struct irb irb;

			if (tsch(sch->irq, &irb) != 0)
				break;
		}
	}
	sprintf(dbf_txt, "ret:%d", ret);
	CIO_TRACE_EVENT(2, dbf_txt);
	return ret;
}
/*
 * cio_commit_config - apply configuration to the subchannel
 */
int cio_commit_config(struct subchannel *sch)
{
	int ccode, retry, ret = 0;
	struct schib schib;
	struct irb irb;

	if (stsch(sch->schid, &schib) || !css_sch_is_valid(&schib))
		return -ENODEV;

	for (retry = 0; retry < 5; retry++) {
		/* copy desired changes to local schib */
		cio_apply_config(sch, &schib);
		ccode = msch(sch->schid, &schib);
		if (ccode < 0) /* -EIO if msch gets a program check. */
			return ccode;
		switch (ccode) {
		case 0: /* successful */
			if (stsch(sch->schid, &schib) ||
			    !css_sch_is_valid(&schib))
				return -ENODEV;
			if (cio_check_config(sch, &schib)) {
				/* commit changes from local schib */
				memcpy(&sch->schib, &schib, sizeof(schib));
				return 0;
			}
			ret = -EAGAIN;
			break;
		case 1: /* status pending */
			ret = -EBUSY;
			if (tsch(sch->schid, &irb))
				return ret;
			break;
		case 2: /* busy */
			udelay(100); /* allow for recovery */
			ret = -EBUSY;
			break;
		case 3: /* not operational */
			return -ENODEV;
		}
	}
	return ret;
}
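The switch statement above mirrors the architected msch condition codes directly, and the ccode < 0 check relies on the wrapper mapping a program check to -EIO. A hedged sketch of such a wrapper, assuming the usual ipm/srl pattern plus an exception-table fixup; the asm and EX_TABLE details are an assumption for illustration, not the exact kernel source:

/*
 * Illustrative msch wrapper. Returns the condition code:
 *   0 - function initiated, 1 - status pending, 2 - subchannel busy,
 *   3 - not operational; -EIO if the instruction program-checks
 * (the exception-table fixup skips the ipm, leaving ccode at -EIO).
 */
static inline int msch(struct subchannel_id schid, struct schib *addr)
{
	register struct subchannel_id reg1 asm ("1") = schid;
	int ccode = -EIO;

	asm volatile(
		"	msch	0(%2)\n"
		"0:	ipm	%0\n"
		"	srl	%0,28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+d" (ccode)
		: "d" (reg1), "a" (addr), "m" (*addr)
		: "cc");
	return ccode;
}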