static int
cio_console_irq(void)
{
	int irq;

	if (console_irq != -1) {
		/* VM provided us with the irq number of the console. */
		if (stsch(console_irq, &console_subchannel.schib) != 0 ||
		    !console_subchannel.schib.pmcw.dnv)
			return -1;
		console_devno = console_subchannel.schib.pmcw.dev;
	} else if (console_devno != -1) {
		/* At least the console device number is known. */
		for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) {
			if (stsch(irq, &console_subchannel.schib) != 0)
				break;
			if (console_subchannel.schib.pmcw.dnv &&
			    console_subchannel.schib.pmcw.dev == console_devno) {
				console_irq = irq;
				break;
			}
		}
		if (console_irq == -1)
			return -1;
	} else {
		/* unlike in 2.4, we cannot autoprobe here, since
		 * the channel subsystem is not fully initialized.
		 * With some luck, the HWC console can take over */
		printk(KERN_WARNING "No ccw console found!\n");
		return -1;
	}
	return console_irq;
}
/* Make sure all subchannels are quiet before we re-ipl an lpar. */
void reipl(unsigned long devno)
{
	unsigned int schid;

	local_irq_disable();
	for (schid = 0; schid <= highest_subchannel; schid++) {
		struct schib schib;

		if (stsch(schid, &schib))
			goto out;
		if (!schib.pmcw.ena)
			continue;
		switch (__disable_subchannel_easy(schid, &schib)) {
		case 0:
		case -ENODEV:
			break;
		default: /* -EBUSY */
			if (__clear_subchannel_easy(schid))
				break; /* give up... */
			stsch(schid, &schib);
			__disable_subchannel_easy(schid, &schib);
		}
	}
out:
	do_reipl(devno);
}
static int set_schib(struct ccw_device *cdev, u32 mme, int mbfc,
		     unsigned long address)
{
	int ret;
	int retry;
	struct subchannel *sch;
	struct schib *schib;

	sch = to_subchannel(cdev->dev.parent);
	schib = &sch->schib;
	/* msch can silently fail, so do it again if necessary */
	for (retry = 0; retry < 3; retry++) {
		/* prepare schib */
		stsch(sch->schid, schib);
		schib->pmcw.mme = mme;
		schib->pmcw.mbfc = mbfc;
		/* address can be either a block address or a block index */
		if (mbfc)
			schib->mba = address;
		else
			schib->pmcw.mbi = address;

		/* try to submit it */
		switch (ret = msch_err(sch->schid, schib)) {
		case 0:
			break;
		case 1:
		case 2: /* in I/O or status pending */
			ret = -EBUSY;
			break;
		case 3: /* subchannel is no longer valid */
			ret = -ENODEV;
			break;
		default: /* msch caught an exception */
			ret = -EINVAL;
			break;
		}
		stsch(sch->schid, schib); /* restore the schib */

		if (ret)
			break;

		/* check if it worked */
		if (schib->pmcw.mme == mme &&
		    schib->pmcw.mbfc == mbfc &&
		    (mbfc ? (schib->mba == address)
			  : (schib->pmcw.mbi == address)))
			return 0;

		ret = -EINVAL;
	}
	return ret;
}
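/*
 * Hedged usage sketch (not part of the source above): with mbfc=1 the
 * address argument is taken as the absolute address of a measurement
 * block, with mbfc=0 as a block index.  The helper name and the
 * measurement_block parameter are illustrative assumptions only.
 */
static int example_enable_cmf(struct ccw_device *cdev, void *measurement_block)
{
	/* mme=1 enables measurements, mbfc=1 selects the block-address format */
	return set_schib(cdev, 1, 1, (unsigned long)measurement_block);
}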
static int
cio_get_console_sch_no(void)
{
	struct subchannel_id schid;

	init_subchannel_id(&schid);
	if (console_irq != -1) {
		/* VM provided us with the irq number of the console. */
		schid.sch_no = console_irq;
		if (stsch(schid, &console_subchannel.schib) != 0 ||
		    !console_subchannel.schib.pmcw.dnv)
			return -1;
		console_devno = console_subchannel.schib.pmcw.dev;
	} else if (console_devno != -1) {
		/* At least the console device number is known. */
		for_each_subchannel(cio_test_for_console, NULL);
		if (console_irq == -1)
			return -1;
	} else {
		/* unlike in 2.4, we cannot autoprobe here, since
		 * the channel subsystem is not fully initialized.
		 * With some luck, the HWC console can take over */
		printk(KERN_WARNING "No ccw console found!\n");
		return -1;
	}
	return console_irq;
}
int __init cio_get_iplinfo(struct cio_iplinfo *iplinfo)
{
	static struct chsc_sda_area sda_area __initdata;
	struct subchannel_id schid;
	struct schib schib;

	schid = *(struct subchannel_id *)&S390_lowcore.subchannel_id;
	if (!schid.one)
		return -ENODEV;

	if (schid.ssid) {
		/*
		 * Firmware should have already enabled MSS but whoever started
		 * the kernel might have initiated a channel subsystem reset.
		 * Ensure that MSS is enabled.
		 */
		memset(&sda_area, 0, sizeof(sda_area));
		if (__chsc_enable_facility(&sda_area, CHSC_SDA_OC_MSS))
			return -ENODEV;
	}

	if (stsch(schid, &schib))
		return -ENODEV;
	if (schib.pmcw.st != SUBCHANNEL_TYPE_IO)
		return -ENODEV;
	if (!schib.pmcw.dnv)
		return -ENODEV;

	iplinfo->ssid = schid.ssid;
	iplinfo->devno = schib.pmcw.dev;
	iplinfo->is_qdio = schib.pmcw.qf;
	return 0;
}
/*
 * Function: cio_cancel
 * Issues a "Cancel Subchannel" on the specified subchannel
 * Note: We don't need any fancy intparms and flags here
 *	 since xsch is executed synchronously.
 * Only for common I/O internal use as for now.
 */
int
cio_cancel (struct subchannel *sch)
{
	char dbf_txt[15];
	int ccode;

	if (!sch)
		return -ENODEV;

	CIO_TRACE_EVENT (2, "cancelIO");
	CIO_TRACE_EVENT (2, sch->dev.bus_id);

	ccode = xsch (sch->irq);

	sprintf (dbf_txt, "ccode:%d", ccode);
	CIO_TRACE_EVENT (2, dbf_txt);

	switch (ccode) {
	case 0:		/* success */
		/* Update information in scsw. */
		stsch (sch->irq, &sch->schib);
		return 0;
	case 1:		/* status pending */
		return -EBUSY;
	case 2:		/* not applicable */
		return -EINVAL;
	default:	/* not oper */
		return -ENODEV;
	}
}
static int __shutdown_subchannel_easy(struct subchannel_id schid, void *data)
{
	struct schib schib;

	if (stsch_reset(schid, &schib))
		return -ENXIO;
	if (!schib.pmcw.ena)
		return 0;
	switch (__disable_subchannel_easy(schid, &schib)) {
	case 0:
	case -ENODEV:
		break;
	default: /* -EBUSY */
		switch (schib.pmcw.st) {
		case SUBCHANNEL_TYPE_IO:
			if (__clear_io_subchannel_easy(schid))
				goto out; /* give up... */
			break;
		case SUBCHANNEL_TYPE_CHSC:
			__clear_chsc_subchannel_easy();
			break;
		default:
			/* No default clear strategy */
			break;
		}
		stsch(schid, &schib);
		__disable_subchannel_easy(schid, &schib);
	}
out:
	return 0;
}
/**
 * cio_disable_subchannel - disable a subchannel.
 * @sch: subchannel to disable
 */
int cio_disable_subchannel(struct subchannel *sch)
{
	char dbf_txt[15];
	int ccode;
	int retry;
	int ret;

	CIO_TRACE_EVENT (2, "dissch");
	CIO_TRACE_EVENT(2, dev_name(&sch->dev));

	if (sch_is_pseudo_sch(sch))
		return 0;
	ccode = stsch (sch->schid, &sch->schib);
	if (ccode == 3)		/* Not operational. */
		return -ENODEV;

	if (scsw_actl(&sch->schib.scsw) != 0)
		/*
		 * The disable function must not be called while there are
		 * requests pending for completion!
		 */
		return -EBUSY;

	for (retry = 5, ret = 0; retry > 0; retry--) {
		sch->schib.pmcw.ena = 0;
		ret = cio_modify(sch);
		if (ret == -ENODEV)
			break;
		if (ret == -EBUSY)
			/*
			 * The subchannel is busy or status pending.
			 * We'll disable when the next interrupt is delivered
			 * via the state machine.
			 */
			break;
		if (ret == 0) {
			stsch (sch->schid, &sch->schib);
			if (!sch->schib.pmcw.ena)
				break;
		}
	}
	sprintf (dbf_txt, "ret:%d", ret);
	CIO_TRACE_EVENT (2, dbf_txt);
	return ret;
}
/**
 * cio_enable_subchannel - enable a subchannel.
 * @sch: subchannel to be enabled
 * @intparm: interruption parameter to set
 */
int cio_enable_subchannel(struct subchannel *sch, u32 intparm)
{
	char dbf_txt[15];
	int ccode;
	int retry;
	int ret;

	CIO_TRACE_EVENT (2, "ensch");
	CIO_TRACE_EVENT(2, dev_name(&sch->dev));

	if (sch_is_pseudo_sch(sch))
		return -EINVAL;
	ccode = stsch (sch->schid, &sch->schib);
	if (ccode)
		return -ENODEV;

	for (retry = 5, ret = 0; retry > 0; retry--) {
		sch->schib.pmcw.ena = 1;
		sch->schib.pmcw.isc = sch->isc;
		sch->schib.pmcw.intparm = intparm;
		ret = cio_modify(sch);
		if (ret == -ENODEV)
			break;
		if (ret == -EIO)
			/*
			 * Got a program check in cio_modify. Try without
			 * the concurrent sense bit the next time.
			 */
			sch->schib.pmcw.csense = 0;
		if (ret == 0) {
			stsch (sch->schid, &sch->schib);
			if (sch->schib.pmcw.ena)
				break;
		}
		if (ret == -EBUSY) {
			struct irb irb;
			if (tsch(sch->schid, &irb) != 0)
				break;
		}
	}
	sprintf (dbf_txt, "ret:%d", ret);
	CIO_TRACE_EVENT (2, dbf_txt);
	return ret;
}
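/*
 * Hedged usage sketch (illustrative, not from the source above): a caller
 * typically passes a cookie as intparm so the interrupt handler can find
 * the owning object again, e.g. the subchannel pointer truncated to 32
 * bits, mirroring the value the older variant further down hard-codes.
 * The helper name is hypothetical.
 */
static int example_bring_up(struct subchannel *sch)
{
	int ret;

	ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
	if (ret)
		return ret;	/* -ENODEV, -EIO or -EBUSY from the retry loop */
	/* ... subchannel is enabled, I/O may now be started ... */
	return 0;
}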
/**
 * cio_update_schib - Perform stsch and update schib if subchannel is valid.
 * @sch: subchannel on which to perform stsch
 * Return zero on success, -ENODEV otherwise.
 */
int cio_update_schib(struct subchannel *sch)
{
	struct schib schib;

	if (stsch(sch->schid, &schib) || !css_sch_is_valid(&schib))
		return -ENODEV;

	memcpy(&sch->schib, &schib, sizeof(schib));
	return 0;
}
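/*
 * Hedged usage sketch (illustrative only): refresh the cached schib before
 * inspecting device-number validity.  The helper name and the devno
 * parameter are hypothetical; only pmcw.dnv and pmcw.dev from the code
 * above are relied upon.
 */
static int example_devno_still_matches(struct subchannel *sch, u16 devno)
{
	if (cio_update_schib(sch))
		return -ENODEV;	/* subchannel vanished or is invalid */
	return (sch->schib.pmcw.dnv && sch->schib.pmcw.dev == devno) ? 1 : 0;
}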
/*
 * Enable subchannel.
 */
int
cio_enable_subchannel (struct subchannel *sch, unsigned int isc)
{
	char dbf_txt[15];
	int ccode;
	int retry;
	int ret;

	CIO_TRACE_EVENT (2, "ensch");
	CIO_TRACE_EVENT (2, sch->dev.bus_id);

	ccode = stsch (sch->irq, &sch->schib);
	if (ccode)
		return -ENODEV;

	for (retry = 5, ret = 0; retry > 0; retry--) {
		sch->schib.pmcw.ena = 1;
		sch->schib.pmcw.isc = isc;
		sch->schib.pmcw.intparm = (__u32)(unsigned long)sch;
		ret = cio_modify(sch);
		if (ret == -ENODEV)
			break;
		if (ret == -EIO)
			/*
			 * Got a program check in cio_modify. Try without
			 * the concurrent sense bit the next time.
			 */
			sch->schib.pmcw.csense = 0;
		if (ret == 0) {
			stsch (sch->irq, &sch->schib);
			if (sch->schib.pmcw.ena)
				break;
		}
		if (ret == -EBUSY) {
			struct irb irb;
			if (tsch(sch->irq, &irb) != 0)
				break;
		}
	}
	sprintf (dbf_txt, "ret:%d", ret);
	CIO_TRACE_EVENT (2, dbf_txt);
	return ret;
}
/*
 * cio_commit_config - apply configuration to the subchannel
 */
int cio_commit_config(struct subchannel *sch)
{
	int ccode, retry, ret = 0;
	struct schib schib;
	struct irb irb;

	if (stsch(sch->schid, &schib) || !css_sch_is_valid(&schib))
		return -ENODEV;

	for (retry = 0; retry < 5; retry++) {
		/* copy desired changes to local schib */
		cio_apply_config(sch, &schib);
		ccode = msch(sch->schid, &schib);
		if (ccode < 0) /* -EIO if msch gets a program check. */
			return ccode;
		switch (ccode) {
		case 0: /* successful */
			if (stsch(sch->schid, &schib) ||
			    !css_sch_is_valid(&schib))
				return -ENODEV;
			if (cio_check_config(sch, &schib)) {
				/* commit changes from local schib */
				memcpy(&sch->schib, &schib, sizeof(schib));
				return 0;
			}
			ret = -EAGAIN;
			break;
		case 1: /* status pending */
			ret = -EBUSY;
			if (tsch(sch->schid, &irb))
				return ret;
			break;
		case 2: /* busy */
			udelay(100); /* allow for recovery */
			ret = -EBUSY;
			break;
		case 3: /* not operational */
			return -ENODEV;
		}
	}
	return ret;
}
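/*
 * Hedged usage sketch (illustrative, not from the source above): callers
 * are assumed to stage the desired pmcw settings in a per-subchannel
 * configuration that cio_apply_config() reads.  The sch->config staging
 * area and its field names used here are assumptions; the real layout
 * may differ.
 */
static int example_commit_enable(struct subchannel *sch, u32 intparm)
{
	sch->config.ena = 1;		/* assumed staging field */
	sch->config.intparm = intparm;	/* assumed staging field */
	return cio_commit_config(sch);	/* 0, -EBUSY, -EAGAIN or -ENODEV */
}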
static int
cio_test_for_console(struct subchannel_id schid, void *data)
{
	struct schib schib;

	if (stsch(schid, &schib) != 0)
		return -ENXIO;
	if ((schib.pmcw.st == SUBCHANNEL_TYPE_IO) && schib.pmcw.dnv &&
	    (schib.pmcw.dev == console_devno)) {
		console_irq = schid.sch_no;
		return 1; /* found */
	}
	return 0;
}
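/*
 * Hedged sketch of the iteration contract assumed by the callback above:
 * for_each_subchannel() is expected to walk all subchannel ids and treat a
 * non-zero callback return as "stop scanning this set".  This is only an
 * illustration of that contract, not the in-tree implementation, and
 * max_ssid and __MAX_SUBCHANNEL are assumed bounds.
 */
static int example_for_each_subchannel(int (*fn)(struct subchannel_id, void *),
				       void *data)
{
	struct subchannel_id schid;
	int ret;

	init_subchannel_id(&schid);
	do {
		do {
			ret = fn(schid, data);
			if (ret)	/* callback found what it was after */
				break;
		} while (schid.sch_no++ < __MAX_SUBCHANNEL);
		schid.sch_no = 0;
	} while (schid.ssid++ < max_ssid);
	return ret;
}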
/*
 * Disable subchannel.
 */
int
cio_disable_subchannel (struct subchannel *sch)
{
	char dbf_txt[15];
	int ccode;
	int retry;
	int ret;

	sprintf (dbf_txt, "dissch%x", sch->irq);
	CIO_TRACE_EVENT (2, dbf_txt);

	ccode = stsch (sch->irq, &sch->schib);
	if (ccode == 3)		/* Not operational. */
		return -ENODEV;

	if (sch->schib.scsw.actl != 0)
		/*
		 * the disable function must not be called while there are
		 * requests pending for completion !
		 */
		return -EBUSY;

	sch->schib.pmcw.ena = 0;
	for (retry = 5, ret = 0; retry > 0; retry--) {
		ret = cio_modify(sch);
		if (ret == -ENODEV)
			break;
		if (ret == 0) {
			stsch (sch->irq, &sch->schib);
			if (!sch->schib.pmcw.ena)
				break;
		}
	}
	sprintf (dbf_txt, "ret:%d", ret);
	CIO_TRACE_EVENT (2, dbf_txt);
	return ret;
}
/* Clear all subchannels. */
void clear_all_subchannels(void)
{
	unsigned int schid;

	local_irq_disable();
	for (schid = 0; schid <= highest_subchannel; schid++) {
		struct schib schib;

		if (stsch(schid, &schib))
			break; /* break out of the loop */
		if (!schib.pmcw.ena)
			continue;
		switch (__disable_subchannel_easy(schid, &schib)) {
		case 0:
		case -ENODEV:
			break;
		default: /* -EBUSY */
			if (__clear_subchannel_easy(schid))
				break; /* give up... jump out of switch */
			stsch(schid, &schib);
			__disable_subchannel_easy(schid, &schib);
		}
	}
}
static int stsch_reset(struct subchannel_id schid, struct schib *addr)
{
	int rc;

	pgm_check_occured = 0;
	s390_base_pgm_handler_fn = cio_reset_pgm_check_handler;
	rc = stsch(schid, addr);
	s390_base_pgm_handler_fn = NULL;

	/* The program check handler could have changed pgm_check_occured. */
	barrier();

	if (pgm_check_occured)
		return -EIO;
	else
		return rc;
}
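/*
 * Hedged sketch of the companion program-check handler referenced above:
 * it only needs to record that a program check happened so stsch_reset()
 * can map the exception to -EIO.  This is the minimal form the code above
 * relies on; the in-tree handler may differ in detail.
 */
static void cio_reset_pgm_check_handler(void)
{
	pgm_check_occured = 1;
}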
static inline int
__disable_subchannel_easy(unsigned int schid, struct schib *schib)
{
	int retry, cc;

	cc = 0;
	for (retry = 0; retry < 3; retry++) {
		schib->pmcw.ena = 0;
		cc = msch(schid, schib);
		if (cc)
			return (cc == 3 ? -ENODEV : -EBUSY);
		stsch(schid, schib);
		if (!schib->pmcw.ena)
			return 0;
	}
	return -EBUSY; /* uhm... */
}
static inline int
css_get_subchannel_status(struct subchannel *sch, int schid)
{
	struct schib schib;
	int cc;

	cc = stsch(schid, &schib);
	if (cc)
		return CIO_GONE;
	if (!schib.pmcw.dnv)
		return CIO_GONE;
	if (sch && sch->schib.pmcw.dnv &&
	    (schib.pmcw.dev != sch->schib.pmcw.dev))
		return CIO_REVALIDATE;
	if (sch && !sch->lpm)
		return CIO_NO_PATH;
	return CIO_OPER;
}
static int __disable_subchannel_easy(struct subchannel_id schid,
				     struct schib *schib)
{
	int retry, cc;

	cc = 0;
	for (retry = 0; retry < 3; retry++) {
		schib->pmcw.ena = 0;
		cc = msch(schid, schib);
		if (cc)
			return (cc == 3 ? -ENODEV : -EBUSY);
		if (stsch(schid, schib) || !css_sch_is_valid(schib))
			return -ENODEV;
		if (!schib->pmcw.ena)
			return 0;
	}
	return -EBUSY; /* uhm... */
}
static int cio_get_console_sch_no(void)
{
	struct subchannel_id schid;
	struct schib schib;

	init_subchannel_id(&schid);
	if (console_irq != -1) {
		/* VM provided us with the irq number of the console. */
		schid.sch_no = console_irq;
		if (stsch(schid, &schib) != 0 ||
		    (schib.pmcw.st != SUBCHANNEL_TYPE_IO) || !schib.pmcw.dnv)
			return -1;
		console_devno = schib.pmcw.dev;
	} else if (console_devno != -1) {
		/* At least the console device number is known. */
		for_each_subchannel(cio_test_for_console, NULL);
	}
	return console_irq;
}
/**
 * cio_validate_subchannel - basic validation of subchannel
 * @sch: subchannel structure to be filled out
 * @schid: subchannel id
 *
 * Find out subchannel type and initialize struct subchannel.
 * Return codes:
 *   0 on success
 *   -ENXIO for non-defined subchannels
 *   -ENODEV for invalid subchannels or blacklisted devices
 *   -EIO for subchannels in an invalid subchannel set
 */
int cio_validate_subchannel(struct subchannel *sch, struct subchannel_id schid)
{
	char dbf_txt[15];
	int ccode;
	int err;

	sprintf(dbf_txt, "valsch%x", schid.sch_no);
	CIO_TRACE_EVENT(4, dbf_txt);

	/*
	 * The first subchannel that is not-operational (ccode==3)
	 * indicates that there aren't any more devices available.
	 * If stsch gets an exception, it means the current subchannel set
	 * is not valid.
	 */
	ccode = stsch(schid, &sch->schib);
	if (ccode) {
		err = (ccode == 3) ? -ENXIO : ccode;
		goto out;
	}
	sch->st = sch->schib.pmcw.st;
	sch->schid = schid;

	switch (sch->st) {
	case SUBCHANNEL_TYPE_IO:
	case SUBCHANNEL_TYPE_MSG:
		if (!css_sch_is_valid(&sch->schib))
			err = -ENODEV;
		else
			err = cio_check_devno_blacklisted(sch);
		break;
	default:
		err = 0;
	}
	if (err)
		goto out;

	CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports subchannel type %04X\n",
		      sch->schid.ssid, sch->schid.sch_no, sch->st);
out:
	return err;
}
static inline int
cio_start_handle_notoper(struct subchannel *sch, __u8 lpm)
{
	char dbf_text[15];

	if (lpm != 0)
		sch->lpm &= ~lpm;
	else
		sch->lpm = 0;

	stsch (sch->irq, &sch->schib);

	CIO_MSG_EVENT(0, "cio_start: 'not oper' status for "
		      "subchannel %04x!\n", sch->irq);
	sprintf(dbf_text, "no%s", sch->dev.bus_id);
	CIO_TRACE_EVENT(0, dbf_text);
	CIO_HEX_EVENT(0, &sch->schib, sizeof (struct schib));

	return (sch->lpm ? -EACCES : -ENODEV);
}
static int cio_start_handle_notoper(struct subchannel *sch, __u8 lpm)
{
	char dbf_text[15];

	if (lpm != 0)
		sch->lpm &= ~lpm;
	else
		sch->lpm = 0;

	stsch (sch->schid, &sch->schib);

	CIO_MSG_EVENT(2, "cio_start: 'not oper' status for "
		      "subchannel 0.%x.%04x!\n", sch->schid.ssid,
		      sch->schid.sch_no);
	sprintf(dbf_text, "no%s", dev_name(&sch->dev));
	CIO_TRACE_EVENT(0, dbf_text);
	CIO_HEX_EVENT(0, &sch->schib, sizeof (struct schib));

	return (sch->lpm ? -EACCES : -ENODEV);
}
static int __shutdown_subchannel_easy(struct subchannel_id schid, void *data)
{
	struct schib schib;

	if (stsch_err(schid, &schib))
		return -ENXIO;
	if (!schib.pmcw.ena)
		return 0;
	switch (__disable_subchannel_easy(schid, &schib)) {
	case 0:
	case -ENODEV:
		break;
	default: /* -EBUSY */
		if (__clear_subchannel_easy(schid))
			break; /* give up... */
		stsch(schid, &schib);
		__disable_subchannel_easy(schid, &schib);
	}
	return 0;
}
/*
 * cio_validate_subchannel()
 *
 * Find out subchannel type and initialize struct subchannel.
 * Return codes:
 *   SUBCHANNEL_TYPE_IO for a normal io subchannel
 *   SUBCHANNEL_TYPE_CHSC for a chsc subchannel
 *   SUBCHANNEL_TYPE_MESSAGE for a messaging subchannel
 *   SUBCHANNEL_TYPE_ADM for an ADM subchannel
 *   -ENXIO for non-defined subchannels
 *   -ENODEV for subchannels with invalid device number or blacklisted devices
 */
int
cio_validate_subchannel (struct subchannel *sch, unsigned int irq)
{
	char dbf_txt[15];
	int ccode;

	sprintf (dbf_txt, "valsch%x", irq);
	CIO_TRACE_EVENT (4, dbf_txt);

	/* Nuke all fields. */
	memset(sch, 0, sizeof(struct subchannel));

	spin_lock_init(&sch->lock);

	/* Set a name for the subchannel */
	snprintf (sch->dev.bus_id, BUS_ID_SIZE, "0.0.%04x", irq);

	/*
	 * The first subchannel that is not-operational (ccode==3)
	 * indicates that there aren't any more devices available.
	 */
	sch->irq = irq;
	ccode = stsch (irq, &sch->schib);
	if (ccode)
		return -ENXIO;

	/* Copy subchannel type from path management control word. */
	sch->st = sch->schib.pmcw.st;

	/*
	 * ... just being curious we check for non I/O subchannels
	 */
	if (sch->st != 0) {
		CIO_DEBUG(KERN_INFO, 0,
			  "Subchannel %04X reports "
			  "non-I/O subchannel type %04X\n",
			  sch->irq, sch->st);
		/* We stop here for non-io subchannels. */
		return sch->st;
	}

	/* Initialization for io subchannels. */
	if (!sch->schib.pmcw.dnv)
		/* io subchannel but device number is invalid. */
		return -ENODEV;

	/* Devno is valid. */
	if (is_blacklisted (sch->schib.pmcw.dev)) {
		/*
		 * This device must not be known to Linux. So we simply
		 * say that there is no device and return ENODEV.
		 */
		CIO_MSG_EVENT(0, "Blacklisted device detected "
			      "at devno %04X\n", sch->schib.pmcw.dev);
		return -ENODEV;
	}
	sch->opm = 0xff;
	chsc_validate_chpids(sch);
	sch->lpm = sch->schib.pmcw.pim &
		sch->schib.pmcw.pam &
		sch->schib.pmcw.pom &
		sch->opm;

	CIO_DEBUG(KERN_INFO, 0,
		  "Detected device %04X on subchannel %04X"
		  " - PIM = %02X, PAM = %02X, POM = %02X\n",
		  sch->schib.pmcw.dev, sch->irq,
		  sch->schib.pmcw.pim,
		  sch->schib.pmcw.pam, sch->schib.pmcw.pom);

	/*
	 * We now have to initially ...
	 *  ... set "interruption subclass"
	 *  ... enable "concurrent sense"
	 *  ... enable "multipath mode" if more than one
	 *	  CHPID is available. This is done regardless of
	 *	  whether multiple paths are available for us.
	 */
	sch->schib.pmcw.isc = 3;	/* could be something else */
	sch->schib.pmcw.csense = 1;	/* concurrent sense */
	sch->schib.pmcw.ena = 0;
	if ((sch->lpm & (sch->lpm - 1)) != 0)
		sch->schib.pmcw.mp = 1;	/* multipath mode */

	return 0;
}
} __setup("ccw_timeout_log", ccw_timeout_log_setup); static void ccw_timeout_log(struct ccw_device *cdev) { struct schib schib; struct subchannel *sch; struct io_subchannel_private *private; union orb *orb; int cc; sch = to_subchannel(cdev->dev.parent); private = to_io_private(sch); orb = &private->orb; cc = stsch(sch->schid, &schib); printk(KERN_WARNING "cio: ccw device timeout occurred at %llx, " "device information:\n", get_clock()); printk(KERN_WARNING "cio: orb:\n"); print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1, orb, sizeof(*orb), 0); printk(KERN_WARNING "cio: ccw device bus id: %s\n", cdev->dev.bus_id); printk(KERN_WARNING "cio: subchannel bus id: %s\n", sch->dev.bus_id); printk(KERN_WARNING "cio: subchannel lpm: %02x, opm: %02x, " "vpm: %02x\n", sch->lpm, sch->opm, sch->vpm); if (orb->tm.b) { printk(KERN_WARNING "cio: orb indicates transport mode\n"); printk(KERN_WARNING "cio: last tcw:\n"); print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,