/**
 * chsc_scm_info() - store SCM information (SSI)
 * @scm_area: request and response block for SSI
 * @token: continuation token
 *
 * Returns 0 on success.
 */
int chsc_scm_info(struct chsc_scm_info *scm_area, u64 token)
{
	int ccode, ret;

	memset(scm_area, 0, sizeof(*scm_area));
	scm_area->request.length = 0x0020;
	scm_area->request.code = 0x004c;
	scm_area->reqtok = token;

	ccode = chsc(scm_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}
	ret = chsc_error_from_response(scm_area->response.code);
	if (ret != 0)
		CIO_MSG_EVENT(2, "chsc: scm info failed (rc=%04x)\n",
			      scm_area->response.code);
out:
	return ret;
}
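/*
 * Minimal caller sketch (not part of the original source; handle_scm_block()
 * is hypothetical, and the restok field is assumed to carry the continuation
 * token for the next request): chsc_scm_info() is driven in a loop, feeding
 * the token back in until no more data is pending.
 */
static int example_scm_query(void)
{
	struct chsc_scm_info *scm_info;
	u64 token = 0;
	int ret;

	/* The CHSC area is assumed to need a DMA-capable 4K page. */
	scm_info = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scm_info)
		return -ENOMEM;
	do {
		ret = chsc_scm_info(scm_info, token);
		if (ret)
			break;
		handle_scm_block(scm_info);	/* hypothetical consumer */
		token = scm_info->restok;	/* continue where SSI stopped */
	} while (token);
	free_page((unsigned long)scm_info);
	return ret;
}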
static inline int cio_start_handle_notoper(struct subchannel *sch, __u8 lpm)
{
	char dbf_text[15];

	if (lpm != 0)
		sch->lpm &= ~lpm;
	else
		sch->lpm = 0;

	stsch(sch->schid, &sch->schib);

	CIO_MSG_EVENT(0, "cio_start: 'not oper' status for "
		      "subchannel 0.%x.%04x!\n", sch->schid.ssid,
		      sch->schid.sch_no);
	sprintf(dbf_text, "no%s", sch->dev.bus_id);
	CIO_TRACE_EVENT(0, dbf_text);
	CIO_HEX_EVENT(0, &sch->schib, sizeof(struct schib));

	return (sch->lpm ? -EACCES : -ENODEV);
}
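/*
 * Context sketch (assumption, not from the original source): the helper above
 * is the fallback path for a start-I/O routine when ssch() returns a condition
 * code that signals "device/path not operational".
 */
static int example_start_io(struct subchannel *sch, struct orb *orb, __u8 lpm)
{
	int ccode = ssch(sch->schid, orb);

	switch (ccode) {
	case 0:			/* start function initiated */
		return 0;
	case 1:			/* status pending */
	case 2:			/* busy */
		return -EBUSY;
	default:		/* device/path not operational */
		return cio_start_handle_notoper(sch, lpm);
	}
}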
static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
{
	struct schib schib;

	if (!slow) {
		/* Will be done on the slow path. */
		return -EAGAIN;
	}
	if (stsch_err(schid, &schib)) {
		/* Subchannel is not provided. */
		return -ENXIO;
	}
	if (!css_sch_is_valid(&schib)) {
		/* Unusable - ignore. */
		return 0;
	}
	CIO_MSG_EVENT(4, "event: sch 0.%x.%04x, new\n", schid.ssid,
		      schid.sch_no);

	return css_probe_device(schid);
}
int css_register_subchannel(struct subchannel *sch)
{
	int ret;

	/* Initialize the subchannel structure */
	sch->dev.parent = &channel_subsystems[0]->device;
	sch->dev.bus = &css_bus_type;
	sch->dev.groups = default_subch_attr_groups;
	/*
	 * We don't want to generate uevents for I/O subchannels that don't
	 * have a working ccw device behind them since they will be
	 * unregistered before they can be used anyway, so we delay the add
	 * uevent until after device recognition was successful.
	 * Note that we suppress the uevent for all subchannel types;
	 * the subchannel driver can decide itself when it wants to inform
	 * userspace of its existence.
	 */
	dev_set_uevent_suppress(&sch->dev, 1);
	css_update_ssd_info(sch);
	/* make it known to the system */
	ret = css_sch_device_register(sch);
	if (ret) {
		CIO_MSG_EVENT(0, "Could not register sch 0.%x.%04x: %d\n",
			      sch->schid.ssid, sch->schid.sch_no, ret);
		return ret;
	}
	if (!sch->driver) {
		/*
		 * No driver matched. Generate the uevent now so that
		 * a fitting driver module may be loaded based on the
		 * modalias.
		 */
		dev_set_uevent_suppress(&sch->dev, 0);
		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
	}
	return ret;
}
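/*
 * Counterpart sketch (assumption, not from the original source): once device
 * recognition behind an I/O subchannel has succeeded, the subchannel driver
 * lifts the suppression set above and emits the delayed ADD uevent itself.
 */
static void example_announce_subchannel(struct subchannel *sch)
{
	dev_set_uevent_suppress(&sch->dev, 0);
	kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
}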
/**
 * chp_new - register a new channel-path
 * @chpid: channel-path ID
 *
 * Create and register a data structure representing a new channel-path.
 * Return zero on success, non-zero otherwise.
 */
int chp_new(struct chp_id chpid)
{
	struct channel_path *chp;
	int ret;

	if (chp_is_registered(chpid))
		return 0;
	chp = kzalloc(sizeof(struct channel_path), GFP_KERNEL);
	if (!chp)
		return -ENOMEM;

	/* fill in status, etc. */
	chp->chpid = chpid;
	chp->state = 1;
	chp->dev.parent = &channel_subsystems[chpid.cssid]->device;
	chp->dev.groups = chp_attr_groups;
	chp->dev.release = chp_release;
	mutex_init(&chp->lock);

	/* Obtain channel path description and fill it in. */
	ret = chp_update_desc(chp);
	if (ret)
		goto out_free;
	if ((chp->desc.flags & 0x80) == 0) {
		ret = -ENODEV;
		goto out_free;
	}
	/* Get channel-measurement characteristics. */
	if (css_chsc_characteristics.scmc && css_chsc_characteristics.secm) {
		ret = chsc_get_channel_measurement_chars(chp);
		if (ret)
			goto out_free;
	} else {
		chp->cmg = -1;
	}
	dev_set_name(&chp->dev, "chp%x.%02x", chpid.cssid, chpid.id);

	/* make it known to the system */
	ret = device_register(&chp->dev);
	if (ret) {
		CIO_MSG_EVENT(0, "Could not register chp%x.%02x: %d\n",
			      chpid.cssid, chpid.id, ret);
		put_device(&chp->dev);
		goto out;
	}
	mutex_lock(&channel_subsystems[chpid.cssid]->mutex);
	if (channel_subsystems[chpid.cssid]->cm_enabled) {
		ret = chp_add_cmg_attr(chp);
		if (ret) {
			device_unregister(&chp->dev);
			mutex_unlock(&channel_subsystems[chpid.cssid]->mutex);
			goto out;
		}
	}
	channel_subsystems[chpid.cssid]->chps[chpid.id] = chp;
	mutex_unlock(&channel_subsystems[chpid.cssid]->mutex);
	goto out;
out_free:
	kfree(chp);
out:
	return ret;
}
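/*
 * Usage sketch (assumption, not from the original source): an event handler
 * reacting to a "channel path available" notification makes sure the path is
 * registered before doing any per-path processing.
 */
static void example_chp_available(struct chp_id chpid)
{
	if (chp_new(chpid))
		return;	/* could not register the channel path */
	/* ... trigger path verification for affected subchannels ... */
}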
/* Parsing the commandline for blacklist parameters, e.g. to blacklist
 * bus ids 0.0.1234, 0.0.1235 and 0.0.1236, you could use any of:
 * - cio_ignore=1234-1236
 * - cio_ignore=0x1234-0x1235,1236
 * - cio_ignore=0x1234,1235-1236
 * - cio_ignore=1236 cio_ignore=1234-0x1236
 * - cio_ignore=1234 cio_ignore=1236 cio_ignore=0x1235
 * - cio_ignore=0.0.1234-0.0.1236
 * - cio_ignore=0.0.1234,0x1235,1236
 * - ...
 */
static int __init blacklist_setup(char *str)
{
	CIO_MSG_EVENT(6, "Reading blacklist parameters\n");
	return blacklist_parse_parameters(str, add);
}
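/*
 * The parser is hooked into early command-line handling, presumably via
 * __setup(), so that every "cio_ignore=" occurrence on the kernel command
 * line is passed to blacklist_setup() during boot.
 */
__setup("cio_ignore=", blacklist_setup);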
/*
 * cio_validate_subchannel()
 *
 * Find out subchannel type and initialize struct subchannel.
 * Return codes:
 *   SUBCHANNEL_TYPE_IO for a normal I/O subchannel
 *   SUBCHANNEL_TYPE_CHSC for a chsc subchannel
 *   SUBCHANNEL_TYPE_MESSAGE for a messaging subchannel
 *   SUBCHANNEL_TYPE_ADM for an ADM subchannel
 *   -ENXIO for non-defined subchannels
 *   -ENODEV for subchannels with invalid device number or blacklisted devices
 */
int cio_validate_subchannel(struct subchannel *sch, unsigned int irq)
{
	char dbf_txt[15];
	int ccode;

	sprintf(dbf_txt, "valsch%x", irq);
	CIO_TRACE_EVENT(4, dbf_txt);

	/* Nuke all fields. */
	memset(sch, 0, sizeof(struct subchannel));

	spin_lock_init(&sch->lock);

	/* Set a name for the subchannel */
	snprintf(sch->dev.bus_id, BUS_ID_SIZE, "0.0.%04x", irq);

	/*
	 * The first subchannel that is not operational (ccode==3)
	 * indicates that there aren't any more devices available.
	 */
	sch->irq = irq;
	ccode = stsch(irq, &sch->schib);
	if (ccode)
		return -ENXIO;

	/* Copy subchannel type from path management control word. */
	sch->st = sch->schib.pmcw.st;

	/* Just being curious, we check for non-I/O subchannels. */
	if (sch->st != 0) {
		CIO_DEBUG(KERN_INFO, 0, "Subchannel %04X reports "
			  "non-I/O subchannel type %04X\n",
			  sch->irq, sch->st);
		/* We stop here for non-I/O subchannels. */
		return sch->st;
	}

	/* Initialization for I/O subchannels. */
	if (!sch->schib.pmcw.dnv)
		/* I/O subchannel, but the device number is invalid. */
		return -ENODEV;

	/* Devno is valid. */
	if (is_blacklisted(sch->schib.pmcw.dev)) {
		/*
		 * This device must not be known to Linux, so we simply
		 * say that there is no device and return -ENODEV.
		 */
		CIO_MSG_EVENT(0, "Blacklisted device detected "
			      "at devno %04X\n", sch->schib.pmcw.dev);
		return -ENODEV;
	}
	sch->opm = 0xff;
	chsc_validate_chpids(sch);
	sch->lpm = sch->schib.pmcw.pim &
		sch->schib.pmcw.pam &
		sch->schib.pmcw.pom &
		sch->opm;

	CIO_DEBUG(KERN_INFO, 0, "Detected device %04X on subchannel %04X"
		  " - PIM = %02X, PAM = %02X, POM = %02X\n",
		  sch->schib.pmcw.dev, sch->irq, sch->schib.pmcw.pim,
		  sch->schib.pmcw.pam, sch->schib.pmcw.pom);

	/*
	 * We now have to initially ...
	 *  ... set the "interruption subclass"
	 *  ... enable "concurrent sense"
	 *  ... enable "multipath mode" if more than one CHPID is available.
	 *      This is done regardless of whether multiple paths are
	 *      available for us.
	 */
	sch->schib.pmcw.isc = 3;	/* could be something else */
	sch->schib.pmcw.csense = 1;	/* concurrent sense */
	sch->schib.pmcw.ena = 0;
	if ((sch->lpm & (sch->lpm - 1)) != 0)
		sch->schib.pmcw.mp = 1;	/* multipath mode */
	return 0;
}
/**
 * cio_validate_subchannel - basic validation of subchannel
 * @sch: subchannel structure to be filled out
 * @schid: subchannel id
 *
 * Find out subchannel type and initialize struct subchannel.
 * Return codes:
 *   0 on success
 *   -ENXIO for non-defined subchannels
 *   -ENODEV for invalid subchannels or blacklisted devices
 *   -EIO for subchannels in an invalid subchannel set
 */
int cio_validate_subchannel(struct subchannel *sch, struct subchannel_id schid)
{
	char dbf_txt[15];
	int ccode;
	int err;

	sprintf(dbf_txt, "valsch%x", schid.sch_no);
	CIO_TRACE_EVENT(4, dbf_txt);

	/* Nuke all fields. */
	memset(sch, 0, sizeof(struct subchannel));

	sch->schid = schid;
	if (cio_is_console(schid)) {
		sch->lock = cio_get_console_lock();
	} else {
		err = cio_create_sch_lock(sch);
		if (err)
			goto out;
	}
	mutex_init(&sch->reg_mutex);
	/* Set a name for the subchannel */
	if (cio_is_console(schid))
		sch->dev.init_name = cio_get_console_sch_name(schid);
	else
		dev_set_name(&sch->dev, "0.%x.%04x", schid.ssid, schid.sch_no);

	/*
	 * The first subchannel that is not operational (ccode==3)
	 * indicates that there aren't any more devices available.
	 * If stsch gets an exception, it means the current subchannel set
	 * is not valid.
	 */
	ccode = stsch_err(schid, &sch->schib);
	if (ccode) {
		err = (ccode == 3) ? -ENXIO : ccode;
		goto out;
	}
	/* Copy subchannel type from path management control word. */
	sch->st = sch->schib.pmcw.st;

	switch (sch->st) {
	case SUBCHANNEL_TYPE_IO:
		err = cio_validate_io_subchannel(sch);
		break;
	case SUBCHANNEL_TYPE_MSG:
		err = cio_validate_msg_subchannel(sch);
		break;
	default:
		err = 0;
	}
	if (err)
		goto out;

	CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports subchannel type %04X\n",
		      sch->schid.ssid, sch->schid.sch_no, sch->st);
	return 0;
out:
	if (!cio_is_console(schid))
		kfree(sch->lock);
	sch->lock = NULL;
	return err;
}
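/*
 * Caller sketch (assumption, not from the original source): a typical user
 * allocates the subchannel structure from DMA-capable memory, lets
 * cio_validate_subchannel() fill it in and frees it again if validation
 * fails.
 */
static struct subchannel *example_alloc_subchannel(struct subchannel_id schid)
{
	struct subchannel *sch;
	int ret;

	sch = kmalloc(sizeof(*sch), GFP_KERNEL | GFP_DMA);
	if (!sch)
		return ERR_PTR(-ENOMEM);
	ret = cio_validate_subchannel(sch, schid);
	if (ret < 0) {
		kfree(sch);
		return ERR_PTR(ret);
	}
	return sch;
}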
static int css_evaluate_subchannel(int irq, int slow)
{
	int event, ret, disc;
	struct subchannel *sch;
	unsigned long flags;

	sch = get_subchannel_by_schid(irq);
	disc = sch ? device_is_disconnected(sch) : 0;
	if (disc && slow) {
		if (sch)
			put_device(&sch->dev);
		return 0; /* Already processed. */
	}
	/*
	 * We've got a machine check, so running I/O won't get an interrupt.
	 * Kill any pending timers.
	 */
	if (sch)
		device_kill_pending_timer(sch);
	if (!disc && !slow) {
		if (sch)
			put_device(&sch->dev);
		return -EAGAIN; /* Will be done on the slow path. */
	}
	event = css_get_subchannel_status(sch, irq);
	CIO_MSG_EVENT(4, "Evaluating schid %04x, event %d, %s, %s path.\n",
		      irq, event,
		      sch ? (disc ? "disconnected" : "normal") : "unknown",
		      slow ? "slow" : "fast");
	switch (event) {
	case CIO_NO_PATH:
	case CIO_GONE:
		if (!sch) {
			/* Never used this subchannel. Ignore. */
			ret = 0;
			break;
		}
		if (disc && (event == CIO_NO_PATH)) {
			/*
			 * Uargh, hack again. Because we don't get a machine
			 * check on configure on, our path bookkeeping can
			 * be out of date here (it's fine while we only do
			 * logical varying or get chsc machine checks). We
			 * need to force reprobing or we might miss devices
			 * coming operational again. It won't do harm in real
			 * no path situations.
			 */
			spin_lock_irqsave(&sch->lock, flags);
			device_trigger_reprobe(sch);
			spin_unlock_irqrestore(&sch->lock, flags);
			ret = 0;
			break;
		}
		if (sch->driver && sch->driver->notify &&
		    sch->driver->notify(&sch->dev, event)) {
			cio_disable_subchannel(sch);
			device_set_disconnected(sch);
			ret = 0;
			break;
		}
		/*
		 * Unregister subchannel.
		 * The device will be killed automatically.
		 */
		cio_disable_subchannel(sch);
		device_unregister(&sch->dev);
		/* Reset intparm to zeroes. */
		sch->schib.pmcw.intparm = 0;
		cio_modify(sch);
		put_device(&sch->dev);
		ret = 0;
		break;
	case CIO_REVALIDATE:
		/*
		 * Revalidation machine check. Sick.
		 * We don't notify the driver since we have to throw the device
		 * away in any case.
		 */
		if (!disc) {
			device_unregister(&sch->dev);
			/* Reset intparm to zeroes. */
			sch->schib.pmcw.intparm = 0;
			cio_modify(sch);
			put_device(&sch->dev);
			ret = css_probe_device(irq);
		} else {
			/*
			 * We can't immediately deregister the disconnected
			 * device since it might block.
			 */
			spin_lock_irqsave(&sch->lock, flags);
			device_trigger_reprobe(sch);
			spin_unlock_irqrestore(&sch->lock, flags);
			ret = 0;
		}
		break;
	case CIO_OPER:
		if (disc) {
			spin_lock_irqsave(&sch->lock, flags);
			/* Get device operational again. */
			device_trigger_reprobe(sch);
			spin_unlock_irqrestore(&sch->lock, flags);
		}
		ret = sch ? 0 : css_probe_device(irq);
		break;
	default:
		BUG();
		ret = 0;
	}
	return ret;
}