static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, char *buf) { struct subchannel *sch = to_subchannel(dev); return sprintf(buf, "css:t%01X\n", sch->st); }
static ssize_t type_show(struct device *dev, struct device_attribute *attr, char *buf) { struct subchannel *sch = to_subchannel(dev); return sprintf(buf, "%01x\n", sch->st); }
static int __unset_registered(struct device *dev, void *data) { struct idset *set = data; struct subchannel *sch = to_subchannel(dev); idset_sch_del(set, sch->schid); return 0; }
/* Match callback for bus_find_device(): compare against the irq in @data. */
static int check_subchannel(struct device *dev, void *data)
{
	int irq = (unsigned long)data;
	struct subchannel *sch = to_subchannel(dev);

	return sch->irq == irq;
}
static int check_subchannel(struct device * dev, void * data) { struct subchannel *sch; struct subchannel_id *schid = data; sch = to_subchannel(dev); return schid_equal(&sch->schid, schid); }
/* Device release callback: free the subchannel structure. */
static void css_subchannel_release(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);

	/* The console subchannel is not dynamically allocated; don't free it. */
	if (cio_is_console(sch->irq))
		return;
	kfree(sch);
}
/* Device release callback: quiesce the subchannel, then free it. */
static void css_subchannel_release(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);

	/* Clear the interruption parameter before the structure goes away. */
	sch->config.intparm = 0;
	cio_commit_config(sch);
	kfree(sch->lock);
	kfree(sch);
}
/* Look up a registered subchannel by its subchannel id, or NULL. */
struct subchannel *
get_subchannel_by_schid(struct subchannel_id schid)
{
	struct device *dev;

	dev = bus_find_device(&css_bus_type, NULL, &schid, check_subchannel);
	if (!dev)
		return NULL;
	return to_subchannel(dev);
}
/* Look up a registered subchannel by its irq number, or NULL. */
struct subchannel *
get_subchannel_by_schid(int irq)
{
	struct device *dev;

	dev = bus_find_device(&css_bus_type, NULL,
			      (void *)(unsigned long)irq, check_subchannel);
	if (!dev)
		return NULL;
	return to_subchannel(dev);
}
/*
 * ccw_timeout_log - gather state for logging after a ccw request timed out.
 *
 * NOTE(review): this definition is truncated in the visible source — only
 * the local declarations and the subchannel/private lookups are shown; the
 * remainder of the body (presumably the stsch/orb inspection that fills
 * @schib, @orb and @cc) is not visible here. Confirm against the full file.
 */
static void ccw_timeout_log(struct ccw_device *cdev)
{
	struct schib schib;
	struct subchannel *sch;
	struct io_subchannel_private *private;
	union orb *orb;
	int cc;

	/* The ccw device's parent device is its subchannel. */
	sch = to_subchannel(cdev->dev.parent);
	private = to_io_private(sch);
/*
 * set_schib - update measurement-block settings in the subchannel's schib.
 * @cdev:	device whose subchannel is modified
 * @mme:	measurement mode enable value to store in the pmcw
 * @mbfc:	measurement block format control flag
 * @address:	measurement block address (if @mbfc) or block index (otherwise)
 *
 * Returns 0 when the settings were accepted by the subchannel, -EBUSY if
 * the subchannel has I/O or status pending, -ENODEV if it is no longer
 * valid, -EINVAL on any other failure.
 */
static int set_schib(struct ccw_device *cdev, u32 mme, int mbfc, unsigned long address)
{
	int ret;
	int retry;
	struct subchannel *sch;
	struct schib *schib;

	sch = to_subchannel(cdev->dev.parent);
	schib = &sch->schib;
	/* msch can silently fail, so do it again if necessary */
	for (retry = 0; retry < 3; retry++) {
		/* prepare schib: refresh it from hardware, then patch it */
		stsch(sch->schid, schib);
		schib->pmcw.mme = mme;
		schib->pmcw.mbfc = mbfc;
		/* address can be either a block address or a block index */
		if (mbfc)
			schib->mba = address;
		else
			schib->pmcw.mbi = address;
		/* try to submit it */
		switch(ret = msch_err(sch->schid, schib)) {
		case 0:
			break;
		case 1:
		case 2: /* in I/O or status pending */
			ret = -EBUSY;
			break;
		case 3: /* subchannel is no longer valid */
			ret = -ENODEV;
			break;
		default: /* msch caught an exception */
			ret = -EINVAL;
			break;
		}
		stsch(sch->schid, schib); /* restore the schib */
		if (ret)
			break;
		/* check if it worked: msch may succeed without applying */
		if (schib->pmcw.mme == mme &&
		    schib->pmcw.mbfc == mbfc &&
		    (mbfc ? (schib->mba == address)
			  : (schib->pmcw.mbi == address)))
			return 0;
		ret = -EINVAL;
	}
	return ret;
}
/* Bus-iterator callback: mark @sch as seen and invoke the caller's hook. */
static int call_fn_known_sch(struct device *dev, void *data)
{
	struct cb_data *cb = data;
	struct subchannel *sch = to_subchannel(dev);

	if (cb->set)
		idset_sch_del(cb->set, sch->schid);
	if (!cb->fn_known_sch)
		return 0;
	return cb->fn_known_sch(sch, cb->data);
}
/**
 * ccw_request_cancel - cancel running I/O request
 * @cdev: ccw device
 *
 * Cancel the I/O request specified by cdev->req. Return non-zero if request
 * has already finished, zero otherwise.
 */
int ccw_request_cancel(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;
	int ret;

	if (req->done)
		return 1;
	req->cancel = 1;
	ret = cio_clear(sch);
	if (ret)
		ccwreq_stop(cdev, ret);
	return 0;
}
/* Stage the measurement-block settings and commit them to the subchannel. */
static int set_schib(struct ccw_device *cdev, u32 mme, int mbfc, unsigned long address)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	sch->config.mme = mme;
	sch->config.mbfc = mbfc;
	/* @address is a block address when @mbfc is set, a block index otherwise. */
	if (mbfc)
		sch->config.mba = address;
	else
		sch->config.mbi = address;

	return cio_commit_config(sch);
}
/*
 * (Re-)Start the operation until retries and paths are exhausted.
 */
static void ccwreq_do(struct ccw_device *cdev)
{
	struct ccw_request *req = &cdev->private->req;
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw1 *cp = req->cp;
	int rc = -EACCES;

	while (req->mask) {
		if (req->retries-- == 0) {
			/* Retries exhausted, try next path. */
			ccwreq_next_path(cdev);
			continue;
		}
		/* Perform start function. */
		memset(&cdev->private->irb, 0, sizeof(struct irb));
		rc = cio_start(sch, cp, (u8) req->mask);
		if (rc == 0) {
			/* I/O started successfully. */
			ccw_device_set_timeout(cdev, req->timeout);
			return;
		}
		if (rc == -ENODEV) {
			/* Permanent device error. */
			break;
		}
		if (rc == -EACCES) {
			/* Permanent path error. */
			ccwreq_next_path(cdev);
			continue;
		}
		/* Temporary improper status. */
		rc = cio_clear(sch);
		if (rc)
			break;
		/* Clear submitted; further processing is interrupt-driven. */
		return;
	}
	ccwreq_stop(cdev, rc);
}
/*
 * (Re-)Start the operation until retries and paths are exhausted.
 */
static void ccwreq_do(struct ccw_device *cdev)
{
	struct ccw_request *req = &cdev->private->req;
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw1 *cp = req->cp;
	int rc = -EACCES;

	while (req->mask) {
		if (req->retries-- == 0) {
			/* Retries exhausted on this path - try the next one. */
			ccwreq_next_path(cdev);
			continue;
		}
		/* Start I/O with a fresh irb. */
		memset(&cdev->private->irb, 0, sizeof(struct irb));
		rc = cio_start(sch, cp, (u8) req->mask);
		if (rc == 0) {
			/* Started successfully - arm the request timeout. */
			ccw_device_set_timeout(cdev, req->timeout);
			return;
		}
		if (rc == -ENODEV) {
			/* Permanent device error - give up. */
			break;
		}
		if (rc == -EACCES) {
			/* Permanent path error - try the next path. */
			ccwreq_next_path(cdev);
			continue;
		}
		/* Temporary improper status - issue a clear. */
		rc = cio_clear(sch);
		if (rc)
			break;
		/* Clear submitted; further processing is interrupt-driven. */
		return;
	}
	ccwreq_stop(cdev, rc);
}
/* Stage the measurement-block settings and commit them to the subchannel. */
static int set_schib(struct ccw_device *cdev, u32 mme, int mbfc, unsigned long address)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	int rc;

	sch->config.mme = mme;
	sch->config.mbfc = mbfc;
	/* @address is a block address when @mbfc is set, a block index otherwise. */
	if (mbfc)
		sch->config.mba = address;
	else
		sch->config.mbi = address;

	rc = cio_commit_config(sch);
	/*
	 * The task was to disable measurement block updates but the
	 * subchannel is already gone. Report success.
	 */
	if (rc == -ENODEV && !mme)
		rc = 0;
	return rc;
}