/*
 * Set the number of lines for the PORTS mux. This will add or remove
 * cards as necessary. The number of lines must be a multiple of 4.
 *
 * uptr/val/desc are unused (SCP SET-handler signature); cptr is the
 * user-supplied line count. Returns SCPE_ARG on bad input, SCPE_MEM on
 * allocation failure, otherwise the result of ports_reset().
 */
t_stat ports_setnl(UNIT *uptr, int32 val, CONST char *cptr, void *desc)
{
    int32 newln, i, t;
    t_stat r = SCPE_OK;
    TMLN *new_ldsc;
    PORTS_LINE_STATE *new_state;

    if (cptr == NULL) {
        return SCPE_ARG;
    }

    newln = (int32) get_uint(cptr, 10, (MAX_PORTS_CARDS * PORTS_LINES), &r);

    if ((r != SCPE_OK) || (newln == ports_desc.lines)) {
        return r;
    }

    /* Count must be non-zero and a whole number of cards (LPORT(n)==0). */
    if ((newln == 0) || LPORT(newln) != 0) {
        return SCPE_ARG;
    }

    if (newln < ports_desc.lines) {
        /* Shrinking: warn if any line being removed has an active connection. */
        for (i = newln, t = 0; i < ports_desc.lines; i++) {
            t = t | ports_ldsc[i].conn;
        }
        if (t && !get_yn("This will disconnect users; proceed [N]?", FALSE)) {
            return SCPE_OK;
        }
        for (i = newln; i < ports_desc.lines; i++) {
            if (ports_ldsc[i].conn) {
                tmxr_linemsg(&ports_ldsc[i], "\r\nOperator disconnected line\r\n");
                tmxr_send_buffered_data(&ports_ldsc[i]);
            }
            /* completely reset line */
            tmxr_detach_ln(&ports_ldsc[i]);
            if (LPORT(i) == (PORTS_LINES - 1)) {
                /* Also drop the corresponding card from the CIO array */
                cio_clear(LCID(i));
            }
        }
    }

    /* Resize the line tables. Use temporaries so that a failed realloc
     * neither leaks the old buffer nor leaves a NULL pointer behind. */
    new_ldsc = (TMLN *)realloc(ports_ldsc, newln*sizeof(*ports_ldsc));
    if (new_ldsc == NULL) {
        return SCPE_MEM;
    }
    ports_desc.ldsc = ports_ldsc = new_ldsc;

    new_state = (PORTS_LINE_STATE *)realloc(ports_state, newln*sizeof(*ports_state));
    if (new_state == NULL) {
        if (newln < ports_desc.lines) {
            /* ldsc was already shrunk; clamp the line count so nothing
             * indexes past the smaller table. */
            ports_desc.lines = newln;
        }
        return SCPE_MEM;
    }
    ports_state = new_state;

    if (ports_desc.lines < newln) {
        /* Growing: zero-initialize the newly added entries. */
        memset(ports_ldsc + ports_desc.lines, 0, sizeof(*ports_ldsc)*(newln-ports_desc.lines));
        memset(ports_state + ports_desc.lines, 0, sizeof(*ports_state)*(newln-ports_desc.lines));
    }

    ports_desc.lines = newln;

    /* setup lines and auto config */
    ports_conf = FALSE;
    return ports_reset(&ports_dev);
}
/**
 * ccw_request_cancel - cancel running I/O request
 * @cdev: ccw device
 *
 * Cancel the I/O request specified by cdev->req. Return non-zero if request
 * has already finished, zero otherwise.
 */
int ccw_request_cancel(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;
	int ret;

	/* Nothing to cancel once the request has completed. */
	if (req->done)
		return 1;

	/* Flag the request as cancelled, then clear the subchannel. */
	req->cancel = 1;
	ret = cio_clear(sch);
	if (ret != 0)
		ccwreq_stop(cdev, ret);
	return 0;
}
/*
 * (Re-)Start the operation until retries and paths are exhausted.
 */
static void ccwreq_do(struct ccw_device *cdev)
{
	struct ccw_request *req = &cdev->private->req;
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw1 *cp = req->cp;
	int rc = -EACCES;

	/* req->mask holds the path mask; a zero mask means no paths remain. */
	while (req->mask) {
		if (req->retries-- == 0) {
			/* Retries exhausted, try next path. */
			ccwreq_next_path(cdev);
			continue;
		}
		/* Perform start function. */
		memset(&cdev->private->irb, 0, sizeof(struct irb));
		rc = cio_start(sch, cp, (u8) req->mask);
		if (rc == 0) {
			/* I/O started successfully. */
			ccw_device_set_timeout(cdev, req->timeout);
			return;
		}
		if (rc == -ENODEV) {
			/* Permanent device error. */
			break;
		}
		if (rc == -EACCES) {
			/* Permanent path error. */
			ccwreq_next_path(cdev);
			continue;
		}
		/* Temporary improper status: clear the subchannel and return;
		 * a failed clear is fatal for the request. */
		rc = cio_clear(sch);
		if (rc)
			break;
		return;
	}
	/* All paths/retries exhausted or a fatal error occurred. */
	ccwreq_stop(cdev, rc);
}
static void ccwreq_do(struct ccw_device *cdev) { struct ccw_request *req = &cdev->private->req; struct subchannel *sch = to_subchannel(cdev->dev.parent); struct ccw1 *cp = req->cp; int rc = -EACCES; while (req->mask) { if (req->retries-- == 0) { ccwreq_next_path(cdev); continue; } memset(&cdev->private->irb, 0, sizeof(struct irb)); rc = cio_start(sch, cp, (u8) req->mask); if (rc == 0) { ccw_device_set_timeout(cdev, req->timeout); return; } if (rc == -ENODEV) { break; } if (rc == -EACCES) { ccwreq_next_path(cdev); continue; } rc = cio_clear(sch); if (rc) break; return; } ccwreq_stop(cdev, rc); }