static void chsc_process_sei_chp_avail(struct chsc_sei_nt0_area *sei_area)
{
	struct channel_path *chp;
	struct chp_id chpid;
	u8 *data;
	int num;

	CIO_CRW_EVENT(4, "chsc: channel path availability information\n");
	if (sei_area->rs != 0)
		return;
	data = sei_area->ccdf;
	chp_id_init(&chpid);
	for (num = 0; num <= __MAX_CHPID; num++) {
		if (!chp_test_bit(data, num))
			continue;
		chpid.id = num;

		CIO_CRW_EVENT(4, "Update information for channel path "
			      "%x.%02x\n", chpid.cssid, chpid.id);
		chp = chpid_to_chp(chpid);
		if (!chp) {
			chp_new(chpid);
			continue;
		}
		mutex_lock(&chp->lock);
		chsc_determine_base_channel_path_desc(chpid, &chp->desc);
		mutex_unlock(&chp->lock);
	}
}
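/*
 * Minimal sketch of the bitmap test assumed by the availability and
 * configuration loops in this file: chp_test_bit() treats the CCDF as an
 * MSB-first bit string, so the 0x80 bit of byte 0 corresponds to CHPID 0.
 * Illustrative rather than authoritative; it mirrors the helper that
 * accompanies these handlers.
 */
static int chp_test_bit(u8 *bitmap, int num)
{
	int byte = num >> 3;		/* byte holding this CHPID's bit */
	int mask = 0x80 >> (num & 7);	/* MSB-first within the byte */

	return (bitmap[byte] & mask) ? 1 : 0;
}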
static void chsc_process_sei_chp_config(struct chsc_sei_nt0_area *sei_area)
{
	struct chp_config_data *data;
	struct chp_id chpid;
	int num;
	char *events[3] = {"configure", "deconfigure", "cancel deconfigure"};

	CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
	if (sei_area->rs != 0)
		return;
	data = (struct chp_config_data *) &(sei_area->ccdf);
	chp_id_init(&chpid);
	for (num = 0; num <= __MAX_CHPID; num++) {
		if (!chp_test_bit(data->map, num))
			continue;
		chpid.id = num;
		pr_notice("Processing %s for channel path %x.%02x\n",
			  events[data->op], chpid.cssid, chpid.id);
		switch (data->op) {
		case 0:
			chp_cfg_schedule(chpid, 1);
			break;
		case 1:
			chp_cfg_schedule(chpid, 0);
			break;
		case 2:
			chp_cfg_cancel_deconfigure(chpid);
			break;
		}
	}
}
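/*
 * Sketch of the CCDF layout consumed above, assuming the chp_config_data
 * definition from chp.h of the same era: a 256-bit (32-byte) CHPID bitmap
 * followed by the operation code that selects configure (0), deconfigure (1)
 * or cancel-deconfigure (2). Treat the exact field set as an assumption.
 */
struct chp_config_data {
	u8 map[32];	/* one bit per CHPID, MSB-first */
	u8 op;		/* 0=configure, 1=deconfigure, 2=cancel deconfigure */
	u8 pc;		/* not used by the handler above */
};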
static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
{
	struct chp_config_data *data;
	struct chp_id chpid;
	int num;

	CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
	if (sei_area->rs != 0)
		return;
	data = (struct chp_config_data *) &(sei_area->ccdf);
	chp_id_init(&chpid);
	for (num = 0; num <= __MAX_CHPID; num++) {
		if (!chp_test_bit(data->map, num))
			continue;
		chpid.id = num;
		printk(KERN_WARNING "cio: processing configure event %d for "
		       "chpid %x.%02x\n", data->op, chpid.cssid, chpid.id);
		switch (data->op) {
		case 0:
			chp_cfg_schedule(chpid, 1);
			break;
		case 1:
			chp_cfg_schedule(chpid, 0);
			break;
		case 2:
			chp_cfg_cancel_deconfigure(chpid);
			break;
		}
	}
}
static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area)
{
	struct chp_link link;
	struct chp_id chpid;
	int status;

	CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
		      "rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
	if (sei_area->rs != 4)
		return;
	chp_id_init(&chpid);
	chpid.id = sei_area->rsid;
	/* Allocate a new channel path structure, if needed. */
	status = chp_get_status(chpid);
	if (status < 0)
		chp_new(chpid);
	else if (!status)
		return;
	memset(&link, 0, sizeof(struct chp_link));
	link.chpid = chpid;
	if ((sei_area->vf & 0xc0) != 0) {
		link.fla = sei_area->fla;
		if ((sei_area->vf & 0xc0) == 0xc0)
			/* full link address */
			link.fla_mask = 0xffff;
		else
			/* link address */
			link.fla_mask = 0xff00;
	}
	s390_process_res_acc(&link);
}
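/*
 * Sketch of the link descriptor filled in above, assuming the chp_link
 * definition from chp.h: fla_mask selects how much of the link address in
 * fla is significant (0xffff for a full link address, 0xff00 for a link
 * address only), which s390_process_res_acc() uses when matching
 * subchannels against the affected link.
 */
struct chp_link {
	struct chp_id chpid;
	u32 fla_mask;
	u16 fla;
};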
int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
{
	unsigned long page;
	struct chsc_ssd_area *ssd_area;
	int ccode;
	int ret;
	int i;
	int mask;

	/*
	 * The chsc command block must be a zeroed, page-aligned area below
	 * 2G (31-bit addressable), hence GFP_DMA.
	 */
	page = get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!page)
		return -ENOMEM;
	ssd_area = (struct chsc_ssd_area *) page;
	ssd_area->request.length = 0x0010;
	ssd_area->request.code = 0x0004;
	ssd_area->ssid = schid.ssid;
	ssd_area->f_sch = schid.sch_no;
	ssd_area->l_sch = schid.sch_no;
	ccode = chsc(ssd_area);
	/* Check response. */
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out_free;
	}
	ret = chsc_error_from_response(ssd_area->response.code);
	if (ret != 0) {
		CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
			      schid.ssid, schid.sch_no,
			      ssd_area->response.code);
		goto out_free;
	}
	if (!ssd_area->sch_valid) {
		ret = -ENODEV;
		goto out_free;
	}
	/* Copy data. */
	ret = 0;
	memset(ssd, 0, sizeof(struct chsc_ssd_info));
	if ((ssd_area->st != SUBCHANNEL_TYPE_IO) &&
	    (ssd_area->st != SUBCHANNEL_TYPE_MSG))
		goto out_free;
	ssd->path_mask = ssd_area->path_mask;
	ssd->fla_valid_mask = ssd_area->fla_valid_mask;
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (ssd_area->path_mask & mask) {
			chp_id_init(&ssd->chpid[i]);
			ssd->chpid[i].id = ssd_area->chpid[i];
		}
		if (ssd_area->fla_valid_mask & mask)
			ssd->fla[i] = ssd_area->fla[i];
	}
out_free:
	free_page(page);
	return ret;
}
int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
{
	struct chsc_ssd_area *ssd_area;
	unsigned long flags;
	int ccode;
	int ret;
	int i;
	int mask;

	spin_lock_irqsave(&chsc_page_lock, flags);
	memset(chsc_page, 0, PAGE_SIZE);
	ssd_area = chsc_page;
	ssd_area->request.length = 0x0010;
	ssd_area->request.code = 0x0004;
	ssd_area->ssid = schid.ssid;
	ssd_area->f_sch = schid.sch_no;
	ssd_area->l_sch = schid.sch_no;
	ccode = chsc(ssd_area);
	/* Check response. */
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}
	ret = chsc_error_from_response(ssd_area->response.code);
	if (ret != 0) {
		CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
			      schid.ssid, schid.sch_no,
			      ssd_area->response.code);
		goto out;
	}
	if (!ssd_area->sch_valid) {
		ret = -ENODEV;
		goto out;
	}
	/* Copy data. */
	ret = 0;
	memset(ssd, 0, sizeof(struct chsc_ssd_info));
	if ((ssd_area->st != SUBCHANNEL_TYPE_IO) &&
	    (ssd_area->st != SUBCHANNEL_TYPE_MSG))
		goto out;
	ssd->path_mask = ssd_area->path_mask;
	ssd->fla_valid_mask = ssd_area->fla_valid_mask;
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (ssd_area->path_mask & mask) {
			chp_id_init(&ssd->chpid[i]);
			ssd->chpid[i].id = ssd_area->chpid[i];
		}
		if (ssd_area->fla_valid_mask & mask)
			ssd->fla[i] = ssd_area->fla[i];
	}
out:
	spin_unlock_irqrestore(&chsc_page_lock, flags);
	return ret;
}
/**
 * chp_process_crw - process channel-path status change
 * @id: channel-path ID number
 * @status: non-zero if channel-path has become available, zero otherwise
 *
 * Handle channel-report-words indicating that the status of a channel-path
 * has changed.
 */
void chp_process_crw(int id, int status)
{
	struct chp_id chpid;

	chp_id_init(&chpid);
	chpid.id = id;
	if (status) {
		if (!chp_is_registered(chpid))
			chp_new(chpid);
		chsc_chp_online(chpid);
	} else
		chsc_chp_offline(chpid);
}
static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
{
	int i;
	int mask;

	memset(ssd, 0, sizeof(struct chsc_ssd_info));
	ssd->path_mask = pmcw->pim;
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (pmcw->pim & mask) {
			chp_id_init(&ssd->chpid[i]);
			ssd->chpid[i].id = pmcw->chpid[i];
		}
	}
}
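/*
 * Usage sketch (an assumption, modelled on the css_update_ssd_info() caller
 * from the same code base): prefer the firmware-provided subchannel
 * description via chsc_get_ssd_info() and fall back to the PMCW-derived one
 * when the CHSC call fails. The helper name is hypothetical.
 */
static void example_update_ssd_info(struct subchannel *sch)
{
	if (chsc_get_ssd_info(sch->schid, &sch->ssd_info) != 0)
		/* Fall back to the information in the PMCW. */
		ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
}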
/**
 * chp_get_sch_opm - return opm for subchannel
 * @sch: subchannel
 *
 * Calculate and return the operational path mask (opm) based on the chpids
 * used by the subchannel and the status of the associated channel-paths.
 */
u8 chp_get_sch_opm(struct subchannel *sch)
{
	struct chp_id chpid;
	int opm;
	int i;

	opm = 0;
	chp_id_init(&chpid);
	for (i = 0; i < 8; i++) {
		opm <<= 1;
		chpid.id = sch->schib.pmcw.chpid[i];
		if (chp_get_status(chpid) != 0)
			opm |= 1;
	}
	return opm;
}
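/*
 * Usage sketch (an assumption): how a caller might refresh a subchannel's
 * path masks after a channel-path status change. The field names match
 * struct subchannel; the helper name is made up for illustration.
 */
static void example_refresh_path_masks(struct subchannel *sch)
{
	sch->opm = chp_get_sch_opm(sch);
	/* A path is usable only if it is both installed and operational. */
	sch->lpm &= sch->opm;
}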
static void chsc_process_sei_link_incident(struct chsc_sei_nt0_area *sei_area)
{
	struct chp_id chpid;
	int id;

	CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
		      sei_area->rs, sei_area->rsid);
	if (sei_area->rs != 4)
		return;
	id = __get_chpid_from_lir(sei_area->ccdf);
	if (id < 0)
		CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");
	else {
		chp_id_init(&chpid);
		chpid.id = id;
		chsc_chp_offline(chpid);
	}
}
/**
 * chp_process_crw - process channel-path status change
 * @crw0: channel report-word to handle
 * @crw1: second channel-report word (always NULL)
 * @overflow: crw overflow indication
 *
 * Handle channel-report-words indicating that the status of a channel-path
 * has changed.
 */
static void chp_process_crw(struct crw *crw0, struct crw *crw1,
			    int overflow)
{
	struct chp_id chpid;

	if (overflow) {
		css_schedule_eval_all();
		return;
	}
	CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, "
		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc,
		      crw0->anc, crw0->erc, crw0->rsid);
	/*
	 * Check for solicited machine checks. These are
	 * created by reset channel path and need not be
	 * handled here.
	 */
	if (crw0->slct) {
		CIO_CRW_EVENT(2, "solicited machine check for "
			      "channel path %02X\n", crw0->rsid);
		return;
	}
	chp_id_init(&chpid);
	chpid.id = crw0->rsid;
	switch (crw0->erc) {
	case CRW_ERC_IPARM: /* Path has become available. */
		if (!chp_is_registered(chpid))
			chp_new(chpid);
		chsc_chp_online(chpid);
		break;
	case CRW_ERC_PERRI: /* Path has become unavailable. */
	case CRW_ERC_PERRN:
		chsc_chp_offline(chpid);
		break;
	default:
		CIO_CRW_EVENT(2, "Don't know how to handle erc=%x\n",
			      crw0->erc);
	}
}
static void css_reset(void)
{
	int i, ret;
	unsigned long long timeout;
	struct chp_id chpid;

	/* Reset subchannels. */
	for_each_subchannel(__shutdown_subchannel_easy, NULL);
	/* Reset channel paths. */
	s390_base_mcck_handler_fn = s390_reset_chpids_mcck_handler;
	/* Enable channel report machine checks. */
	__ctl_set_bit(14, 28);
	/* Temporarily reenable machine checks. */
	local_mcck_enable();
	chp_id_init(&chpid);
	for (i = 0; i <= __MAX_CHPID; i++) {
		chpid.id = i;
		ret = rchp(chpid);
		if ((ret == 0) || (ret == 2))
			/*
			 * rchp either succeeded, or another rchp is already
			 * in progress. In either case, we'll get a crw.
			 */
			atomic_inc(&chpid_reset_count);
	}
	/* Wait for machine check for all channel paths. */
	timeout = get_tod_clock_fast() + (RCHP_TIMEOUT << 12);
	while (atomic_read(&chpid_reset_count) != 0) {
		if (get_tod_clock_fast() > timeout)
			break;
		cpu_relax();
	}
	/* Disable machine checks again. */
	local_mcck_disable();
	/* Disable channel report machine checks. */
	__ctl_clear_bit(14, 28);
	s390_base_mcck_handler_fn = NULL;
}
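/*
 * Sketch of the machine-check handler installed by css_reset() (an
 * assumption, reconstructed from the same-era cio code): each solicited
 * channel-path CRW produced in response to rchp decrements
 * chpid_reset_count, which the busy-wait in css_reset() polls.
 */
static void s390_reset_chpids_mcck_handler(void)
{
	struct crw crw;
	struct mci *mci;

	/* Check for pending channel report words. */
	mci = (struct mci *)&S390_lowcore.mcck_interruption_code;
	if (!mci->cp)
		return;
	/* Process channel report words. */
	while (stcrw(&crw) == 0) {
		/* Check for responses to rchp. */
		if (crw.slct && crw.rsc == CRW_RSC_CPATH)
			atomic_dec(&chpid_reset_count);
	}
}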