/*
 * Dispatch a notification-type-0 store-event-information record to the
 * handler matching its content code (cc), then check for event overflow.
 */
static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area)
{
	/* Which kind of information was stored? */
	if (sei_area->cc == 1)
		/* link incident */
		chsc_process_sei_link_incident(sei_area);
	else if (sei_area->cc == 2)
		/* i/o resource accessibility */
		chsc_process_sei_res_acc(sei_area);
	else if (sei_area->cc == 7)
		/* channel-path-availability information */
		chsc_process_sei_chp_avail(sei_area);
	else if (sei_area->cc == 8)
		/* channel-path-configuration notification */
		chsc_process_sei_chp_config(sei_area);
	else if (sei_area->cc == 12)
		/* scm change notification */
		chsc_process_sei_scm_change(sei_area);
	else if (sei_area->cc == 14)
		/* scm available notification */
		chsc_process_sei_scm_avail(sei_area);
	else
		/* other stuff */
		CIO_CRW_EVENT(2, "chsc: sei nt0 unhandled cc=%d\n",
			      sei_area->cc);

	/* Check if we might have lost some information. */
	if (sei_area->flags & 0x40) {
		CIO_CRW_EVENT(2, "chsc: event overflow\n");
		css_schedule_eval_all();
	}
}
static void chsc_process_sei_chp_avail(struct chsc_sei_nt0_area *sei_area) { struct channel_path *chp; struct chp_id chpid; u8 *data; int num; CIO_CRW_EVENT(4, "chsc: channel path availability information\n"); if (sei_area->rs != 0) return; data = sei_area->ccdf; chp_id_init(&chpid); for (num = 0; num <= __MAX_CHPID; num++) { if (!chp_test_bit(data, num)) continue; chpid.id = num; CIO_CRW_EVENT(4, "Update information for channel path " "%x.%02x\n", chpid.cssid, chpid.id); chp = chpid_to_chp(chpid); if (!chp) { chp_new(chpid); continue; } mutex_lock(&chp->lock); chsc_determine_base_channel_path_desc(chpid, &chp->desc); mutex_unlock(&chp->lock); } }
/*
 * Repeatedly issue the store-event-information (SEI) command until the
 * pending-events flag (bit 0x80) is clear, dispatching each record to
 * the notification-type specific handler.
 *
 * Fix: older firmware rejects requests with a non-zero NTSM field by
 * returning response code 3; previously the loop just gave up and
 * pending events were lost.  Now we fall back to issuing the request
 * without an NTSM (consistent with the newer variant of this function)
 * and remember that decision for subsequent calls.
 */
static void chsc_process_event_information(struct chsc_sei *sei, u64 ntsm)
{
	/* Set once the machine's firmware has rejected a non-zero NTSM. */
	static int ntsm_unsupported;

	/*
	 * Note: while (true) rather than do/while — a 'continue' for the
	 * fallback must re-issue the request unconditionally, not re-test
	 * the pending-events flag of a failed response.
	 */
	while (true) {
		memset(sei, 0, sizeof(*sei));
		sei->request.length = 0x0010;
		sei->request.code = 0x000e;
		if (!ntsm_unsupported)
			sei->ntsm = ntsm;

		if (chsc(sei))
			break;

		if (sei->response.code != 0x0001) {
			CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x, ntsm=%llx)\n",
				      sei->response.code, sei->ntsm);

			if (sei->response.code == 3 && sei->ntsm) {
				/* Fallback for old firmware. */
				ntsm_unsupported = 1;
				continue;
			}
			break;
		}

		CIO_CRW_EVENT(2, "chsc: sei successful (nt=%d)\n", sei->nt);
		switch (sei->nt) {
		case 0:
			chsc_process_sei_nt0(&sei->u.nt0_area);
			break;
		case 2:
			chsc_process_sei_nt2(&sei->u.nt2_area);
			break;
		default:
			CIO_CRW_EVENT(2, "chsc: unhandled nt: %d\n", sei->nt);
			break;
		}

		/* Bit 0x80: more event information is pending. */
		if (!(sei->u.nt0_area.flags & 0x80))
			break;
	}
}
/* * Called from the machine check handler for subchannel report words. */ static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow) { struct subchannel_id mchk_schid; struct subchannel *sch; if (overflow) { css_schedule_eval_all(); return; } CIO_CRW_EVENT(2, "CRW0 reports slct=%d, oflw=%d, " "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n", crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc, crw0->erc, crw0->rsid); if (crw1) CIO_CRW_EVENT(2, "CRW1 reports slct=%d, oflw=%d, " "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n", crw1->slct, crw1->oflw, crw1->chn, crw1->rsc, crw1->anc, crw1->erc, crw1->rsid); init_subchannel_id(&mchk_schid); mchk_schid.sch_no = crw0->rsid; if (crw1) mchk_schid.ssid = (crw1->rsid >> 4) & 3; if (crw0->erc == CRW_ERC_PMOD) { sch = get_subchannel_by_schid(mchk_schid); if (sch) { css_update_ssd_info(sch); put_device(&sch->dev); } } /*
/* SEI content code 12: storage-class-memory change notification. */
static void chsc_process_sei_scm_change(struct chsc_sei_nt0_area *sei_area)
{
	int rc;

	CIO_CRW_EVENT(4, "chsc: scm change notification\n");
	/* Only events with reporting source 7 are handled here. */
	if (sei_area->rs != 7)
		return;

	rc = scm_update_information();
	if (rc)
		CIO_CRW_EVENT(0, "chsc: updating change notification"
			      " failed (rc=%d).\n", rc);
}
/* SEI content code 14: storage-class-memory availability notification. */
static void chsc_process_sei_scm_avail(struct chsc_sei_nt0_area *sei_area)
{
	int rc;

	CIO_CRW_EVENT(4, "chsc: scm available information\n");
	/* Only events with reporting source 7 are handled here. */
	if (sei_area->rs != 7)
		return;

	rc = scm_process_availability_information();
	if (rc)
		CIO_CRW_EVENT(0, "chsc: process availability information"
			      " failed (rc=%d).\n", rc);
}
int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt, int c, int m, void *page) { struct chsc_scpd *scpd_area; int ccode, ret; if ((rfmt == 1) && !css_general_characteristics.fcs) return -EINVAL; if ((rfmt == 2) && !css_general_characteristics.cib) return -EINVAL; memset(page, 0, PAGE_SIZE); scpd_area = page; scpd_area->request.length = 0x0010; scpd_area->request.code = 0x0002; scpd_area->cssid = chpid.cssid; scpd_area->first_chpid = chpid.id; scpd_area->last_chpid = chpid.id; scpd_area->m = m; scpd_area->c = c; scpd_area->fmt = fmt; scpd_area->rfmt = rfmt; ccode = chsc(scpd_area); if (ccode > 0) return (ccode == 3) ? -ENODEV : -EBUSY; ret = chsc_error_from_response(scpd_area->response.code); if (ret) CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n", scpd_area->response.code); return ret; }
/*
 * SEI content code 8: channel-path-configuration notification (older
 * variant working on struct chsc_sei_area).  The ccdf carries a
 * chp_config_data with a chpid bitmap and an operation code; schedule
 * the requested configure/deconfigure action for every flagged chpid.
 */
static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
{
	struct chp_config_data *data;
	struct chp_id chpid;
	int num;

	CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
	/* Only events with reporting source 0 carry this information. */
	if (sei_area->rs != 0)
		return;
	data = (struct chp_config_data *) &(sei_area->ccdf);
	chp_id_init(&chpid);
	/* Walk the chpid bitmap; act on each path that is flagged. */
	for (num = 0; num <= __MAX_CHPID; num++) {
		if (!chp_test_bit(data->map, num))
			continue;
		chpid.id = num;
		printk(KERN_WARNING "cio: processing configure event %d for "
		       "chpid %x.%02x\n", data->op, chpid.cssid, chpid.id);
		switch (data->op) {
		case 0:
			/* Configure the path. */
			chp_cfg_schedule(chpid, 1);
			break;
		case 1:
			/* Deconfigure the path. */
			chp_cfg_schedule(chpid, 0);
			break;
		case 2:
			/* Cancel a pending deconfigure. */
			chp_cfg_cancel_deconfigure(chpid);
			break;
		}
		/* Other op values are silently ignored. */
	}
}
static void chsc_process_sei_chp_config(struct chsc_sei_nt0_area *sei_area) { struct chp_config_data *data; struct chp_id chpid; int num; char *events[3] = {"configure", "deconfigure", "cancel deconfigure"}; CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n"); if (sei_area->rs != 0) return; data = (struct chp_config_data *) &(sei_area->ccdf); chp_id_init(&chpid); for (num = 0; num <= __MAX_CHPID; num++) { if (!chp_test_bit(data->map, num)) continue; chpid.id = num; pr_notice("Processing %s for channel path %x.%02x\n", events[data->op], chpid.cssid, chpid.id); switch (data->op) { case 0: chp_cfg_schedule(chpid, 1); break; case 1: chp_cfg_schedule(chpid, 0); break; case 2: chp_cfg_cancel_deconfigure(chpid); break; } } }
static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area) { struct chp_link link; struct chp_id chpid; int status; CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, " "rs_id=%04x)\n", sei_area->rs, sei_area->rsid); if (sei_area->rs != 4) return; chp_id_init(&chpid); chpid.id = sei_area->rsid; /* allocate a new channel path structure, if needed */ status = chp_get_status(chpid); if (status < 0) chp_new(chpid); else if (!status) return; memset(&link, 0, sizeof(struct chp_link)); link.chpid = chpid; if ((sei_area->vf & 0xc0) != 0) { link.fla = sei_area->fla; if ((sei_area->vf & 0xc0) == 0xc0) /* full link address */ link.fla_mask = 0xffff; else /* link address */ link.fla_mask = 0xff00; } s390_process_res_acc(&link); }
/*
 * Issue a store-channel-path-description (scpd) request for a single
 * chpid (older variant that allocates its own request page and copies
 * the raw response to the caller).
 *
 * Returns 0 on success (response copied into *resp), -EINVAL for an
 * unsupported response format, -ENOMEM if no DMA page is available,
 * -ENODEV/-EBUSY on instruction-level failure, or an errno mapped from
 * the response code.
 */
int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt, int c, int m, struct chsc_response_struct *resp)
{
	int ccode, ret;
	/*
	 * Request block layout for the scpd command.  The bitfield
	 * positions mirror the hardware-defined command format — do not
	 * reorder or repack.
	 */
	struct {
		struct chsc_header request;
		u32 : 2;
		u32 m : 1;
		u32 c : 1;
		u32 fmt : 4;
		u32 cssid : 8;
		u32 : 4;
		u32 rfmt : 4;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 zeroes1;
		struct chsc_header response;
		u8 data[PAGE_SIZE - 20];
	} __attribute__ ((packed)) *scpd_area;

	/* The requested response format needs a matching css facility. */
	if ((rfmt == 1) && !css_general_characteristics.fcs)
		return -EINVAL;
	if ((rfmt == 2) && !css_general_characteristics.cib)
		return -EINVAL;
	/* chsc requires a zeroed, DMA-capable page. */
	scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scpd_area)
		return -ENOMEM;

	scpd_area->request.length = 0x0010;
	scpd_area->request.code = 0x0002;

	scpd_area->cssid = chpid.cssid;
	/* Single-chpid request: first == last. */
	scpd_area->first_chpid = chpid.id;
	scpd_area->last_chpid = chpid.id;
	scpd_area->m = m;
	scpd_area->c = c;
	scpd_area->fmt = fmt;
	scpd_area->rfmt = rfmt;

	ccode = chsc(scpd_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	ret = chsc_error_from_response(scpd_area->response.code);
	if (ret == 0)
		/* Success. Copy header plus data as sized by the response. */
		memcpy(resp, &scpd_area->response,
		       scpd_area->response.length);
	else
		CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n",
			      scpd_area->response.code);
out:
	free_page((unsigned long)scpd_area);
	return ret;
}
/*
 * SEI content code 1: link incident.  Extract the affected chpid from
 * the link-incident record (LIR) in the ccdf and take the path offline.
 */
static void chsc_process_sei_link_incident(struct chsc_sei_nt0_area *sei_area)
{
	struct chp_id chpid;
	int id;

	CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
		      sei_area->rs, sei_area->rsid);
	if (sei_area->rs != 4)
		return;
	id = __get_chpid_from_lir(sei_area->ccdf);
	if (id >= 0) {
		chp_id_init(&chpid);
		chpid.id = id;
		chsc_chp_offline(chpid);
	} else {
		CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");
	}
}
/*
 * Repeatedly issue the store-event-information (SEI) command until the
 * pending-events flag (bit 0x80) is clear, dispatching each record to
 * the notification-type specific handler.  Includes a fallback for old
 * firmware that rejects requests carrying a non-zero NTSM field.
 */
static void chsc_process_event_information(struct chsc_sei *sei, u64 ntsm)
{
	/* Set once the machine's firmware has rejected a non-zero NTSM;
	 * remembered across calls so we stop asking. */
	static int ntsm_unsupported;

	while (true) {
		memset(sei, 0, sizeof(*sei));
		sei->request.length = 0x0010;
		sei->request.code = 0x000e;
		if (!ntsm_unsupported)
			sei->ntsm = ntsm;

		if (chsc(sei))
			break;

		if (sei->response.code != 0x0001) {
			CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x, ntsm=%llx)\n",
				      sei->response.code, sei->ntsm);

			if (sei->response.code == 3 && sei->ntsm) {
				/* Fallback for old firmware. */
				ntsm_unsupported = 1;
				continue;
			}
			break;
		}

		CIO_CRW_EVENT(2, "chsc: sei successful (nt=%d)\n", sei->nt);
		/* Dispatch on the notification type of this record. */
		switch (sei->nt) {
		case 0:
			chsc_process_sei_nt0(&sei->u.nt0_area);
			break;
		case 2:
			chsc_process_sei_nt2(&sei->u.nt2_area);
			break;
		default:
			CIO_CRW_EVENT(2, "chsc: unhandled nt: %d\n", sei->nt);
			break;
		}

		/* Bit 0x80: more event information is pending. */
		if (!(sei->u.nt0_area.flags & 0x80))
			break;
	}
}
/*
 * Issue a set-channel-monitor (secm) request to enable or disable
 * channel measurement for the given channel subsystem.
 *
 * Returns 0 on success, -ENODEV/-EBUSY on instruction-level failure,
 * -EINVAL for response codes 0x0102/0x0103, otherwise an errno mapped
 * from the response code.  Called with chsc_page serialized by
 * chsc_page_lock.
 */
int __chsc_do_secm(struct channel_subsystem *css, int enable)
{
	/*
	 * Request block layout for the secm command.  The bitfield
	 * positions mirror the hardware-defined command format — do not
	 * reorder or repack.
	 */
	struct {
		struct chsc_header request;
		u32 operation_code : 2;
		u32 : 30;
		u32 key : 4;
		u32 : 28;
		u32 zeroes1;
		u32 cub_addr1;
		u32 zeroes2;
		u32 cub_addr2;
		u32 reserved[13];
		struct chsc_header response;
		u32 status : 8;
		u32 : 4;
		u32 fmt : 4;
		u32 : 16;
	} __attribute__ ((packed)) *secm_area;
	int ret, ccode;

	spin_lock_irq(&chsc_page_lock);
	memset(chsc_page, 0, PAGE_SIZE);
	secm_area = chsc_page;
	secm_area->request.length = 0x0050;
	secm_area->request.code = 0x0016;

	secm_area->key = PAGE_DEFAULT_KEY >> 4;
	/*
	 * NOTE(review): cub_addr fields are u32 — the cast truncates the
	 * pointer value; presumably the cub pages are allocated below
	 * 4G (DMA) so this is safe — verify against the allocation site.
	 */
	secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
	secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;

	/* Operation code 0 enables, 1 disables measurement. */
	secm_area->operation_code = enable ? 0 : 1;

	ccode = chsc(secm_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	switch (secm_area->response.code) {
	case 0x0102:
	case 0x0103:
		ret = -EINVAL;
		break;
	default:
		ret = chsc_error_from_response(secm_area->response.code);
	}
	if (ret != 0)
		CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n",
			      secm_area->response.code);
out:
	spin_unlock_irq(&chsc_page_lock);
	return ret;
}
/**
 * chp_process_crw - process channel-path status change
 * @crw0: channel report-word to handle
 * @crw1: second channel-report word (always NULL)
 * @overflow: crw overflow indication
 *
 * Handle channel-report-words indicating that the status of a channel-path
 * has changed.
 */
static void chp_process_crw(struct crw *crw0, struct crw *crw1,
			    int overflow)
{
	struct chp_id chpid;

	if (overflow) {
		/* CRWs were lost — reevaluate all subchannels. */
		css_schedule_eval_all();
		return;
	}
	CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, "
		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc,
		      crw0->anc, crw0->erc, crw0->rsid);
	/*
	 * Check for solicited machine checks. These are
	 * created by reset channel path and need not be
	 * handled here.
	 */
	if (crw0->slct) {
		CIO_CRW_EVENT(2, "solicited machine check for "
			      "channel path %02X\n", crw0->rsid);
		return;
	}
	chp_id_init(&chpid);
	/* The rsid field carries the affected chpid. */
	chpid.id = crw0->rsid;
	/* Dispatch on the error-recovery code. */
	switch (crw0->erc) {
	case CRW_ERC_IPARM: /* Path has come. */
		if (!chp_is_registered(chpid))
			chp_new(chpid);
		chsc_chp_online(chpid);
		break;
	case CRW_ERC_PERRI: /* Path has gone. */
	case CRW_ERC_PERRN:
		chsc_chp_offline(chpid);
		break;
	default:
		CIO_CRW_EVENT(2, "Don't know how to handle erc=%x\n",
			      crw0->erc);
	}
}
static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow) { struct chsc_sei_area *sei_area; if (overflow) { css_schedule_eval_all(); return; } CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, " "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n", crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc, crw0->erc, crw0->rsid); if (!sei_page) return; /* Access to sei_page is serialized through machine check handler * thread, so no need for locking. */ sei_area = sei_page; CIO_TRACE_EVENT(2, "prcss"); do { memset(sei_area, 0, sizeof(*sei_area)); sei_area->request.length = 0x0010; sei_area->request.code = 0x000e; if (chsc(sei_area)) break; if (sei_area->response.code == 0x0001) { CIO_CRW_EVENT(4, "chsc: sei successful\n"); chsc_process_sei(sei_area); } else { CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n", sei_area->response.code); break; } } while (sei_area->flags & 0x80); }
/*
 * Dispatch a notification-type-2 store-event-information record
 * (PCI events) to the matching zpci handler based on its content code.
 */
static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area)
{
	if (sei_area->cc == 1)
		zpci_event_error(sei_area->ccdf);
	else if (sei_area->cc == 2)
		zpci_event_availability(sei_area->ccdf);
	else
		CIO_CRW_EVENT(2, "chsc: sei nt2 unhandled cc=%d\n",
			      sei_area->cc);
}
/* * Handle channel subsystem related CRWs. * Use store event information to find out what's going on. * * Note: Access to sei_page is serialized through machine check handler * thread, so no need for locking. */ static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow) { struct chsc_sei *sei = sei_page; if (overflow) { css_schedule_eval_all(); return; } CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, " "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n", crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc, crw0->erc, crw0->rsid); CIO_TRACE_EVENT(2, "prcss"); chsc_process_event_information(sei, CHSC_SEI_NT0 | CHSC_SEI_NT2); }
/*
 * Issue a set-domain-attribute (sda) request to enable the facility
 * identified by @operation_code.
 *
 * Returns 0 on success, -ENODEV/-EBUSY on instruction-level failure,
 * -EOPNOTSUPP for response code 0x0101, otherwise an errno mapped from
 * the response code.  Serializes on chsc_page via chsc_page_lock.
 */
int chsc_enable_facility(int operation_code)
{
	unsigned long flags;
	int ret;
	/*
	 * Request block layout for the sda command.  The bitfield
	 * positions mirror the hardware-defined command format — do not
	 * reorder or repack.
	 */
	struct {
		struct chsc_header request;
		u8 reserved1:4;
		u8 format:4;
		u8 reserved2;
		u16 operation_code;
		u32 reserved3;
		u32 reserved4;
		u32 operation_data_area[252];
		struct chsc_header response;
		u32 reserved5:4;
		u32 format2:4;
		u32 reserved6:24;
	} __attribute__ ((packed)) *sda_area;

	spin_lock_irqsave(&chsc_page_lock, flags);
	memset(chsc_page, 0, PAGE_SIZE);
	sda_area = chsc_page;
	sda_area->request.length = 0x0400;
	sda_area->request.code = 0x0031;
	sda_area->operation_code = operation_code;

	ret = chsc(sda_area);
	if (ret > 0) {
		/* Condition code 3: not operational; otherwise busy. */
		ret = (ret == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	switch (sda_area->response.code) {
	case 0x0101:
		/* Facility not provided. */
		ret = -EOPNOTSUPP;
		break;
	default:
		ret = chsc_error_from_response(sda_area->response.code);
	}
	if (ret != 0)
		CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n",
			      operation_code, sda_area->response.code);
out:
	spin_unlock_irqrestore(&chsc_page_lock, flags);
	return ret;
}
/*
 * Issue a store-channel-subsystem-characteristics (scsc) request and
 * cache the general and chsc characteristic words in the global
 * css_general_characteristics / css_chsc_characteristics.
 *
 * Returns 0 on success, -ENODEV/-EBUSY on instruction-level failure,
 * otherwise an errno mapped from the response code.  Init-time only
 * (__init); serializes on chsc_page via chsc_page_lock.
 */
int __init chsc_determine_css_characteristics(void)
{
	int result;
	/* Request block layout for the scsc command. */
	struct {
		struct chsc_header request;
		u32 reserved1;
		u32 reserved2;
		u32 reserved3;
		struct chsc_header response;
		u32 reserved4;
		u32 general_char[510];
		u32 chsc_char[508];
	} __attribute__ ((packed)) *scsc_area;

	spin_lock_irq(&chsc_page_lock);
	memset(chsc_page, 0, PAGE_SIZE);
	scsc_area = chsc_page;
	scsc_area->request.length = 0x0010;
	scsc_area->request.code = 0x0010;

	result = chsc(scsc_area);
	if (result) {
		/* Condition code 3: not operational; otherwise busy. */
		result = (result == 3) ? -ENODEV : -EBUSY;
		goto exit;
	}

	result = chsc_error_from_response(scsc_area->response.code);
	if (result == 0) {
		/* Cache both characteristic blocks for later queries. */
		memcpy(&css_general_characteristics, scsc_area->general_char,
		       sizeof(css_general_characteristics));
		memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
		       sizeof(css_chsc_characteristics));
	} else
		CIO_CRW_EVENT(2, "chsc: scsc failed (rc=%04x)\n",
			      scsc_area->response.code);
exit:
	spin_unlock_irq(&chsc_page_lock);
	return result;
}
/*
 * Issue a store-channel-measurement-characteristics (scmc) request for
 * the given channel path and fill in chp->cmg, chp->shared and (for
 * cmg 2/3) chp->cmg_chars.
 *
 * Ownership: cmg_chars is allocated up front; it is handed to
 * chp->cmg_chars only on the cmg 2/3 path, and freed on every other
 * exit (the !chp->cmg_chars check at 'out').
 *
 * Returns 0 on success (including the not_valid case, which marks
 * cmg/shared as -1), -ENOMEM on allocation failure, -ENODEV/-EBUSY on
 * instruction-level failure, otherwise an errno mapped from the
 * response code.
 */
int chsc_get_channel_measurement_chars(struct channel_path *chp)
{
	struct cmg_chars *cmg_chars;
	int ccode, ret;
	/*
	 * Request block layout for the scmc command.  The bitfield
	 * positions mirror the hardware-defined command format — do not
	 * reorder or repack.
	 */
	struct {
		struct chsc_header request;
		u32 : 24;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 zeroes1;
		struct chsc_header response;
		u32 zeroes2;
		u32 not_valid : 1;
		u32 shared : 1;
		u32 : 22;
		u32 chpid : 8;
		u32 cmcv : 5;
		u32 : 11;
		u32 cmgq : 8;
		u32 cmg : 8;
		u32 zeroes3;
		u32 data[NR_MEASUREMENT_CHARS];
	} __attribute__ ((packed)) *scmc_area;

	chp->cmg_chars = NULL;
	/* Allocate before taking the lock; may be freed unused below. */
	cmg_chars = kmalloc(sizeof(*cmg_chars), GFP_KERNEL);
	if (!cmg_chars)
		return -ENOMEM;

	spin_lock_irq(&chsc_page_lock);
	memset(chsc_page, 0, PAGE_SIZE);
	scmc_area = chsc_page;
	scmc_area->request.length = 0x0010;
	scmc_area->request.code = 0x0022;
	/* Single-chpid request: first == last. */
	scmc_area->first_chpid = chp->chpid.id;
	scmc_area->last_chpid = chp->chpid.id;

	ccode = chsc(scmc_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	ret = chsc_error_from_response(scmc_area->response.code);
	if (ret) {
		CIO_CRW_EVENT(2, "chsc: scmc failed (rc=%04x)\n",
			      scmc_area->response.code);
		goto out;
	}
	if (scmc_area->not_valid) {
		/* No measurement data for this path. */
		chp->cmg = -1;
		chp->shared = -1;
		goto out;
	}
	chp->cmg = scmc_area->cmg;
	chp->shared = scmc_area->shared;
	if (chp->cmg != 2 && chp->cmg != 3) {
		/* No cmg-dependent data. */
		goto out;
	}
	/* Transfer ownership of the buffer to the channel path. */
	chp->cmg_chars = cmg_chars;
	chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
				  (struct cmg_chars *) &scmc_area->data);
out:
	spin_unlock_irq(&chsc_page_lock);
	/* Free the buffer unless ownership was transferred above. */
	if (!chp->cmg_chars)
		kfree(cmg_chars);

	return ret;
}