static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area)
{
	/* which kind of information was stored? */
	switch (sei_area->cc) {
	case 1: /* link incident */
		chsc_process_sei_link_incident(sei_area);
		break;
	case 2: /* i/o resource accessibility */
		chsc_process_sei_res_acc(sei_area);
		break;
	case 7: /* channel-path-availability information */
		chsc_process_sei_chp_avail(sei_area);
		break;
	case 8: /* channel-path-configuration notification */
		chsc_process_sei_chp_config(sei_area);
		break;
	case 12: /* scm change notification */
		chsc_process_sei_scm_change(sei_area);
		break;
	case 14: /* scm available notification */
		chsc_process_sei_scm_avail(sei_area);
		break;
	default: /* other stuff */
		CIO_CRW_EVENT(2, "chsc: sei nt0 unhandled cc=%d\n",
			      sei_area->cc);
		break;
	}

	/* Check if we might have lost some information. */
	if (sei_area->flags & 0x40) {
		CIO_CRW_EVENT(2, "chsc: event overflow\n");
		css_schedule_eval_all();
	}
}
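/*
 * For orientation only: the two flag bits in the SEI result that the code in
 * this section tests directly.  These macro names are not taken from the
 * kernel source; they merely label the masks used in chsc_process_sei_nt0()
 * above and in the SEI retry loops below.
 */
#define SEI_FLAG_OVERFLOW	0x40	/* events were lost; re-evaluate all subchannels */
#define SEI_FLAG_MORE_PENDING	0x80	/* further event information is stored */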
/*
 * Called from the machine check handler for subchannel report words.
 */
static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
	struct subchannel_id mchk_schid;
	struct subchannel *sch;

	if (overflow) {
		css_schedule_eval_all();
		return;
	}
	CIO_CRW_EVENT(2, "CRW0 reports slct=%d, oflw=%d, "
		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc,
		      crw0->anc, crw0->erc, crw0->rsid);
	if (crw1)
		CIO_CRW_EVENT(2, "CRW1 reports slct=%d, oflw=%d, "
			      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
			      crw1->slct, crw1->oflw, crw1->chn,
			      crw1->rsc, crw1->anc, crw1->erc, crw1->rsid);
	init_subchannel_id(&mchk_schid);
	mchk_schid.sch_no = crw0->rsid;
	if (crw1)
		mchk_schid.ssid = (crw1->rsid >> 4) & 3;

	if (crw0->erc == CRW_ERC_PMOD) {
		sch = get_subchannel_by_schid(mchk_schid);
		if (sch) {
			css_update_ssd_info(sch);
			put_device(&sch->dev);
		}
	}
	/*
	 * Since we are always presented with IPI in the CRW, we have to
	 * use stsch() to find out if the subchannel in question has come
	 * or gone.
	 */
	css_evaluate_subchannel(mchk_schid, 0);
}
/*
 * Handle channel subsystem related CRWs.
 * Use store event information to find out what's going on.
 *
 * Note: Access to sei_page is serialized through machine check handler
 * thread, so no need for locking.
 */
static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
	struct chsc_sei *sei = sei_page;

	if (overflow) {
		css_schedule_eval_all();
		return;
	}
	CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, "
		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc,
		      crw0->anc, crw0->erc, crw0->rsid);

	CIO_TRACE_EVENT(2, "prcss");
	chsc_process_event_information(sei, CHSC_SEI_NT0 | CHSC_SEI_NT2);
}
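/*
 * Rough sketch of what chsc_process_event_information() presumably looks
 * like, pieced together from the older chsc_process_crw() retry loop at the
 * end of this section and the nt0 dispatcher above.  The field names
 * sei->ntsm, sei->nt and sei->u.nt0_area, as well as the nt2 handler, are
 * assumptions about struct chsc_sei, not taken from this excerpt.
 */
static void chsc_process_event_information(struct chsc_sei *sei, u64 ntsm)
{
	do {
		memset(sei, 0, sizeof(*sei));
		sei->request.length = 0x0010;
		sei->request.code = 0x000e;
		sei->ntsm = ntsm;	/* notification types we are interested in */

		if (chsc(sei))
			break;
		if (sei->response.code != 0x0001) {
			CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
				      sei->response.code);
			break;
		}
		/* Dispatch on the notification type of the stored event. */
		switch (sei->nt) {
		case 0:
			chsc_process_sei_nt0(&sei->u.nt0_area);
			break;
		case 2:
			chsc_process_sei_nt2(&sei->u.nt2_area);
			break;
		default:
			CIO_CRW_EVENT(2, "chsc: unhandled nt: %d\n", sei->nt);
			break;
		}
		/* 0x80: further event information is pending, fetch again. */
	} while (sei->u.nt0_area.flags & 0x80);
}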
/**
 * chp_process_crw - process channel-path status change
 * @crw0: channel report-word to handle
 * @crw1: second channel-report word (always NULL)
 * @overflow: crw overflow indication
 *
 * Handle channel-report-words indicating that the status of a channel-path
 * has changed.
 */
static void chp_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
	struct chp_id chpid;

	if (overflow) {
		css_schedule_eval_all();
		return;
	}
	CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, "
		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc,
		      crw0->anc, crw0->erc, crw0->rsid);

	/*
	 * Check for solicited machine checks. These are
	 * created by reset channel path and need not be
	 * handled here.
	 */
	if (crw0->slct) {
		CIO_CRW_EVENT(2, "solicited machine check for "
			      "channel path %02X\n", crw0->rsid);
		return;
	}
	chp_id_init(&chpid);
	chpid.id = crw0->rsid;
	switch (crw0->erc) {
	case CRW_ERC_IPARM: /* Path has come. */
		if (!chp_is_registered(chpid))
			chp_new(chpid);
		chsc_chp_online(chpid);
		break;
	case CRW_ERC_PERRI: /* Path has gone. */
	case CRW_ERC_PERRN:
		chsc_chp_offline(chpid);
		break;
	default:
		CIO_CRW_EVENT(2, "Don't know how to handle erc=%x\n",
			      crw0->erc);
	}
}
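/*
 * For reference only: the CRW error-recovery codes tested in
 * chp_process_crw() and css_process_crw() above.  Both the numeric values
 * and the descriptions are assumptions recalled from the s390 CRW
 * definitions; they are not part of this excerpt.
 */
#define CRW_ERC_IPARM	0x04	/* installed parameters initialized */
#define CRW_ERC_PERRN	0x06	/* permanent error, facility not initialized */
#define CRW_ERC_PERRI	0x07	/* permanent error, facility initialized */
#define CRW_ERC_PMOD	0x08	/* installed parameters modified */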
void css_schedule_eval_all_unreg(unsigned long delay)
{
	unsigned long flags;
	struct idset *unreg_set;

	/* Find unregistered subchannels. */
	unreg_set = idset_sch_new();
	if (!unreg_set) {
		/* Fallback. */
		css_schedule_eval_all();
		return;
	}
	idset_fill(unreg_set);
	bus_for_each_dev(&css_bus_type, NULL, unreg_set, __unset_registered);
	/* Apply to slow_subchannel_set. */
	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_add_set(slow_subchannel_set, unreg_set);
	atomic_set(&css_eval_scheduled, 1);
	queue_delayed_work(cio_work_q, &slow_path_work, delay);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
	idset_free(unreg_set);
}
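/*
 * Sketch of the __unset_registered() callback handed to bus_for_each_dev()
 * above, assuming it simply removes every still-registered subchannel from
 * the idset so that only unregistered subchannels remain.  The helper names
 * to_subchannel() and idset_sch_del() are assumptions, not taken from this
 * excerpt.
 */
static int __unset_registered(struct device *dev, void *data)
{
	struct idset *set = data;
	struct subchannel *sch = to_subchannel(dev);

	/* Drop registered subchannels; what is left over is unregistered. */
	idset_sch_del(set, sch->schid);
	return 0;
}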
static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
	struct chsc_sei_area *sei_area;

	if (overflow) {
		css_schedule_eval_all();
		return;
	}
	CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, "
		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc,
		      crw0->anc, crw0->erc, crw0->rsid);
	if (!sei_page)
		return;
	/* Access to sei_page is serialized through machine check handler
	 * thread, so no need for locking. */
	sei_area = sei_page;

	CIO_TRACE_EVENT(2, "prcss");
	do {
		memset(sei_area, 0, sizeof(*sei_area));
		sei_area->request.length = 0x0010;
		sei_area->request.code = 0x000e;
		if (chsc(sei_area))
			break;

		if (sei_area->response.code == 0x0001) {
			CIO_CRW_EVENT(4, "chsc: sei successful\n");
			chsc_process_sei(sei_area);
		} else {
			CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
				      sei_area->response.code);
			break;
		}
	} while (sei_area->flags & 0x80);
}