/**
 * Physical port is in link-down state: the port is enabled in firmware
 * but no link is established. Wait for a link-up notification; also
 * handle enable/disable/stop requests and IOC failure.
 */
static void
bfa_pport_sm_linkdown(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
{
	bfa_trc(pport->bfa, event);

	switch (event) {
	case BFA_PPORT_SM_LINKUP:
		/*
		 * Capture the firmware-reported link parameters before
		 * notifying upper layers of the state change.
		 */
		bfa_pport_update_linkinfo(pport);
		bfa_sm_set_state(pport, bfa_pport_sm_linkup);

		/* A link-event callback must be registered by now. */
		bfa_assert(pport->event_cbfn);
		bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL,
			     BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkup");
		bfa_pport_callback(pport, BFA_PPORT_LINKUP);
		bfa_pport_aen_post(pport, BFA_PORT_AEN_ONLINE);

		/**
		 * If QoS is enabled and it is not online,
		 * Send a separate event.
		 */
		if ((pport->cfg.qos_enabled)
		    && (bfa_os_ntohl(pport->qos_attr.state) != BFA_QOS_ONLINE))
			bfa_pport_aen_post(pport, BFA_PORT_AEN_QOS_NEG);
		break;

	case BFA_PPORT_SM_LINKDOWN:
		/**
		 * Possible to get link down event.
		 */
		break;

	case BFA_PPORT_SM_ENABLE:
		/**
		 * Already enabled.
		 */
		break;

	case BFA_PPORT_SM_DISABLE:
		/*
		 * Send the disable to firmware now if the request queue has
		 * room; otherwise wait for queue space in the qwait state.
		 */
		if (bfa_pport_send_disable(pport))
			bfa_sm_set_state(pport, bfa_pport_sm_disabling);
		else
			bfa_sm_set_state(pport, bfa_pport_sm_disabling_qwait);
		bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL,
			     BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		bfa_pport_aen_post(pport, BFA_PORT_AEN_DISABLE);
		break;

	case BFA_PPORT_SM_STOP:
		bfa_sm_set_state(pport, bfa_pport_sm_stopped);
		break;

	case BFA_PPORT_SM_HWFAIL:
		/* IOC went down underneath us. */
		bfa_sm_set_state(pport, bfa_pport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(pport->bfa, event);
	}
}
/*
 * Enable IOC after it is disabled. Completion is reported
 * asynchronously through the IOC state machine.
 */
void
bfa_iocfc_enable(struct bfa_s *bfa)
{
	/* Record the operation in the port log before kicking the IOC. */
	bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
		     "IOC Enable");
	bfa_ioc_enable(&bfa->ioc);
}
/*
 * Disable the IOC. Marks the pending action and stops response-queue
 * message processing before initiating the IOC disable.
 */
void
bfa_iocfc_disable(struct bfa_s *bfa)
{
	bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
		     "IOC Disable");
	bfa->iocfc.action = BFA_IOCFC_ACT_DISABLE;

	/* Stop processing response-queue events while going down. */
	bfa->rme_process = BFA_FALSE;
	bfa_ioc_disable(&bfa->ioc);
}
/**
 * Port enable is pending: waiting for request-queue space before the
 * enable message can be sent to firmware.
 */
static void
bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
			     enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_QRESUME:
		/* Queue space available now; send the deferred enable. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		bfa_fcport_send_enable(fcport);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_reqq_wcancel(&fcport->reqq_wait);
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/**
		 * Already enable is in progress.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/**
		 * Just send disable request to firmware when room becomes
		 * available in request queue.
		 *
		 * NOTE(review): the enable was never actually sent to
		 * firmware (still queue-waiting), so no disable message is
		 * issued here; the wait is cancelled and the port goes
		 * straight to the disabled state.
		 */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			     BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/**
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		/* IOC failed: cancel the queue wait and mark the port down. */
		bfa_reqq_wcancel(&fcport->reqq_wait);
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
/**
 * Port enable has been sent to firmware; waiting for the firmware
 * response or a direct link event.
 */
static void
bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
		       enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_FWRSP:
	case BFA_FCPORT_SM_LINKDOWN:
		/* Enable acknowledged (or link down): wait for link-up. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
		break;

	case BFA_FCPORT_SM_LINKUP:
		/* Latch link parameters before notifying upper layers. */
		bfa_fcport_update_linkinfo(fcport);
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);

		/* A link-event callback must be registered by now. */
		bfa_assert(fcport->event_cbfn);
		bfa_fcport_callback(fcport, BFA_PPORT_LINKUP);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/**
		 * Already being enabled.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * Issue disable now if the request queue has room, else
		 * wait for queue space before sending it.
		 */
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			     BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
/**
 * Port disable has been sent to firmware; waiting for the firmware
 * response.
 */
static void
bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_FWRSP:
		/* Firmware acknowledged the disable. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		break;

	case BFA_FCPORT_SM_DISABLE:
		/**
		 * Already being disabled.
		 */
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Re-enable requested while the disable is still pending:
		 * send the enable now if there is request-queue room, else
		 * wait for queue space.
		 */
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling_qwait);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			     BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/**
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
static void bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event) { bfa_trc(fcport->bfa, event); switch (event) { case BFA_FCPORT_SM_START: /** * Ignore start event for a port that is disabled. */ break; case BFA_FCPORT_SM_STOP: bfa_sm_set_state(fcport, bfa_fcport_sm_stopped); break; case BFA_FCPORT_SM_ENABLE: if (bfa_fcport_send_enable(fcport)) bfa_sm_set_state(fcport, bfa_fcport_sm_enabling); else bfa_sm_set_state(fcport, bfa_fcport_sm_enabling_qwait); bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_PORT_ENABLE, 0, "Port Enable"); bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE); break; case BFA_FCPORT_SM_DISABLE: /** * Already disabled. */ break; case BFA_FCPORT_SM_HWFAIL: bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail); break; default: bfa_sm_fault(fcport->bfa, event); } }
bfa_status_t bfad_drv_init(struct bfad_s *bfad) { bfa_status_t rc; unsigned long flags; struct bfa_fcs_driver_info_s driver_info; int i; bfad->cfg_data.rport_del_timeout = rport_del_timeout; bfad->cfg_data.lun_queue_depth = bfa_lun_queue_depth; bfad->cfg_data.io_max_sge = bfa_io_max_sge; bfad->cfg_data.binding_method = FCP_PWWN_BINDING; rc = bfad_hal_mem_alloc(bfad); if (rc != BFA_STATUS_OK) { printk(KERN_WARNING "bfad%d bfad_hal_mem_alloc failure\n", bfad->inst_no); printk(KERN_WARNING "Not enough memory to attach all Brocade HBA ports," " System may need more memory.\n"); goto out_hal_mem_alloc_failure; } bfa_init_log(&bfad->bfa, bfad->logmod); bfa_init_trc(&bfad->bfa, bfad->trcmod); bfa_init_aen(&bfad->bfa, bfad->aen); INIT_LIST_HEAD(&bfad->file_q); INIT_LIST_HEAD(&bfad->file_free_q); for (i = 0; i < BFAD_AEN_MAX_APPS; i++) { bfa_q_qe_init(&bfad->file_buf[i].qe); list_add_tail(&bfad->file_buf[i].qe, &bfad->file_free_q); } bfa_init_plog(&bfad->bfa, &bfad->plog_buf); bfa_plog_init(&bfad->plog_buf); bfa_plog_str(&bfad->plog_buf, BFA_PL_MID_DRVR, BFA_PL_EID_DRIVER_START, 0, "Driver Attach"); bfa_attach(&bfad->bfa, bfad, &bfad->ioc_cfg, &bfad->meminfo, &bfad->hal_pcidev); init_completion(&bfad->comp); /* * Enable Interrupt and wait bfa_init completion */ if (bfad_setup_intr(bfad)) { printk(KERN_WARNING "bfad%d: bfad_setup_intr failed\n", bfad->inst_no); goto out_setup_intr_failure; } spin_lock_irqsave(&bfad->bfad_lock, flags); bfa_init(&bfad->bfa); spin_unlock_irqrestore(&bfad->bfad_lock, flags); /* * Set up interrupt handler for each vectors */ if ((bfad->bfad_flags & BFAD_MSIX_ON) && bfad_install_msix_handler(bfad)) { printk(KERN_WARNING "%s: install_msix failed, bfad%d\n", __func__, bfad->inst_no); } bfad_init_timer(bfad); wait_for_completion(&bfad->comp); memset(&driver_info, 0, sizeof(driver_info)); strncpy(driver_info.version, BFAD_DRIVER_VERSION, sizeof(driver_info.version) - 1); if (host_name) strncpy(driver_info.host_machine_name, host_name, 
sizeof(driver_info.host_machine_name) - 1); if (os_name) strncpy(driver_info.host_os_name, os_name, sizeof(driver_info.host_os_name) - 1); if (os_patch) strncpy(driver_info.host_os_patch, os_patch, sizeof(driver_info.host_os_patch) - 1); strncpy(driver_info.os_device_name, bfad->pci_name, sizeof(driver_info.os_device_name - 1)); /* * FCS INIT */ spin_lock_irqsave(&bfad->bfad_lock, flags); bfa_fcs_log_init(&bfad->bfa_fcs, bfad->logmod); bfa_fcs_trc_init(&bfad->bfa_fcs, bfad->trcmod); bfa_fcs_aen_init(&bfad->bfa_fcs, bfad->aen); bfa_fcs_init(&bfad->bfa_fcs, &bfad->bfa, bfad, BFA_FALSE); bfa_fcs_driver_info_init(&bfad->bfa_fcs, &driver_info); spin_unlock_irqrestore(&bfad->bfad_lock, flags); bfad->bfad_flags |= BFAD_DRV_INIT_DONE; return BFA_STATUS_OK; out_setup_intr_failure: bfa_detach(&bfad->bfa); bfad_hal_mem_release(bfad); out_hal_mem_alloc_failure: return BFA_STATUS_FAILED; }
/**
 * Port is online with an established link. Handle link loss, disable,
 * stop and IOC failure; each tears down the link info and notifies
 * upper layers and AEN listeners as appropriate.
 */
static void
bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
		     enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_ENABLE:
		/**
		 * Already enabled.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * Send disable now if the request queue has room, else
		 * wait for queue space; either way the link is coming down.
		 */
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);

		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_callback(fcport, BFA_PPORT_LINKDOWN);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			     BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
		break;

	case BFA_FCPORT_SM_LINKDOWN:
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_callback(fcport, BFA_PPORT_LINKDOWN);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			     BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown");

		/*
		 * OFFLINE when the port was taken down administratively,
		 * DISCONNECT when the link itself was lost.
		 */
		if (BFA_PORT_IS_DISABLED(fcport->bfa))
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
		else
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		break;

	case BFA_FCPORT_SM_STOP:
		/* Stop: no LINKDOWN callback to upper layers, only AEN. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		bfa_fcport_reset_linkinfo(fcport);
		if (BFA_PORT_IS_DISABLED(fcport->bfa))
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
		else
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		/* IOC failure: link is implicitly down. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_callback(fcport, BFA_PPORT_LINKDOWN);
		if (BFA_PORT_IS_DISABLED(fcport->bfa))
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
		else
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
/**
 * FC port is in link-down state: enabled in firmware but no link.
 * Wait for a link-up notification; also handle enable/disable/stop
 * requests and IOC failure.
 */
static void
bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
		       enum bfa_fcport_sm_event event)
{
	/*
	 * Firmware event payload; only dereferenced on LINKUP, where it
	 * carries the FIP/FCF discovery status for FCoE ports.
	 */
	struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;

	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_LINKUP:
		/* Latch link parameters before notifying upper layers. */
		bfa_fcport_update_linkinfo(fcport);
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);

		/* A link-event callback must be registered by now. */
		bfa_assert(fcport->event_cbfn);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			     BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkup");

		/*
		 * Non-FC (FCoE) mode: log the FIP FCF discovery outcome
		 * reported by firmware.
		 */
		if (!bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {
			bfa_trc(fcport->bfa,
				pevent->link_state.vc_fcf.fcf.fipenabled);
			bfa_trc(fcport->bfa,
				pevent->link_state.vc_fcf.fcf.fipfailed);
			if (pevent->link_state.vc_fcf.fcf.fipfailed)
				bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
					     BFA_PL_EID_FIP_FCF_DISC, 0,
					     "FIP FCF Discovery Failed");
			else
				bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
					     BFA_PL_EID_FIP_FCF_DISC, 0,
					     "FIP FCF Discovered");
		}

		bfa_fcport_callback(fcport, BFA_PPORT_LINKUP);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ONLINE);

		/**
		 * If QoS is enabled and it is not online,
		 * Send a separate event.
		 */
		if ((fcport->cfg.qos_enabled)
		    && (bfa_os_ntohl(fcport->qos_attr.state) != BFA_QOS_ONLINE))
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_QOS_NEG);
		break;

	case BFA_FCPORT_SM_LINKDOWN:
		/**
		 * Possible to get link down event.
		 */
		break;

	case BFA_FCPORT_SM_ENABLE:
		/**
		 * Already enabled.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * Send disable now if the request queue has room, else
		 * wait for queue space before sending it.
		 */
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			     BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}