Example #1
/**
 * IO is not started (unallocated).
 */
static void
bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_trc_fp(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_START:
		if (!bfa_itnim_is_online(ioim->itnim)) {
			if (!bfa_itnim_hold_io(ioim->itnim)) {
				bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
				list_del(&ioim->qe);
				list_add_tail(&ioim->qe,
					&ioim->fcpim->ioim_comp_q);
				bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
						__bfa_cb_ioim_pathtov, ioim);
			} else {
				list_del(&ioim->qe);
				list_add_tail(&ioim->qe,
					&ioim->itnim->pending_q);
			}
			break;
		}

		if (ioim->nsges > BFI_SGE_INLINE) {
			if (!bfa_ioim_sge_setup(ioim)) {
				bfa_sm_set_state(ioim, bfa_ioim_sm_sgalloc);
				return;
			}
		}

		if (!bfa_ioim_send_ioreq(ioim)) {
			bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
			break;
		}

		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		break;

	case BFA_IOIM_SM_IOTOV:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
				__bfa_cb_ioim_pathtov, ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/**
		 * IO in pending queue can get abort requests. Complete abort
		 * requests immediately.
		 */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_assert(bfa_q_is_on_q(&ioim->itnim->pending_q, ioim));
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
				ioim);
		break;

	default:
		bfa_assert(0);
	}
}
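All of the bfa_*_sm_* handlers in these examples follow the same function-pointer state-machine pattern: each state is a function, and events are dispatched to whatever function is currently installed. A minimal sketch of the driving macros, modelled on the driver's bfa_sm.h (the member name `sm` and the exact definitions are assumptions, not the verbatim kernel source):

/* Hedged sketch of the state-machine plumbing assumed by the handlers above. */
typedef void (*bfa_sm_t) (void *sm, int event);

#define bfa_sm_set_state(_sm, _state)	((_sm)->sm = (bfa_sm_t)(_state))
#define bfa_sm_send_event(_sm, _event)	((_sm)->sm((_sm), (_event)))
#define bfa_sm_cmp_state(_sm, _state)	((_sm)->sm == (bfa_sm_t)(_state))

/* e.g. starting an IO dispatches BFA_IOIM_SM_START to the current state:  */
/*	bfa_sm_send_event(ioim, BFA_IOIM_SM_START);			    */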
Example #2
void
bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe, int nsgpg)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);

	bfa_assert(nsgpg > 0);
	bfa_assert(nsgpg > mod->free_sgpgs);

	wqe->nsgpg_total = wqe->nsgpg = nsgpg;

	/**
	 * Allocate whatever is left in the free pool to this request first.
	 */
	if (mod->free_sgpgs) {
		/**
		 * no one else is waiting for SGPG
		 */
		bfa_assert(list_empty(&mod->sgpg_wait_q));
		bfa_q_enq_q(&wqe->sgpg_q, &mod->sgpg_q);
		wqe->nsgpg -= mod->free_sgpgs;
		mod->free_sgpgs = 0;
	}

	list_add_tail(&wqe->qe, &mod->sgpg_wait_q);
}
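A caller normally initializes the wait-queue element before calling bfa_sgpg_wait(), so the SGPG module can hand pages back through a callback once they are freed. A minimal sketch of that initializer, assuming a helper along the lines of the driver's bfa_sgpg_winit() (field names cbfn/cbarg are assumptions):

/* Hedged sketch of the waiter initialization; callback fields are assumed. */
void
bfa_sgpg_winit(struct bfa_sgpg_wqe_s *wqe, void (*cbfn) (void *cbarg),
	       void *cbarg)
{
	INIT_LIST_HEAD(&wqe->sgpg_q);
	wqe->cbfn = cbfn;	/* invoked when enough SG pages are freed */
	wqe->cbarg = cbarg;
}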
Example #3
/**
 * IO is being aborted, waiting for completion from firmware.
 */
static void
bfa_ioim_sm_abort(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
	case BFA_IOIM_SM_DONE:
	case BFA_IOIM_SM_FREE:
		break;

	case BFA_IOIM_SM_ABORT_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_ABORT_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_COMP_UTAG:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_assert(ioim->iosp->abort_explicit == BFA_TRUE);
		ioim->iosp->abort_explicit = BFA_FALSE;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		else {
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
			bfa_reqq_wait(ioim->bfa, ioim->itnim->reqq,
					  &ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_assert(0);
	}
}
Example #4
/**
 * Should be called with lock protection
 */
void
bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
		    void (*timercb) (void *), void *arg, unsigned int timeout)
{

	bfa_assert(timercb != NULL);
	bfa_assert(!bfa_q_is_on_q(&mod->timer_q, timer));

	timer->timeout = timeout;
	timer->timercb = timercb;
	timer->arg = arg;

	list_add_tail(&timer->qe, &mod->timer_q);
}
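The timer queue built up by bfa_timer_begin() is drained by a periodic beat routine that ages each pending timeout and fires the expired callbacks. A simplified sketch of that loop, modelled on the driver's bfa_timer_beat(); the tick constant BFA_TIMER_FREQ and the two-pass structure are assumptions:

/* Hedged sketch: periodic tick that expires timers queued by bfa_timer_begin(). */
void
bfa_timer_beat(struct bfa_timer_mod_s *mod)
{
	struct bfa_timer_s *timer, *tmp;
	struct list_head timedout_q;

	INIT_LIST_HEAD(&timedout_q);

	/* move expired entries to a private list, age the rest */
	list_for_each_entry_safe(timer, tmp, &mod->timer_q, qe) {
		if (timer->timeout <= BFA_TIMER_FREQ) {
			timer->timeout = 0;
			list_del(&timer->qe);
			list_add_tail(&timer->qe, &timedout_q);
		} else {
			timer->timeout -= BFA_TIMER_FREQ;
		}
	}

	/* fire the callbacks outside of the walk over timer_q */
	list_for_each_entry_safe(timer, tmp, &timedout_q, qe) {
		list_del(&timer->qe);
		timer->timercb(timer->arg);
	}
}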
Example #5
/**
 * IO bfa callback is pending. IO resource cannot be freed.
 */
static void
bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_HCB:
		bfa_sm_set_state(ioim, bfa_ioim_sm_resfree);
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_resfree_q);
		break;

	case BFA_IOIM_SM_FREE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		break;

	default:
		bfa_assert(0);
	}
}
Example #6
/**
 * bfa_port_disable()
 *
 *   Send the Port disable request to the f/w
 *
 * @param[in] port  - Pointer to the Port module data structure.
 * @param[in] cbfn  - Completion callback invoked when the disable finishes.
 * @param[in] cbarg - Callback argument.
 *
 * @return Status
 */
bfa_status_t
bfa_port_disable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn,
		 void *cbarg)
{
	struct bfi_port_generic_req_s *m;

	/** todo Not implemented */
	bfa_assert(0);

	if (!bfa_ioc_is_operational(port->ioc)) {
		bfa_trc(port, BFA_STATUS_IOC_FAILURE);
		return BFA_STATUS_IOC_FAILURE;
	}

	if (port->endis_pending) {
		bfa_trc(port, BFA_STATUS_DEVBUSY);
		return BFA_STATUS_DEVBUSY;
	}

	m = (struct bfi_port_generic_req_s *)port->endis_mb.msg;

	port->msgtag++;
	port->endis_cbfn = cbfn;
	port->endis_cbarg = cbarg;
	port->endis_pending = BFA_TRUE;

	bfi_h2i_set(m->mh, BFI_MC_PORT, BFI_PORT_H2I_DISABLE_REQ,
		    bfa_ioc_portid(port->ioc));
	bfa_ioc_mbox_queue(port->ioc, &port->endis_mb);

	return BFA_STATUS_OK;
}
Example #7
/**
 * bfa_port_attach()
 *
 *
 * @param[in] port - Pointer to the Port module data structure
 *            ioc  - Pointer to the ioc module data structure
 *            dev  - Pointer to the device driver module data structure
 *                   The device driver specific mbox ISR functions have
 *                   this pointer as one of the parameters.
 *            trcmod - Pointer to the trace module data structure
 *            logmod - Pointer to the log module data structure
 *
 * @return void
 */
void
bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc, void *dev,
		struct bfa_trc_mod_s *trcmod, struct bfa_log_mod_s *logmod)
{
	struct bfa_timeval_s tv;

	bfa_assert(port);

	port->dev = dev;
	port->ioc = ioc;
	port->trcmod = trcmod;
	port->logmod = logmod;

	port->stats_busy = BFA_FALSE;
	port->endis_pending = BFA_FALSE;
	port->stats_cbfn = NULL;
	port->endis_cbfn = NULL;

	bfa_ioc_mbox_regisr(port->ioc, BFI_MC_PORT, bfa_port_isr, port);
	bfa_ioc_hbfail_init(&port->hbfail, bfa_port_hbfail, port);
	bfa_ioc_hbfail_register(port->ioc, &port->hbfail);

	/**
	 * initialize time stamp for stats reset
	 */
	bfa_os_gettimeofday(&tv);
	port->stats_reset_time = tv.tv_sec;

	bfa_trc(port, 0);
}
Example #8
/**
 * Offline state - awaiting ONLINE event from fabric SM.
 */
static void
bfa_fcs_vport_sm_offline(struct bfa_fcs_vport_s *vport,
			 enum bfa_fcs_vport_event event)
{
	bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
	bfa_trc(__vport_fcs(vport), event);

	switch (event) {
	case BFA_FCS_VPORT_SM_DELETE:
		bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
		bfa_fcs_port_delete(&vport->lport);
		break;

	case BFA_FCS_VPORT_SM_ONLINE:
		bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc);
		vport->fdisc_retries = 0;
		bfa_fcs_vport_do_fdisc(vport);
		break;

	case BFA_FCS_VPORT_SM_OFFLINE:
		/*
		 * This can happen if the vport could not be initialized
		 * because NPIV was not enabled on the switch; in that case
		 * the vport is left in the offline state. However, the link
		 * can also go down and cause this event to be sent when we
		 * are already offline. Ignore it.
		 */
		break;

	default:
		bfa_assert(0);
	}
}
Example #9
static void
bfa_fcs_port_ms_sm_gfn_retry(struct bfa_fcs_port_ms_s *ms,
			     enum port_ms_event event)
{
	bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
	bfa_trc(ms->port->fcs, event);

	switch (event) {
	case MSSM_EVENT_TIMEOUT:
		/*
		 * Retry Timer Expired. Re-send
		 */
		bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_gfn_sending);
		bfa_fcs_port_ms_send_gfn(ms, NULL);
		break;

	case MSSM_EVENT_PORT_OFFLINE:
		bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_offline);
		bfa_timer_stop(&ms->timer);
		break;

	default:
		bfa_assert(0);
	}
}
Example #10
/**
 * Use the Mailbox interface to send BFI_IOCFC_H2I_CFG_REQ
 */
static void
bfa_iocfc_send_cfg(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfg_req_s cfg_req;
	struct bfi_iocfc_cfg_s *cfg_info = iocfc->cfginfo;
	struct bfa_iocfc_cfg_s	*cfg = &iocfc->cfg;
	int		i;

	bfa_assert(cfg->fwcfg.num_cqs <= BFI_IOC_MAX_CQS);
	bfa_trc(bfa, cfg->fwcfg.num_cqs);

	bfa_iocfc_reset_queues(bfa);

	/**
	 * initialize IOC configuration info
	 */
	cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG;
	cfg_info->num_cqs = cfg->fwcfg.num_cqs;

	bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa);
	/**
	 * dma map REQ and RSP circular queues and shadow pointers
	 */
	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		bfa_dma_be_addr_set(cfg_info->req_cq_ba[i],
				    iocfc->req_cq_ba[i].pa);
		bfa_dma_be_addr_set(cfg_info->req_shadow_ci[i],
				    iocfc->req_cq_shadow_ci[i].pa);
		cfg_info->req_cq_elems[i] =
			bfa_os_htons(cfg->drvcfg.num_reqq_elems);

		bfa_dma_be_addr_set(cfg_info->rsp_cq_ba[i],
				    iocfc->rsp_cq_ba[i].pa);
		bfa_dma_be_addr_set(cfg_info->rsp_shadow_pi[i],
				    iocfc->rsp_cq_shadow_pi[i].pa);
		cfg_info->rsp_cq_elems[i] =
			bfa_os_htons(cfg->drvcfg.num_rspq_elems);
	}

	/**
	 * Enable interrupt coalescing if it is driver init path
	 * and not ioc disable/enable path.
	 */
	if (!iocfc->cfgdone)
		cfg_info->intr_attr.coalesce = BFA_TRUE;

	iocfc->cfgdone = BFA_FALSE;

	/**
	 * dma map IOC configuration itself
	 */
	bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ,
		    bfa_lpuid(bfa));
	bfa_dma_be_addr_set(cfg_req.ioc_cfg_dma_addr, iocfc->cfg_info.pa);

	bfa_ioc_mbox_send(&bfa->ioc, &cfg_req,
			  sizeof(struct bfi_iocfc_cfg_req_s));
}
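bfa_dma_be_addr_set() converts a host DMA address into the big-endian split format the firmware expects in the configuration message. An approximate sketch of that helper; the union layout, byte-swap helper, and wrapper macro are shown as assumptions:

/* Hedged sketch of the big-endian DMA address helper used above. */
static inline void
__bfa_dma_be_addr_set(union bfi_addr_u *dma_addr, u64 pa)
{
	dma_addr->a32.addr_lo = (u32) bfa_os_htonl((u32) pa);		/* low 32 bits  */
	dma_addr->a32.addr_hi = (u32) bfa_os_htonl((u32) (pa >> 32));	/* high 32 bits */
}

#define bfa_dma_be_addr_set(dma_addr, pa)	\
		__bfa_dma_be_addr_set(&dma_addr, (u64)pa)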
Example #11
/**
 * LOGO is sent to fabric. Vport delete is in progress. Lport delete cleanup
 * is done.
 */
static void
bfa_fcs_vport_sm_logo(struct bfa_fcs_vport_s *vport,
		      enum bfa_fcs_vport_event event)
{
	bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
	bfa_trc(__vport_fcs(vport), event);

	switch (event) {
	case BFA_FCS_VPORT_SM_OFFLINE:
		bfa_lps_discard(vport->lps);
		/*
		 * !!! fall through !!!
		 */

	case BFA_FCS_VPORT_SM_RSP_OK:
	case BFA_FCS_VPORT_SM_RSP_ERROR:
		bfa_sm_set_state(vport, bfa_fcs_vport_sm_uninit);
		bfa_fcs_vport_free(vport);
		break;

	case BFA_FCS_VPORT_SM_DELETE:
		break;

	default:
		bfa_assert(0);
	}
}
Example #12
/**
 * Should be called with lock protection
 */
void
bfa_timer_stop(struct bfa_timer_s *timer)
{
	bfa_assert(!list_empty(&timer->qe));

	list_del(&timer->qe);
}
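The bfa_timer_start() call seen in several of the FCS state machines (e.g. Example #22) pairs with the bfa_timer_stop() above. As far as these snippets show, it is a thin wrapper that resolves the timer module from the bfa instance and forwards to bfa_timer_begin(); a hedged sketch, with the field name timer_mod assumed:

/* Hedged sketch: assumed wrapper tying the FCS timer calls to bfa_timer_begin(). */
#define bfa_timer_start(_bfa, _timer, _timercb, _arg, _timeout)	\
	bfa_timer_begin(&(_bfa)->timer_mod, _timer, _timercb, _arg, _timeout)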
Example #13
void
bfad_hal_mem_release(struct bfad_s *bfad)
{
	int             i;
	struct bfa_meminfo_s *hal_meminfo = &bfad->meminfo;
	struct bfa_mem_elem_s *meminfo_elem;

	for (i = 0; i < BFA_MEM_TYPE_MAX; i++) {
		meminfo_elem = &hal_meminfo->meminfo[i];
		if (meminfo_elem->kva != NULL) {
			switch (meminfo_elem->mem_type) {
			case BFA_MEM_TYPE_KVA:
				vfree(meminfo_elem->kva);
				break;
			case BFA_MEM_TYPE_DMA:
				dma_free_coherent(&bfad->pcidev->dev,
						meminfo_elem->mem_len,
						meminfo_elem->kva,
						(dma_addr_t) meminfo_elem->dma);
				break;
			default:
				bfa_assert(0);
				break;
			}
		}
	}

	memset(hal_meminfo, 0, sizeof(struct bfa_meminfo_s));
}
Example #14
static void
bfa_fcs_itnim_sm_online(struct bfa_fcs_itnim_s *itnim,
			enum bfa_fcs_itnim_event event)
{
	bfa_trc(itnim->fcs, itnim->rport->pwwn);
	bfa_trc(itnim->fcs, event);

	switch (event) {
	case BFA_FCS_ITNIM_SM_OFFLINE:
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_hcb_offline);
		bfa_fcb_itnim_offline(itnim->itnim_drv);
		bfa_itnim_offline(itnim->bfa_itnim);
		if (bfa_fcs_port_is_online(itnim->rport->port) == BFA_TRUE)
			bfa_fcs_itnim_aen_post(itnim, BFA_ITNIM_AEN_DISCONNECT);
		else
			bfa_fcs_itnim_aen_post(itnim, BFA_ITNIM_AEN_OFFLINE);
		break;

	case BFA_FCS_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
		bfa_fcs_itnim_free(itnim);
		break;

	default:
		bfa_assert(0);
	}
}
Example #15
/*
 * This state is set when a discovered rport is also in initiator mode.
 * This ITN is marked as no_op, is not active, and will not be turned
 * into the online state.
 */
static void
bfa_fcs_itnim_sm_initiator(struct bfa_fcs_itnim_s *itnim,
			   enum bfa_fcs_itnim_event event)
{
	bfa_trc(itnim->fcs, itnim->rport->pwwn);
	bfa_trc(itnim->fcs, event);

	switch (event) {
	case BFA_FCS_ITNIM_SM_OFFLINE:
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
		bfa_fcs_rport_itnim_ack(itnim->rport);
		break;

	case BFA_FCS_ITNIM_SM_RSP_ERROR:
	case BFA_FCS_ITNIM_SM_ONLINE:
	case BFA_FCS_ITNIM_SM_INITIATOR:
		break;

	case BFA_FCS_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
		bfa_fcs_itnim_free(itnim);
		break;

	default:
		bfa_assert(0);
	}
}
Example #16
static void
bfa_fcs_itnim_sm_prli_retry(struct bfa_fcs_itnim_s *itnim,
			    enum bfa_fcs_itnim_event event)
{
	bfa_trc(itnim->fcs, itnim->rport->pwwn);
	bfa_trc(itnim->fcs, event);

	switch (event) {
	case BFA_FCS_ITNIM_SM_TIMEOUT:
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_prli_send);
		bfa_fcs_itnim_send_prli(itnim, NULL);
		break;

	case BFA_FCS_ITNIM_SM_OFFLINE:
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
		bfa_timer_stop(&itnim->timer);
		bfa_fcs_rport_itnim_ack(itnim->rport);
		break;

	case BFA_FCS_ITNIM_SM_INITIATOR:
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator);
		bfa_timer_stop(&itnim->timer);
		break;

	case BFA_FCS_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
		bfa_timer_stop(&itnim->timer);
		bfa_fcs_itnim_free(itnim);
		break;

	default:
		bfa_assert(0);
	}
}
Example #17
static void
bfa_fcs_itnim_sm_prli_send(struct bfa_fcs_itnim_s *itnim,
			   enum bfa_fcs_itnim_event event)
{
	bfa_trc(itnim->fcs, itnim->rport->pwwn);
	bfa_trc(itnim->fcs, event);

	switch (event) {
	case BFA_FCS_ITNIM_SM_FRMSENT:
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_prli);
		break;

	case BFA_FCS_ITNIM_SM_INITIATOR:
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator);
		bfa_fcxp_walloc_cancel(itnim->fcs->bfa, &itnim->fcxp_wqe);
		break;

	case BFA_FCS_ITNIM_SM_OFFLINE:
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
		bfa_fcxp_walloc_cancel(itnim->fcs->bfa, &itnim->fcxp_wqe);
		bfa_fcs_rport_itnim_ack(itnim->rport);
		break;

	case BFA_FCS_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
		bfa_fcxp_walloc_cancel(itnim->fcs->bfa, &itnim->fcxp_wqe);
		bfa_fcs_itnim_free(itnim);
		break;

	default:
		bfa_assert(0);
	}
}
Example #18
void
bfa_fcs_port_loop_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
				 void *cbarg, bfa_status_t req_status,
				 u32 rsp_len, u32 resid_len,
				 struct fchs_s *rsp_fchs)
{
	struct bfa_fcs_port_s *port = (struct bfa_fcs_port_s *) cbarg;
	struct fc_logi_s     *plogi_resp;
	struct fc_els_cmd_s   *els_cmd;

	bfa_trc(port->fcs, req_status);

	if (req_status != BFA_STATUS_OK) {
		bfa_trc(port->fcs, req_status);
		return;
	}

	els_cmd = (struct fc_els_cmd_s *) BFA_FCXP_RSP_PLD(fcxp);
	plogi_resp = (struct fc_logi_s *) els_cmd;

	if (els_cmd->els_code == FC_ELS_ACC) {
		bfa_fcs_rport_start(port, rsp_fchs, plogi_resp);
	} else {
		bfa_trc(port->fcs, plogi_resp->els_cmd.els_code);
		bfa_assert(0);
	}
}
Example #19
static void
bfa_fcs_rpf_sm_uninit(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
{
	struct bfa_fcs_rport_s *rport = rpf->rport;

	bfa_trc(rport->fcs, rport->pwwn);
	bfa_trc(rport->fcs, rport->pid);
	bfa_trc(rport->fcs, event);

	switch (event) {
	case RPFSM_EVENT_RPORT_ONLINE:
		if (!BFA_FCS_PID_IS_WKA(rport->pid)) {
			bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc_sending);
			rpf->rpsc_retries = 0;
			bfa_fcs_rpf_send_rpsc2(rpf, NULL);
			break;
		}
		/* Well-known address ports do not need RPSC; fall through. */

	case RPFSM_EVENT_RPORT_OFFLINE:
		break;

	default:
		bfa_assert(0);
	}
}
Example #20
static void
bfa_fcs_rpf_sm_rpsc_retry(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
{
	struct bfa_fcs_rport_s *rport = rpf->rport;

	bfa_trc(rport->fcs, rport->pid);
	bfa_trc(rport->fcs, event);

	switch (event) {
	case RPFSM_EVENT_TIMEOUT:
		/* re-send the RPSC */
		bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc_sending);
		bfa_fcs_rpf_send_rpsc2(rpf, NULL);
		break;

	case RPFSM_EVENT_RPORT_OFFLINE:
		bfa_timer_stop(&rpf->timer);
		bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_offline);
		rpf->rpsc_retries = 0;
		break;

	default:
		bfa_assert(0);
	}
}
Example #21
/**
 * Local Functions.
 */
static bfa_status_t
bfa_fcs_port_loop_send_plogi(struct bfa_fcs_port_s *port, u8 alpa)
{
	struct fchs_s fchs;
	struct bfa_fcxp_s *fcxp = NULL;
	int len;

	bfa_trc(port->fcs, alpa);

	fcxp = bfa_fcxp_alloc(NULL, port->fcs->bfa, 0, 0, NULL, NULL, NULL,
			      NULL);
	bfa_assert(fcxp);

	len = fc_plogi_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), alpa,
			     bfa_fcs_port_get_fcid(port), 0,
			     port->port_cfg.pwwn, port->port_cfg.nwwn,
			     bfa_fcport_get_maxfrsize(port->fcs->bfa));

	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
		      FC_CLASS_3, len, &fchs,
		      bfa_fcs_port_loop_plogi_response, (void *)port,
		      FC_MAX_PDUSZ, FC_RA_TOV);

	return BFA_STATUS_OK;
}
Example #22
static void
bfa_fcs_port_ns_sm_rspn_id(struct bfa_fcs_port_ns_s *ns,
			   enum vport_ns_event event)
{
	bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
	bfa_trc(ns->port->fcs, event);

	switch (event) {
	case NSSM_EVENT_RSP_ERROR:
		/*
		 * Start timer for a delayed retry
		 */
		bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_rspn_id_retry);
		ns->port->stats.ns_retries++;
		bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port), &ns->timer,
				bfa_fcs_port_ns_timeout, ns,
				BFA_FCS_RETRY_TIMEOUT);
		break;

	case NSSM_EVENT_RSP_OK:
		bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_sending_rft_id);
		bfa_fcs_port_ns_send_rft_id(ns, NULL);
		break;

	case NSSM_EVENT_PORT_OFFLINE:
		bfa_fcxp_discard(ns->fcxp);
		bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_offline);
		break;

	default:
		bfa_assert(0);
	}
}
Example #23
/**
 * Use this function to attach the driver instance to the BFA
 * library. This function will not trigger any HW initialization
 * process (which will be done in bfa_init() call)
 *
 * This call will fail if the configuration (cfg) values are out of range
 * compared to the pre-defined values within the BFA library
 *
 * @param[out]	bfa	Pointer to bfa_t.
 * @param[in]	bfad	Opaque handle back to the driver's IOC structure
 * @param[in]	cfg	Pointer to bfa_ioc_cfg_t. Should be same structure
 *			that was used in bfa_cfg_get_meminfo().
 * @param[in]	meminfo	Pointer to bfa_meminfo_t. The driver should
 *			use the bfa_cfg_get_meminfo() call to
 *			find the memory blocks required, allocate the
 *			required memory and provide the starting addresses.
 * @param[in]	pcidev	pointer to struct bfa_pcidev_s
 *
 * @return void
 */
void
bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
	       struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	int			i;
	struct bfa_mem_elem_s	*melem;

	bfa->fcs = BFA_FALSE;

	bfa_assert((cfg != NULL) && (meminfo != NULL));

	/**
	 * initialize all memory pointers for iterative allocation
	 */
	for (i = 0; i < BFA_MEM_TYPE_MAX; i++) {
		melem = meminfo->meminfo + i;
		melem->kva_curp = melem->kva;
		melem->dma_curp = melem->dma;
	}

	bfa_iocfc_attach(bfa, bfad, cfg, meminfo, pcidev);

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->attach(bfa, bfad, cfg, meminfo, pcidev);

	bfa_com_port_attach(bfa, meminfo);
}
Example #24
static void
bfa_fcs_port_ns_sm_gid_ft(struct bfa_fcs_port_ns_s *ns,
			  enum vport_ns_event event)
{
	bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
	bfa_trc(ns->port->fcs, event);

	switch (event) {
	case NSSM_EVENT_RSP_OK:
		bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_online);
		break;

	case NSSM_EVENT_RSP_ERROR:
		/*
		 * TBD: for certain reject codes, we don't need to retry
		 */
		/*
		 * Start timer for a delayed retry
		 */
		bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_gid_ft_retry);
		ns->port->stats.ns_retries++;
		bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port), &ns->timer,
				bfa_fcs_port_ns_timeout, ns,
				BFA_FCS_RETRY_TIMEOUT);
		break;

	case NSSM_EVENT_PORT_OFFLINE:
		bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_offline);
		bfa_fcxp_discard(ns->fcxp);
		break;

	default:
		bfa_assert(0);
	}
}
Example #25
/**
 * Set up any additional SG pages needed. The inline SG element is set up
 * at queuing time.
 */
static bfa_boolean_t
bfa_ioim_sge_setup(struct bfa_ioim_s *ioim)
{
	u16        nsgpgs;

	bfa_assert(ioim->nsges > BFI_SGE_INLINE);

	/**
	 * allocate SG pages needed
	 */
	nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
	if (!nsgpgs)
		return BFA_TRUE;

	if (bfa_sgpg_malloc(ioim->bfa, &ioim->sgpg_q, nsgpgs)
	    != BFA_STATUS_OK) {
		bfa_sgpg_wait(ioim->bfa, &ioim->iosp->sgpg_wqe, nsgpgs);
		return BFA_FALSE;
	}

	ioim->nsgpgs = nsgpgs;
	bfa_ioim_sgpg_setup(ioim);

	return BFA_TRUE;
}
Example #26
/**
 * FDISC attempt failed - a timer is active to retry FDISC.
 */
static void
bfa_fcs_vport_sm_fdisc_retry(struct bfa_fcs_vport_s *vport,
			     enum bfa_fcs_vport_event event)
{
	bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
	bfa_trc(__vport_fcs(vport), event);

	switch (event) {
	case BFA_FCS_VPORT_SM_DELETE:
		bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
		bfa_timer_stop(&vport->timer);
		bfa_fcs_port_delete(&vport->lport);
		break;

	case BFA_FCS_VPORT_SM_OFFLINE:
		bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline);
		bfa_timer_stop(&vport->timer);
		break;

	case BFA_FCS_VPORT_SM_TIMEOUT:
		bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc);
		vport->vport_stats.fdisc_retries++;
		vport->fdisc_retries++;
		bfa_fcs_vport_do_fdisc(vport);
		break;

	default:
		bfa_assert(0);
	}
}
Example #27
static void
bfa_fcs_port_ns_sm_rspn_id_retry(struct bfa_fcs_port_ns_s *ns,
				 enum vport_ns_event event)
{
	bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
	bfa_trc(ns->port->fcs, event);

	switch (event) {
	case NSSM_EVENT_TIMEOUT:
		/*
		 * Retry Timer Expired. Re-send
		 */
		bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_sending_rspn_id);
		bfa_fcs_port_ns_send_rspn_id(ns, NULL);
		break;

	case NSSM_EVENT_PORT_OFFLINE:
		bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_offline);
		bfa_timer_stop(&ns->timer);
		break;

	default:
		bfa_assert(0);
	}
}
Example #28
static void
bfa_fcs_port_ms_sm_online(struct bfa_fcs_port_ms_s *ms,
			  enum port_ms_event event)
{
	bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
	bfa_trc(ms->port->fcs, event);

	switch (event) {
	case MSSM_EVENT_PORT_OFFLINE:
		bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_offline);
		/*
		 * now invoke MS related sub-modules
		 */
		bfa_fcs_port_fdmi_offline(ms);
		break;

	case MSSM_EVENT_PORT_FABRIC_RSCN:
		bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_gfn_sending);
		ms->retry_cnt = 0;
		bfa_fcs_port_ms_send_gfn(ms, NULL);
		break;

	default:
		bfa_assert(0);
	}
}
Example #29
static void
bfa_fcs_port_ns_sm_online(struct bfa_fcs_port_ns_s *ns,
			  enum vport_ns_event event)
{
	bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
	bfa_trc(ns->port->fcs, event);

	switch (event) {
	case NSSM_EVENT_PORT_OFFLINE:
		bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_offline);
		break;

	case NSSM_EVENT_NS_QUERY:
		/*
		 * If the port role is Initiator Mode, issue an NS query.
		 * If it is Target Mode, skip this and go to online.
		 */
		if (BFA_FCS_VPORT_IS_INITIATOR_MODE(ns->port)) {
			bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_sending_gid_ft);
			bfa_fcs_port_ns_send_gid_ft(ns, NULL);
		}
		break;

	default:
		bfa_assert(0);
	}
}
Example #30
static void
bfa_fcs_port_ms_sm_gfn(struct bfa_fcs_port_ms_s *ms, enum port_ms_event event)
{
	bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
	bfa_trc(ms->port->fcs, event);

	switch (event) {
	case MSSM_EVENT_RSP_ERROR:
		/*
		 * Start timer for a delayed retry
		 */
		if (ms->retry_cnt++ < BFA_FCS_MS_CMD_MAX_RETRIES) {
			bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_gfn_retry);
			bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ms->port),
					&ms->timer, bfa_fcs_port_ms_timeout, ms,
					BFA_FCS_RETRY_TIMEOUT);
		} else {
			bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_online);
			ms->retry_cnt = 0;
		}
		break;

	case MSSM_EVENT_RSP_OK:
		bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_online);
		break;

	case MSSM_EVENT_PORT_OFFLINE:
		bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_offline);
		bfa_fcxp_discard(ms->fcxp);
		break;

	default:
		bfa_assert(0);
	}
}