Example #1
/**
 * Use the Mailbox interface to send BFI_IOCFC_H2I_CFG_REQ
 */
static void
bfa_iocfc_send_cfg(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfg_req_s cfg_req;
	struct bfi_iocfc_cfg_s *cfg_info = iocfc->cfginfo;
	struct bfa_iocfc_cfg_s	*cfg = &iocfc->cfg;
	int		i;

	bfa_assert(cfg->fwcfg.num_cqs <= BFI_IOC_MAX_CQS);
	bfa_trc(bfa, cfg->fwcfg.num_cqs);

	bfa_iocfc_reset_queues(bfa);

	/**
	 * initialize IOC configuration info
	 */
	cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG;
	cfg_info->num_cqs = cfg->fwcfg.num_cqs;

	bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa);
	/**
	 * dma map REQ and RSP circular queues and shadow pointers
	 */
	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		bfa_dma_be_addr_set(cfg_info->req_cq_ba[i],
				    iocfc->req_cq_ba[i].pa);
		bfa_dma_be_addr_set(cfg_info->req_shadow_ci[i],
				    iocfc->req_cq_shadow_ci[i].pa);
		cfg_info->req_cq_elems[i] =
			bfa_os_htons(cfg->drvcfg.num_reqq_elems);

		bfa_dma_be_addr_set(cfg_info->rsp_cq_ba[i],
				    iocfc->rsp_cq_ba[i].pa);
		bfa_dma_be_addr_set(cfg_info->rsp_shadow_pi[i],
				    iocfc->rsp_cq_shadow_pi[i].pa);
		cfg_info->rsp_cq_elems[i] =
			bfa_os_htons(cfg->drvcfg.num_rspq_elems);
	}

	/**
	 * Enable interrupt coalescing if this is the driver init path
	 * and not the IOC disable/enable path.
	 */
	if (!iocfc->cfgdone)
		cfg_info->intr_attr.coalesce = BFA_TRUE;

	iocfc->cfgdone = BFA_FALSE;

	/**
	 * dma map IOC configuration itself
	 */
	bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ,
		    bfa_lpuid(bfa));
	bfa_dma_be_addr_set(cfg_req.ioc_cfg_dma_addr, iocfc->cfg_info.pa);

	bfa_ioc_mbox_send(&bfa->ioc, &cfg_req,
			  sizeof(struct bfi_iocfc_cfg_req_s));
}
Example #2
bfa_status_t
bfa_iocfc_israttr_set(struct bfa_s *bfa, struct bfa_iocfc_intr_attr_s *attr)
{
	struct bfa_iocfc_s		*iocfc = &bfa->iocfc;
	struct bfi_iocfc_set_intr_req_s *m;

	iocfc->cfginfo->intr_attr.coalesce = attr->coalesce;
	iocfc->cfginfo->intr_attr.delay = bfa_os_htons(attr->delay);
	iocfc->cfginfo->intr_attr.latency = bfa_os_htons(attr->latency);

	if (!bfa_iocfc_is_operational(bfa))
		return BFA_STATUS_OK;

	m = bfa_reqq_next(bfa, BFA_REQQ_IOC);
	if (!m)
		return BFA_STATUS_DEVBUSY;

	bfi_h2i_set(m->mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_SET_INTR_REQ,
		    bfa_lpuid(bfa));
	m->coalesce = iocfc->cfginfo->intr_attr.coalesce;
	m->delay    = iocfc->cfginfo->intr_attr.delay;
	m->latency  = iocfc->cfginfo->intr_attr.latency;

	bfa_trc(bfa, attr->delay);
	bfa_trc(bfa, attr->latency);

	bfa_reqq_produce(bfa, BFA_REQQ_IOC);
	return BFA_STATUS_OK;
}
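/*
 * Illustrative usage sketch (not part of the driver source): the caller
 * fills struct bfa_iocfc_intr_attr_s in host byte order --
 * bfa_iocfc_israttr_set() performs the htons conversions itself -- and
 * checks for BFA_STATUS_DEVBUSY, which means the IOC request queue had
 * no free entry and the call should be retried later. The delay and
 * latency values below are arbitrary placeholders, not recommended
 * defaults.
 */
static bfa_status_t
example_set_intr_coalescing(struct bfa_s *bfa)
{
	struct bfa_iocfc_intr_attr_s attr;

	attr.coalesce = BFA_TRUE;
	attr.delay    = 1125;		/* placeholder, host order */
	attr.latency  = 225;		/* placeholder, host order */

	return bfa_iocfc_israttr_set(bfa, &attr);
}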
Example #3
static void
bfa_iocfc_send_cfg(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfg_req_s cfg_req;
	struct bfi_iocfc_cfg_s *cfg_info = iocfc->cfginfo;
	struct bfa_iocfc_cfg_s  *cfg = &iocfc->cfg;
	int             i;

	bfa_assert(cfg->fwcfg.num_cqs <= BFI_IOC_MAX_CQS);
	bfa_trc(bfa, cfg->fwcfg.num_cqs);

	iocfc->cfgdone = BFA_FALSE;
	bfa_iocfc_reset_queues(bfa);

	/* initialize IOC configuration info */
	cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG;
	cfg_info->num_cqs = cfg->fwcfg.num_cqs;

	bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa);
	bfa_dma_be_addr_set(cfg_info->stats_addr, iocfc->stats_pa);

	/* dma map REQ and RSP circular queues and shadow pointers */
	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		bfa_dma_be_addr_set(cfg_info->req_cq_ba[i],
				       iocfc->req_cq_ba[i].pa);
		bfa_dma_be_addr_set(cfg_info->req_shadow_ci[i],
				       iocfc->req_cq_shadow_ci[i].pa);
		cfg_info->req_cq_elems[i] =
			bfa_os_htons(cfg->drvcfg.num_reqq_elems);

		bfa_dma_be_addr_set(cfg_info->rsp_cq_ba[i],
				       iocfc->rsp_cq_ba[i].pa);
		bfa_dma_be_addr_set(cfg_info->rsp_shadow_pi[i],
				       iocfc->rsp_cq_shadow_pi[i].pa);
		cfg_info->rsp_cq_elems[i] =
			bfa_os_htons(cfg->drvcfg.num_rspq_elems);
	}

	/* dma map IOC configuration itself */
	bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ,
			bfa_lpuid(bfa));
	bfa_dma_be_addr_set(cfg_req.ioc_cfg_dma_addr, iocfc->cfg_info.pa);

	bfa_ioc_mbox_send(&bfa->ioc, &cfg_req,
			sizeof(struct bfi_iocfc_cfg_req_s));
}
Example #4
static void
bfa_fcport_send_txcredit(void *port_cbarg)
{

	struct bfa_fcport_s *fcport = port_cbarg;
	struct bfi_fcport_set_svc_params_req_s *m;

	/**
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
	if (!m) {
		bfa_trc(fcport->bfa, fcport->cfg.tx_bbcredit);
		return;
	}

	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ,
			bfa_lpuid(fcport->bfa));
	m->tx_bbcredit = bfa_os_htons((u16) fcport->cfg.tx_bbcredit);

	/**
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
}
Example #5
u16
fc_tprlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
			u16 ox_id, int num_pages,
			enum fc_tprlo_type tprlo_type, u32 tpr_id)
{
	struct fc_tprlo_s     *tprlo = (struct fc_tprlo_s *) (fchs + 1);
	int             page;

	fc_els_req_build(fchs, d_id, s_id, ox_id);
	memset(tprlo, 0, (num_pages * 16) + 4);
	tprlo->command = FC_ELS_TPRLO;
	tprlo->page_len = 0x10;
	tprlo->payload_len = bfa_os_htons((num_pages * 16) + 4);

	for (page = 0; page < num_pages; page++) {
		tprlo->tprlo_params[page].type = FC_TYPE_FCP;
		tprlo->tprlo_params[page].opa_valid = 0;
		tprlo->tprlo_params[page].rpa_valid = 0;
		tprlo->tprlo_params[page].orig_process_assc = 0;
		tprlo->tprlo_params[page].resp_process_assc = 0;
		if (tprlo_type == FC_GLOBAL_LOGO) {
			tprlo->tprlo_params[page].global_process_logout = 1;
		} else if (tprlo_type == FC_TPR_LOGO) {
			tprlo->tprlo_params[page].tpo_nport_valid = 1;
			tprlo->tprlo_params[page].tpo_nport_id = (tpr_id);
		}
	}

	return bfa_os_ntohs(tprlo->payload_len);
}
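/*
 * Payload sizing note (illustration, not driver code): each TPRLO
 * parameter page is 16 bytes and the fixed command header is 4 bytes,
 * so for num_pages == 1 the payload length set above is
 * 1 * 16 + 4 = 20 (0x14) bytes, which is also the value
 * fc_tprlo_build() returns after the htons/ntohs round trip.
 */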
Example #6
/**
 * Send I/O abort request to firmware.
 */
static          bfa_boolean_t
bfa_ioim_send_abort(struct bfa_ioim_s *ioim)
{
	struct bfi_ioim_abort_req_s *m;
	enum bfi_ioim_h2i       msgop;

	/**
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(ioim->bfa, ioim->reqq);
	if (!m)
		return BFA_FALSE;

	/**
	 * build i/o request message next
	 */
	if (ioim->iosp->abort_explicit)
		msgop = BFI_IOIM_H2I_IOABORT_REQ;
	else
		msgop = BFI_IOIM_H2I_IOCLEANUP_REQ;

	bfi_h2i_set(m->mh, BFI_MC_IOIM, msgop, bfa_lpuid(ioim->bfa));
	m->io_tag    = bfa_os_htons(ioim->iotag);
	m->abort_tag = ++ioim->abort_tag;

	/**
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(ioim->bfa, ioim->reqq);
	return BFA_TRUE;
}
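/*
 * Generic skeleton of the request-queue send pattern shared by the
 * bfa_*_send_*() routines in these examples; it is a sketch, not a real
 * driver function. struct bfi_example_req_s, BFI_MC_EXAMPLE and
 * BFI_EXAMPLE_H2I_REQ are hypothetical stand-ins for a concrete BFI
 * message structure, message class and opcode.
 */
static bfa_boolean_t
example_send_req(struct bfa_s *bfa, u8 reqq)
{
	struct bfi_example_req_s *m;

	/* 1. check for room in the request queue */
	m = bfa_reqq_next(bfa, reqq);
	if (!m)
		return BFA_FALSE;	/* caller may bfa_reqq_wait() and retry */

	/* 2. build the host-to-firmware message in place */
	bfi_h2i_set(m->mh, BFI_MC_EXAMPLE, BFI_EXAMPLE_H2I_REQ,
		    bfa_lpuid(bfa));

	/* 3. hand the entry to firmware */
	bfa_reqq_produce(bfa, reqq);
	return BFA_TRUE;
}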
Example #7
void
fc_els_req_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
			u16 ox_id)
{
	bfa_os_memcpy(fchs, &fc_els_req_tmpl, sizeof(struct fchs_s));
	fchs->d_id = (d_id);
	fchs->s_id = (s_id);
	fchs->ox_id = bfa_os_htons(ox_id);
}
Example #8
static void
fc_gs_fdmi_cthdr_build(struct ct_hdr_s *cthdr, u32 s_id, u16 cmd_code)
{
	bfa_os_memset(cthdr, 0, sizeof(struct ct_hdr_s));
	cthdr->rev_id = CT_GS3_REVISION;
	cthdr->gs_type = CT_GSTYPE_MGMTSERVICE;
	cthdr->gs_sub_type = CT_GSSUBTYPE_HBA_MGMTSERVER;
	cthdr->cmd_rsp_code = bfa_os_htons(cmd_code);
}
Example #9
u16
fc_flogi_acc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id,
		   u16 ox_id, wwn_t port_name, wwn_t node_name,
		   u16 pdu_size, u16 local_bb_credits)
{
	u32        d_id = 0;

	bfa_os_memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s));
	fc_els_rsp_build(fchs, d_id, s_id, ox_id);

	flogi->els_cmd.els_code = FC_ELS_ACC;
	flogi->csp.rxsz = flogi->class3.rxsz = bfa_os_htons(pdu_size);
	flogi->port_name = port_name;
	flogi->node_name = node_name;

	flogi->csp.bbcred = bfa_os_htons(local_bb_credits);

	return sizeof(struct fc_logi_s);
}
Example #10
u16
fc_abts_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id)
{
	bfa_os_memcpy(fchs, &fc_bls_req_tmpl, sizeof(struct fchs_s));
	fchs->cat_info = FC_CAT_ABTS;
	fchs->d_id = (d_id);
	fchs->s_id = (s_id);
	fchs->ox_id = bfa_os_htons(ox_id);

	return sizeof(struct fchs_s);
}
Example #11
u16
fc_rpsc_acc_build(struct fchs_s *fchs, struct fc_rpsc_acc_s *rpsc_acc,
			u32 d_id, u32 s_id, u16 ox_id,
			struct fc_rpsc_speed_info_s *oper_speed)
{
	memset(rpsc_acc, 0, sizeof(struct fc_rpsc_acc_s));

	fc_els_rsp_build(fchs, d_id, s_id, ox_id);

	rpsc_acc->command = FC_ELS_ACC;
	rpsc_acc->num_entries = bfa_os_htons(1);

	rpsc_acc->speed_info[0].port_speed_cap =
		bfa_os_htons(oper_speed->port_speed_cap);

	rpsc_acc->speed_info[0].port_op_speed =
		bfa_os_htons(oper_speed->port_op_speed);

	return sizeof(struct fc_rpsc_acc_s);

}
Example #12
u16
fc_flogi_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id,
		u16 ox_id, wwn_t port_name, wwn_t node_name,
		u16 pdu_size, u8 set_npiv, u8 set_auth,
		u16 local_bb_credits)
{
	u32        d_id = bfa_os_hton3b(FC_FABRIC_PORT);
	u32 	*vvl_info;

	bfa_os_memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s));

	flogi->els_cmd.els_code = FC_ELS_FLOGI;
	fc_els_req_build(fchs, d_id, s_id, ox_id);

	flogi->csp.rxsz = flogi->class3.rxsz = bfa_os_htons(pdu_size);
	flogi->port_name = port_name;
	flogi->node_name = node_name;

	/*
	 * Set the NPIV Capability Bit (word 1, bit 31) of Common
	 * Service Parameters.
	 */
	flogi->csp.ciro = set_npiv;

	/* set AUTH capability */
	flogi->csp.security = set_auth;

	flogi->csp.bbcred = bfa_os_htons(local_bb_credits);

	/* Set brcd token in VVL */
	vvl_info = (u32 *)&flogi->vvl[0];

	/* set the flag to indicate the presence of VVL */
	flogi->csp.npiv_supp    = 1; /* @todo. field name is not correct */
	vvl_info[0]	= bfa_os_htonl(FLOGI_VVL_BRCD);

	return sizeof(struct fc_logi_s);
}
Example #13
u16
fc_rrq_build(struct fchs_s *fchs, struct fc_rrq_s *rrq, u32 d_id,
			 u32 s_id, u16 ox_id, u16 rrq_oxid)
{
	fc_els_req_build(fchs, d_id, s_id, ox_id);

	/*
	 * build rrq payload
	 */
	bfa_os_memcpy(rrq, &rrq_tmpl, sizeof(struct fc_rrq_s));
	rrq->s_id = (s_id);
	rrq->ox_id = bfa_os_htons(rrq_oxid);
	rrq->rx_id = FC_RXID_ANY;

	return sizeof(struct fc_rrq_s);
}
Example #14
u16
fc_fdisc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id,
		u16 ox_id, wwn_t port_name, wwn_t node_name,
		u16 pdu_size)
{
	u32        d_id = bfa_os_hton3b(FC_FABRIC_PORT);

	bfa_os_memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s));

	flogi->els_cmd.els_code = FC_ELS_FDISC;
	fc_els_req_build(fchs, d_id, s_id, ox_id);

	flogi->csp.rxsz = flogi->class3.rxsz = bfa_os_htons(pdu_size);
	flogi->port_name = port_name;
	flogi->node_name = node_name;

	return sizeof(struct fc_logi_s);
}
Example #15
u16
fc_pdisc_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
			u16 ox_id, wwn_t port_name, wwn_t node_name,
			u16 pdu_size)
{
	struct fc_logi_s     *pdisc = (struct fc_logi_s *) (fchs + 1);

	bfa_os_memcpy(pdisc, &plogi_tmpl, sizeof(struct fc_logi_s));

	pdisc->els_cmd.els_code = FC_ELS_PDISC;
	fc_els_req_build(fchs, d_id, s_id, ox_id);

	pdisc->csp.rxsz = pdisc->class3.rxsz = bfa_os_htons(pdu_size);
	pdisc->port_name = port_name;
	pdisc->node_name = node_name;

	return sizeof(struct fc_logi_s);
}
Example #16
u16
fc_rscn_build(struct fchs_s *fchs, struct fc_rscn_pl_s *rscn, u32 s_id,
			u16 ox_id)
{
	u32        d_id = bfa_os_hton3b(FC_FABRIC_CONTROLLER);
	u16        payldlen;

	fc_els_req_build(fchs, d_id, s_id, ox_id);
	rscn->command = FC_ELS_RSCN;
	rscn->pagelen = sizeof(rscn->event[0]);

	payldlen = sizeof(u32) + rscn->pagelen;
	rscn->payldlen = bfa_os_htons(payldlen);

	rscn->event[0].format = FC_RSCN_FORMAT_PORTID;
	rscn->event[0].portid = s_id;

	return sizeof(struct fc_rscn_pl_s);
}
Example #17
u16
fc_rpsc2_build(struct fchs_s *fchs, struct fc_rpsc2_cmd_s *rpsc2,
			u32 d_id, u32 s_id, u32 *pid_list,
			u16 npids)
{
	u32 dctlr_id = FC_DOMAIN_CTRLR(bfa_os_hton3b(d_id));
	int i = 0;

	fc_els_req_build(fchs, bfa_os_hton3b(dctlr_id), s_id, 0);

	memset(rpsc2, 0, sizeof(struct fc_rpsc2_cmd_s));

	rpsc2->els_cmd.els_code = FC_ELS_RPSC;
	rpsc2->token = bfa_os_htonl(FC_BRCD_TOKEN);
	rpsc2->num_pids  = bfa_os_htons(npids);
	for (i = 0; i < npids; i++)
		rpsc2->pid_list[i].pid = pid_list[i];

	return sizeof(struct fc_rpsc2_cmd_s) + ((npids - 1) *
			(sizeof(u32)));
}
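/*
 * Sizing note (illustration only): struct fc_rpsc2_cmd_s already carries
 * room for one pid_list entry, so each additional port ID adds one
 * 4-byte entry; e.g. npids == 4 returns sizeof(struct fc_rpsc2_cmd_s)
 * plus 3 * 4 = 12 bytes.
 */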
Example #18
static void
fc_gs_fchdr_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
			u32 ox_id)
{
	bfa_os_memset(fchs, 0, sizeof(struct fchs_s));

	fchs->routing = FC_RTG_FC4_DEV_DATA;
	fchs->cat_info = FC_CAT_UNSOLICIT_CTRL;
	fchs->type = FC_TYPE_SERVICES;
	fchs->f_ctl =
		bfa_os_hton3b(FCTL_SEQ_INI | FCTL_FS_EXCH | FCTL_END_SEQ |
			      FCTL_SI_XFER);
	fchs->rx_id = FC_RXID_ANY;
	fchs->d_id = (d_id);
	fchs->s_id = (s_id);
	fchs->ox_id = bfa_os_htons(ox_id);

	/**
	 * @todo no need to set ox_id for request
	 *       no need to set rx_id for response
	 */
}
Example #19
static          u16
fc_plogi_x_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
		 u16 ox_id, wwn_t port_name, wwn_t node_name,
		 u16 pdu_size, u8 els_code)
{
	struct fc_logi_s     *plogi = (struct fc_logi_s *) (pld);

	bfa_os_memcpy(plogi, &plogi_tmpl, sizeof(struct fc_logi_s));

	plogi->els_cmd.els_code = els_code;
	if (els_code == FC_ELS_PLOGI)
		fc_els_req_build(fchs, d_id, s_id, ox_id);
	else
		fc_els_rsp_build(fchs, d_id, s_id, ox_id);

	plogi->csp.rxsz = plogi->class3.rxsz = bfa_os_htons(pdu_size);

	bfa_os_memcpy(&plogi->port_name, &port_name, sizeof(wwn_t));
	bfa_os_memcpy(&plogi->node_name, &node_name, sizeof(wwn_t));

	return sizeof(struct fc_logi_s);
}
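/*
 * Sketch: fc_plogi_x_build() is the shared worker; request and accept
 * builders are expected to be thin wrappers that differ only in the ELS
 * code they pass (FC_ELS_PLOGI selects the request header build, any
 * other code the response build). The wrapper below is illustrative and
 * deliberately named *_example; it is not the driver's exact function.
 */
u16
fc_plogi_build_example(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
		       u16 ox_id, wwn_t port_name, wwn_t node_name,
		       u16 pdu_size)
{
	return fc_plogi_x_build(fchs, pld, d_id, s_id, ox_id, port_name,
				node_name, pdu_size, FC_ELS_PLOGI);
}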
Example #20
/**
 * Send port enable message to firmware.
 */
static          bfa_boolean_t
bfa_fcport_send_enable(struct bfa_fcport_s *fcport)
{
	struct bfi_fcport_enable_req_s *m;

	/**
	 * Increment message tag before queue check, so that responses to old
	 * requests are discarded.
	 */
	fcport->msgtag++;

	/**
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
	if (!m) {
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
							&fcport->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_ENABLE_REQ,
				bfa_lpuid(fcport->bfa));
	m->nwwn = fcport->nwwn;
	m->pwwn = fcport->pwwn;
	m->port_cfg = fcport->cfg;
	m->msgtag = fcport->msgtag;
	m->port_cfg.maxfrsize = bfa_os_htons(fcport->cfg.maxfrsize);
	bfa_dma_be_addr_set(m->stats_dma_addr, fcport->stats_pa);
	bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_lo);
	bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_hi);

	/**
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
	return BFA_TRUE;
}
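/*
 * Sketch of the matching completion side (hypothetical, not the driver's
 * actual handler): because bfa_fcport_send_enable() bumps fcport->msgtag
 * before every request, a response that carries an older tag can be
 * recognized as stale and dropped. struct bfi_fcport_enable_rsp_s and
 * its msgtag field are assumed here for illustration.
 */
static void
example_fcport_enable_rsp(struct bfa_fcport_s *fcport,
			  struct bfi_fcport_enable_rsp_s *rsp)
{
	if (rsp->msgtag != fcport->msgtag)
		return;		/* response to an old request: ignore */

	/* ... process the response for the current request ... */
}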
Example #21
u16
fc_prlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
	      int num_pages)
{
	struct fc_prlo_s      *prlo = (struct fc_prlo_s *) (fchs + 1);
	int             page;

	fc_els_req_build(fchs, d_id, s_id, ox_id);
	memset(prlo, 0, (num_pages * 16) + 4);
	prlo->command = FC_ELS_PRLO;
	prlo->page_len = 0x10;
	prlo->payload_len = bfa_os_htons((num_pages * 16) + 4);

	for (page = 0; page < num_pages; page++) {
		prlo->prlo_params[page].type = FC_TYPE_FCP;
		prlo->prlo_params[page].opa_valid = 0;
		prlo->prlo_params[page].rpa_valid = 0;
		prlo->prlo_params[page].orig_process_assc = 0;
		prlo->prlo_params[page].resp_process_assc = 0;
	}

	return bfa_os_ntohs(prlo->payload_len);
}
Example #22
u16
fc_prlo_acc_build(struct fchs_s *fchs, struct fc_prlo_acc_s *prlo_acc,
			u32 d_id, u32 s_id, u16 ox_id,
			int num_pages)
{
	int             page;

	fc_els_rsp_build(fchs, d_id, s_id, ox_id);

	memset(prlo_acc, 0, (num_pages * 16) + 4);
	prlo_acc->command = FC_ELS_ACC;
	prlo_acc->page_len = 0x10;
	prlo_acc->payload_len = bfa_os_htons((num_pages * 16) + 4);

	for (page = 0; page < num_pages; page++) {
		prlo_acc->prlo_acc_params[page].opa_valid = 0;
		prlo_acc->prlo_acc_params[page].rpa_valid = 0;
		prlo_acc->prlo_acc_params[page].fc4type_csp = FC_TYPE_FCP;
		prlo_acc->prlo_acc_params[page].orig_process_assc = 0;
		prlo_acc->prlo_acc_params[page].resp_process_assc = 0;
	}

	return bfa_os_ntohs(prlo_acc->payload_len);
}
Example #23
void
fcbuild_init(void)
{
	/*
	 * fc_els_req_tmpl
	 */
	fc_els_req_tmpl.routing = FC_RTG_EXT_LINK;
	fc_els_req_tmpl.cat_info = FC_CAT_LD_REQUEST;
	fc_els_req_tmpl.type = FC_TYPE_ELS;
	fc_els_req_tmpl.f_ctl =
		bfa_os_hton3b(FCTL_SEQ_INI | FCTL_FS_EXCH | FCTL_END_SEQ |
			      FCTL_SI_XFER);
	fc_els_req_tmpl.rx_id = FC_RXID_ANY;

	/*
	 * fc_els_rsp_tmpl
	 */
	fc_els_rsp_tmpl.routing = FC_RTG_EXT_LINK;
	fc_els_rsp_tmpl.cat_info = FC_CAT_LD_REPLY;
	fc_els_rsp_tmpl.type = FC_TYPE_ELS;
	fc_els_rsp_tmpl.f_ctl =
		bfa_os_hton3b(FCTL_EC_RESP | FCTL_SEQ_INI | FCTL_LS_EXCH |
			      FCTL_END_SEQ | FCTL_SI_XFER);
	fc_els_rsp_tmpl.rx_id = FC_RXID_ANY;

	/*
	 * fc_bls_req_tmpl
	 */
	fc_bls_req_tmpl.routing = FC_RTG_BASIC_LINK;
	fc_bls_req_tmpl.type = FC_TYPE_BLS;
	fc_bls_req_tmpl.f_ctl = bfa_os_hton3b(FCTL_END_SEQ | FCTL_SI_XFER);
	fc_bls_req_tmpl.rx_id = FC_RXID_ANY;

	/*
	 * fc_bls_rsp_tmpl
	 */
	fc_bls_rsp_tmpl.routing = FC_RTG_BASIC_LINK;
	fc_bls_rsp_tmpl.cat_info = FC_CAT_BA_ACC;
	fc_bls_rsp_tmpl.type = FC_TYPE_BLS;
	fc_bls_rsp_tmpl.f_ctl =
		bfa_os_hton3b(FCTL_EC_RESP | FCTL_SEQ_INI | FCTL_LS_EXCH |
			      FCTL_END_SEQ | FCTL_SI_XFER);
	fc_bls_rsp_tmpl.rx_id = FC_RXID_ANY;

	/*
	 * ba_acc_tmpl
	 */
	ba_acc_tmpl.seq_id_valid = 0;
	ba_acc_tmpl.low_seq_cnt = 0;
	ba_acc_tmpl.high_seq_cnt = 0xFFFF;

	/*
	 * plogi_tmpl
	 */
	plogi_tmpl.csp.verhi = FC_PH_VER_PH_3;
	plogi_tmpl.csp.verlo = FC_PH_VER_4_3;
	plogi_tmpl.csp.bbcred = bfa_os_htons(0x0004);
	plogi_tmpl.csp.ciro = 0x1;
	plogi_tmpl.csp.cisc = 0x0;
	plogi_tmpl.csp.altbbcred = 0x0;
	plogi_tmpl.csp.conseq = bfa_os_htons(0x00FF);
	plogi_tmpl.csp.ro_bitmap = bfa_os_htons(0x0002);
	plogi_tmpl.csp.e_d_tov = bfa_os_htonl(2000);

	plogi_tmpl.class3.class_valid = 1;
	plogi_tmpl.class3.sequential = 1;
	plogi_tmpl.class3.conseq = 0xFF;
	plogi_tmpl.class3.ospx = 1;

	/*
	 * prli_tmpl
	 */
	prli_tmpl.command = FC_ELS_PRLI;
	prli_tmpl.pglen = 0x10;
	prli_tmpl.pagebytes = bfa_os_htons(0x0014);
	prli_tmpl.parampage.type = FC_TYPE_FCP;
	prli_tmpl.parampage.imagepair = 1;
	prli_tmpl.parampage.servparams.rxrdisab = 1;

	/*
	 * rrq_tmpl
	 */
	rrq_tmpl.els_cmd.els_code = FC_ELS_RRQ;

	/*
	 * fcp_fchs_tmpl
	 */
	fcp_fchs_tmpl.routing = FC_RTG_FC4_DEV_DATA;
	fcp_fchs_tmpl.cat_info = FC_CAT_UNSOLICIT_CMD;
	fcp_fchs_tmpl.type = FC_TYPE_FCP;
	fcp_fchs_tmpl.f_ctl =
		bfa_os_hton3b(FCTL_FS_EXCH | FCTL_END_SEQ | FCTL_SI_XFER);
	fcp_fchs_tmpl.seq_id = 1;
	fcp_fchs_tmpl.rx_id = FC_RXID_ANY;
}
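/*
 * Usage sketch (not driver code): the templates above are file-scope
 * state, so fcbuild_init() is expected to run exactly once at driver
 * start-up, after which the fc_*_build() helpers can fill frames. The
 * parameter values below (ox_id 0, 2048-byte pdu_size, zero bb_credit,
 * NPIV and AUTH disabled) are placeholders for illustration only.
 */
static u16
example_build_flogi(struct fchs_s *fchs, struct fc_logi_s *flogi,
		    u32 s_id, wwn_t pwwn, wwn_t nwwn)
{
	/* fcbuild_init() must already have been called once */
	return fc_flogi_build(fchs, flogi, s_id, 0 /* ox_id */, pwwn, nwwn,
			      2048 /* pdu_size */, 0 /* set_npiv */,
			      0 /* set_auth */, 0 /* local_bb_credits */);
}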
Example #24
/**
 * Send I/O request to firmware.
 */
static          bfa_boolean_t
bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
{
	struct bfa_itnim_s *itnim = ioim->itnim;
	struct bfi_ioim_req_s *m;
	static struct fcp_cmnd_s cmnd_z0 = { 0 };
	struct bfi_sge_s      *sge;
	u32        pgdlen = 0;
	u64 addr;
	struct scatterlist *sg;
	struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;

	/**
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(ioim->bfa, ioim->reqq);
	if (!m) {
		bfa_reqq_wait(ioim->bfa, ioim->reqq,
				  &ioim->iosp->reqq_wait);
		return BFA_FALSE;
	}

	/**
	 * build i/o request message next
	 */
	m->io_tag = bfa_os_htons(ioim->iotag);
	m->rport_hdl = ioim->itnim->rport->fw_handle;
	m->io_timeout = bfa_cb_ioim_get_timeout(ioim->dio);

	/**
	 * build inline IO SG element here
	 */
	sge = &m->sges[0];
	if (ioim->nsges) {
		sg = (struct scatterlist *)scsi_sglist(cmnd);
		addr = bfa_os_sgaddr(sg_dma_address(sg));
		sge->sga = *(union bfi_addr_u *) &addr;
		pgdlen = sg_dma_len(sg);
		sge->sg_len = pgdlen;
		sge->flags = (ioim->nsges > BFI_SGE_INLINE) ?
					BFI_SGE_DATA_CPL : BFI_SGE_DATA_LAST;
		bfa_sge_to_be(sge);
		sge++;
	}

	if (ioim->nsges > BFI_SGE_INLINE) {
		sge->sga = ioim->sgpg->sgpg_pa;
	} else {
		sge->sga.a32.addr_lo = 0;
		sge->sga.a32.addr_hi = 0;
	}
	sge->sg_len = pgdlen;
	sge->flags = BFI_SGE_PGDLEN;
	bfa_sge_to_be(sge);

	/**
	 * set up I/O command parameters
	 */
	bfa_os_assign(m->cmnd, cmnd_z0);
	m->cmnd.lun = bfa_cb_ioim_get_lun(ioim->dio);
	m->cmnd.iodir = bfa_cb_ioim_get_iodir(ioim->dio);
	bfa_os_assign(m->cmnd.cdb,
			*(struct scsi_cdb_s *)bfa_cb_ioim_get_cdb(ioim->dio));
	m->cmnd.fcp_dl = bfa_os_htonl(bfa_cb_ioim_get_size(ioim->dio));

	/**
	 * set up I/O message header
	 */
	switch (m->cmnd.iodir) {
	case FCP_IODIR_READ:
		bfi_h2i_set(m->mh, BFI_MC_IOIM_READ, 0, bfa_lpuid(ioim->bfa));
		bfa_stats(itnim, input_reqs);
		break;
	case FCP_IODIR_WRITE:
		bfi_h2i_set(m->mh, BFI_MC_IOIM_WRITE, 0, bfa_lpuid(ioim->bfa));
		bfa_stats(itnim, output_reqs);
		break;
	case FCP_IODIR_RW:
		bfa_stats(itnim, input_reqs);
		bfa_stats(itnim, output_reqs);
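		/* read/write: fall through to the generic I/O opcode */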
	default:
		bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa));
	}
	if (itnim->seq_rec ||
	    (bfa_cb_ioim_get_size(ioim->dio) & (sizeof(u32) - 1)))
		bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa));

#ifdef IOIM_ADVANCED
	m->cmnd.crn = bfa_cb_ioim_get_crn(ioim->dio);
	m->cmnd.priority = bfa_cb_ioim_get_priority(ioim->dio);
	m->cmnd.taskattr = bfa_cb_ioim_get_taskattr(ioim->dio);

	/**
	 * Handle large CDB (>16 bytes).
	 */
	m->cmnd.addl_cdb_len = (bfa_cb_ioim_get_cdblen(ioim->dio) -
					FCP_CMND_CDB_LEN) / sizeof(u32);
	if (m->cmnd.addl_cdb_len) {
		bfa_os_memcpy(&m->cmnd.cdb + 1, (struct scsi_cdb_s *)
				bfa_cb_ioim_get_cdb(ioim->dio) + 1,
				m->cmnd.addl_cdb_len * sizeof(u32));
		fcp_cmnd_fcpdl(&m->cmnd) =
				bfa_os_htonl(bfa_cb_ioim_get_size(ioim->dio));
	}
#endif

	/**
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(ioim->bfa, ioim->reqq);
	return BFA_TRUE;
}
Example #25
/**
 * This routine builds the Port Attribute Block used in the RPA and RPRT commands.
 */
static          u16
bfa_fcs_port_fdmi_build_portattr_block(struct bfa_fcs_port_fdmi_s *fdmi,
				       u8 *pyld)
{
	struct bfa_fcs_fdmi_port_attr_s fcs_port_attr;
	struct fdmi_port_attr_s *port_attrib = (struct fdmi_port_attr_s *) pyld;
	struct fdmi_attr_s    *attr;
	u8        *curr_ptr;
	u16        len;
	u8         count = 0;

	/*
	 * get port attributes
	 */
	bfa_fcs_fdmi_get_portattr(fdmi, &fcs_port_attr);

	len = sizeof(port_attrib->attr_count);

	/*
	 * fill out the individual entries
	 */
	curr_ptr = (u8 *) &port_attrib->port_attr;

	/*
	 * FC4 Types
	 */
	attr = (struct fdmi_attr_s *) curr_ptr;
	attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_FC4_TYPES);
	attr->len = sizeof(fcs_port_attr.supp_fc4_types);
	memcpy(attr->value, fcs_port_attr.supp_fc4_types, attr->len);
	curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
	len += attr->len;
	++count;
	attr->len =
		bfa_os_htons(attr->len + sizeof(attr->type) +
			     sizeof(attr->len));

	/*
	 * Supported Speed
	 */
	attr = (struct fdmi_attr_s *) curr_ptr;
	attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_SUPP_SPEED);
	attr->len = sizeof(fcs_port_attr.supp_speed);
	memcpy(attr->value, &fcs_port_attr.supp_speed, attr->len);
	curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
	len += attr->len;
	++count;
	attr->len =
		bfa_os_htons(attr->len + sizeof(attr->type) +
			     sizeof(attr->len));

	/*
	 * current Port Speed
	 */
	attr = (struct fdmi_attr_s *) curr_ptr;
	attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_PORT_SPEED);
	attr->len = sizeof(fcs_port_attr.curr_speed);
	memcpy(attr->value, &fcs_port_attr.curr_speed, attr->len);
	curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
	len += attr->len;
	++count;
	attr->len =
		bfa_os_htons(attr->len + sizeof(attr->type) +
			     sizeof(attr->len));

	/*
	 * max frame size
	 */
	attr = (struct fdmi_attr_s *) curr_ptr;
	attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_FRAME_SIZE);
	attr->len = sizeof(fcs_port_attr.max_frm_size);
	memcpy(attr->value, &fcs_port_attr.max_frm_size, attr->len);
	curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
	len += attr->len;
	++count;
	attr->len =
		bfa_os_htons(attr->len + sizeof(attr->type) +
			     sizeof(attr->len));

	/*
	 * OS Device Name
	 */
	if (fcs_port_attr.os_device_name[0] != '\0') {
		attr = (struct fdmi_attr_s *) curr_ptr;
		attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_DEV_NAME);
		attr->len = (u16) strlen(fcs_port_attr.os_device_name);
		memcpy(attr->value, fcs_port_attr.os_device_name, attr->len);
		/* variable fields need to be 4 byte aligned */
		attr->len = fc_roundup(attr->len, sizeof(u32));
		curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
		len += attr->len;
		++count;
		attr->len =
			bfa_os_htons(attr->len + sizeof(attr->type) +
				     sizeof(attr->len));

	}
	/*
	 * Host Name
	 */
	if (fcs_port_attr.host_name[0] != '\0') {
		attr = (struct fdmi_attr_s *) curr_ptr;
		attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_HOST_NAME);
		attr->len = (u16) strlen(fcs_port_attr.host_name);
		memcpy(attr->value, fcs_port_attr.host_name, attr->len);
		/* variable fields need to be 4 byte aligned */
		attr->len = fc_roundup(attr->len, sizeof(u32));
		curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
		len += attr->len;
		++count;
		attr->len =
			bfa_os_htons(attr->len + sizeof(attr->type) +
				     sizeof(attr->len));

	}

	/*
	 * Update size of payload
	 */
	port_attrib->attr_count = bfa_os_htonl(count);
	len += ((sizeof(attr->type) + sizeof(attr->len)) * count);
	return len;
}
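/*
 * Sketch only: the per-attribute packing repeated in the two FDMI
 * builders could be factored into one helper. fdmi_pack_string_attr()
 * below is hypothetical -- it does not exist in the driver -- but it
 * follows the exact sequence used above: fill type and value, pad string
 * values to a 4-byte boundary, advance the cursor past the whole TLV and
 * store the on-wire length (value plus the 2-byte type and 2-byte len
 * fields). For example, a 7-character string pads to 8 value bytes and
 * attr->len becomes htons(8 + 4) = htons(12). The helper returns the
 * padded value length so the caller can keep the same len/count
 * bookkeeping as the open-coded version.
 */
static u16
fdmi_pack_string_attr(u8 **curr_ptr, u16 type, const char *str)
{
	struct fdmi_attr_s *attr = (struct fdmi_attr_s *) *curr_ptr;
	u16 vlen = (u16) strlen(str);

	attr->type = bfa_os_htons(type);
	memcpy(attr->value, str, vlen);
	vlen = fc_roundup(vlen, sizeof(u32));	/* 4-byte align the value */
	*curr_ptr += sizeof(attr->type) + sizeof(attr->len) + vlen;
	attr->len = bfa_os_htons(vlen + sizeof(attr->type) +
				 sizeof(attr->len));

	return vlen;
}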
Example #26
static          u16
bfa_fcs_port_fdmi_build_rhba_pyld(struct bfa_fcs_port_fdmi_s *fdmi,
				  u8 *pyld)
{
	struct bfa_fcs_port_s *port = fdmi->ms->port;
	struct bfa_fcs_fdmi_hba_attr_s hba_attr;	/* @todo */
	struct bfa_fcs_fdmi_hba_attr_s *fcs_hba_attr = &hba_attr; /* @todo */
	struct fdmi_rhba_s    *rhba = (struct fdmi_rhba_s *) pyld;
	struct fdmi_attr_s    *attr;
	u8        *curr_ptr;
	u16        len, count;

	/*
	 * get hba attributes
	 */
	bfa_fcs_fdmi_get_hbaattr(fdmi, fcs_hba_attr);

	rhba->hba_id = bfa_fcs_port_get_pwwn(port);
	rhba->port_list.num_ports = bfa_os_htonl(1);
	rhba->port_list.port_entry = bfa_fcs_port_get_pwwn(port);

	len = sizeof(rhba->hba_id) + sizeof(rhba->port_list);

	count = 0;
	len += sizeof(rhba->hba_attr_blk.attr_count);

	/*
	 * fill out the individual entries of the HBA attribute block
	 */
	curr_ptr = (u8 *) &rhba->hba_attr_blk.hba_attr;

	/*
	 * Node Name
	 */
	attr = (struct fdmi_attr_s *) curr_ptr;
	attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_NODENAME);
	attr->len = sizeof(wwn_t);
	memcpy(attr->value, &bfa_fcs_port_get_nwwn(port), attr->len);
	curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
	len += attr->len;
	count++;
	attr->len =
		bfa_os_htons(attr->len + sizeof(attr->type) +
			     sizeof(attr->len));

	/*
	 * Manufacturer
	 */
	attr = (struct fdmi_attr_s *) curr_ptr;
	attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_MANUFACTURER);
	attr->len = (u16) strlen(fcs_hba_attr->manufacturer);
	memcpy(attr->value, fcs_hba_attr->manufacturer, attr->len);
	/* variable fields need to be 4 byte aligned */
	attr->len = fc_roundup(attr->len, sizeof(u32));
	curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
	len += attr->len;
	count++;
	attr->len =
		bfa_os_htons(attr->len + sizeof(attr->type) +
			     sizeof(attr->len));

	/*
	 * Serial Number
	 */
	attr = (struct fdmi_attr_s *) curr_ptr;
	attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_SERIALNUM);
	attr->len = (u16) strlen(fcs_hba_attr->serial_num);
	memcpy(attr->value, fcs_hba_attr->serial_num, attr->len);
	/* variable fields need to be 4 byte aligned */
	attr->len = fc_roundup(attr->len, sizeof(u32));
	curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
	len += attr->len;
	count++;
	attr->len =
		bfa_os_htons(attr->len + sizeof(attr->type) +
			     sizeof(attr->len));

	/*
	 * Model
	 */
	attr = (struct fdmi_attr_s *) curr_ptr;
	attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_MODEL);
	attr->len = (u16) strlen(fcs_hba_attr->model);
	memcpy(attr->value, fcs_hba_attr->model, attr->len);
	/* variable fields need to be 4 byte aligned */
	attr->len = fc_roundup(attr->len, sizeof(u32));
	curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
	len += attr->len;
	count++;
	attr->len =
		bfa_os_htons(attr->len + sizeof(attr->type) +
			     sizeof(attr->len));

	/*
	 * Model Desc
	 */
	attr = (struct fdmi_attr_s *) curr_ptr;
	attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_MODEL_DESC);
	attr->len = (u16) strlen(fcs_hba_attr->model_desc);
	memcpy(attr->value, fcs_hba_attr->model_desc, attr->len);
	/* variable fields need to be 4 byte aligned */
	attr->len = fc_roundup(attr->len, sizeof(u32));
	curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
	len += attr->len;
	count++;
	attr->len =
		bfa_os_htons(attr->len + sizeof(attr->type) +
			     sizeof(attr->len));

	/*
	 * H/W Version
	 */
	if (fcs_hba_attr->hw_version[0] != '\0') {
		attr = (struct fdmi_attr_s *) curr_ptr;
		attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_HW_VERSION);
		attr->len = (u16) strlen(fcs_hba_attr->hw_version);
		memcpy(attr->value, fcs_hba_attr->hw_version, attr->len);
		/* variable fields need to be 4 byte aligned */
		attr->len = fc_roundup(attr->len, sizeof(u32));
		curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
		len += attr->len;
		count++;
		attr->len =
			bfa_os_htons(attr->len + sizeof(attr->type) +
				     sizeof(attr->len));
	}

	/*
	 * Driver Version
	 */
	attr = (struct fdmi_attr_s *) curr_ptr;
	attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_DRIVER_VERSION);
	attr->len = (u16) strlen(fcs_hba_attr->driver_version);
	memcpy(attr->value, fcs_hba_attr->driver_version, attr->len);
	/* variable fields need to be 4 byte aligned */
	attr->len = fc_roundup(attr->len, sizeof(u32));
	curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
	len += attr->len;
	count++;
	attr->len =
		bfa_os_htons(attr->len + sizeof(attr->type) +
			     sizeof(attr->len));

	/*
	 * Option Rom Version
	 */
	if (fcs_hba_attr->option_rom_ver[0] != '\0') {
		attr = (struct fdmi_attr_s *) curr_ptr;
		attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_ROM_VERSION);
		attr->len = (u16) strlen(fcs_hba_attr->option_rom_ver);
		memcpy(attr->value, fcs_hba_attr->option_rom_ver, attr->len);
		/* variable fields need to be 4 byte aligned */
		attr->len = fc_roundup(attr->len, sizeof(u32));
		curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
		len += attr->len;
		count++;
		attr->len =
			bfa_os_htons(attr->len + sizeof(attr->type) +
				     sizeof(attr->len));
	}

	/*
	 * f/w Version = driver version
	 */
	attr = (struct fdmi_attr_s *) curr_ptr;
	attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_FW_VERSION);
	attr->len = (u16) strlen(fcs_hba_attr->driver_version);
	memcpy(attr->value, fcs_hba_attr->driver_version, attr->len);
	/* variable fields need to be 4 byte aligned */
	attr->len = fc_roundup(attr->len, sizeof(u32));
	curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
	len += attr->len;
	count++;
	attr->len =
		bfa_os_htons(attr->len + sizeof(attr->type) +
			     sizeof(attr->len));

	/*
	 * OS Name
	 */
	if (fcs_hba_attr->os_name[0] != '\0') {
		attr = (struct fdmi_attr_s *) curr_ptr;
		attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_OS_NAME);
		attr->len = (u16) strlen(fcs_hba_attr->os_name);
		memcpy(attr->value, fcs_hba_attr->os_name, attr->len);
		/* variable fields need to be 4 byte aligned */
		attr->len = fc_roundup(attr->len, sizeof(u32));
		curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
		len += attr->len;
		count++;
		attr->len =
			bfa_os_htons(attr->len + sizeof(attr->type) +
				     sizeof(attr->len));
	}

	/*
	 * MAX_CT_PAYLOAD
	 */
	attr = (struct fdmi_attr_s *) curr_ptr;
	attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_MAX_CT);
	attr->len = sizeof(fcs_hba_attr->max_ct_pyld);
	memcpy(attr->value, &fcs_hba_attr->max_ct_pyld, attr->len);
	len += attr->len;
	count++;
	attr->len =
		bfa_os_htons(attr->len + sizeof(attr->type) +
			     sizeof(attr->len));

	/*
	 * Update size of payload
	 */
	len += ((sizeof(attr->type) + sizeof(attr->len)) * count);

	rhba->hba_attr_blk.attr_count = bfa_os_htonl(count);
	return len;
}