Example #1
0
static void
bfa_fcport_send_txcredit(void *port_cbarg)
{
	struct bfa_fcport_s *fcport = port_cbarg;
	struct bfi_fcport_set_svc_params_req_s *m;

	/**
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
	if (!m) {
		bfa_trc(fcport->bfa, fcport->cfg.tx_bbcredit);
		return;
	}

	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ,
			bfa_lpuid(fcport->bfa));
	m->tx_bbcredit = bfa_os_htons((u16) fcport->cfg.tx_bbcredit);

	/**
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
}
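Note: this sender just traces and drops the update when the request
queue is full. Below is a hedged sketch of a retrying variant, reusing
the bfa_reqq_winit()/bfa_reqq_wait() pattern from Examples #11, #21 and
#22 (the reqq_wait field is borrowed from Example #11; the function is
illustrative, not the driver's code):

static void
bfa_fcport_send_txcredit_retry(void *port_cbarg)
{
	struct bfa_fcport_s *fcport = port_cbarg;
	struct bfi_fcport_set_svc_params_req_s *m;

	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
	if (!m) {
		/* queue full: re-arm this function as the wait callback */
		bfa_reqq_winit(&fcport->reqq_wait,
			       bfa_fcport_send_txcredit_retry, fcport);
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
			      &fcport->reqq_wait);
		return;
	}

	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ,
			bfa_lpuid(fcport->bfa));
	m->tx_bbcredit = bfa_os_htons((u16) fcport->cfg.tx_bbcredit);
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
}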
Example #2
0
/**
 * bfa_port_get_stats()
 *
 *   Send the request to the f/w to fetch Port statistics.
 *
 * @param[in] port   Pointer to the Port module data structure.
 * @param[out] stats Buffer that receives the statistics on completion.
 * @param[in] cbfn   Completion callback.
 * @param[in] cbarg  Callback argument.
 *
 * @return Status
 */
bfa_status_t
bfa_port_get_stats(struct bfa_port_s *port, union bfa_pport_stats_u *stats,
		   bfa_port_stats_cbfn_t cbfn, void *cbarg)
{
	struct bfi_port_get_stats_req_s *m;

	if (!bfa_ioc_is_operational(port->ioc)) {
		bfa_trc(port, BFA_STATUS_IOC_FAILURE);
		return BFA_STATUS_IOC_FAILURE;
	}

	if (port->stats_busy) {
		bfa_trc(port, BFA_STATUS_DEVBUSY);
		return BFA_STATUS_DEVBUSY;
	}

	m = (struct bfi_port_get_stats_req_s *)port->stats_mb.msg;

	port->stats = stats;
	port->stats_cbfn = cbfn;
	port->stats_cbarg = cbarg;
	port->stats_busy = BFA_TRUE;
	bfa_dma_be_addr_set(m->dma_addr, port->stats_dma.pa);

	bfi_h2i_set(m->mh, BFI_MC_PORT, BFI_PORT_H2I_GET_STATS_REQ,
		    bfa_ioc_portid(port->ioc));
	bfa_ioc_mbox_queue(port->ioc, &port->stats_mb);

	return BFA_STATUS_OK;
}
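A hypothetical caller sketch for the request above; the (cbarg, status)
callback signature is an assumption about bfa_port_stats_cbfn_t, and
drv_ctx is an invented driver context:

struct drv_ctx {
	union bfa_pport_stats_u stats;
	bfa_boolean_t		have_stats;
};

static void
drv_stats_cbfn(void *cbarg, bfa_status_t status)
{
	struct drv_ctx *ctx = cbarg;

	/* on success, the stats buffer passed below has been filled */
	ctx->have_stats = (status == BFA_STATUS_OK) ? BFA_TRUE : BFA_FALSE;
}

static bfa_status_t
drv_fetch_stats(struct bfa_port_s *port, struct drv_ctx *ctx)
{
	/* BFA_STATUS_DEVBUSY means an earlier request is still in
	 * flight; the caller is expected to retry later */
	return bfa_port_get_stats(port, &ctx->stats, drv_stats_cbfn, ctx);
}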
Example #3
0
/**
 * bfa_port_clear_stats()
 *
 *   Send the request to the f/w to clear Port statistics.
 *
 * @param[in] port   Pointer to the Port module data structure.
 * @param[in] cbfn   Completion callback.
 * @param[in] cbarg  Callback argument.
 *
 * @return Status
 */
bfa_status_t
bfa_port_clear_stats(struct bfa_port_s *port, bfa_port_stats_cbfn_t cbfn,
		     void *cbarg)
{
	struct bfi_port_generic_req_s *m;

	if (!bfa_ioc_is_operational(port->ioc)) {
		bfa_trc(port, BFA_STATUS_IOC_FAILURE);
		return BFA_STATUS_IOC_FAILURE;
	}

	if (port->stats_busy) {
		bfa_trc(port, BFA_STATUS_DEVBUSY);
		return BFA_STATUS_DEVBUSY;
	}

	m = (struct bfi_port_generic_req_s *)port->stats_mb.msg;

	port->stats_cbfn = cbfn;
	port->stats_cbarg = cbarg;
	port->stats_busy = BFA_TRUE;

	bfi_h2i_set(m->mh, BFI_MC_PORT, BFI_PORT_H2I_CLEAR_STATS_REQ,
		    bfa_ioc_portid(port->ioc));
	bfa_ioc_mbox_queue(port->ioc, &port->stats_mb);

	return BFA_STATUS_OK;
}
Example #4
0
bfa_status_t
bfa_cee_reset_stats(struct bfa_cee_s *cee,
		    bfa_cee_reset_stats_cbfn_t cbfn, void *cbarg)
{
	struct bfi_cee_reset_stats_s *cmd;

	WARN_ON((cee == NULL) || (cee->ioc == NULL));
	if (!bfa_ioc_is_operational(cee->ioc)) {
		bfa_trc(cee, 0);
		return BFA_STATUS_IOC_FAILURE;
	}
	if (cee->reset_stats_pending == BFA_TRUE) {
		bfa_trc(cee, 0);
		return BFA_STATUS_DEVBUSY;
	}
	cee->reset_stats_pending = BFA_TRUE;
	cmd = (struct bfi_cee_reset_stats_s *) cee->reset_stats_mb.msg;
	cee->cbfn.reset_stats_cbfn = cbfn;
	cee->cbfn.reset_stats_cbarg = cbarg;
	bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_RESET_STATS,
		bfa_ioc_portid(cee->ioc));
	bfa_ioc_mbox_queue(cee->ioc, &cee->reset_stats_mb);

	return BFA_STATUS_OK;
}
Example #5
0
/**
 * bfa_port_disable()
 *
 *   Send the Port disable request to the f/w.
 *
 * @param[in] port   Pointer to the Port module data structure.
 * @param[in] cbfn   Completion callback.
 * @param[in] cbarg  Callback argument.
 *
 * @return Status
 */
bfa_status_t
bfa_port_disable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn,
		 void *cbarg)
{
	struct bfi_port_generic_req_s *m;

	/** todo Not implemented; assert so that any caller fails loudly */
	bfa_assert(0);

	if (!bfa_ioc_is_operational(port->ioc)) {
		bfa_trc(port, BFA_STATUS_IOC_FAILURE);
		return BFA_STATUS_IOC_FAILURE;
	}

	if (port->endis_pending) {
		bfa_trc(port, BFA_STATUS_DEVBUSY);
		return BFA_STATUS_DEVBUSY;
	}

	m = (struct bfi_port_generic_req_s *)port->endis_mb.msg;

	port->msgtag++;
	port->endis_cbfn = cbfn;
	port->endis_cbarg = cbarg;
	port->endis_pending = BFA_TRUE;

	bfi_h2i_set(m->mh, BFI_MC_PORT, BFI_PORT_H2I_DISABLE_REQ,
		    bfa_ioc_portid(port->ioc));
	bfa_ioc_mbox_queue(port->ioc, &port->endis_mb);

	return BFA_STATUS_OK;
}
Example #6
0
/**
 * Send I/O abort request to firmware.
 */
static          bfa_boolean_t
bfa_ioim_send_abort(struct bfa_ioim_s *ioim)
{
	struct bfi_ioim_abort_req_s *m;
	enum bfi_ioim_h2i       msgop;

	/**
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(ioim->bfa, ioim->reqq);
	if (!m)
		return BFA_FALSE;

	/**
	 * build i/o request message next
	 */
	if (ioim->iosp->abort_explicit)
		msgop = BFI_IOIM_H2I_IOABORT_REQ;
	else
		msgop = BFI_IOIM_H2I_IOCLEANUP_REQ;

	bfi_h2i_set(m->mh, BFI_MC_IOIM, msgop, bfa_lpuid(ioim->bfa));
	m->io_tag    = bfa_os_htons(ioim->iotag);
	m->abort_tag = ++ioim->abort_tag;

	/**
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(ioim->bfa, ioim->reqq);
	return BFA_TRUE;
}
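Unlike the senders that park themselves on the request queue, this one
reports failure and leaves the retry policy to its caller. A minimal
hypothetical caller, reusing the iosp->reqq_wait field seen in
Example #23:

static void
drv_try_abort(struct bfa_ioim_s *ioim)
{
	if (!bfa_ioim_send_abort(ioim))
		/* queue full: wait for space, then retry the abort */
		bfa_reqq_wait(ioim->bfa, ioim->reqq,
			      &ioim->iosp->reqq_wait);
}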
Example #7
0
bfa_status_t
bfa_cee_get_attr(struct bfa_cee_s *cee, struct bfa_cee_attr_s *attr,
		 bfa_cee_get_attr_cbfn_t cbfn, void *cbarg)
{
	struct bfi_cee_get_req_s *cmd;

	WARN_ON((cee == NULL) || (cee->ioc == NULL));
	bfa_trc(cee, 0);
	if (!bfa_ioc_is_operational(cee->ioc)) {
		bfa_trc(cee, 0);
		return BFA_STATUS_IOC_FAILURE;
	}
	if (cee->get_attr_pending == BFA_TRUE) {
		bfa_trc(cee, 0);
		return BFA_STATUS_DEVBUSY;
	}
	cee->get_attr_pending = BFA_TRUE;
	cmd = (struct bfi_cee_get_req_s *) cee->get_cfg_mb.msg;
	cee->attr = attr;
	cee->cbfn.get_attr_cbfn = cbfn;
	cee->cbfn.get_attr_cbarg = cbarg;
	bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_GET_CFG_REQ,
		bfa_ioc_portid(cee->ioc));
	bfa_dma_be_addr_set(cmd->dma_addr, cee->attr_dma.pa);
	bfa_ioc_mbox_queue(cee->ioc, &cee->get_cfg_mb);

	return BFA_STATUS_OK;
}
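A hypothetical completion sketch for the request above: the firmware
DMA-writes into the attr_dma buffer, and the driver hands the result to
the caller. The kva field and helper name are assumptions, and any
byte-swapping of individual attribute fields is omitted:

static void
drv_cee_get_attr_done(struct bfa_cee_s *cee, bfa_status_t status)
{
	if (status == BFA_STATUS_OK)
		/* copy the DMA-able buffer into the caller's struct */
		memcpy(cee->attr, cee->attr_dma.kva, sizeof(*cee->attr));

	cee->get_attr_pending = BFA_FALSE;
	if (cee->cbfn.get_attr_cbfn)
		cee->cbfn.get_attr_cbfn(cee->cbfn.get_attr_cbarg, status);
}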
Example #8
0
/*
 * Use the Mailbox interface to send BFI_IOCFC_H2I_CFG_REQ
 */
static void
bfa_iocfc_send_cfg(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfg_req_s cfg_req;
	struct bfi_iocfc_cfg_s *cfg_info = iocfc->cfginfo;
	struct bfa_iocfc_cfg_s	*cfg = &iocfc->cfg;
	int		i;

	WARN_ON(cfg->fwcfg.num_cqs > BFI_IOC_MAX_CQS);
	bfa_trc(bfa, cfg->fwcfg.num_cqs);

	bfa_iocfc_reset_queues(bfa);

	/*
	 * initialize IOC configuration info
	 */
	cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG;
	cfg_info->num_cqs = cfg->fwcfg.num_cqs;

	bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa);
	/*
	 * dma map REQ and RSP circular queues and shadow pointers
	 */
	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		bfa_dma_be_addr_set(cfg_info->req_cq_ba[i],
				    iocfc->req_cq_ba[i].pa);
		bfa_dma_be_addr_set(cfg_info->req_shadow_ci[i],
				    iocfc->req_cq_shadow_ci[i].pa);
		cfg_info->req_cq_elems[i] =
			cpu_to_be16(cfg->drvcfg.num_reqq_elems);

		bfa_dma_be_addr_set(cfg_info->rsp_cq_ba[i],
				    iocfc->rsp_cq_ba[i].pa);
		bfa_dma_be_addr_set(cfg_info->rsp_shadow_pi[i],
				    iocfc->rsp_cq_shadow_pi[i].pa);
		cfg_info->rsp_cq_elems[i] =
			cpu_to_be16(cfg->drvcfg.num_rspq_elems);
	}

	/*
	 * Enable interrupt coalescing if it is driver init path
	 * and not ioc disable/enable path.
	 */
	if (!iocfc->cfgdone)
		cfg_info->intr_attr.coalesce = BFA_TRUE;

	iocfc->cfgdone = BFA_FALSE;

	/*
	 * dma map IOC configuration itself
	 */
	bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ,
		    bfa_lpuid(bfa));
	bfa_dma_be_addr_set(cfg_req.ioc_cfg_dma_addr, iocfc->cfg_info.pa);

	bfa_ioc_mbox_send(&bfa->ioc, &cfg_req,
			  sizeof(struct bfi_iocfc_cfg_req_s));
}
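bfa_dma_be_addr_set() appears throughout these examples; below is a
plausible minimal implementation, assuming the union bfi_addr_u layout
with a32.addr_lo/addr_hi words seen in Examples #20 and #23 (a sketch,
not necessarily the driver's exact macro):

static inline void
bfa_dma_be_addr_set_sketch(union bfi_addr_u *dma_addr, u64 pa)
{
	/* firmware expects each 32-bit half in big-endian order */
	dma_addr->a32.addr_lo = cpu_to_be32((u32) pa);
	dma_addr->a32.addr_hi = cpu_to_be32((u32) (pa >> 32));
}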
Example #9
0
bfa_status_t
bfa_iocfc_israttr_set(struct bfa_s *bfa, struct bfa_iocfc_intr_attr_s *attr)
{
	struct bfa_iocfc_s		*iocfc = &bfa->iocfc;
	struct bfi_iocfc_set_intr_req_s *m;

	iocfc->cfginfo->intr_attr.coalesce = attr->coalesce;
	iocfc->cfginfo->intr_attr.delay = cpu_to_be16(attr->delay);
	iocfc->cfginfo->intr_attr.latency = cpu_to_be16(attr->latency);

	if (!bfa_iocfc_is_operational(bfa))
		return BFA_STATUS_OK;

	m = bfa_reqq_next(bfa, BFA_REQQ_IOC);
	if (!m)
		return BFA_STATUS_DEVBUSY;

	bfi_h2i_set(m->mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_SET_INTR_REQ,
		    bfa_lpuid(bfa));
	m->coalesce = iocfc->cfginfo->intr_attr.coalesce;
	m->delay    = iocfc->cfginfo->intr_attr.delay;
	m->latency  = iocfc->cfginfo->intr_attr.latency;

	bfa_trc(bfa, attr->delay);
	bfa_trc(bfa, attr->latency);

	bfa_reqq_produce(bfa, BFA_REQQ_IOC);
	return BFA_STATUS_OK;
}
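Because the attributes are cached in cfginfo before the operational
check, a value set while the IOC is down is still delivered when
bfa_iocfc_send_cfg() (Example #8) re-sends the full configuration. A
hypothetical usage sketch with invented example values:

static bfa_status_t
drv_set_coalescing(struct bfa_s *bfa)
{
	struct bfa_iocfc_intr_attr_s attr = {
		.coalesce = BFA_TRUE,
		.delay    = 1125,	/* illustrative values only */
		.latency  = 225,
	};

	/* BFA_STATUS_DEVBUSY here only means the request queue is full */
	return bfa_iocfc_israttr_set(bfa, &attr);
}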
Example #10
0
static void
bfa_msgq_cmdq_copy_rsp(struct bfa_msgq_cmdq *cmdq)
{
	struct bfi_msgq_h2i_cmdq_copy_rsp *rsp =
		(struct bfi_msgq_h2i_cmdq_copy_rsp *)&cmdq->copy_mb.msg[0];
	int copied;
	u8 *addr = (u8 *)cmdq->addr.kva;

	memset(rsp, 0, sizeof(struct bfi_msgq_h2i_cmdq_copy_rsp));
	bfi_h2i_set(rsp->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_CMDQ_COPY_RSP, 0);
	rsp->mh.mtag.i2htok = htons(cmdq->token);
	copied = (cmdq->bytes_to_copy >= BFI_CMD_COPY_SZ) ? BFI_CMD_COPY_SZ :
		cmdq->bytes_to_copy;
	addr += cmdq->offset;
	memcpy(rsp->data, addr, copied);

	cmdq->token++;
	cmdq->offset += copied;
	cmdq->bytes_to_copy -= copied;

	/* If the mailbox was free, the message went out immediately and no
	 * completion callback will fire, so run the next-copy step inline.
	 */
	if (!bfa_nw_ioc_mbox_queue(cmdq->msgq->ioc, &cmdq->copy_mb,
				bfa_msgq_cmdq_copy_next, cmdq)) {
		bfa_msgq_cmdq_copy_next(cmdq);
	}
}
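The copy loop above moves at most BFI_CMD_COPY_SZ bytes per mailbox
message and re-queues itself until bytes_to_copy reaches zero. A
self-contained user-space demo of the same arithmetic, assuming an
illustrative 128-byte BFI_CMD_COPY_SZ (the real constant may differ):

#include <stdio.h>

#define BFI_CMD_COPY_SZ 128	/* assumed value, for illustration */

int main(void)
{
	int bytes_to_copy = 300, offset = 0, token = 0;

	while (bytes_to_copy > 0) {
		int copied = (bytes_to_copy >= BFI_CMD_COPY_SZ) ?
			BFI_CMD_COPY_SZ : bytes_to_copy;

		printf("token %d: %d bytes at offset %d\n",
		       token, copied, offset);
		token++;
		offset += copied;
		bytes_to_copy -= copied;
	}
	return 0;	/* prints chunks of 128, 128 and 44 bytes */
}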
Example #11
0
/**
 * Send port disable message to firmware.
 */
static          bfa_boolean_t
bfa_fcport_send_disable(struct bfa_fcport_s *fcport)
{
	struct bfi_fcport_req_s *m;

	/**
	 * Increment message tag before queue check, so that responses to old
	 * requests are discarded.
	 */
	fcport->msgtag++;

	/**
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
	if (!m) {
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
							&fcport->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_DISABLE_REQ,
			bfa_lpuid(fcport->bfa));
	m->msgtag = fcport->msgtag;

	/**
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);

	return BFA_TRUE;
}
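The early msgtag increment pays off on the response side: any reply
carrying an older tag is ignored. A hypothetical response handler
(struct bfi_fcport_rsp_s and its msgtag field are assumptions for
illustration):

static void
drv_fcport_disable_rsp(struct bfa_fcport_s *fcport,
		       struct bfi_fcport_rsp_s *rsp)
{
	if (rsp->msgtag != fcport->msgtag)
		return;		/* stale reply to a superseded request */

	/* ... advance the port state machine on the current request ... */
}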
Example #12
0
/**
 * bfa_nw_cee_get_attr - Send the request to the f/w to fetch CEE attributes.
 *
 * @cee: Pointer to the CEE module data structure.
 * @attr: Buffer that receives the CEE attributes on completion.
 * @cbfn: Completion callback.
 * @cbarg: Callback argument.
 *
 * Return: status
 */
enum bfa_status
bfa_nw_cee_get_attr(struct bfa_cee *cee, struct bfa_cee_attr *attr,
		    bfa_cee_get_attr_cbfn_t cbfn, void *cbarg)
{
	struct bfi_cee_get_req *cmd;

	BUG_ON(!((cee != NULL) && (cee->ioc != NULL)));
	if (!bfa_nw_ioc_is_operational(cee->ioc))
		return BFA_STATUS_IOC_FAILURE;

	if (cee->get_attr_pending)
		return BFA_STATUS_DEVBUSY;

	cee->get_attr_pending = true;
	cmd = (struct bfi_cee_get_req *) cee->get_cfg_mb.msg;
	cee->attr = attr;
	cee->cbfn.get_attr_cbfn = cbfn;
	cee->cbfn.get_attr_cbarg = cbarg;
	bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_GET_CFG_REQ,
		    bfa_ioc_portid(cee->ioc));
	bfa_dma_be_addr_set(cmd->dma_addr, cee->attr_dma.pa);
	bfa_nw_ioc_mbox_queue(cee->ioc, &cee->get_cfg_mb, NULL, NULL);

	return BFA_STATUS_OK;
}
Example #13
0
static void
bfa_iocfc_stats_query(struct bfa_s *bfa)
{
	struct bfa_iocfc_s	*iocfc = &bfa->iocfc;
	struct bfi_iocfc_stats_req_s stats_req;

	bfa_timer_start(bfa, &iocfc->stats_timer,
			    bfa_iocfc_stats_timeout, bfa, BFA_IOCFC_TOV);

	bfi_h2i_set(stats_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_GET_STATS_REQ,
			bfa_lpuid(bfa));
	bfa_ioc_mbox_send(&bfa->ioc, &stats_req,
		sizeof(struct bfi_iocfc_stats_req_s));
}
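The timer armed above guards against a firmware that never answers. A
hedged sketch of the paired timeout handler; the stats_status field and
the completion step are assumptions:

static void
bfa_iocfc_stats_timeout_sketch(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;

	/* fail the outstanding stats request with a timeout status */
	iocfc->stats_status = BFA_STATUS_ETIMER;
	/* ... schedule the caller's completion callback ... */
}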
Example #14
0
static void
bfa_iocfc_send_cfg(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfg_req_s cfg_req;
	struct bfi_iocfc_cfg_s *cfg_info = iocfc->cfginfo;
	struct bfa_iocfc_cfg_s  *cfg = &iocfc->cfg;
	int             i;

	bfa_assert(cfg->fwcfg.num_cqs <= BFI_IOC_MAX_CQS);
	bfa_trc(bfa, cfg->fwcfg.num_cqs);

	iocfc->cfgdone = BFA_FALSE;
	bfa_iocfc_reset_queues(bfa);

	/**
	 * initialize IOC configuration info
	 */
	cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG;
	cfg_info->num_cqs = cfg->fwcfg.num_cqs;

	bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa);
	bfa_dma_be_addr_set(cfg_info->stats_addr, iocfc->stats_pa);

	/**
	 * dma map REQ and RSP circular queues and shadow pointers
	 */
	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		bfa_dma_be_addr_set(cfg_info->req_cq_ba[i],
				       iocfc->req_cq_ba[i].pa);
		bfa_dma_be_addr_set(cfg_info->req_shadow_ci[i],
				       iocfc->req_cq_shadow_ci[i].pa);
		cfg_info->req_cq_elems[i] =
			bfa_os_htons(cfg->drvcfg.num_reqq_elems);

		bfa_dma_be_addr_set(cfg_info->rsp_cq_ba[i],
				       iocfc->rsp_cq_ba[i].pa);
		bfa_dma_be_addr_set(cfg_info->rsp_shadow_pi[i],
				       iocfc->rsp_cq_shadow_pi[i].pa);
		cfg_info->rsp_cq_elems[i] =
			bfa_os_htons(cfg->drvcfg.num_rspq_elems);
	}

	/**
	 * dma map IOC configuration itself
	 */
	bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ,
			bfa_lpuid(bfa));
	bfa_dma_be_addr_set(cfg_req.ioc_cfg_dma_addr, iocfc->cfg_info.pa);

	bfa_ioc_mbox_send(&bfa->ioc, &cfg_req,
			sizeof(struct bfi_iocfc_cfg_req_s));
}
Example #15
0
static void
bfa_msgq_cmdq_dbell(struct bfa_msgq_cmdq *cmdq)
{
	struct bfi_msgq_h2i_db *dbell =
		(struct bfi_msgq_h2i_db *)(&cmdq->dbell_mb.msg[0]);

	memset(dbell, 0, sizeof(struct bfi_msgq_h2i_db));
	bfi_h2i_set(dbell->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_DOORBELL_PI, 0);
	dbell->mh.mtag.i2htok = 0;
	dbell->idx.cmdq_pi = htons(cmdq->producer_index);

	if (!bfa_nw_ioc_mbox_queue(cmdq->msgq->ioc, &cmdq->dbell_mb,
				bfa_msgq_cmdq_dbell_ready, cmdq)) {
		bfa_msgq_cmdq_dbell_ready(cmdq);
	}
}
Example #16
0
static void
bfa_msgq_rspq_dbell(struct bfa_msgq_rspq *rspq)
{
	struct bfi_msgq_h2i_db *dbell =
		(struct bfi_msgq_h2i_db *)(&rspq->dbell_mb.msg[0]);

	memset(dbell, 0, sizeof(struct bfi_msgq_h2i_db));
	bfi_h2i_set(dbell->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_DOORBELL_CI, 0);
	dbell->mh.mtag.i2htok = 0;
	dbell->idx.rspq_ci = htons(rspq->consumer_index);

	if (!bfa_nw_ioc_mbox_queue(rspq->msgq->ioc, &rspq->dbell_mb,
				bfa_msgq_rspq_dbell_ready, rspq)) {
		bfa_msgq_rspq_dbell_ready(rspq);
	}
}
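The cmdq doorbell (Example #15) publishes a producer index, while this
one acknowledges a consumer index. Below is a sketch of the index
advance that would precede the CI doorbell, assuming the queue depth is
a power of two (the driver may instead use a modulo; depth comes from
Example #17):

static inline void
rspq_advance_ci_sketch(struct bfa_msgq_rspq *rspq, int num_entries)
{
	rspq->consumer_index =
		(rspq->consumer_index + num_entries) & (rspq->depth - 1);
}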
Example #17
0
static void
bfa_msgq_init(void *arg)
{
	struct bfa_msgq *msgq = (struct bfa_msgq *)arg;
	struct bfi_msgq_cfg_req *msgq_cfg =
		(struct bfi_msgq_cfg_req *)&msgq->init_mb.msg[0];

	memset(msgq_cfg, 0, sizeof(struct bfi_msgq_cfg_req));
	bfi_h2i_set(msgq_cfg->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_INIT_REQ, 0);
	msgq_cfg->mh.mtag.i2htok = 0;

	bfa_dma_be_addr_set(msgq_cfg->cmdq.addr, msgq->cmdq.addr.pa);
	msgq_cfg->cmdq.q_depth = htons(msgq->cmdq.depth);
	bfa_dma_be_addr_set(msgq_cfg->rspq.addr, msgq->rspq.addr.pa);
	msgq_cfg->rspq.q_depth = htons(msgq->rspq.depth);

	bfa_nw_ioc_mbox_queue(msgq->ioc, &msgq->init_mb, NULL, NULL);
}
Example #18
0
/*
 * bfa_port_disable()
 *
 *   Send the Port disable request to the f/w.
 *
 * @param[in] port   Pointer to the Port module data structure.
 * @param[in] cbfn   Completion callback.
 * @param[in] cbarg  Callback argument.
 *
 * @return Status
 */
bfa_status_t
bfa_port_disable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn,
		  void *cbarg)
{
	struct bfi_port_generic_req_s *m;

	/* If port is PBC disabled, return error */
	if (port->pbc_disabled) {
		bfa_trc(port, BFA_STATUS_PBC);
		return BFA_STATUS_PBC;
	}

	if (bfa_ioc_is_disabled(port->ioc)) {
		bfa_trc(port, BFA_STATUS_IOC_DISABLED);
		return BFA_STATUS_IOC_DISABLED;
	}

	if (!bfa_ioc_is_operational(port->ioc)) {
		bfa_trc(port, BFA_STATUS_IOC_FAILURE);
		return BFA_STATUS_IOC_FAILURE;
	}

	if (port->endis_pending) {
		bfa_trc(port, BFA_STATUS_DEVBUSY);
		return BFA_STATUS_DEVBUSY;
	}

	m = (struct bfi_port_generic_req_s *) port->endis_mb.msg;

	port->msgtag++;
	port->endis_cbfn    = cbfn;
	port->endis_cbarg   = cbarg;
	port->endis_pending = BFA_TRUE;

	bfi_h2i_set(m->mh, BFI_MC_PORT, BFI_PORT_H2I_DISABLE_REQ,
		    bfa_ioc_portid(port->ioc));
	bfa_ioc_mbox_queue(port->ioc, &port->endis_mb);

	return BFA_STATUS_OK;
}
Example #19
0
void
bfa_iocfc_updateq(struct bfa_s *bfa, u32 reqq_ba, u32 rspq_ba,
				u32 reqq_sci, u32 rspq_spi, bfa_cb_iocfc_t cbfn,
				void *cbarg)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_updateq_req_s updateq_req;

	iocfc->updateq_cbfn = cbfn;
	iocfc->updateq_cbarg = cbarg;

	bfi_h2i_set(updateq_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_UPDATEQ_REQ,
			bfa_lpuid(bfa));

	updateq_req.reqq_ba = bfa_os_htonl(reqq_ba);
	updateq_req.rspq_ba = bfa_os_htonl(rspq_ba);
	updateq_req.reqq_sci = bfa_os_htonl(reqq_sci);
	updateq_req.rspq_spi = bfa_os_htonl(rspq_spi);

	bfa_ioc_mbox_send(&bfa->ioc, &updateq_req,
			sizeof(struct bfi_iocfc_updateq_req_s));
}
Example #20
0
/**
 * Send port enable message to firmware.
 */
static          bfa_boolean_t
bfa_fcport_send_enable(struct bfa_fcport_s *fcport)
{
	struct bfi_fcport_enable_req_s *m;

	/**
	 * Increment message tag before queue check, so that responses to old
	 * requests are discarded.
	 */
	fcport->msgtag++;

	/**
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
	if (!m) {
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
							&fcport->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_ENABLE_REQ,
				bfa_lpuid(fcport->bfa));
	m->nwwn = fcport->nwwn;
	m->pwwn = fcport->pwwn;
	m->port_cfg = fcport->cfg;
	m->msgtag = fcport->msgtag;
	m->port_cfg.maxfrsize = bfa_os_htons(fcport->cfg.maxfrsize);
	bfa_dma_be_addr_set(m->stats_dma_addr, fcport->stats_pa);
	bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_lo);
	bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_hi);

	/**
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
	return BFA_TRUE;
}
Example #21
0
static void
bfa_port_qos_stats_clear(void *cbarg)
{
	struct bfa_pport_s *port = (struct bfa_pport_s *)cbarg;
	bfi_pport_clear_qos_stats_req_t *msg;

	msg = bfa_reqq_next(port->bfa, BFA_REQQ_PORT);

	if (!msg) {
		port->stats_qfull = BFA_TRUE;
		bfa_reqq_winit(&port->stats_reqq_wait, bfa_port_qos_stats_clear,
			       port);
		bfa_reqq_wait(port->bfa, BFA_REQQ_PORT, &port->stats_reqq_wait);
		return;
	}
	port->stats_qfull = BFA_FALSE;

	bfa_os_memset(msg, 0, sizeof(bfi_pport_clear_qos_stats_req_t));
	bfi_h2i_set(msg->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_CLEAR_QOS_STATS_REQ,
		    bfa_lpuid(port->bfa));
	bfa_reqq_produce(port->bfa, BFA_REQQ_PORT);
}
Example #22
0
static void
bfa_fcport_send_stats_clear(void *cbarg)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
	struct bfi_fcport_req_s *msg;

	msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);

	if (!msg) {
		fcport->stats_qfull = BFA_TRUE;
		bfa_reqq_winit(&fcport->stats_reqq_wait,
				bfa_fcport_send_stats_clear, fcport);
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
				&fcport->stats_reqq_wait);
		return;
	}
	fcport->stats_qfull = BFA_FALSE;

	bfa_os_memset(msg, 0, sizeof(struct bfi_fcport_req_s));
	bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_CLEAR_REQ,
			bfa_lpuid(fcport->bfa));
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
}
Example #23
0
/**
 * Send I/O request to firmware.
 */
static          bfa_boolean_t
bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
{
	struct bfa_itnim_s *itnim = ioim->itnim;
	struct bfi_ioim_req_s *m;
	static struct fcp_cmnd_s cmnd_z0 = { 0 };
	struct bfi_sge_s      *sge;
	u32        pgdlen = 0;
	u64 addr;
	struct scatterlist *sg;
	struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;

	/**
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(ioim->bfa, ioim->reqq);
	if (!m) {
		bfa_reqq_wait(ioim->bfa, ioim->reqq,
				  &ioim->iosp->reqq_wait);
		return BFA_FALSE;
	}

	/**
	 * build i/o request message next
	 */
	m->io_tag = bfa_os_htons(ioim->iotag);
	m->rport_hdl = ioim->itnim->rport->fw_handle;
	m->io_timeout = bfa_cb_ioim_get_timeout(ioim->dio);

	/**
	 * build inline IO SG element here
	 */
	sge = &m->sges[0];
	if (ioim->nsges) {
		sg = (struct scatterlist *)scsi_sglist(cmnd);
		addr = bfa_os_sgaddr(sg_dma_address(sg));
		sge->sga = *(union bfi_addr_u *) &addr;
		pgdlen = sg_dma_len(sg);
		sge->sg_len = pgdlen;
		sge->flags = (ioim->nsges > BFI_SGE_INLINE) ?
					BFI_SGE_DATA_CPL : BFI_SGE_DATA_LAST;
		bfa_sge_to_be(sge);
		sge++;
	}

	if (ioim->nsges > BFI_SGE_INLINE) {
		sge->sga = ioim->sgpg->sgpg_pa;
	} else {
		sge->sga.a32.addr_lo = 0;
		sge->sga.a32.addr_hi = 0;
	}
	sge->sg_len = pgdlen;
	sge->flags = BFI_SGE_PGDLEN;
	bfa_sge_to_be(sge);

	/**
	 * set up I/O command parameters
	 */
	bfa_os_assign(m->cmnd, cmnd_z0);
	m->cmnd.lun = bfa_cb_ioim_get_lun(ioim->dio);
	m->cmnd.iodir = bfa_cb_ioim_get_iodir(ioim->dio);
	bfa_os_assign(m->cmnd.cdb,
			*(struct scsi_cdb_s *)bfa_cb_ioim_get_cdb(ioim->dio));
	m->cmnd.fcp_dl = bfa_os_htonl(bfa_cb_ioim_get_size(ioim->dio));

	/**
	 * set up I/O message header
	 */
	switch (m->cmnd.iodir) {
	case FCP_IODIR_READ:
		bfi_h2i_set(m->mh, BFI_MC_IOIM_READ, 0, bfa_lpuid(ioim->bfa));
		bfa_stats(itnim, input_reqs);
		break;
	case FCP_IODIR_WRITE:
		bfi_h2i_set(m->mh, BFI_MC_IOIM_WRITE, 0, bfa_lpuid(ioim->bfa));
		bfa_stats(itnim, output_reqs);
		break;
	case FCP_IODIR_RW:
		bfa_stats(itnim, input_reqs);
		bfa_stats(itnim, output_reqs);
		/* fall through: read-write uses the generic I/O opcode */
	default:
		bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa));
	}
	if (itnim->seq_rec ||
	    (bfa_cb_ioim_get_size(ioim->dio) & (sizeof(u32) - 1)))
		bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa));

#ifdef IOIM_ADVANCED
	m->cmnd.crn = bfa_cb_ioim_get_crn(ioim->dio);
	m->cmnd.priority = bfa_cb_ioim_get_priority(ioim->dio);
	m->cmnd.taskattr = bfa_cb_ioim_get_taskattr(ioim->dio);

	/**
	 * Handle large CDB (>16 bytes).
	 */
	m->cmnd.addl_cdb_len = (bfa_cb_ioim_get_cdblen(ioim->dio) -
					FCP_CMND_CDB_LEN) / sizeof(u32);
	if (m->cmnd.addl_cdb_len) {
		bfa_os_memcpy(&m->cmnd.cdb + 1, (struct scsi_cdb_s *)
				bfa_cb_ioim_get_cdb(ioim->dio) + 1,
				m->cmnd.addl_cdb_len * sizeof(u32));
		fcp_cmnd_fcpdl(&m->cmnd) =
				bfa_os_htonl(bfa_cb_ioim_get_size(ioim->dio));
	}
#endif

	/**
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(ioim->bfa, ioim->reqq);
	return BFA_TRUE;
}
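A self-contained check of the large-CDB arithmetic in the IOIM_ADVANCED
block above, assuming the standard 16-byte FCP CDB length and 32-bit
(u32) additional words:

#include <stdio.h>

#define FCP_CMND_CDB_LEN 16	/* standard FCP CDB size in bytes */

int main(void)
{
	unsigned int cdblen = 32;	/* hypothetical 32-byte CDB */
	unsigned int addl = (cdblen - FCP_CMND_CDB_LEN) / 4;

	printf("addl_cdb_len = %u words (%u extra bytes)\n",
	       addl, addl * 4);
	return 0;	/* prints: addl_cdb_len = 4 words (16 extra bytes) */
}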