Example #1
/**
 * bfa_nw_cee_get_attr - Send the request to the f/w to fetch CEE attributes.
 *
 * @cee: Pointer to the CEE module data structure.
 * @attr: Pointer to the buffer the fetched attributes are returned in.
 * @cbfn: Callback invoked when the f/w response arrives.
 * @cbarg: Opaque argument passed back to @cbfn.
 *
 * Return: BFA_STATUS_OK on success, error status otherwise.
 */
enum bfa_status
bfa_nw_cee_get_attr(struct bfa_cee *cee, struct bfa_cee_attr *attr,
		    bfa_cee_get_attr_cbfn_t cbfn, void *cbarg)
{
	struct bfi_cee_get_req *cmd;

	BUG_ON(!(cee && cee->ioc));
	if (!bfa_nw_ioc_is_operational(cee->ioc))
		return BFA_STATUS_IOC_FAILURE;

	if (cee->get_attr_pending)
		return BFA_STATUS_DEVBUSY;

	cee->get_attr_pending = true;
	cmd = (struct bfi_cee_get_req *) cee->get_cfg_mb.msg;
	cee->attr = attr;
	cee->cbfn.get_attr_cbfn = cbfn;
	cee->cbfn.get_attr_cbarg = cbarg;
	bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_GET_CFG_REQ,
		    bfa_ioc_portid(cee->ioc));
	bfa_dma_be_addr_set(cmd->dma_addr, cee->attr_dma.pa);
	bfa_nw_ioc_mbox_queue(cee->ioc, &cee->get_cfg_mb, NULL, NULL);

	return BFA_STATUS_OK;
}
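This API is asynchronous: the call only queues a mailbox request, and the result arrives later through cbfn. A minimal caller sketch, assuming the callback signature is void (*)(void *cbarg, enum bfa_status) and using a hypothetical device context struct my_dev (neither is shown in this section):

/* Hypothetical completion callback; the signature is assumed from the
 * cbfn/cbarg pair above. */
static void
my_cee_attr_done(void *cbarg, enum bfa_status status)
{
	struct my_dev *dev = cbarg;	/* hypothetical caller context */

	if (status != BFA_STATUS_OK)
		return;		/* f/w request failed */
	/* The attr buffer passed to bfa_nw_cee_get_attr() is now valid. */
}

/* ... somewhere in the caller: */
	enum bfa_status status;

	status = bfa_nw_cee_get_attr(&dev->cee, &dev->cee_attr,
				     my_cee_attr_done, dev);
	if (status == BFA_STATUS_DEVBUSY)
		return;		/* a previous request is still in flight */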
Example #2
/**
 * bfa_port_get_stats()
 *
 *   Send the request to the f/w to fetch Port statistics.
 *
 * @param[in] port  - Pointer to the Port module data structure.
 * @param[in] stats - Pointer to the buffer the statistics are returned in.
 * @param[in] cbfn  - Callback invoked when the f/w response arrives.
 * @param[in] cbarg - Opaque argument passed back to cbfn.
 *
 * @return Status
 */
bfa_status_t
bfa_port_get_stats(struct bfa_port_s *port, union bfa_pport_stats_u *stats,
		   bfa_port_stats_cbfn_t cbfn, void *cbarg)
{
	struct bfi_port_get_stats_req_s *m;

	if (!bfa_ioc_is_operational(port->ioc)) {
		bfa_trc(port, BFA_STATUS_IOC_FAILURE);
		return BFA_STATUS_IOC_FAILURE;
	}

	if (port->stats_busy) {
		bfa_trc(port, BFA_STATUS_DEVBUSY);
		return BFA_STATUS_DEVBUSY;
	}

	m = (struct bfi_port_get_stats_req_s *)port->stats_mb.msg;

	port->stats = stats;
	port->stats_cbfn = cbfn;
	port->stats_cbarg = cbarg;
	port->stats_busy = BFA_TRUE;
	bfa_dma_be_addr_set(m->dma_addr, port->stats_dma.pa);

	bfi_h2i_set(m->mh, BFI_MC_PORT, BFI_PORT_H2I_GET_STATS_REQ,
		    bfa_ioc_portid(port->ioc));
	bfa_ioc_mbox_queue(port->ioc, &port->stats_mb);

	return BFA_STATUS_OK;
}
Example #3
/**
 * bfa_cee_get_stats()
 *
 *   Send the request to the f/w to fetch CEE statistics.
 *
 * @param[in] cee   - Pointer to the CEE module data structure.
 * @param[in] stats - Pointer to the buffer the statistics are returned in.
 * @param[in] cbfn  - Callback invoked when the f/w response arrives.
 * @param[in] cbarg - Opaque argument passed back to cbfn.
 *
 * @return Status
 */
bfa_status_t
bfa_cee_get_stats(struct bfa_cee_s *cee, struct bfa_cee_stats_s *stats,
		  bfa_cee_get_stats_cbfn_t cbfn, void *cbarg)
{
	struct bfi_cee_get_req_s *cmd;

	WARN_ON((cee == NULL) || (cee->ioc == NULL));

	if (!bfa_ioc_is_operational(cee->ioc)) {
		bfa_trc(cee, 0);
		return BFA_STATUS_IOC_FAILURE;
	}
	if (cee->get_stats_pending == BFA_TRUE) {
		bfa_trc(cee, 0);
		return BFA_STATUS_DEVBUSY;
	}
	cee->get_stats_pending = BFA_TRUE;
	cmd = (struct bfi_cee_get_req_s *) cee->get_stats_mb.msg;
	cee->stats = stats;
	cee->cbfn.get_stats_cbfn = cbfn;
	cee->cbfn.get_stats_cbarg = cbarg;
	bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_GET_STATS_REQ,
		bfa_ioc_portid(cee->ioc));
	bfa_dma_be_addr_set(cmd->dma_addr, cee->stats_dma.pa);
	bfa_ioc_mbox_queue(cee->ioc, &cee->get_stats_mb);

	return BFA_STATUS_OK;
}
Example #4
void
bfa_iocfc_set_snsbase(struct bfa_s *bfa, u64 snsbase_pa)
{
	struct bfa_iocfc_s	*iocfc = &bfa->iocfc;

	iocfc->cfginfo->sense_buf_len = (BFI_IOIM_SNSLEN - 1);
	bfa_dma_be_addr_set(iocfc->cfginfo->ioim_snsbase, snsbase_pa);
}
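bfa_dma_be_addr_set() appears in every example here but is never defined in this section. A standalone sketch of what such a helper plausibly does: split a 64-bit physical address into two big-endian 32-bit words so the firmware can parse it regardless of host endianness. Here htonl() stands in for the kernel's cpu_to_be32(), and the union layout is assumed, not taken from the driver headers:

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>	/* htonl(): userspace stand-in for cpu_to_be32() */

/* Assumed stand-in for the driver's union bfi_addr_u: a 64-bit DMA
 * address carried as two 32-bit words. */
union bfi_addr_u {
	struct {
		uint32_t addr_lo;
		uint32_t addr_hi;
	} a32;
};

/* Sketch of a bfa_dma_be_addr_set()-style helper: store each half of
 * the physical address in big-endian byte order. */
static void
dma_be_addr_set(union bfi_addr_u *dma_addr, uint64_t pa)
{
	dma_addr->a32.addr_lo = htonl((uint32_t)pa);
	dma_addr->a32.addr_hi = htonl((uint32_t)(pa >> 32));
}

int main(void)
{
	union bfi_addr_u addr;

	dma_be_addr_set(&addr, 0x123456789abcdef0ULL);
	printf("hi=%08x lo=%08x\n", addr.a32.addr_hi, addr.a32.addr_lo);
	return 0;
}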
Example #5
/* Build the BFI_MSGQ_H2I_INIT_REQ message and post it to the IOC mailbox. */
static void
bfa_msgq_init(void *arg)
{
	struct bfa_msgq *msgq = (struct bfa_msgq *)arg;
	struct bfi_msgq_cfg_req *msgq_cfg =
		(struct bfi_msgq_cfg_req *)&msgq->init_mb.msg[0];

	memset(msgq_cfg, 0, sizeof(struct bfi_msgq_cfg_req));
	bfi_h2i_set(msgq_cfg->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_INIT_REQ, 0);
	msgq_cfg->mh.mtag.i2htok = 0;

	bfa_dma_be_addr_set(msgq_cfg->cmdq.addr, msgq->cmdq.addr.pa);
	msgq_cfg->cmdq.q_depth = htons(msgq->cmdq.depth);
	bfa_dma_be_addr_set(msgq_cfg->rspq.addr, msgq->rspq.addr.pa);
	msgq_cfg->rspq.q_depth = htons(msgq->rspq.depth);

	bfa_nw_ioc_mbox_queue(msgq->ioc, &msgq->init_mb, NULL, NULL);
}
Example #6
/*
 * Use the Mailbox interface to send BFI_IOCFC_H2I_CFG_REQ
 */
static void
bfa_iocfc_send_cfg(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfg_req_s cfg_req;
	struct bfi_iocfc_cfg_s *cfg_info = iocfc->cfginfo;
	struct bfa_iocfc_cfg_s	*cfg = &iocfc->cfg;
	int		i;

	WARN_ON(cfg->fwcfg.num_cqs > BFI_IOC_MAX_CQS);
	bfa_trc(bfa, cfg->fwcfg.num_cqs);

	bfa_iocfc_reset_queues(bfa);

	/*
	 * initialize IOC configuration info
	 */
	cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG;
	cfg_info->num_cqs = cfg->fwcfg.num_cqs;

	bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa);
	/*
	 * dma map REQ and RSP circular queues and shadow pointers
	 */
	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		bfa_dma_be_addr_set(cfg_info->req_cq_ba[i],
				    iocfc->req_cq_ba[i].pa);
		bfa_dma_be_addr_set(cfg_info->req_shadow_ci[i],
				    iocfc->req_cq_shadow_ci[i].pa);
		cfg_info->req_cq_elems[i] =
			cpu_to_be16(cfg->drvcfg.num_reqq_elems);

		bfa_dma_be_addr_set(cfg_info->rsp_cq_ba[i],
				    iocfc->rsp_cq_ba[i].pa);
		bfa_dma_be_addr_set(cfg_info->rsp_shadow_pi[i],
				    iocfc->rsp_cq_shadow_pi[i].pa);
		cfg_info->rsp_cq_elems[i] =
			cpu_to_be16(cfg->drvcfg.num_rspq_elems);
	}

	/*
	 * Enable interrupt coalescing if it is driver init path
	 * and not ioc disable/enable path.
	 */
	if (!iocfc->cfgdone)
		cfg_info->intr_attr.coalesce = BFA_TRUE;

	iocfc->cfgdone = BFA_FALSE;

	/*
	 * dma map IOC configuration itself
	 */
	bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ,
		    bfa_lpuid(bfa));
	bfa_dma_be_addr_set(cfg_req.ioc_cfg_dma_addr, iocfc->cfg_info.pa);

	bfa_ioc_mbox_send(&bfa->ioc, &cfg_req,
			  sizeof(struct bfi_iocfc_cfg_req_s));
}
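Every example stamps the mailbox header with bfi_h2i_set() before filling in DMA addresses. The macro is not defined in this section; a plausible sketch (field names assumed, not taken from bfi.h) showing that it only fills the common message header with the message class, opcode, and LPU/port id:

/* Sketch only: field names and layout are assumptions. */
#define bfi_h2i_set(_mh, _mc, _op, _lpuid) do {		\
	(_mh).msg_class       = (_mc);			\
	(_mh).msg_id          = (_op);			\
	(_mh).mtag.h2i.lpu_id = (_lpuid);		\
} while (0)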
Example #7
static void
bfa_iocfc_send_cfg(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfg_req_s cfg_req;
	struct bfi_iocfc_cfg_s *cfg_info = iocfc->cfginfo;
	struct bfa_iocfc_cfg_s  *cfg = &iocfc->cfg;
	int             i;

	bfa_assert(cfg->fwcfg.num_cqs <= BFI_IOC_MAX_CQS);
	bfa_trc(bfa, cfg->fwcfg.num_cqs);

	iocfc->cfgdone = BFA_FALSE;
	bfa_iocfc_reset_queues(bfa);

	/*
	 * initialize IOC configuration info
	 */
	cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG;
	cfg_info->num_cqs = cfg->fwcfg.num_cqs;

	bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa);
	bfa_dma_be_addr_set(cfg_info->stats_addr, iocfc->stats_pa);

	/*
	 * dma map REQ and RSP circular queues and shadow pointers
	 */
	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		bfa_dma_be_addr_set(cfg_info->req_cq_ba[i],
				       iocfc->req_cq_ba[i].pa);
		bfa_dma_be_addr_set(cfg_info->req_shadow_ci[i],
				       iocfc->req_cq_shadow_ci[i].pa);
		cfg_info->req_cq_elems[i] =
			bfa_os_htons(cfg->drvcfg.num_reqq_elems);

		bfa_dma_be_addr_set(cfg_info->rsp_cq_ba[i],
				       iocfc->rsp_cq_ba[i].pa);
		bfa_dma_be_addr_set(cfg_info->rsp_shadow_pi[i],
				       iocfc->rsp_cq_shadow_pi[i].pa);
		cfg_info->rsp_cq_elems[i] =
			bfa_os_htons(cfg->drvcfg.num_rspq_elems);
	}

	/*
	 * dma map IOC configuration itself
	 */
	bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ,
			bfa_lpuid(bfa));
			bfa_lpuid(bfa));
	bfa_dma_be_addr_set(cfg_req.ioc_cfg_dma_addr, iocfc->cfg_info.pa);

	bfa_ioc_mbox_send(&bfa->ioc, &cfg_req,
			sizeof(struct bfi_iocfc_cfg_req_s));
}
Example #8
/**
 * Send port enable message to firmware.
 *
 * @param[in] fcport - Pointer to the FC port module data structure.
 *
 * @return BFA_TRUE if the request was queued, BFA_FALSE if the caller
 *	   must wait for request-queue space to free up.
 */
static bfa_boolean_t
bfa_fcport_send_enable(struct bfa_fcport_s *fcport)
{
	struct bfi_fcport_enable_req_s *m;

	/*
	 * Increment message tag before queue check, so that responses to old
	 * requests are discarded.
	 */
	fcport->msgtag++;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
	if (!m) {
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
							&fcport->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_ENABLE_REQ,
				bfa_lpuid(fcport->bfa));
	m->nwwn = fcport->nwwn;
	m->pwwn = fcport->pwwn;
	m->port_cfg = fcport->cfg;
	m->msgtag = fcport->msgtag;
	m->port_cfg.maxfrsize = bfa_os_htons(fcport->cfg.maxfrsize);
	bfa_dma_be_addr_set(m->stats_dma_addr, fcport->stats_pa);
	bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_lo);
	bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_hi);

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
	return BFA_TRUE;
}
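The BFA_FALSE return only makes sense together with the wait element handed to bfa_reqq_wait(). A minimal sketch of that pairing, assuming bfa_reqq_winit() wires a resume callback and argument into the wait element; in the actual driver the resume path typically posts an event to a state machine, whereas this hypothetical callback simply retries the send:

/* Hypothetical resume callback, invoked once request-queue space frees. */
static void
my_fcport_qresume(void *cbarg)
{
	struct bfa_fcport_s *fcport = cbarg;

	(void)bfa_fcport_send_enable(fcport);	/* retry the enable request */
}

/* One-time init, e.g. in the port attach path: */
	bfa_reqq_winit(&fcport->reqq_wait, my_fcport_qresume, fcport);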