/*
 * Use the Mailbox interface to send BFI_IOCFC_H2I_CFG_REQ
 */
static void
bfa_iocfc_send_cfg(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfg_req_s cfg_req;
	struct bfi_iocfc_cfg_s *cfg_info = iocfc->cfginfo;
	struct bfa_iocfc_cfg_s *cfg = &iocfc->cfg;
	int i;

	WARN_ON(cfg->fwcfg.num_cqs > BFI_IOC_MAX_CQS);
	bfa_trc(bfa, cfg->fwcfg.num_cqs);

	bfa_iocfc_reset_queues(bfa);

	/*
	 * initialize IOC configuration info
	 */
	cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG;
	cfg_info->num_cqs = cfg->fwcfg.num_cqs;

	bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa);

	/*
	 * dma map REQ and RSP circular queues and shadow pointers
	 */
	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		bfa_dma_be_addr_set(cfg_info->req_cq_ba[i],
				    iocfc->req_cq_ba[i].pa);
		bfa_dma_be_addr_set(cfg_info->req_shadow_ci[i],
				    iocfc->req_cq_shadow_ci[i].pa);
		cfg_info->req_cq_elems[i] =
			cpu_to_be16(cfg->drvcfg.num_reqq_elems);

		bfa_dma_be_addr_set(cfg_info->rsp_cq_ba[i],
				    iocfc->rsp_cq_ba[i].pa);
		bfa_dma_be_addr_set(cfg_info->rsp_shadow_pi[i],
				    iocfc->rsp_cq_shadow_pi[i].pa);
		cfg_info->rsp_cq_elems[i] =
			cpu_to_be16(cfg->drvcfg.num_rspq_elems);
	}

	/*
	 * Enable interrupt coalescing if it is driver init path
	 * and not ioc disable/enable path.
	 */
	if (!iocfc->cfgdone)
		cfg_info->intr_attr.coalesce = BFA_TRUE;

	iocfc->cfgdone = BFA_FALSE;

	/*
	 * dma map IOC configuration itself
	 */
	bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ,
		    bfa_lpuid(bfa));
	bfa_dma_be_addr_set(cfg_req.ioc_cfg_dma_addr, iocfc->cfg_info.pa);

	bfa_ioc_mbox_send(&bfa->ioc, &cfg_req,
			  sizeof(struct bfi_iocfc_cfg_req_s));
}
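/*
 * For reference: the bfa_dma_be_addr_set() helper used above hands a 64-bit
 * host DMA address to the firmware, which expects big-endian hi/lo words.
 * A minimal sketch of its presumed shape (the real definition lives in
 * bfa.h; the union bfi_addr_u field names here are assumptions):
 *
 *	static inline void
 *	__bfa_dma_be_addr_set(union bfi_addr_u *dma_addr, u64 pa)
 *	{
 *		dma_addr->a32.addr_lo = cpu_to_be32(pa);
 *		dma_addr->a32.addr_hi = cpu_to_be32(pa >> 32);
 *	}
 */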
/*
 * Actions on chip-reset completion.
 */
static void
bfa_iocfc_reset_cbfn(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;

	bfa_iocfc_reset_queues(bfa);
	bfa_isr_enable(bfa);
}
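/*
 * For reference: bfa_iocfc_reset_queues() rewinds every REQ/RSP circular
 * queue index so host and firmware restart in sync after a chip reset.
 * A minimal sketch, assuming the bfa_reqq_ci()/bfa_reqq_pi() style queue
 * accessors used elsewhere in this driver:
 *
 *	void
 *	bfa_iocfc_reset_queues(struct bfa_s *bfa)
 *	{
 *		int q;
 *
 *		for (q = 0; q < BFI_IOC_MAX_CQS; q++) {
 *			bfa_reqq_ci(bfa, q) = 0;
 *			bfa_reqq_pi(bfa, q) = 0;
 *			bfa_rspq_ci(bfa, q) = 0;
 *			bfa_rspq_pi(bfa, q) = 0;
 *		}
 *	}
 */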