Example #1
void
bfa_isr_enable(struct bfa_s *bfa)
{
	u32 intr_unmask;
	int pci_func = bfa_ioc_pcifn(&bfa->ioc);

	bfa_trc(bfa, pci_func);

	bfa_msix_install(bfa);
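	/*
	 * Always unmask the chip error interrupts; the CPE/RME queue and
	 * mailbox bits added below depend on which PCI function this is.
	 */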
	intr_unmask = (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 |
		       __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS |
		       __HFN_INT_LL_HALT);

	if (pci_func == 0)
		intr_unmask |= (__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 |
				__HFN_INT_CPE_Q2 | __HFN_INT_CPE_Q3 |
				__HFN_INT_RME_Q0 | __HFN_INT_RME_Q1 |
				__HFN_INT_RME_Q2 | __HFN_INT_RME_Q3 |
				__HFN_INT_MBOX_LPU0);
	else
		intr_unmask |= (__HFN_INT_CPE_Q4 | __HFN_INT_CPE_Q5 |
				__HFN_INT_CPE_Q6 | __HFN_INT_CPE_Q7 |
				__HFN_INT_RME_Q4 | __HFN_INT_RME_Q5 |
				__HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 |
				__HFN_INT_MBOX_LPU1);

	bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, intr_unmask);
	bfa_reg_write(bfa->iocfc.bfa_regs.intr_mask, ~intr_unmask);
	bfa->iocfc.intr_mask = ~intr_unmask;
	bfa_isr_mode_set(bfa, bfa->msix.nvecs != 0);
}
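For context, this enable/disable pair is meant to bracket the lifetime of the driver's interrupt handlers. A minimal sketch of a hypothetical attach/detach path follows; bfad_setup_intr, bfad_teardown_intr, and the bfad_*_irqs helpers are assumed names for illustration, not part of the driver code shown here:

/*
 * Hypothetical sketch: bfa_isr_enable() runs after the IRQs have been
 * requested; bfa_isr_disable() runs before they are freed. Only the
 * bfa_isr_* calls are real; the helpers are illustrative.
 */
static int
bfad_setup_intr(struct bfa_s *bfa)
{
	int rc;

	rc = bfad_request_irqs(bfa);	/* assumed: request_irq()/MSI-X setup */
	if (rc)
		return rc;

	bfa_isr_enable(bfa);		/* install MSI-X handlers, unmask sources */
	return 0;
}

static void
bfad_teardown_intr(struct bfa_s *bfa)
{
	bfa_isr_disable(bfa);		/* mask everything, uninstall MSI-X */
	bfad_free_irqs(bfa);		/* assumed: free_irq() teardown */
}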
Example #2
static void
bfa_hwct_msix_lpu_err_set(struct bfa_s *bfa, bfa_boolean_t msix, int vec)
{
	int fn = bfa_ioc_pcifn(&bfa->ioc);
	bfa_os_addr_t kva = bfa_ioc_bar0(&bfa->ioc);

	/* program the per-PCI-function LPU error vector, or clear it for INTx */
	if (msix)
		bfa_reg_write(kva + __ct_msix_err_vec_reg[fn], vec);
	else
		bfa_reg_write(kva + __ct_msix_err_vec_reg[fn], 0);
}
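__ct_msix_err_vec_reg is not shown in these examples; from its use above it is evidently a table of register offsets indexed by PCI function. An assumed shape, with hypothetical offset names rather than the driver's actual symbols:

/*
 * Assumed shape of the lookup table; the CT_FN*_ERR_VEC_REG names are
 * hypothetical placeholders, not the driver's actual definitions.
 */
static u32 __ct_msix_err_vec_reg[] = {
	CT_FN0_ERR_VEC_REG,
	CT_FN1_ERR_VEC_REG,
	CT_FN2_ERR_VEC_REG,
	CT_FN3_ERR_VEC_REG,
};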
Example #3
void
bfa_isr_disable(struct bfa_s *bfa)
{
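	/* mask all interrupt sources and tear down the MSI-X handlers */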
	bfa_isr_mode_set(bfa, BFA_FALSE);
	bfa_reg_write(bfa->iocfc.bfa_regs.intr_mask, -1L);
	bfa_msix_uninstall(bfa);
}
Example #4
void
bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq)
{
	u32	r32;

	/*
	 * Reading the queue control register and writing the value back
	 * acknowledges this RME queue's interrupt.
	 */
	r32 = bfa_reg_read(bfa->iocfc.bfa_regs.rme_q_ctrl[rspq]);
	bfa_reg_write(bfa->iocfc.bfa_regs.rme_q_ctrl[rspq], r32);
}
Example #5
void
bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
{
	u32 intr, curr_value;

	intr = bfa_reg_read(bfa->iocfc.bfa_regs.intr_status);

	if (intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1))
		bfa_msix_lpu(bfa);

	intr &= (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 |
		__HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS | __HFN_INT_LL_HALT);

	if (intr) {
		if (intr & __HFN_INT_LL_HALT) {
			/**
			 * If the LL_HALT bit is set, the FW Init Halt LL Port
			 * register must be cleared as well so that the
			 * Interrupt Status register can be cleared.
			 */
			curr_value = bfa_reg_read(bfa->ioc.ioc_regs.ll_halt);
			curr_value &= ~__FW_INIT_HALT_P;
			bfa_reg_write(bfa->ioc.ioc_regs.ll_halt, curr_value);
		}

		if (intr & __HFN_INT_ERR_PSS) {
			/**
			 * The ERR_PSS bit needs to be cleared as well in case
			 * interrupts are shared, so the driver's interrupt
			 * handler is still called even though it is already
			 * masked out.
			 */
			curr_value = bfa_reg_read(
					bfa->ioc.ioc_regs.pss_err_status_reg);
			curr_value &= __PSS_ERR_STATUS_SET;
			bfa_reg_write(bfa->ioc.ioc_regs.pss_err_status_reg,
					curr_value);
		}

		/* write the handled error bits back to clear them, then dispatch */
		bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, intr);
		bfa_msix_errint(bfa, intr);
	}
}
Example #6
/**
 *  hal_intr_api
 */
bfa_boolean_t
bfa_intx(struct bfa_s *bfa)
{
	u32 intr, qintr;
	int queue;

	intr = bfa_reg_read(bfa->iocfc.bfa_regs.intr_status);
	if (!intr)
		return BFA_FALSE;

	/**
	 * RME completion queue interrupt
	 */
	qintr = intr & __HFN_INT_RME_MASK;
	bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, qintr);

	for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) {
		if (intr & (__HFN_INT_RME_Q0 << queue))
			bfa_msix_rspq(bfa, queue & (BFI_IOC_MAX_CQS - 1));
	}
	intr &= ~qintr;
	if (!intr)
		return BFA_TRUE;

	/**
	 * CPE completion queue interrupt
	 */
	qintr = intr & __HFN_INT_CPE_MASK;
	bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, qintr);

	for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) {
		if (intr & (__HFN_INT_CPE_Q0 << queue))
			bfa_msix_reqq(bfa, queue & (BFI_IOC_MAX_CQS - 1));
	}
	intr &= ~qintr;
	if (!intr)
		return BFA_TRUE;

	bfa_msix_lpu_err(bfa, intr);

	return BFA_TRUE;
}
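In legacy INTx mode, the top-half handler would typically just delegate to bfa_intx() and translate its result for the kernel. A minimal sketch; the handler name and the dev_id convention are assumptions:

static irqreturn_t
bfad_intx_isr(int irq, void *dev_id)
{
	struct bfa_s *bfa = dev_id;

	/* bfa_intx() returns BFA_FALSE when no interrupt was pending for us */
	return bfa_intx(bfa) ? IRQ_HANDLED : IRQ_NONE;
}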
Example #7
void
bfa_msix_rspq(struct bfa_s *bfa, int qid)
{
	struct bfi_msg_s *m;
	u32 pi, ci;
	struct list_head *waitq;

	bfa_trc_fp(bfa, qid);

	qid &= (BFI_IOC_MAX_CQS - 1);

	bfa->iocfc.hwif.hw_rspq_ack(bfa, qid);

	ci = bfa_rspq_ci(bfa, qid);
	pi = bfa_rspq_pi(bfa, qid);

	bfa_trc_fp(bfa, ci);
	bfa_trc_fp(bfa, pi);

	if (bfa->rme_process) {
		while (ci != pi) {
			m = bfa_rspq_elem(bfa, qid, ci);
			bfa_assert_fp(m->mhdr.msg_class < BFI_MC_MAX);

			bfa_isrs[m->mhdr.msg_class] (bfa, m);

			CQ_INCR(ci, bfa->iocfc.cfg.drvcfg.num_rspq_elems);
		}
	}

	/**
	 * update CI
	 */
	bfa_rspq_ci(bfa, qid) = pi;
	bfa_reg_write(bfa->iocfc.bfa_regs.rme_q_ci[qid], pi);
	mmiowb();

	/**
	 * Resume any pending requests in the corresponding reqq.
	 */
	waitq = bfa_reqq(bfa, qid);
	if (!list_empty(waitq))
		bfa_reqq_resume(bfa, qid);
}
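CQ_INCR is not defined in these examples. Since the queue depths involved are powers of two, an equivalent definition would advance the index modulo the depth; an assumed shape, not the driver's verbatim macro:

/* assumed shape of CQ_INCR: advance an index modulo a power-of-two depth */
#define CQ_INCR(_ptr, _items)	do {		\
	(_ptr)++;				\
	(_ptr) &= ((_items) - 1);		\
} while (0)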
Example #8
void
bfa_msix_rspq(struct bfa_s *bfa, int rsp_qid)
{
	struct bfi_msg_s      *m;
	u32        pi, ci;

	bfa_trc_fp(bfa, rsp_qid);

	rsp_qid &= (BFI_IOC_MAX_CQS - 1);

	bfa->iocfc.hwif.hw_rspq_ack(bfa, rsp_qid);

	ci = bfa_rspq_ci(bfa, rsp_qid);
	pi = bfa_rspq_pi(bfa, rsp_qid);

	bfa_trc_fp(bfa, ci);
	bfa_trc_fp(bfa, pi);

	if (bfa->rme_process) {
		while (ci != pi) {
			m = bfa_rspq_elem(bfa, rsp_qid, ci);
			bfa_assert_fp(m->mhdr.msg_class < BFI_MC_MAX);

			bfa_isrs[m->mhdr.msg_class] (bfa, m);

			CQ_INCR(ci, bfa->iocfc.cfg.drvcfg.num_rspq_elems);
		}
	}

	/**
	 * update CI
	 */
	bfa_rspq_ci(bfa, rsp_qid) = pi;
	bfa_reg_write(bfa->iocfc.bfa_regs.rme_q_ci[rsp_qid], pi);
	bfa_os_mmiowb();
}
Example #9
void
bfa_intx_disable(struct bfa_s *bfa)
{
	bfa_reg_write(bfa->iocfc.bfa_regs.intr_mask, -1L);
}
Example #10
void
bfa_intx_enable(struct bfa_s *bfa)
{
	bfa_reg_write(bfa->iocfc.bfa_regs.intr_mask, bfa->iocfc.intr_mask);
}
static void
bfa_hwcb_rspq_ack_msix(struct bfa_s *bfa, int rspq)
{
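	/*
	 * Ack by writing this PCI function's RME queue bit to the
	 * interrupt status register.
	 */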
	bfa_reg_write(bfa->iocfc.bfa_regs.intr_status,
		__HFN_INT_RME_Q0 << RME_Q_NUM(bfa_ioc_pcifn(&bfa->ioc), rspq));
}