static irqreturn_t qedr_irq_handler(int irq, void *handle)
{
	u16 hw_comp_cons, sw_comp_cons;
	struct qedr_cnq *cnq = handle;
	struct regpair *cq_handle;
	struct qedr_cq *cq;

	qed_sb_ack(cnq->sb, IGU_INT_DISABLE, 0);

	qed_sb_update_sb_idx(cnq->sb);

	hw_comp_cons = le16_to_cpu(*cnq->hw_cons_ptr);
	sw_comp_cons = qed_chain_get_cons_idx(&cnq->pbl);

	/* Align protocol-index and chain reads */
	rmb();

	while (sw_comp_cons != hw_comp_cons) {
		cq_handle = (struct regpair *)qed_chain_consume(&cnq->pbl);
		cq = (struct qedr_cq *)(uintptr_t)HILO_U64(cq_handle->hi,
							   cq_handle->lo);

		if (cq == NULL) {
			DP_ERR(cnq->dev,
			       "Received NULL CQ cq_handle->hi=%d cq_handle->lo=%d sw_comp_cons=%d hw_comp_cons=%d\n",
			       cq_handle->hi, cq_handle->lo, sw_comp_cons,
			       hw_comp_cons);

			break;
		}

		if (cq->sig != QEDR_CQ_MAGIC_NUMBER) {
			DP_ERR(cnq->dev,
			       "Problem with cq signature, cq_handle->hi=%d cq_handle->lo=%d cq=%p\n",
			       cq_handle->hi, cq_handle->lo, cq);

			break;
		}

		cq->arm_flags = 0;

		if (!cq->destroyed && cq->ibcq.comp_handler)
			(*cq->ibcq.comp_handler)
				(&cq->ibcq, cq->ibcq.cq_context);

		/* The CQ's CNQ notification counter is checked before
		 * destroying the CQ in a busy-wait loop that waits for all of
		 * the CQ's CNQ interrupts to be processed. It is increased
		 * here, only after the completion handler, to ensure that
		 * the handler is not running when the CQ is destroyed.
		 */
		cq->cnq_notif++;

		sw_comp_cons = qed_chain_get_cons_idx(&cnq->pbl);

		cnq->n_comp++;
	}

	qed_ops->rdma_cnq_prod_update(cnq->dev->rdma_ctx, cnq->index,
				      sw_comp_cons);

	qed_sb_ack(cnq->sb, IGU_INT_ENABLE, 1);

	return IRQ_HANDLED;
}
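/* For context: a minimal sketch (not the driver's actual registration code)
 * of how a per-CNQ handler such as qedr_irq_handler() is typically hooked up
 * to its MSI-X vector with request_irq(), passing the qedr_cnq as the cookie
 * that comes back as 'handle' above. The qedr_setup_cnq_irq() name, the
 * 'vector' parameter and the cnq->name field are illustrative assumptions.
 */
static int qedr_setup_cnq_irq(struct qedr_cnq *cnq, int vector)
{
	int rc;

	/* 'cnq' is handed back to qedr_irq_handler() as its 'handle' arg */
	rc = request_irq(vector, qedr_irq_handler, 0, cnq->name, cnq);
	if (rc)
		DP_ERR(cnq->dev, "request_irq failed, rc = %d\n", rc);

	return rc;
}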
void qed_int_sp_dpc(unsigned long hwfn_cookie)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)hwfn_cookie;
	struct qed_pi_info *pi_info = NULL;
	struct qed_sb_attn_info *sb_attn;
	struct qed_sb_info *sb_info;
	int arr_size;
	u16 rc = 0;

	if (!p_hwfn) {
		/* p_hwfn is NULL here, so it must not be dereferenced */
		pr_err("DPC called - no hwfn!\n");
		return;
	}

	if (!p_hwfn->p_sp_sb) {
		DP_ERR(p_hwfn->cdev, "DPC called - no p_sp_sb\n");
		return;
	}

	sb_info = &p_hwfn->p_sp_sb->sb_info;
	arr_size = ARRAY_SIZE(p_hwfn->p_sp_sb->pi_info_arr);
	if (!sb_info) {
		DP_ERR(p_hwfn->cdev,
		       "Status block is NULL - cannot ack interrupts\n");
		return;
	}

	if (!p_hwfn->p_sb_attn) {
		DP_ERR(p_hwfn->cdev, "DPC called - no p_sb_attn\n");
		return;
	}
	sb_attn = p_hwfn->p_sb_attn;

	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "DPC Called! (hwfn %p %d)\n",
		   p_hwfn, p_hwfn->my_id);

	/* Disable ack for the default status block. Required both for MSI-X
	 * and INTA in non-mask mode; with INTA it does no harm.
	 */
	qed_sb_ack(sb_info, IGU_INT_DISABLE, 0);

	/* Gather Interrupts/Attentions information */
	if (!sb_info->sb_virt) {
		DP_ERR(p_hwfn->cdev,
		       "Interrupt Status block is NULL - cannot check for new interrupts!\n");
	} else {
		u32 tmp_index = sb_info->sb_ack;

		rc = qed_sb_update_sb_idx(sb_info);
		DP_VERBOSE(p_hwfn->cdev, NETIF_MSG_INTR,
			   "Interrupt indices: 0x%08x --> 0x%08x\n",
			   tmp_index, sb_info->sb_ack);
	}

	if (!sb_attn || !sb_attn->sb_attn) {
		DP_ERR(p_hwfn->cdev,
		       "Attentions Status block is NULL - cannot check for new attentions!\n");
	} else {
		u16 tmp_index = sb_attn->index;

		rc |= qed_attn_update_idx(p_hwfn, sb_attn);
		DP_VERBOSE(p_hwfn->cdev, NETIF_MSG_INTR,
			   "Attention indices: 0x%08x --> 0x%08x\n",
			   tmp_index, sb_attn->index);
	}

	/* Check if we expect interrupts at this time. If not, just ack them */
	if (!(rc & QED_SB_EVENT_MASK)) {
		qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
		return;
	}

	/* Check the validity of the DPC ptt. If not valid, ack interrupts
	 * and return.
	 */
	if (!p_hwfn->p_dpc_ptt) {
		DP_NOTICE(p_hwfn->cdev, "Failed to allocate PTT\n");
		qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
		return;
	}

	if (rc & QED_SB_ATT_IDX)
		qed_int_attentions(p_hwfn);

	if (rc & QED_SB_IDX) {
		int pi;

		/* Invoke every registered protocol-index completion callback */
		for (pi = 0; pi < arr_size; pi++) {
			pi_info = &p_hwfn->p_sp_sb->pi_info_arr[pi];
			if (pi_info->comp_cb)
				pi_info->comp_cb(p_hwfn, pi_info->cookie);
		}
	}

	if (sb_attn && (rc & QED_SB_ATT_IDX))
		/* This should be done before the interrupts are enabled,
		 * since otherwise a new attention will be generated.
		 */
		qed_sb_ack_attn(p_hwfn, sb_info->igu_addr, sb_attn->index);

	qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
}
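/* For context: a minimal sketch (not the driver's exact setup code) of how a
 * DPC such as qed_int_sp_dpc() is typically bound to a tasklet via the
 * classic tasklet_init() API, passing the hwfn pointer as the unsigned long
 * cookie that is cast back at the top of the function above. The
 * p_hwfn->sp_dpc field and the *_sketch() function names are assumptions for
 * illustration; the hard-IRQ handler would call tasklet_schedule() and let
 * the DPC do the heavy work outside hard-IRQ context.
 */
static void qed_int_sp_dpc_setup_sketch(struct qed_hwfn *p_hwfn)
{
	/* Assumes p_hwfn->sp_dpc points at a pre-allocated tasklet_struct */
	tasklet_init(p_hwfn->sp_dpc, qed_int_sp_dpc,
		     (unsigned long)p_hwfn);
}

static void qed_int_sp_dpc_schedule_sketch(struct qed_hwfn *p_hwfn)
{
	/* Called from the ISR; defers processing to qed_int_sp_dpc() */
	tasklet_schedule(p_hwfn->sp_dpc);
}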