static inline void process_eqe(struct ehca_shca *shca, struct ehca_eqe *eqe)
{
	u64 eqe_value;
	u32 token;
	struct ehca_cq *cq;

	eqe_value = eqe->entry;
	ehca_dbg(&shca->ib_device, "eqe_value=%llx", eqe_value);
	if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) {
		ehca_dbg(&shca->ib_device, "Got completion event");
		token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
		read_lock(&ehca_cq_idr_lock);
		cq = idr_find(&ehca_cq_idr, token);
		/* take a reference so the CQ cannot be destroyed
		 * while this event is being processed */
		if (cq)
			atomic_inc(&cq->nr_events);
		read_unlock(&ehca_cq_idr_lock);
		if (cq == NULL) {
			ehca_err(&shca->ib_device,
				 "Invalid eqe for non-existing cq token=%x",
				 token);
			return;
		}
		reset_eq_pending(cq);
		if (ehca_scaling_code)
			queue_comp_task(cq);
		else {
			comp_event_callback(cq);
			/* drop the reference; wake a waiter (e.g. destroy_cq)
			 * if this was the last pending event */
			if (atomic_dec_and_test(&cq->nr_events))
				wake_up(&cq->wait_completion);
		}
	} else {
		ehca_dbg(&shca->ib_device, "Got non completion event");
		parse_identifier(shca, eqe_value);
	}
}
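The atomic nr_events counter above acts as a per-CQ count of in-flight events: it is taken while ehca_cq_idr_lock is held for reading, and whoever tears the CQ down has to wait for it to drain. A minimal sketch of that destroy-side counterpart, assuming ehca_cq_idr_lock is taken for writing on removal, that the CQ stores its idr token in my_cq->token, and that the surrounding error handling is elided (illustrative, not verbatim driver code):

	unsigned long flags;

	/* remove the CQ from the idr so no new event can look it up ... */
	write_lock_irqsave(&ehca_cq_idr_lock, flags);
	idr_remove(&ehca_cq_idr, my_cq->token);
	write_unlock_irqrestore(&ehca_cq_idr_lock, flags);

	/* ... then wait until every in-flight event handler has dropped its
	 * reference and issued the wake_up on cq->wait_completion */
	wait_event(my_cq->wait_completion, !atomic_read(&my_cq->nr_events));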
u64 ehca_define_sqp(struct ehca_shca *shca,
		    struct ehca_qp *ehca_qp,
		    struct ib_qp_init_attr *qp_init_attr)
{
	u32 pma_qp_nr, bma_qp_nr;
	u64 ret;
	u8 port = qp_init_attr->port_num;
	int counter;

	shca->sport[port - 1].port_state = IB_PORT_DOWN;

	switch (qp_init_attr->qp_type) {
	case IB_QPT_SMI:
		/* function not supported yet */
		break;

	case IB_QPT_GSI:
		ret = hipz_h_define_aqp1(shca->ipz_hca_handle,
					 ehca_qp->ipz_qp_handle,
					 ehca_qp->galpas.kernel,
					 (u32) qp_init_attr->port_num,
					 &pma_qp_nr, &bma_qp_nr);

		if (ret != H_SUCCESS) {
			ehca_err(&shca->ib_device,
				 "Can't define AQP1 for port %x. h_ret=%lli",
				 port, ret);
			return ret;
		}
		shca->sport[port - 1].pma_qp_nr = pma_qp_nr;
		ehca_dbg(&shca->ib_device, "port=%x pma_qp_nr=%x",
			 port, pma_qp_nr);
		break;

	default:
		ehca_err(&shca->ib_device, "invalid qp_type=%x",
			 qp_init_attr->qp_type);
		return H_PARAMETER;
	}

	if (ehca_nr_ports < 0) /* autodetect mode */
		return H_SUCCESS;

	for (counter = 0;
	     shca->sport[port - 1].port_state != IB_PORT_ACTIVE &&
		     counter < ehca_port_act_time;
	     counter++) {
		ehca_dbg(&shca->ib_device, "... wait until port %x is active",
			 port);
		msleep_interruptible(1000);
	}

	if (counter == ehca_port_act_time) {
		ehca_err(&shca->ib_device, "Port %x is not active.", port);
		return H_HARDWARE;
	}

	return H_SUCCESS;
}
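ehca_define_sqp() only does real work for the GSI QP (AQP1); the SMI case is a stub and any other QP type is rejected with H_PARAMETER. A hedged sketch of a call site creating the GSI QP follows; the my_qp/init_attr names and the error-handling placeholder are assumptions, not the driver's exact code:

	/* hypothetical, simplified caller: register AQP1 for a freshly
	 * created GSI QP and let ehca_define_sqp() wait for the port */
	if (init_attr->qp_type == IB_QPT_GSI) {
		u64 h_ret = ehca_define_sqp(shca, my_qp, init_attr);

		if (h_ret != H_SUCCESS) {
			/* map h_ret to an errno and unwind QP creation */
		}
	}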
static inline void process_eqe(struct ehca_shca *shca, struct ehca_eqe *eqe)
{
	u64 eqe_value;
	u32 token;
	unsigned long flags;
	struct ehca_cq *cq;

	eqe_value = eqe->entry;
	ehca_dbg(&shca->ib_device, "eqe_value=%llx", eqe_value);
	if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) {
		ehca_dbg(&shca->ib_device, "Got completion event");
		token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
		spin_lock_irqsave(&ehca_cq_idr_lock, flags);
		cq = idr_find(&ehca_cq_idr, token);
		if (cq == NULL) {
			spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
			ehca_err(&shca->ib_device,
				 "Invalid eqe for non-existing cq token=%x",
				 token);
			return;
		}
		reset_eq_pending(cq);
		/* in this variant nr_events is a plain counter protected by
		 * ehca_cq_idr_lock; it tracks events still being processed */
		cq->nr_events++;
		spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
		if (ehca_scaling_code)
			queue_comp_task(cq);
		else {
			comp_event_callback(cq);
			spin_lock_irqsave(&ehca_cq_idr_lock, flags);
			cq->nr_events--;
			/* wake a waiter (e.g. destroy_cq) once the last
			 * pending event has been handled */
			if (!cq->nr_events)
				wake_up(&cq->wait_completion);
			spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
		}
	} else {
		ehca_dbg(&shca->ib_device, "Got non completion event");
		parse_identifier(shca, eqe_value);
	}
}
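Both variants of process_eqe() hand the completion to comp_event_callback() when the scaling code is disabled. As a rough sketch, and assuming struct ehca_cq embeds its ib_cq plus a cb_lock serializing callbacks (those field names are assumptions), that dispatch amounts to invoking the consumer's completion handler:

	/* sketch only: call the consumer's completion handler, if any,
	 * serialized per CQ */
	static void comp_event_callback(struct ehca_cq *cq)
	{
		if (!cq->ib_cq.comp_handler)
			return;

		spin_lock(&cq->cb_lock);
		cq->ib_cq.comp_handler(&cq->ib_cq, cq->ib_cq.cq_context);
		spin_unlock(&cq->cb_lock);
	}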
void ehca_process_eq(struct ehca_shca *shca, int is_irq)
{
	struct ehca_eq *eq = &shca->eq;
	struct ehca_eqe_cache_entry *eqe_cache = eq->eqe_cache;
	u64 eqe_value, ret;
	unsigned long flags;
	int eqe_cnt, i;
	int eq_empty = 0;

	spin_lock_irqsave(&eq->irq_spinlock, flags);
	if (is_irq) {
		const int max_query_cnt = 100;
		int query_cnt = 0;
		int int_state = 1;
		do {
			int_state = hipz_h_query_int_state(
				shca->ipz_hca_handle, eq->ist);
			query_cnt++;
			iosync();
		} while (int_state && query_cnt < max_query_cnt);
		if (unlikely((query_cnt == max_query_cnt)))
			ehca_dbg(&shca->ib_device, "int_state=%x query_cnt=%x",
				 int_state, query_cnt);
	}

	/* read out all eqes */
	eqe_cnt = 0;
	do {
		u32 token;
		eqe_cache[eqe_cnt].eqe = ehca_poll_eq(shca, eq);
		if (!eqe_cache[eqe_cnt].eqe)
			break;
		eqe_value = eqe_cache[eqe_cnt].eqe->entry;
		if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) {
			token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
			read_lock(&ehca_cq_idr_lock);
			eqe_cache[eqe_cnt].cq = idr_find(&ehca_cq_idr, token);
			if (eqe_cache[eqe_cnt].cq)
				atomic_inc(&eqe_cache[eqe_cnt].cq->nr_events);
			read_unlock(&ehca_cq_idr_lock);
			if (!eqe_cache[eqe_cnt].cq) {
				ehca_err(&shca->ib_device,
					 "Invalid eqe for non-existing cq "
					 "token=%x", token);
				continue;
			}
		} else
			eqe_cache[eqe_cnt].cq = NULL;
		eqe_cnt++;
	} while (eqe_cnt < EHCA_EQE_CACHE_SIZE);
	if (!eqe_cnt) {
		if (is_irq)
			ehca_dbg(&shca->ib_device,
				 "No eqe found for irq event");
		goto unlock_irq_spinlock;
	} else if (!is_irq) {
		ret = hipz_h_eoi(eq->ist);
		if (ret != H_SUCCESS)
			ehca_err(&shca->ib_device,
				 "bad return code EOI -rc = %lld\n", ret);
		ehca_dbg(&shca->ib_device, "deadman found %x eqe", eqe_cnt);
	}
	if (unlikely(eqe_cnt == EHCA_EQE_CACHE_SIZE))
		ehca_dbg(&shca->ib_device, "too many eqes for one irq event");

	/* enable irq for new packets */
	for (i = 0; i < eqe_cnt; i++) {
		if (eq->eqe_cache[i].cq)
			reset_eq_pending(eq->eqe_cache[i].cq);
	}

	/* check eq */
	spin_lock(&eq->spinlock);
	eq_empty = (!ipz_eqit_eq_peek_valid(&shca->eq.ipz_queue));
	spin_unlock(&eq->spinlock);

	/* call completion handler for cached eqes */
	for (i = 0; i < eqe_cnt; i++)
		if (eq->eqe_cache[i].cq) {
			if (ehca_scaling_code)
				queue_comp_task(eq->eqe_cache[i].cq);
			else {
				struct ehca_cq *cq = eq->eqe_cache[i].cq;
				comp_event_callback(cq);
				if (atomic_dec_and_test(&cq->nr_events))
					wake_up(&cq->wait_completion);
			}
		} else {
			ehca_dbg(&shca->ib_device, "Got non completion event");
			parse_identifier(shca,
					 eq->eqe_cache[i].eqe->entry);
		}

	/* poll eq if not empty */
	if (eq_empty)
		goto unlock_irq_spinlock;
	do {
		struct ehca_eqe *eqe;
		eqe = ehca_poll_eq(shca, &shca->eq);
		if (!eqe)
			break;
		process_eqe(shca, eqe);
	} while (1);

unlock_irq_spinlock:
	spin_unlock_irqrestore(&eq->irq_spinlock, flags);
}
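ehca_process_eq() is written to be driven from two directions: an interrupt path (is_irq=1) and a non-interrupt poller (is_irq=0) that issues the explicit EOI seen above. A hedged sketch of the interrupt-side wiring, assuming an interrupt_task tasklet in struct ehca_eq and the usual request_irq() handler signature (illustrative, not necessarily the driver's exact code):

	/* hard interrupt: just kick the tasklet */
	irqreturn_t ehca_interrupt_eq(int irq, void *dev_id)
	{
		struct ehca_shca *shca = (struct ehca_shca *)dev_id;

		tasklet_hi_schedule(&shca->eq.interrupt_task);

		return IRQ_HANDLED;
	}

	/* tasklet: process the EQ in interrupt mode */
	void ehca_tasklet_eq(unsigned long data)
	{
		ehca_process_eq((struct ehca_shca *)data, 1);
	}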