int hfi1_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, struct ib_qp_init_attr *init_attr) { struct hfi1_qp *qp = to_iqp(ibqp); attr->qp_state = qp->state; attr->cur_qp_state = attr->qp_state; attr->path_mtu = qp->path_mtu; attr->path_mig_state = qp->s_mig_state; attr->qkey = qp->qkey; attr->rq_psn = mask_psn(qp->r_psn); attr->sq_psn = mask_psn(qp->s_next_psn); attr->dest_qp_num = qp->remote_qpn; attr->qp_access_flags = qp->qp_access_flags; attr->cap.max_send_wr = qp->s_size - 1; attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1; attr->cap.max_send_sge = qp->s_max_sge; attr->cap.max_recv_sge = qp->r_rq.max_sge; attr->cap.max_inline_data = 0; attr->ah_attr = qp->remote_ah_attr; attr->alt_ah_attr = qp->alt_ah_attr; attr->pkey_index = qp->s_pkey_index; attr->alt_pkey_index = qp->s_alt_pkey_index; attr->en_sqd_async_notify = 0; attr->sq_draining = qp->s_draining; attr->max_rd_atomic = qp->s_max_rd_atomic; attr->max_dest_rd_atomic = qp->r_max_rd_atomic; attr->min_rnr_timer = qp->r_min_rnr_timer; attr->port_num = qp->port_num; attr->timeout = qp->timeout; attr->retry_cnt = qp->s_retry_cnt; attr->rnr_retry = qp->s_rnr_retry_cnt; attr->alt_port_num = qp->alt_ah_attr.port_num; attr->alt_timeout = qp->alt_timeout; init_attr->event_handler = qp->ibqp.event_handler; init_attr->qp_context = qp->ibqp.qp_context; init_attr->send_cq = qp->ibqp.send_cq; init_attr->recv_cq = qp->ibqp.recv_cq; init_attr->srq = qp->ibqp.srq; init_attr->cap = attr->cap; if (qp->s_flags & HFI1_S_SIGNAL_REQ_WR) init_attr->sq_sig_type = IB_SIGNAL_REQ_WR; else init_attr->sq_sig_type = IB_SIGNAL_ALL_WR; init_attr->qp_type = qp->ibqp.qp_type; init_attr->port_num = qp->port_num; return 0; }
/**
 * hfi1_make_uc_req - construct a request packet (SEND, RDMA write)
 * @qp: a pointer to the QP
 * @ps: per-send packet state; receives the allocated tx request
 *
 * Assume s_lock is held.
 *
 * Drives the UC send-side state machine: picks up (or continues) the WQE
 * at s_cur, fills in the opcode-specific header words, and hands the
 * header off via hfi1_make_ruc_header().
 *
 * Return 1 if constructed; otherwise, return 0.
 */
int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct ib_other_headers *ohdr;
	struct rvt_swqe *wqe;
	u32 hwords;		/* header length in 32-bit words */
	u32 bth0 = 0;
	u32 len;		/* payload length of this packet */
	u32 pmtu = qp->pmtu;
	int middle = 0;

	ps->s_txreq = get_txreq(ps->dev, qp);
	if (IS_ERR(ps->s_txreq))
		goto bail_no_tx;

	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
		if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
			goto bail;
		/* We are in the error state, flush the work request. */
		if (qp->s_last == READ_ONCE(qp->s_head))
			goto bail;
		/* If DMAs are in progress, we can't flush immediately. */
		if (iowait_sdma_pending(&priv->s_iowait)) {
			qp->s_flags |= RVT_S_WAIT_DMA;
			goto bail;
		}
		clear_ahg(qp);
		wqe = rvt_get_swqe_ptr(qp, qp->s_last);
		hfi1_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
		goto done_free_tx;
	}

	/*
	 * Pick the header layout (9B vs 16B) and whether a GRH is present;
	 * ohdr points at the opcode-specific portion that follows LRH+BTH.
	 */
	if (priv->hdr_type == HFI1_PKT_TYPE_9B) {
		/* header size in 32-bit words LRH+BTH = (8+12)/4. */
		hwords = 5;
		if (rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)
			ohdr = &ps->s_txreq->phdr.hdr.ibh.u.l.oth;
		else
			ohdr = &ps->s_txreq->phdr.hdr.ibh.u.oth;
	} else {
		/* header size in 32-bit words 16B LRH+BTH = (16+12)/4. */
		hwords = 7;
		if ((rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH) &&
		    (hfi1_check_mcast(rdma_ah_get_dlid(&qp->remote_ah_attr))))
			ohdr = &ps->s_txreq->phdr.hdr.opah.u.l.oth;
		else
			ohdr = &ps->s_txreq->phdr.hdr.opah.u.oth;
	}

	/* Get the next send request. */
	wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
	/*
	 * s_wqe stays NULL for FIRST/MIDDLE packets; it is only set on the
	 * packet that finishes a WQE (ONLY/LAST variants below).
	 */
	qp->s_wqe = NULL;
	switch (qp->s_state) {
	default:
		/* Not mid-WQE: try to start a brand-new request. */
		if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK))
			goto bail;
		/* Check if send work queue is empty. */
		if (qp->s_cur == READ_ONCE(qp->s_head)) {
			clear_ahg(qp);
			goto bail;
		}
		/*
		 * Local operations are processed immediately
		 * after all prior requests have completed.
		 */
		if (wqe->wr.opcode == IB_WR_REG_MR ||
		    wqe->wr.opcode == IB_WR_LOCAL_INV) {
			int local_ops = 0;
			int err = 0;

			/* Wait until everything ahead of us has completed. */
			if (qp->s_last != qp->s_cur)
				goto bail;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			if (!(wqe->wr.send_flags & RVT_SEND_COMPLETION_ONLY)) {
				err = rvt_invalidate_rkey(
					qp, wqe->wr.ex.invalidate_rkey);
				local_ops = 1;
			}
			hfi1_send_complete(qp, wqe, err ? IB_WC_LOC_PROT_ERR
							: IB_WC_SUCCESS);
			if (local_ops)
				atomic_dec(&qp->local_ops_pending);
			/* No packet goes on the wire for local ops. */
			goto done_free_tx;
		}
		/*
		 * Start a new request.
		 */
		qp->s_psn = wqe->psn;
		qp->s_sge.sge = wqe->sg_list[0];
		qp->s_sge.sg_list = wqe->sg_list + 1;
		qp->s_sge.num_sge = wqe->wr.num_sge;
		qp->s_sge.total_len = wqe->length;
		len = wqe->length;
		qp->s_len = len;
		switch (wqe->wr.opcode) {
		case IB_WR_SEND:
		case IB_WR_SEND_WITH_IMM:
			if (len > pmtu) {
				/* Won't fit in one packet; start a segment. */
				qp->s_state = OP(SEND_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_SEND) {
				qp->s_state = OP(SEND_ONLY);
			} else {
				qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after the BTH */
				ohdr->u.imm_data = wqe->wr.ex.imm_data;
				hwords += 1;
			}
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= IB_BTH_SOLICITED;
			/* WQE completes with this packet. */
			qp->s_wqe = wqe;
			if (++qp->s_cur >= qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_RDMA_WRITE:
		case IB_WR_RDMA_WRITE_WITH_IMM:
			/* RETH carries the remote buffer description. */
			ohdr->u.rc.reth.vaddr =
				cpu_to_be64(wqe->rdma_wr.remote_addr);
			ohdr->u.rc.reth.rkey =
				cpu_to_be32(wqe->rdma_wr.rkey);
			ohdr->u.rc.reth.length = cpu_to_be32(len);
			hwords += sizeof(struct ib_reth) / 4;
			if (len > pmtu) {
				qp->s_state = OP(RDMA_WRITE_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_RDMA_WRITE) {
				qp->s_state = OP(RDMA_WRITE_ONLY);
			} else {
				qp->s_state =
					OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after the RETH */
				ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
				hwords += 1;
				/* Solicited only applies with immediate. */
				if (wqe->wr.send_flags & IB_SEND_SOLICITED)
					bth0 |= IB_BTH_SOLICITED;
			}
			/* WQE completes with this packet. */
			qp->s_wqe = wqe;
			if (++qp->s_cur >= qp->s_size)
				qp->s_cur = 0;
			break;

		default:
			/* Opcode not valid on a UC QP. */
			goto bail;
		}
		break;

	case OP(SEND_FIRST):
		qp->s_state = OP(SEND_MIDDLE);
		/* FALLTHROUGH */
	case OP(SEND_MIDDLE):
		/* Continue segmenting the current send WQE. */
		len = qp->s_len;
		if (len > pmtu) {
			len = pmtu;
			/* More segments follow; AHG can reuse the header. */
			middle = HFI1_CAP_IS_KSET(SDMA_AHG);
			break;
		}
		if (wqe->wr.opcode == IB_WR_SEND) {
			qp->s_state = OP(SEND_LAST);
		} else {
			qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.ex.imm_data;
			hwords += 1;
		}
		if (wqe->wr.send_flags & IB_SEND_SOLICITED)
			bth0 |= IB_BTH_SOLICITED;
		/* WQE completes with this packet. */
		qp->s_wqe = wqe;
		if (++qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;

	case OP(RDMA_WRITE_FIRST):
		qp->s_state = OP(RDMA_WRITE_MIDDLE);
		/* FALLTHROUGH */
	case OP(RDMA_WRITE_MIDDLE):
		/* Continue segmenting the current RDMA write WQE. */
		len = qp->s_len;
		if (len > pmtu) {
			len = pmtu;
			middle = HFI1_CAP_IS_KSET(SDMA_AHG);
			break;
		}
		if (wqe->wr.opcode == IB_WR_RDMA_WRITE) {
			qp->s_state = OP(RDMA_WRITE_LAST);
		} else {
			qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.ex.imm_data;
			hwords += 1;
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= IB_BTH_SOLICITED;
		}
		/* WQE completes with this packet. */
		qp->s_wqe = wqe;
		if (++qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;
	}
	qp->s_len -= len;
	ps->s_txreq->hdr_dwords = hwords;
	ps->s_txreq->sde = priv->s_sde;
	ps->s_txreq->ss = &qp->s_sge;
	ps->s_txreq->s_cur_size = len;
	/* Opcode lives in BTH bits 24-31; PSN advances per packet. */
	hfi1_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24),
			     mask_psn(qp->s_psn++), middle, ps);
	return 1;

done_free_tx:
	hfi1_put_txreq(ps->s_txreq);
	ps->s_txreq = NULL;
	/*
	 * Return 1 so the caller keeps iterating: the flushed/local-op WQE
	 * was consumed even though no packet was built.
	 */
	return 1;

bail:
	hfi1_put_txreq(ps->s_txreq);
bail_no_tx:
	ps->s_txreq = NULL;
	/* Nothing to send now; drop BUSY so the QP can be rescheduled. */
	qp->s_flags &= ~RVT_S_BUSY;
	return 0;
}