static int fill_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
		       struct rxe_pkt_info *pkt, struct sk_buff *skb,
		       int paylen)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	u32 crc = 0;
	u32 *p;
	int err;

	err = rxe_prepare(rxe, pkt, skb, &crc);
	if (err)
		return err;

	if (pkt->mask & RXE_WRITE_OR_SEND) {
		if (wqe->wr.send_flags & IB_SEND_INLINE) {
			u8 *tmp = &wqe->dma.inline_data[wqe->dma.sge_offset];

			crc = rxe_crc32(rxe, crc, tmp, paylen);
			memcpy(payload_addr(pkt), tmp, paylen);

			wqe->dma.resid -= paylen;
			wqe->dma.sge_offset += paylen;
		} else {
			err = copy_data(rxe, qp->pd, 0, &wqe->dma,
					payload_addr(pkt), paylen,
					from_mem_obj, &crc);
			if (err)
				return err;
		}
	}
	p = payload_addr(pkt) + paylen + bth_pad(pkt);

	*p = ~crc;

	return 0;
}
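/*
 * A minimal standalone sketch (not part of the rxe driver) of the trailer
 * layout fill_packet() produces: the payload, then bth_pad() pad bytes to
 * reach a 4-byte boundary, then the 32-bit ICRC stored as the bitwise
 * complement of the running CRC. The names demo_place_icrc and buf are
 * hypothetical; what the CRC covers is left out here.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void demo_place_icrc(uint8_t *payload_start, int paylen, uint32_t crc)
{
	int pad = (-paylen) & 0x3;	/* same rounding rule as the driver */
	uint32_t icrc = ~crc;		/* ICRC is the complemented CRC */

	memset(payload_start + paylen, 0, pad);	/* pad bytes before the ICRC */
	memcpy(payload_start + paylen + pad, &icrc, sizeof(icrc));
}

int main(void)
{
	uint8_t buf[64] = { 0 };

	demo_place_icrc(buf, 5, 0x12345678);	/* 5-byte payload -> 3 pad bytes */
	printf("icrc at offset %d\n", 5 + ((-5) & 0x3));
	return 0;
}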
static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp,
					  struct rxe_pkt_info *pkt,
					  struct rxe_pkt_info *ack,
					  int opcode,
					  int payload,
					  u32 psn,
					  u8 syndrome,
					  u32 *crcp)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct sk_buff *skb;
	u32 crc = 0;
	u32 *p;
	int paylen;
	int pad;
	int err;

	/*
	 * allocate packet
	 */
	pad = (-payload) & 0x3;
	paylen = rxe_opcode[opcode].length + payload + pad + RXE_ICRC_SIZE;

	skb = rxe_init_packet(rxe, &qp->pri_av, paylen, ack);
	if (!skb)
		return NULL;

	ack->qp = qp;
	ack->opcode = opcode;
	ack->mask = rxe_opcode[opcode].mask;
	ack->offset = pkt->offset;
	ack->paylen = paylen;

	/* fill in bth using the request packet headers */
	memcpy(ack->hdr, pkt->hdr, pkt->offset + RXE_BTH_BYTES);

	bth_set_opcode(ack, opcode);
	bth_set_qpn(ack, qp->attr.dest_qp_num);
	bth_set_pad(ack, pad);
	bth_set_se(ack, 0);
	bth_set_psn(ack, psn);
	bth_set_ack(ack, 0);
	ack->psn = psn;

	if (ack->mask & RXE_AETH_MASK) {
		aeth_set_syn(ack, syndrome);
		aeth_set_msn(ack, qp->resp.msn);
	}

	if (ack->mask & RXE_ATMACK_MASK)
		atmack_set_orig(ack, qp->resp.atomic_orig);

	err = rxe_prepare(rxe, ack, skb, &crc);
	if (err) {
		kfree_skb(skb);
		return NULL;
	}

	if (crcp) {
		/* CRC computation will be continued by the caller */
		*crcp = crc;
	} else {
		p = payload_addr(ack) + payload + bth_pad(ack);
		*p = ~crc;
	}

	return skb;
}
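/*
 * A hedged sketch (not driver code) of what aeth_set_syn()/aeth_set_msn()
 * accomplish above: the IBA AETH is a single 32-bit word carrying the 8-bit
 * syndrome in the top byte and the 24-bit MSN below it (byte-swapping to
 * network order is omitted). demo_aeth_pack and its constants are
 * hypothetical names, not the driver's helpers.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_AETH_SYN_SHIFT	24
#define DEMO_AETH_MSN_MASK	0x00ffffffu

static uint32_t demo_aeth_pack(uint8_t syndrome, uint32_t msn)
{
	return ((uint32_t)syndrome << DEMO_AETH_SYN_SHIFT) |
	       (msn & DEMO_AETH_MSN_MASK);
}

int main(void)
{
	/* e.g. an ACK syndrome with unlimited credits and MSN 7 */
	printf("aeth = 0x%08x\n", demo_aeth_pack(0x1f, 7));
	return 0;
}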
/* RDMA read response. If res is not NULL, then we have a current RDMA request
 * being processed or replayed.
 */
static enum resp_states read_reply(struct rxe_qp *qp,
				   struct rxe_pkt_info *req_pkt)
{
	struct rxe_pkt_info ack_pkt;
	struct sk_buff *skb;
	int mtu = qp->mtu;
	enum resp_states state;
	int payload;
	int opcode;
	int err;
	struct resp_res *res = qp->resp.res;
	u32 icrc;
	u32 *p;

	if (!res) {
		/* This is the first time we process that request. Get a
		 * resource
		 */
		res = &qp->resp.resources[qp->resp.res_head];

		free_rd_atomic_resource(qp, res);
		rxe_advance_resp_resource(qp);

		res->type = RXE_READ_MASK;
		res->replay = 0;

		res->read.va = qp->resp.va;
		res->read.va_org = qp->resp.va;

		res->first_psn = req_pkt->psn;

		if (reth_len(req_pkt)) {
			res->last_psn = (req_pkt->psn +
					 (reth_len(req_pkt) + mtu - 1) /
					 mtu - 1) & BTH_PSN_MASK;
		} else {
			res->last_psn = res->first_psn;
		}
		res->cur_psn = req_pkt->psn;

		res->read.resid = qp->resp.resid;
		res->read.length = qp->resp.resid;
		res->read.rkey = qp->resp.rkey;

		/* note res inherits the reference to mr from qp */
		res->read.mr = qp->resp.mr;
		qp->resp.mr = NULL;

		qp->resp.res = res;
		res->state = rdatm_res_state_new;
	}

	if (res->state == rdatm_res_state_new) {
		if (res->read.resid <= mtu)
			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY;
		else
			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST;
	} else {
		if (res->read.resid > mtu)
			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE;
		else
			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST;
	}
	res->state = rdatm_res_state_next;

	payload = min_t(int, res->read.resid, mtu);

	skb = prepare_ack_packet(qp, req_pkt, &ack_pkt, opcode, payload,
				 res->cur_psn, AETH_ACK_UNLIMITED, &icrc);
	if (!skb)
		return RESPST_ERR_RNR;

	err = rxe_mem_copy(res->read.mr, res->read.va, payload_addr(&ack_pkt),
			   payload, from_mem_obj, &icrc);
	if (err)
		pr_err("Failed copying memory\n");

	p = payload_addr(&ack_pkt) + payload + bth_pad(&ack_pkt);
	*p = ~icrc;

	err = rxe_xmit_packet(qp, &ack_pkt, skb);
	if (err) {
		pr_err("Failed sending RDMA reply.\n");
		return RESPST_ERR_RNR;
	}

	res->read.va += payload;
	res->read.resid -= payload;
	res->cur_psn = (res->cur_psn + 1) & BTH_PSN_MASK;

	if (res->read.resid > 0) {
		state = RESPST_DONE;
	} else {
		qp->resp.res = NULL;
		if (!res->replay)
			qp->resp.opcode = -1;
		if (psn_compare(res->cur_psn, qp->resp.psn) >= 0)
			qp->resp.psn = res->cur_psn;
		state = RESPST_CLEANUP;
	}

	return state;
}
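/*
 * A standalone sketch (not driver code) of the 24-bit PSN arithmetic used
 * above: last_psn is first_psn advanced by ceil(len / mtu) - 1 packets and
 * masked to 24 bits, and comparisons must tolerate wraparound the way the
 * driver's psn_compare() does, by shifting the circular difference into the
 * top bits of a signed 32-bit value. demo_last_psn and demo_psn_cmp are
 * hypothetical names.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_PSN_MASK	0xffffffu

static uint32_t demo_last_psn(uint32_t first_psn, uint32_t len, uint32_t mtu)
{
	uint32_t npkts = (len + mtu - 1) / mtu;	/* ceil(len / mtu) */

	return (first_psn + npkts - 1) & DEMO_PSN_MASK;
}

static int demo_psn_cmp(uint32_t a, uint32_t b)
{
	/* sign-extend the 24-bit circular difference */
	return (int32_t)((a - b) << 8);
}

int main(void)
{
	/* a 10000-byte read at 1024 MTU starting near the PSN wrap point */
	uint32_t last = demo_last_psn(0xfffffa, 10000, 1024);

	printf("last_psn = 0x%06x, later = %d\n", last,
	       demo_psn_cmp(last, 0xfffffa) > 0);
	return 0;
}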
static enum resp_states check_rkey(struct rxe_qp *qp,
				   struct rxe_pkt_info *pkt)
{
	struct rxe_mem *mem = NULL;
	u64 va;
	u32 rkey;
	u32 resid;
	u32 pktlen;
	int mtu = qp->mtu;
	enum resp_states state;
	int access;

	if (pkt->mask & (RXE_READ_MASK | RXE_WRITE_MASK)) {
		if (pkt->mask & RXE_RETH_MASK) {
			qp->resp.va = reth_va(pkt);
			qp->resp.rkey = reth_rkey(pkt);
			qp->resp.resid = reth_len(pkt);
		}
		access = (pkt->mask & RXE_READ_MASK) ? IB_ACCESS_REMOTE_READ
						     : IB_ACCESS_REMOTE_WRITE;
	} else if (pkt->mask & RXE_ATOMIC_MASK) {
		qp->resp.va = atmeth_va(pkt);
		qp->resp.rkey = atmeth_rkey(pkt);
		qp->resp.resid = sizeof(u64);
		access = IB_ACCESS_REMOTE_ATOMIC;
	} else {
		return RESPST_EXECUTE;
	}

	/* A zero-byte op is not required to set an addr or rkey. */
	if ((pkt->mask & (RXE_READ_MASK | RXE_WRITE_OR_SEND)) &&
	    (pkt->mask & RXE_RETH_MASK) &&
	    reth_len(pkt) == 0) {
		return RESPST_EXECUTE;
	}

	va = qp->resp.va;
	rkey = qp->resp.rkey;
	resid = qp->resp.resid;
	pktlen = payload_size(pkt);

	mem = lookup_mem(qp->pd, access, rkey, lookup_remote);
	if (!mem) {
		state = RESPST_ERR_RKEY_VIOLATION;
		goto err;
	}

	if (unlikely(mem->state == RXE_MEM_STATE_FREE)) {
		state = RESPST_ERR_RKEY_VIOLATION;
		goto err;
	}

	if (mem_check_range(mem, va, resid)) {
		state = RESPST_ERR_RKEY_VIOLATION;
		goto err;
	}

	if (pkt->mask & RXE_WRITE_MASK) {
		if (resid > mtu) {
			if (pktlen != mtu || bth_pad(pkt)) {
				state = RESPST_ERR_LENGTH;
				goto err;
			}
		} else {
			if (pktlen != resid) {
				state = RESPST_ERR_LENGTH;
				goto err;
			}
			if ((bth_pad(pkt) != (0x3 & (-resid)))) {
				/* This case may not be exactly that
				 * but nothing else fits.
				 */
				state = RESPST_ERR_LENGTH;
				goto err;
			}
		}
	}

	WARN_ON_ONCE(qp->resp.mr);

	qp->resp.mr = mem;
	return RESPST_EXECUTE;

err:
	if (mem)
		rxe_drop_ref(mem);
	return state;
}
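/*
 * A minimal sketch (not driver code) of the RDMA WRITE length rule that
 * check_rkey() enforces above: every packet except the last must carry
 * exactly one MTU with no pad, and the last packet must carry the remaining
 * bytes with just enough pad to reach a 4-byte boundary. demo_write_len_ok
 * and its parameters are hypothetical.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool demo_write_len_ok(uint32_t resid, uint32_t pktlen,
			      uint32_t pad, uint32_t mtu)
{
	if (resid > mtu)			/* not the last packet */
		return pktlen == mtu && pad == 0;
	/* last packet: exact residual length, pad rounds it up to 4 bytes */
	return pktlen == resid && pad == ((-resid) & 0x3);
}

int main(void)
{
	printf("%d %d %d\n",
	       demo_write_len_ok(5000, 1024, 0, 1024),	/* middle: ok */
	       demo_write_len_ok(5000, 512, 0, 1024),	/* short middle: bad */
	       demo_write_len_ok(10, 10, 2, 1024));	/* last, 2 pad bytes: ok */
	return 0;
}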