/*
 * Release the resources held by a completed (or flushed) send work entry:
 * unmap the message's DMA scatterlist, deliver RDMA completion
 * notification if an RDMA op was attached, and drop the work entry's
 * reference on the message.
 *
 * @ic:        connection the work entry was posted on
 * @send:      the send work entry being retired; send->s_rm is consumed
 *             and cleared before return
 * @wc_status: IB work-completion status propagated to the RDMA notifier
 *
 * NOTE(review): statement order matters here — the sg unmap happens
 * before the completion notification, and rds_message_unmapped() runs
 * before the final rds_message_put().
 */
static void rds_ib_send_unmap_rm(struct rds_ib_connection *ic,
				 struct rds_ib_send_work *send,
				 int wc_status)
{
	struct rds_message *rm = send->s_rm;

	rdsdebug("ic %p send %p rm %p\n", ic, send, rm);

	/* Undo the DMA mapping of the message payload scatterlist. */
	ib_dma_unmap_sg(ic->i_cm_id->device,
		     rm->m_sg, rm->m_nents,
		     DMA_TO_DEVICE);

	if (rm->m_rdma_op != NULL) {
		rds_ib_send_unmap_rdma(ic, rm->m_rdma_op);

		/* If the user asked for a completion notification on this
		 * message, we can implement three different semantics:
		 *  1.	Notify when we received the ACK on the RDS message
		 *	that was queued with the RDMA. This provides reliable
		 *	notification of RDMA status at the expense of a one-way
		 *	packet delay.
		 *  2.	Notify when the IB stack gives us the completion event for
		 *	the RDMA operation.
		 *  3.	Notify when the IB stack gives us the completion event for
		 *	the accompanying RDS messages.
		 * Here, we implement approach #3. To implement approach #2,
		 * call rds_rdma_send_complete from the cq_handler. To implement #1,
		 * don't call rds_rdma_send_complete at all, and fall back to the notify
		 * handling in the ACK processing code.
		 *
		 * Note: There's no need to explicitly sync any RDMA buffers using
		 * ib_dma_sync_sg_for_cpu - the completion for the RDMA
		 * operation itself unmapped the RDMA buffers, which takes care
		 * of synching.
		 */
		rds_ib_send_rdma_complete(rm, wc_status);

		/* Account the RDMA payload under the matching direction counter. */
		if (rm->m_rdma_op->r_write)
			rds_stats_add(s_send_rdma_bytes, rm->m_rdma_op->r_bytes);
		else
			rds_stats_add(s_recv_rdma_bytes, rm->m_rdma_op->r_bytes);
	}

	/* If anyone waited for this message to get flushed out, wake
	 * them up now */
	rds_message_unmapped(rm);

	/* Drop the reference the ring slot held; clear the slot so the
	 * ring-clearing path won't unmap it twice. */
	rds_message_put(rm);
	send->s_rm = NULL;
}
/*
 * Sweep every slot of the send ring and release whatever resources the
 * entries still hold (message references and/or RDMA mappings).
 * Completed entries are flushed with IB_WC_WR_FLUSH_ERR status.
 */
void rds_ib_send_clear_ring(struct rds_ib_connection *ic)
{
	u32 n;

	for (n = 0; n < ic->i_send_ring.w_nr; n++) {
		struct rds_ib_send_work *entry = &ic->i_sends[n];

		/* 0xdead marks a slot that holds nothing to release —
		 * presumably a sentinel set elsewhere; skip it. */
		if (entry->s_wr.opcode == 0xdead)
			continue;

		if (entry->s_rm)
			rds_ib_send_unmap_rm(ic, entry, IB_WC_WR_FLUSH_ERR);
		if (entry->s_op)
			rds_ib_send_unmap_rdma(ic, entry->s_op);
	}
}
static void rds_ib_send_unmap_data(struct rds_ib_connection *ic, struct rm_data_op *op, int wc_status) { struct rds_message *rm; rm = container_of(op, struct rds_message, data); if (op->op_nents) ib_dma_unmap_sg(ic->i_cm_id->device, op->op_sg, op->op_nents, DMA_TO_DEVICE); if (rm->data.op_async) rds_ib_send_complete(rm, wc_status, rds_asend_complete); else if (rm->rdma.op_active && rm->rdma.op_remote_complete) rds_ib_send_unmap_rdma(ic, &rm->rdma, wc_status); }
/*
 * Unmap the resources associated with a struct send_work.
 *
 * Returns the rm for no good reason other than it is unobtainable
 * other than by switching on wr.opcode, currently, and the caller,
 * the event handler, needs it.
 *
 * NOTE(review): definition continues beyond this view — the switch and
 * function are closed in code not shown here.
 */
static struct rds_message *rds_ib_send_unmap_op(struct rds_ib_connection *ic,
						struct rds_ib_send_work *send,
						int wc_status)
{
	struct rds_message *rm = NULL;

	/* In the error case, wc.opcode sometimes contains garbage */
	switch (send->s_wr.opcode) {
	case IB_WR_SEND:
		/* Plain send: the op is the message's embedded data op. */
		if (send->s_op) {
			rm = container_of(send->s_op, struct rds_message, data);
			rds_ib_send_unmap_data(ic, send->s_op, wc_status);
		}
		break;
	case IB_WR_RDMA_WRITE:
	case IB_WR_RDMA_READ:
		/* RDMA op: recover the message from its embedded rdma op. */
		if (send->s_op) {
			rm = container_of(send->s_op, struct rds_message, rdma);
			rds_ib_send_unmap_rdma(ic, send->s_op, wc_status);
		}