void iser_login_rsp(struct ib_cq *cq, struct ib_wc *wc) { struct ib_conn *ib_conn = wc->qp->qp_context; struct iser_conn *iser_conn = to_iser_conn(ib_conn); struct iser_login_desc *desc = iser_login(wc->wr_cqe); struct iscsi_hdr *hdr; char *data; int length; if (unlikely(wc->status != IB_WC_SUCCESS)) { iser_err_comp(wc, "login_rsp"); return; } ib_dma_sync_single_for_cpu(ib_conn->device->ib_device, desc->rsp_dma, ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE); hdr = desc->rsp + sizeof(struct iser_ctrl); data = desc->rsp + ISER_HEADERS_LEN; length = wc->byte_len - ISER_HEADERS_LEN; iser_dbg("op 0x%x itt 0x%x dlen %d\n", hdr->opcode, hdr->itt, length); iscsi_iser_recv(iser_conn->iscsi_conn, hdr, data, length); ib_dma_sync_single_for_device(ib_conn->device->ib_device, desc->rsp_dma, ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE); ib_conn->post_recv_buf_count--; }
void iser_dataout_comp(struct ib_cq *cq, struct ib_wc *wc) { struct iser_tx_desc *desc = iser_tx(wc->wr_cqe); struct ib_conn *ib_conn = wc->qp->qp_context; struct iser_device *device = ib_conn->device; if (unlikely(wc->status != IB_WC_SUCCESS)) iser_err_comp(wc, "dataout"); ib_dma_unmap_single(device->ib_device, desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE); kmem_cache_free(ig.desc_cache, desc); }
/**
 * iser_task_rsp - receive completion handler for task responses
 * @cq: completion queue (unused)
 * @wc: work completion for the RX descriptor
 *
 * On success, syncs the RX descriptor for CPU access, optionally validates
 * remote invalidation, delivers the PDU to libiscsi, returns the buffer to
 * the device, and replenishes the receive queue when the number of posted
 * buffers drops low enough.
 */
void iser_task_rsp(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_conn *ib_conn = wc->qp->qp_context;
	struct iser_conn *iser_conn = to_iser_conn(ib_conn);
	struct iser_rx_desc *desc = iser_rx(wc->wr_cqe);
	struct iscsi_hdr *hdr;
	int length;
	int outstanding, count, err;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		iser_err_comp(wc, "task_rsp");
		return;
	}

	/* make the received PDU coherent for CPU reads */
	ib_dma_sync_single_for_cpu(ib_conn->device->ib_device,
				   desc->dma_addr, ISER_RX_PAYLOAD_SIZE,
				   DMA_FROM_DEVICE);

	hdr = &desc->iscsi_header;
	length = wc->byte_len - ISER_HEADERS_LEN;

	iser_dbg("op 0x%x itt 0x%x dlen %d\n", hdr->opcode, hdr->itt, length);

	/* a bogus/unexpected remote invalidation is fatal for the connection */
	if (iser_check_remote_inv(iser_conn, wc, hdr)) {
		iscsi_conn_failure(iser_conn->iscsi_conn,
				   ISCSI_ERR_CONN_FAILED);
		return;
	}

	iscsi_iser_recv(iser_conn->iscsi_conn, hdr, desc->data, length);

	/* hand the buffer back to the device for reuse */
	ib_dma_sync_single_for_device(ib_conn->device->ib_device,
				      desc->dma_addr, ISER_RX_PAYLOAD_SIZE,
				      DMA_FROM_DEVICE);

	/* decrementing conn->post_recv_buf_count only --after-- freeing the *
	 * task eliminates the need to worry on tasks which are completed in *
	 * parallel to the execution of iser_conn_term. So the code that waits *
	 * for the posted rx bufs refcount to become zero handles everything   */
	ib_conn->post_recv_buf_count--;

	outstanding = ib_conn->post_recv_buf_count;

	/* repost a batch of receive buffers once the outstanding count leaves
	 * room for at least min_posted_rx more
	 */
	if (outstanding + iser_conn->min_posted_rx <=
	    iser_conn->qp_max_recv_dtos) {
		count = min(iser_conn->qp_max_recv_dtos - outstanding,
			    iser_conn->min_posted_rx);
		err = iser_post_recvm(iser_conn, count);
		if (err)
			iser_err("posting %d rx bufs err %d\n", count, err);
	}
}
/**
 * iser_ctrl_comp - send completion handler for control PDUs
 * @cq: completion queue (unused)
 * @wc: work completion for the TX descriptor
 *
 * On a successful completion of a control send, recovers the owning
 * iscsi_task from the descriptor and drops its reference when the PDU
 * carried a reserved ITT (i.e. no response will ever release it).
 */
void iser_ctrl_comp(struct ib_cq *cq, struct ib_wc *wc)
{
	struct iser_tx_desc *tx_desc = iser_tx(wc->wr_cqe);
	struct iscsi_task *task;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		iser_err_comp(wc, "control");
		return;
	}

	/*
	 * The descriptor lives in the task's dd_data, which libiscsi
	 * allocates immediately after the iscsi_task itself — so stepping
	 * back sizeof(struct iscsi_task) bytes is a legal way to recover
	 * the owning task.
	 */
	task = (void *)tx_desc - sizeof(struct iscsi_task);

	if (task->hdr->itt == RESERVED_ITT)
		iscsi_put_task(task);
}
void iser_reg_comp(struct ib_cq *cq, struct ib_wc *wc) { iser_err_comp(wc, "memreg"); }
/**
 * iser_cmd_comp - send completion handler for SCSI command PDUs
 * @cq: completion queue (unused)
 * @wc: work completion
 *
 * Command descriptors are reclaimed through the task lifecycle, so nothing
 * needs to happen here beyond reporting a failed completion.
 */
void iser_cmd_comp(struct ib_cq *cq, struct ib_wc *wc)
{
	if (likely(wc->status == IB_WC_SUCCESS))
		return;

	iser_err_comp(wc, "command");
}