void iser_snd_completion(struct iser_desc *tx_desc) { struct iser_dto *dto = &tx_desc->dto; struct iser_conn *ib_conn = dto->ib_conn; struct iscsi_iser_conn *iser_conn = ib_conn->iser_conn; struct iscsi_conn *conn = iser_conn->iscsi_conn; struct iscsi_task *task; int resume_tx = 0; iser_dbg("Initiator, Data sent dto=0x%p\n", dto); iser_dto_buffs_release(dto); if (tx_desc->type == ISCSI_TX_DATAOUT) kmem_cache_free(ig.desc_cache, tx_desc); if (atomic_read(&iser_conn->ib_conn->post_send_buf_count) == ISER_QP_MAX_REQ_DTOS) resume_tx = 1; atomic_dec(&ib_conn->post_send_buf_count); if (resume_tx) { iser_dbg("%ld resuming tx\n",jiffies); scsi_queue_work(conn->session->host, &conn->xmitwork); } if (tx_desc->type == ISCSI_TX_CONTROL) { /* this arithmetic is legal by libiscsi dd_data allocation */ task = (void *) ((long)(void *)tx_desc - sizeof(struct iscsi_task)); if (task->hdr->itt == RESERVED_ITT) iscsi_put_task(task); } }
/** * iscsi_tcp_task_xmit - xmit normal PDU task * @task: iscsi command task * * We're expected to return 0 when everything was transmitted successfully, * -EAGAIN if there's still data in the queue, or != 0 for any other kind * of error. */ int iscsi_tcp_task_xmit(struct iscsi_task *task) { struct iscsi_conn *conn = task->conn; struct iscsi_session *session = conn->session; struct iscsi_r2t_info *r2t; int rc = 0; flush: /* Flush any pending data first. */ rc = session->tt->xmit_pdu(task); if (rc < 0) return rc; /* mgmt command */ if (!task->sc) { if (task->hdr->itt == RESERVED_ITT) iscsi_put_task(task); return 0; } /* Are we done already? */ if (task->sc->sc_data_direction != DMA_TO_DEVICE) return 0; r2t = iscsi_tcp_get_curr_r2t(task); if (r2t == NULL) { /* Waiting for more R2Ts to arrive. */ ISCSI_DBG_TCP(conn, "no R2Ts yet\n"); return 0; } rc = conn->session->tt->alloc_pdu(task, ISCSI_OP_SCSI_DATA_OUT); if (rc) return rc; iscsi_prep_data_out_pdu(task, r2t, (struct iscsi_data *) task->hdr); ISCSI_DBG_TCP(conn, "sol dout %p [dsn %d itt 0x%x doff %d dlen %d]\n", r2t, r2t->datasn - 1, task->hdr->itt, r2t->data_offset + r2t->sent, r2t->data_count); rc = conn->session->tt->init_pdu(task, r2t->data_offset + r2t->sent, r2t->data_count); if (rc) { iscsi_conn_failure(conn, ISCSI_ERR_XMIT_FAILED); return rc; } r2t->sent += r2t->data_count; goto flush; }
/**
 * iser_ctrl_comp - handle a control PDU send completion
 * @cq: completion queue the work completion arrived on
 * @wc: the work completion
 *
 * On success, drops the extra task reference held for control PDUs
 * carrying the reserved ITT; on failure, reports the completion error.
 */
void iser_ctrl_comp(struct ib_cq *cq, struct ib_wc *wc)
{
	struct iser_tx_desc *tx_desc = iser_tx(wc->wr_cqe);
	struct iscsi_task *task;

	if (likely(wc->status == IB_WC_SUCCESS)) {
		/* this arithmetic is legal by libiscsi dd_data allocation */
		task = (void *)tx_desc - sizeof(struct iscsi_task);
		if (task->hdr->itt == RESERVED_ITT)
			iscsi_put_task(task);
		return;
	}

	iser_err_comp(wc, "control");
}
void iser_snd_completion(struct iser_tx_desc *tx_desc, struct ib_conn *ib_conn) { struct iscsi_task *task; struct iser_device *device = ib_conn->device; if (tx_desc->type == ISCSI_TX_DATAOUT) { ib_dma_unmap_single(device->ib_device, tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE); kmem_cache_free(ig.desc_cache, tx_desc); tx_desc = NULL; } if (tx_desc && tx_desc->type == ISCSI_TX_CONTROL) { /* this arithmetic is legal by libiscsi dd_data allocation */ task = (void *) ((long)(void *)tx_desc - sizeof(struct iscsi_task)); if (task->hdr->itt == RESERVED_ITT) iscsi_put_task(task); } }
/**
 * iser_rcv_completion - process a receive (recv) DTO completion
 * @rx_desc:      the completed RX descriptor
 * @dto_xfer_len: total number of bytes received (headers + any data)
 *
 * Hands the received PDU to libiscsi, finalizes RDMA state for SCSI
 * command responses, frees the RX descriptor, and counts unexpected
 * PDUs whose consumed recv buffer must be re-posted later.
 */
void iser_rcv_completion(struct iser_desc *rx_desc,
			 unsigned long dto_xfer_len)
{
	struct iser_dto *dto = &rx_desc->dto;
	struct iscsi_iser_conn *conn = dto->ib_conn->iser_conn;
	struct iscsi_task *task;
	struct iscsi_iser_task *iser_task;
	struct iscsi_hdr *hdr;
	char *rx_data = NULL;
	int rx_data_len = 0;
	unsigned char opcode;

	hdr = &rx_desc->iscsi_header;

	iser_dbg("op 0x%x itt 0x%x\n", hdr->opcode,hdr->itt);

	/* anything received beyond the headers is immediate data */
	if (dto_xfer_len > ISER_TOTAL_HEADERS_LEN) {	/* we have data */
		rx_data_len = dto_xfer_len - ISER_TOTAL_HEADERS_LEN;
		rx_data = dto->regd[1]->virt_addr;
		rx_data += dto->offset[1];
	}

	opcode = hdr->opcode & ISCSI_OPCODE_MASK;

	if (opcode == ISCSI_OP_SCSI_CMD_RSP) {
		/* look up the task and take a reference under the session
		 * lock so it cannot be completed out from under us */
		spin_lock(&conn->iscsi_conn->session->lock);
		task = iscsi_itt_to_ctask(conn->iscsi_conn, hdr->itt);
		if (task)
			__iscsi_get_task(task);
		spin_unlock(&conn->iscsi_conn->session->lock);

		if (!task)
			iser_err("itt can't be matched to task!!! "
				 "conn %p opcode %d itt %d\n",
				 conn->iscsi_conn, opcode, hdr->itt);
		else {
			iser_task = task->dd_data;
			iser_dbg("itt %d task %p\n",hdr->itt, task);
			/* the RDMA phase of this command is over: mark it
			 * done and tear down its registrations before the
			 * response is handed to libiscsi */
			iser_task->status = ISER_TASK_STATUS_COMPLETED;
			iser_task_rdma_finalize(iser_task);
			iscsi_put_task(task);
		}
	}
	iser_dto_buffs_release(dto);

	iscsi_iser_recv(conn->iscsi_conn, hdr, rx_data, rx_data_len);

	kfree(rx_desc->data);
	kmem_cache_free(ig.desc_cache, rx_desc);

	/* decrementing conn->post_recv_buf_count only --after-- freeing the *
	 * task eliminates the need to worry on tasks which are completed in *
	 * parallel to the execution of iser_conn_term. So the code that     *
	 * waits for the posted rx bufs refcount to become zero handles      *
	 * everything                                                        */
	atomic_dec(&conn->ib_conn->post_recv_buf_count);

	/*
	 * if an unexpected PDU was received then the recv wr consumed must
	 * be replaced, this is done in the next send of a control-type PDU
	 */
	if (opcode == ISCSI_OP_NOOP_IN && hdr->itt == RESERVED_ITT) {
		/* nop-in with itt = 0xffffffff */
		atomic_inc(&conn->ib_conn->unexpected_pdu_count);
	} else if (opcode == ISCSI_OP_ASYNC_EVENT) {
		/* asyncronous message */
		atomic_inc(&conn->ib_conn->unexpected_pdu_count);
	}
	/* a reject PDU consumes the recv buf posted for the response */
}