int
spdk_nvmf_request_exec(struct spdk_nvmf_request *req)
{
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	bool done;

	if (cmd->opc == SPDK_NVME_OPC_FABRIC) {
		done = nvmf_process_fabrics_command(req);
	} else if (req->conn->type == CONN_TYPE_AQ) {
		done = nvmf_process_admin_cmd(req);
	} else {
		done = nvmf_process_io_cmd(req);
	}

	if (done) {
		/* Synchronous command - response is already filled out */
		return spdk_nvmf_request_complete(req);
	}

	/*
	 * Asynchronous command.
	 * The completion callback will call spdk_nvmf_request_complete().
	 */
	return 0;
}
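/*
 * Hedged sketch (not from the source): what an asynchronous backend
 * completion callback might look like. The name nvmf_backend_cmd_complete
 * and the status-code parameter are assumptions for illustration; only
 * spdk_nvmf_request_complete() comes from the function above, and the
 * rsp->nvme_cpl access mirrors the unit test below.
 */
static void
nvmf_backend_cmd_complete(struct spdk_nvmf_request *req, int sc)
{
	/* Fill out the completion status, then hand the request back. */
	req->rsp->nvme_cpl.status.sc = sc;
	spdk_nvmf_request_complete(req);
}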
static void
nvmf_test_process_io_cmd(void)
{
	struct spdk_nvme_cmd nvmf_cmd = {};
	struct nvmf_session *sess;
	struct nvmf_request nvmf_req = {};
	struct nvme_read_cdw12 *cdw12;
	struct spdk_nvmf_subsystem *tmp;
	int buf_len = 64;
	uint8_t *buf;

	nvmf_cmd.opc = SPDK_NVME_OPC_READ;
	nvmf_cmd.nsid = 2;
	nvmf_cmd.cid = 3;
	nvmf_req.rsp = malloc(sizeof(union nvmf_c2h_msg));
	SPDK_CU_ASSERT_FATAL(nvmf_req.rsp != NULL);
	nvmf_req.cb_fn = io_nvmf_cmd_complete;
	nvmf_req.cid = nvmf_cmd.cid;
	cdw12 = (struct nvme_read_cdw12 *)&nvmf_cmd.cdw12;
	cdw12->nlb = 16; /* read 16 logical blocks; verified in the nvme read stub */
	buf = malloc(buf_len);
	SPDK_CU_ASSERT_FATAL(buf != NULL);
	sess = nvmf_find_session_by_id("subsystem1", SS_SC_CNTLID);
	sess->vcprop.csts.bits.rdy = 1;

	/* Controller ready: read, write, and an unrecognized opcode each return 0. */
	CU_ASSERT_EQUAL(nvmf_process_io_cmd(sess, &nvmf_cmd, buf, buf_len, &nvmf_req), 0);
	CU_ASSERT_STRING_EQUAL(buf, "hello");

	nvmf_cmd.cid = 4;
	nvmf_cmd.opc = SPDK_NVME_OPC_WRITE;
	CU_ASSERT_EQUAL(nvmf_process_io_cmd(sess, &nvmf_cmd, buf, buf_len, &nvmf_req), 0);

	nvmf_cmd.opc = 0xff;
	nvmf_cmd.cid = 5;
	CU_ASSERT_EQUAL(nvmf_process_io_cmd(sess, &nvmf_cmd, buf, buf_len, &nvmf_req), 0);

	/* Controller not ready: the request must fail with NAMESPACE_NOT_READY. */
	sess->vcprop.csts.bits.rdy = 0;
	nvmf_cmd.cid = 6;
	CU_ASSERT_EQUAL(nvmf_process_io_cmd(sess, &nvmf_cmd, buf, buf_len, &nvmf_req), -1);
	CU_ASSERT_EQUAL(nvmf_req.rsp->nvme_cpl.status.sc, SPDK_NVME_SC_NAMESPACE_NOT_READY);
	sess->vcprop.csts.bits.rdy = 1;

	/* nsid = 0 is invalid */
	nvmf_cmd.nsid = 0;
	nvmf_cmd.cid = 7;
	CU_ASSERT_EQUAL(nvmf_process_io_cmd(sess, &nvmf_cmd, buf, buf_len, &nvmf_req), -1);
	CU_ASSERT_NOT_EQUAL(nvmf_req.rsp->nvme_cpl.status.sc, SPDK_NVME_SC_SUCCESS);

	/* A session with no attached subsystem must also fail. */
	tmp = sess->subsys;
	sess->subsys = NULL;
	nvmf_cmd.nsid = 1;
	nvmf_cmd.cid = 8;
	CU_ASSERT_EQUAL(nvmf_process_io_cmd(sess, &nvmf_cmd, buf, buf_len, &nvmf_req), -1);
	CU_ASSERT_NOT_EQUAL(nvmf_req.rsp->nvme_cpl.status.sc, SPDK_NVME_SC_SUCCESS);
	sess->subsys = tmp;

	free(buf);
	free(nvmf_req.rsp);
}
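/*
 * Hedged sketch (not from the source): a minimal stand-in for the
 * io_nvmf_cmd_complete callback the test wires into nvmf_req.cb_fn above.
 * The real test fixture defines it elsewhere; the exact signature is an
 * assumption based on how the request structure is used here.
 */
static void
io_nvmf_cmd_complete(struct nvmf_request *req)
{
	/* The test only needs the callback to exist; just sanity-check the request. */
	CU_ASSERT(req != NULL);
}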
static int
nvmf_io_cmd_continue(struct spdk_nvmf_conn *conn, struct nvme_qp_tx_desc *tx_desc)
{
	struct nvme_qp_rx_desc *rx_desc;
	struct nvmf_request *req;
	struct spdk_nvme_cmd *cmd;
	int ret;

	rx_desc = tx_desc->rx_desc;
	if (rx_desc == NULL) {
		SPDK_ERRLOG("rx_desc does not exist!\n");
		return -1;
	}

	req = &tx_desc->req_state;
	cmd = &req->cmd->nvme_cmd;
	req->fabric_rx_ctx = rx_desc;

	/* clear the SGL details; the RDMA transfer has already been performed */
	req->length = 0;

	/* send to the NVMf library for backend NVMe processing */
	ret = nvmf_process_io_cmd(req->session, cmd, (void *)rx_desc->bb,
				  rx_desc->bb_sgl.length, req);
	if (ret) {
		/* the library failed the request and should have updated the response */
		SPDK_TRACELOG(SPDK_TRACE_DEBUG, "send nvme io cmd capsule error response\n");
		ret = spdk_nvmf_send_response(conn, req);
		if (ret) {
			SPDK_ERRLOG("Unable to send response tx descriptor\n");
			return -1;
		}
	}

	return 0;
}
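/*
 * Hedged sketch (not from the source): nvmf_io_cmd_continue() is the second
 * half of a write split around an RDMA read. A connection's RDMA completion
 * handler would resume the deferred command roughly like this; the handler
 * name on_rdma_read_complete is hypothetical.
 */
static void
on_rdma_read_complete(struct spdk_nvmf_conn *conn, struct nvme_qp_tx_desc *tx_desc)
{
	/* The host data now sits in the bounce buffer; run the backend I/O. */
	if (nvmf_io_cmd_continue(conn, tx_desc) < 0) {
		SPDK_ERRLOG("nvmf_io_cmd_continue failed\n");
	}
}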
static int
nvmf_process_io_command(struct spdk_nvmf_conn *conn, struct nvme_qp_tx_desc *tx_desc)
{
	struct nvme_qp_rx_desc *rx_desc = tx_desc->rx_desc;
	struct nvmf_request *req;
	struct spdk_nvme_sgl_descriptor *sgl;
	struct spdk_nvmf_keyed_sgl_descriptor *keyed_sgl;
	struct spdk_nvme_cmd *cmd;
	enum spdk_nvme_data_transfer xfer;
	void *buf = NULL;
	uint32_t len = 0;
	int ret;

	req = &tx_desc->req_state;
	cmd = &req->cmd->nvme_cmd;
	sgl = (struct spdk_nvme_sgl_descriptor *)&cmd->dptr.sgl1;
	keyed_sgl = (struct spdk_nvmf_keyed_sgl_descriptor *)sgl;

	xfer = spdk_nvme_opc_get_data_transfer(cmd->opc);
	if (xfer != SPDK_NVME_DATA_NONE) {
		/*
		 * NVMf does support in-capsule data for write commands.
		 * If the caller indicates an SGL, verify it for in-capsule or
		 * RDMA read/write use and prepare the data buffer reference
		 * and length for the NVMf library.
		 */
		/* TBD: add code to handle I/O larger than the default bounce buffer size */
		if (sgl->type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK &&
		    (sgl->type_specific == SPDK_NVME_SGL_SUBTYPE_ADDRESS ||
		     sgl->type_specific == SPDK_NVME_SGL_SUBTYPE_INVALIDATE_KEY)) {
			if (keyed_sgl->key == 0) {
				SPDK_ERRLOG("Host did not specify SGL key!\n");
				goto command_fail;
			}

			if (keyed_sgl->length > rx_desc->bb_sgl.length) {
				SPDK_ERRLOG("SGL length 0x%x exceeds BB length 0x%x\n",
					    (uint32_t)keyed_sgl->length, rx_desc->bb_sgl.length);
				goto command_fail;
			}

			buf = (void *)rx_desc->bb;
			len = rx_desc->bb_sgl.length;
			req->remote_addr = keyed_sgl->address;
			req->rkey = keyed_sgl->key;
			req->length = keyed_sgl->length;
		} else if (sgl->type == SPDK_NVME_SGL_TYPE_DATA_BLOCK &&
			   sgl->type_specific == SPDK_NVME_SGL_SUBTYPE_OFFSET) {
			uint64_t offset = sgl->address;
			uint32_t max_len = rx_desc->bb_sgl.length;

			if (offset > max_len) {
				SPDK_ERRLOG("In-capsule offset 0x%" PRIx64 " exceeds capsule length 0x%x\n",
					    offset, max_len);
				goto command_fail;
			}
			max_len -= (uint32_t)offset;

			if (sgl->length > max_len) {
				SPDK_ERRLOG("In-capsule data length 0x%x exceeds capsule length 0x%x\n",
					    sgl->length, max_len);
				goto command_fail;
			}

			buf = rx_desc->bb + offset;
			len = sgl->length;
		} else {
			SPDK_ERRLOG("Invalid NVMf I/O Command SGL: Type %2x, Subtype %2x\n",
				    sgl->type, sgl->type_specific);
			goto command_fail;
		}

		/*
		 * For any I/O that requires RDMA data to be pulled into the target
		 * bounce buffer before processing by the backend NVMe device:
		 */
		if (xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) {
			if (len > 0 && sgl->type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK) {
				SPDK_TRACELOG(SPDK_TRACE_RDMA, "Issuing RDMA Read to get host data\n");
				/* data to be copied from the remote host via RDMA */
				if (req->length < rx_desc->bb_len) {
					/*
					 * Temporarily adjust the SGE to only copy what
					 * the host is prepared to send.
					 */
					SPDK_TRACELOG(SPDK_TRACE_DEBUG, "*** modify bb sgl length from %x to %x\n",
						      rx_desc->bb_sgl.length, req->length);
					rx_desc->bb_sgl.length = req->length;
				}
				req->pending = NVMF_PENDING_WRITE;
				ret = nvmf_post_rdma_read(tx_desc->conn, tx_desc);
				if (ret) {
					SPDK_ERRLOG("Unable to post rdma read tx descriptor\n");
					goto command_fail;
				}
				/* Wait for the RDMA read completion, which will continue the I/O. */
				return 0;
			}
		}
	}

	/* send to the NVMf library for backend NVMe processing */
	ret = nvmf_process_io_cmd(req->session, cmd, buf, len, req);
	if (ret) {
		/* the library failed the request and should have updated the response */
		SPDK_TRACELOG(SPDK_TRACE_RDMA, "send nvme io cmd capsule error response\n");
		ret = spdk_nvmf_send_response(conn, req);
		if (ret) {
			SPDK_ERRLOG("Unable to send response tx descriptor\n");
			goto command_fail;
		}
	}

	return 0;

command_fail:
	return -1;
}
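/*
 * Hedged sketch (not from the source): the keyed SGL shape the parser above
 * accepts for RDMA transfers. Field names follow the descriptor structures
 * used in nvmf_process_io_command(); the helper itself and its values are
 * illustrative only.
 */
static void
example_build_keyed_sgl(struct spdk_nvme_cmd *cmd, uint64_t addr, uint32_t len, uint32_t rkey)
{
	struct spdk_nvme_sgl_descriptor *sgl =
		(struct spdk_nvme_sgl_descriptor *)&cmd->dptr.sgl1;
	struct spdk_nvmf_keyed_sgl_descriptor *keyed_sgl =
		(struct spdk_nvmf_keyed_sgl_descriptor *)sgl;

	sgl->type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
	sgl->type_specific = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
	keyed_sgl->address = addr;	/* remote buffer the target will RDMA read/write */
	keyed_sgl->length = len;	/* must fit within the target bounce buffer */
	keyed_sgl->key = rkey;		/* nonzero rkey; a zero key is rejected above */
}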