Example #1
File: request.c Project: gongchuang/spdk
int
spdk_nvmf_request_exec(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_session *session = req->conn->sess;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	spdk_nvmf_request_exec_status status;

	nvmf_trace_command(req->cmd, req->conn->type);

	if (cmd->opc == SPDK_NVME_OPC_FABRIC) {
		status = nvmf_process_fabrics_command(req);
	} else if (session == NULL || !session->vcprop.cc.bits.en) {
		/* Only Fabric commands are allowed when the controller is disabled */
		SPDK_ERRLOG("Non-Fabric command sent to disabled controller\n");
		rsp->status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR;
		status = SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	} else if (req->conn->type == CONN_TYPE_AQ) {
		struct spdk_nvmf_subsystem *subsystem;

		subsystem = session->subsys;
		assert(subsystem != NULL);
		if (subsystem->subtype == SPDK_NVMF_SUBTYPE_DISCOVERY) {
			status = nvmf_process_discovery_cmd(req);
		} else {
			status = subsystem->ops->process_admin_cmd(req);
		}
	} else {
		status = session->subsys->ops->process_io_cmd(req);
	}

	switch (status) {
	case SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE:
		return spdk_nvmf_request_complete(req);
	case SPDK_NVMF_REQUEST_EXEC_STATUS_RELEASE:
		if (req->conn->transport->req_release(req)) {
			SPDK_ERRLOG("Transport request release error!\n");
			return -1;
		}

		return 0;
	case SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS:
		return 0;
	default:
		SPDK_ERRLOG("Unknown request exec status: 0x%x\n", status);
		return -1;
	}
}
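
The dispatch above returns 0 on success (including requests that will complete asynchronously) and a negative value on failure. Below is a minimal sketch of how a transport-side caller might honor that contract; handle_incoming_request() is a hypothetical helper, not part of either project, and it assumes the same headers and types as the listing above.

static void
handle_incoming_request(struct spdk_nvmf_request *req)
{
	int rc;

	/* Dispatch the command (fabrics, admin, or I/O) as in Example #1. */
	rc = spdk_nvmf_request_exec(req);
	if (rc < 0) {
		/* Execution or completion failed; the transport should tear
		 * down req->conn (teardown itself is omitted here). */
		SPDK_ERRLOG("spdk_nvmf_request_exec() failed\n");
	}
	/* rc == 0: the request completed, was released back to the transport,
	 * or will finish asynchronously later. */
}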
Example #2
File: request.c Project: Shengliang/spdk
int
spdk_nvmf_request_prep_data(struct spdk_nvmf_request *req,
			    void *in_cap_data, uint32_t in_cap_len,
			    void *bb, uint32_t bb_len)
{
	struct spdk_nvmf_conn *conn = req->conn;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	enum spdk_nvme_data_transfer xfer;
	int ret;

	nvmf_trace_command(req->cmd, conn->type);

	req->length = 0;
	req->xfer = SPDK_NVME_DATA_NONE;
	req->data = NULL;

	if (cmd->opc == SPDK_NVME_OPC_FABRIC) {
		xfer = spdk_nvme_opc_get_data_transfer(req->cmd->nvmf_cmd.fctype);
	} else {
		xfer = spdk_nvme_opc_get_data_transfer(cmd->opc);
	}

	if (xfer != SPDK_NVME_DATA_NONE) {
		struct spdk_nvme_sgl_descriptor *sgl = (struct spdk_nvme_sgl_descriptor *)&cmd->dptr.sgl1;

		if (sgl->generic.type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK &&
		    (sgl->keyed.subtype == SPDK_NVME_SGL_SUBTYPE_ADDRESS ||
		     sgl->keyed.subtype == SPDK_NVME_SGL_SUBTYPE_INVALIDATE_KEY)) {
			if (sgl->keyed.length > bb_len) {
				SPDK_ERRLOG("SGL length 0x%x exceeds BB length 0x%x\n",
					    sgl->keyed.length, bb_len);
				rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
				return -1;
			}

			req->data = bb;
			req->length = sgl->keyed.length;
		} else if (sgl->generic.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK &&
			   sgl->unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET) {
			uint64_t offset = sgl->address;
			uint32_t max_len = in_cap_len;

			SPDK_TRACELOG(SPDK_TRACE_NVMF, "In-capsule data: offset 0x%" PRIx64 ", length 0x%x\n",
				      offset, sgl->unkeyed.length);

			if (conn->type == CONN_TYPE_AQ) {
				SPDK_ERRLOG("In-capsule data not allowed for admin queue\n");
				return -1;
			}

			if (offset > max_len) {
				SPDK_ERRLOG("In-capsule offset 0x%" PRIx64 " exceeds capsule length 0x%x\n",
					    offset, max_len);
				rsp->status.sc = SPDK_NVME_SC_INVALID_SGL_OFFSET;
				return -1;
			}
			max_len -= (uint32_t)offset;

			if (sgl->unkeyed.length > max_len) {
				SPDK_ERRLOG("In-capsule data length 0x%x exceeds capsule length 0x%x\n",
					    sgl->unkeyed.length, max_len);
				rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
				return -1;
			}

			req->data = in_cap_data + offset;
			req->length = sgl->unkeyed.length;
		} else {
			SPDK_ERRLOG("Invalid NVMf I/O Command SGL:  Type 0x%x, Subtype 0x%x\n",
				    sgl->generic.type, sgl->generic.subtype);
			rsp->status.sc = SPDK_NVME_SC_SGL_DESCRIPTOR_TYPE_INVALID;
			return -1;
		}

		if (req->length == 0) {
			xfer = SPDK_NVME_DATA_NONE;
			req->data = NULL;
		}

		req->xfer = xfer;

		/*
		 * If the command requires host-to-controller data, pull it
		 * into the target bounce buffer (BB) before the backend NVMe
		 * device processes the command.
		 */
		if (xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) {
			if (sgl->generic.type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK) {
				SPDK_TRACELOG(SPDK_TRACE_NVMF, "Initiating Host to Controller data transfer\n");
				ret = nvmf_post_rdma_read(conn, req);
				if (ret) {
					SPDK_ERRLOG("Unable to post rdma read tx descriptor\n");
					rsp->status.sc = SPDK_NVME_SC_DATA_TRANSFER_ERROR;
					return -1;
				}

				/* Wait for transfer to complete before executing command. */
				return 1;
			}
		}
	}

	if (xfer == SPDK_NVME_DATA_NONE) {
		SPDK_TRACELOG(SPDK_TRACE_NVMF, "No data to transfer\n");
		RTE_VERIFY(req->data == NULL);
		RTE_VERIFY(req->length == 0);
	} else {
		RTE_VERIFY(req->data != NULL);
		RTE_VERIFY(req->length != 0);
		SPDK_TRACELOG(SPDK_TRACE_NVMF, "%s data ready\n",
			      xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER ? "Host to Controller" :
			      "Controller to Host");
	}

	return 0;
}
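
spdk_nvmf_request_prep_data() returns -1 on an invalid SGL or a failed RDMA read post, 1 when a host-to-controller transfer is still in flight, and 0 when any data is already available. The sketch below shows a caller honoring that contract; prep_and_maybe_exec() is a hypothetical wrapper and assumes the spdk_nvmf_request_exec() from Example #1 is available in the same tree.

static int
prep_and_maybe_exec(struct spdk_nvmf_request *req,
		    void *in_cap_data, uint32_t in_cap_len,
		    void *bb, uint32_t bb_len)
{
	int rc;

	rc = spdk_nvmf_request_prep_data(req, in_cap_data, in_cap_len, bb, bb_len);
	if (rc < 0) {
		/* SGL parsing failed or the RDMA read could not be posted. */
		return -1;
	}
	if (rc > 0) {
		/* Host-to-controller data is still being pulled into the
		 * bounce buffer; execute the command when the read completes. */
		return 0;
	}
	/* rc == 0: any data is already in the capsule or bounce buffer. */
	return spdk_nvmf_request_exec(req);
}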
Example #3
File: conn.c Project: varun2784/spdk
static int nvmf_recv(struct spdk_nvmf_conn *conn, struct ibv_wc *wc)
{
	struct nvme_qp_rx_desc *rx_desc;
	struct nvme_qp_tx_desc *tx_desc = NULL;
	struct spdk_nvmf_capsule_cmd *cap_hdr;
	struct nvmf_request *req;
	int ret = 0;

	rx_desc = (struct nvme_qp_rx_desc *)wc->wr_id;
	cap_hdr = (struct spdk_nvmf_capsule_cmd *)&rx_desc->msg_buf;

	/* Update connection SQ tracking: advance the SQ tail, consuming a
	   free RX receive slot, then check that the queue has not been
	   overrun - which should never happen.
	 */
	if (conn->sq_tail < conn->sq_depth - 1) {
		conn->sq_tail++;
	} else {
		conn->sq_tail = 0;
	}
	SPDK_TRACELOG(SPDK_TRACE_DEBUG, "sq_head %x, sq_tail %x, sq_depth %x\n",
		      conn->sq_head, conn->sq_tail, conn->sq_depth);
	/* trap if initiator exceeds qdepth */
	if (conn->sq_head == conn->sq_tail) {
		SPDK_ERRLOG("	*** SQ Overflow !! ***\n");
		/* Controller fatal status condition: set the CFS flag in the
		   controller status register and stop processing this and any
		   further I/O on this queue.
		 */
		if (conn->sess) {
			conn->sess->vcprop.csts.bits.cfs = 1;
			conn->state = CONN_STATE_OVERFLOW;
		}
		if (conn->type == CONN_TYPE_IOQ) {
			/* if overflow on the I/O queue
			   stop processing, allow for
			   remote host to query failure
			   via admin queue
			 */
			goto drop_recv;
		} else {
			/* if overflow on the admin queue
			   there is no recovery, error out
			   to trigger disconnect
			 */
			goto recv_error;
		}
	}

	if (wc->byte_len < sizeof(*cap_hdr)) {
		SPDK_ERRLOG("recv length less than capsule header\n");
		goto recv_error;
	}
	rx_desc->recv_bc = wc->byte_len;
	SPDK_TRACELOG(SPDK_TRACE_NVMF, "recv byte count %x\n", rx_desc->recv_bc);

	/* get a response buffer */
	if (STAILQ_EMPTY(&conn->qp_tx_desc)) {
		SPDK_ERRLOG("tx desc pool empty!\n");
		goto recv_error;
	}
	tx_desc = STAILQ_FIRST(&conn->qp_tx_desc);
	nvmf_active_tx_desc(tx_desc);
	tx_desc->rx_desc = rx_desc;

	req = &tx_desc->req_state;
	req->session = conn->sess;
	req->fabric_tx_ctx = tx_desc;
	req->fabric_rx_ctx = rx_desc;
	req->cb_fn = nvmf_process_async_completion;
	req->length = 0;
	req->cid = cap_hdr->cid;
	req->cmd = &rx_desc->msg_buf;

	nvmf_trace_command(cap_hdr, conn->type);

	if (cap_hdr->opcode == SPDK_NVMF_FABRIC_OPCODE) {
		ret = nvmf_process_fabrics_command(conn, tx_desc);
	} else if (conn->type == CONN_TYPE_AQ) {
		ret = nvmf_process_admin_command(conn, tx_desc);
	} else {
		ret = nvmf_process_io_command(conn, tx_desc);
	}

	if (ret < 0) {
		goto recv_error;
	}

	/* ret == 1: command processing did not leave a delayed posting
	   pending, so re-post the rx_desc and re-queue the tx_desc here.
	 */
	if (ret == 1) {
		tx_desc->rx_desc = NULL;
		nvmf_deactive_tx_desc(tx_desc);
		if (nvmf_post_rdma_recv(conn, rx_desc)) {
			SPDK_ERRLOG("Unable to re-post aq rx descriptor\n");
			goto recv_error;
		}
	}

drop_recv:
	return 0;

recv_error:
	/* recover the tx_desc */
	if (tx_desc != NULL) {
		tx_desc->rx_desc = NULL;
		nvmf_deactive_tx_desc(tx_desc);
	}
	return -1;
}
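
nvmf_recv() is driven by receive work completions. A minimal, hypothetical sketch of a completion-queue poller that feeds it is shown below; poll_connection_cq() is not part of the project, it relies on <infiniband/verbs.h> plus the conn.c-local definitions above, and a real poller would also handle send and RDMA read/write completions.

static int
poll_connection_cq(struct spdk_nvmf_conn *conn, struct ibv_cq *cq)
{
	struct ibv_wc wc;
	int rc;

	while ((rc = ibv_poll_cq(cq, 1, &wc)) == 1) {
		if (wc.status != IBV_WC_SUCCESS) {
			SPDK_ERRLOG("CQ completion error: %d\n", wc.status);
			return -1;
		}
		if (wc.opcode == IBV_WC_RECV) {
			/* A new capsule arrived; parse and dispatch it. */
			if (nvmf_recv(conn, &wc) < 0) {
				return -1;
			}
		}
		/* Send and RDMA read/write completions are not shown. */
	}

	return rc; /* 0 when the CQ is drained, negative if polling failed. */
}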