Example #1
int
spdk_nvmf_request_exec(struct spdk_nvmf_request *req)
{
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	bool done;

	if (cmd->opc == SPDK_NVME_OPC_FABRIC) {
		done = nvmf_process_fabrics_command(req);
	} else if (req->conn->type == CONN_TYPE_AQ) {
		done = nvmf_process_admin_cmd(req);
	} else {
		done = nvmf_process_io_cmd(req);
	}

	if (done) {
		/* Synchronous command - response is already filled out */
		return spdk_nvmf_request_complete(req);
	}

	/*
	 * Asynchronous command.
	 * The completion callback will call spdk_nvmf_request_complete().
	 */
	return 0;
}
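
The asynchronous branch above relies on a completion callback to finish the request later. A minimal sketch of what such a callback could look like, assuming SPDK's internal request/completion types from the examples on this page (the function name and the success flag are illustrative, not SPDK API):

/* Hypothetical async completion hook. Fills out the NVMe completion
 * status and hands the request back to the generic completion path,
 * mirroring what the synchronous branch of spdk_nvmf_request_exec() does.
 */
static void
my_backend_io_done(struct spdk_nvmf_request *req, bool success)
{
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;

	rsp->status.sct = SPDK_NVME_SCT_GENERIC;
	rsp->status.sc = success ? SPDK_NVME_SC_SUCCESS :
			 SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;

	/* Same call the synchronous path makes in spdk_nvmf_request_exec(). */
	spdk_nvmf_request_complete(req);
}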
Example #2
int
spdk_nvmf_request_exec(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_session *session = req->conn->sess;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	spdk_nvmf_request_exec_status status;

	nvmf_trace_command(req->cmd, req->conn->type);

	if (cmd->opc == SPDK_NVME_OPC_FABRIC) {
		status = nvmf_process_fabrics_command(req);
	} else if (session == NULL || !session->vcprop.cc.bits.en) {
		/* Only Fabric commands are allowed when the controller is disabled */
		SPDK_ERRLOG("Non-Fabric command sent to disabled controller\n");
		rsp->status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR;
		status = SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	} else if (req->conn->type == CONN_TYPE_AQ) {
		struct spdk_nvmf_subsystem *subsystem;

		subsystem = session->subsys;
		assert(subsystem != NULL);
		if (subsystem->subtype == SPDK_NVMF_SUBTYPE_DISCOVERY) {
			status = nvmf_process_discovery_cmd(req);
		} else {
			status = subsystem->ops->process_admin_cmd(req);
		}
	} else {
		status = session->subsys->ops->process_io_cmd(req);
	}

	switch (status) {
	case SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE:
		return spdk_nvmf_request_complete(req);
	case SPDK_NVMF_REQUEST_EXEC_STATUS_RELEASE:
		if (req->conn->transport->req_release(req)) {
			SPDK_ERRLOG("Transport request release error!\n");
			return -1;
		}

		return 0;
	case SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS:
		return 0;
	default:
		SPDK_ERRLOG("Unknown request exec status: 0x%x\n", status);
		return -1;
	}
}
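
The switch above exhausts three values of spdk_nvmf_request_exec_status. A definition consistent with how this example uses them would look roughly like the following sketch (the enumerator order and concrete values are an assumption):

/* Sketch of the exec status enum implied by the switch above;
 * the enumerator values themselves are an assumption. */
typedef enum {
	/* The response is already filled out; complete the request now. */
	SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE,
	/* No response is needed; hand the request back to the transport. */
	SPDK_NVMF_REQUEST_EXEC_STATUS_RELEASE,
	/* A backend owns the request and will call
	 * spdk_nvmf_request_complete() from its completion callback. */
	SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS,
} spdk_nvmf_request_exec_status;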
Example #3
static void
test_nvmf_process_fabrics_cmd(void)
{
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_conn req_conn = {};
	union nvmf_h2c_msg req_cmd = {};
	union nvmf_c2h_msg req_rsp = {};
	int ret;

	req.conn = &req_conn;
	req.cmd  = &req_cmd;
	req.rsp  = &req_rsp;
	req.conn->sess = NULL;

	/* No session: a Property Get must fail with Command Sequence Error */
	req.cmd->nvmf_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_PROPERTY_GET;
	ret = nvmf_process_fabrics_command(&req);
	CU_ASSERT_EQUAL(req.rsp->nvme_cpl.status.sc, SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR);
	CU_ASSERT_EQUAL(ret, SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
}
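
The test above uses plain CUnit assertions. Wiring it into a runnable suite follows the standard CUnit pattern; the suite name and main() scaffolding below are illustrative, not part of the original test file:

#include <CUnit/Basic.h>

int
main(void)
{
	CU_pSuite suite;
	unsigned int num_failures;

	if (CU_initialize_registry() != CUE_SUCCESS) {
		return CU_get_error();
	}

	suite = CU_add_suite("nvmf", NULL, NULL);
	if (suite == NULL) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	CU_add_test(suite, "process_fabrics_cmd", test_nvmf_process_fabrics_cmd);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures > 0 ? 1 : 0;
}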
Example #4
static int nvmf_recv(struct spdk_nvmf_conn *conn, struct ibv_wc *wc)
{
	struct nvme_qp_rx_desc *rx_desc;
	struct nvme_qp_tx_desc *tx_desc = NULL;
	struct spdk_nvmf_capsule_cmd *cap_hdr;
	struct nvmf_request *req;
	int ret = 0;

	rx_desc = (struct nvme_qp_rx_desc *)wc->wr_id;
	cap_hdr = (struct spdk_nvmf_capsule_cmd *)&rx_desc->msg_buf;

	/* Update connection SQ tracking: advance the SQ tail,
	 * consuming a free RX recv slot. The overflow check below
	 * should never trigger.
	 */
	if (conn->sq_tail < conn->sq_depth - 1) {
		conn->sq_tail++;
	} else {
		conn->sq_tail = 0;
	}
	SPDK_TRACELOG(SPDK_TRACE_DEBUG, "sq_head %x, sq_tail %x, sq_depth %x\n",
		      conn->sq_head, conn->sq_tail, conn->sq_depth);
	/* trap if initiator exceeds qdepth */
	if (conn->sq_head == conn->sq_tail) {
		SPDK_ERRLOG("	*** SQ Overflow !! ***\n");
		/* controller fatal status condition:
		   set the cfs flag in controller status
		   and stop processing this and any I/O
		   on this queue.
		*/
		if (conn->sess) {
			conn->sess->vcprop.csts.bits.cfs = 1;
			conn->state = CONN_STATE_OVERFLOW;
		}
		if (conn->type == CONN_TYPE_IOQ) {
			/* Overflow on an I/O queue: stop processing, but
			 * keep the connection so the remote host can query
			 * the failure via the admin queue.
			 */
			goto drop_recv;
		} else {
			/* Overflow on the admin queue is unrecoverable;
			 * error out to trigger a disconnect.
			 */
			goto recv_error;
		}
	}

	if (wc->byte_len < sizeof(*cap_hdr)) {
		SPDK_ERRLOG("recv length less than capsule header\n");
		goto recv_error;
	}
	rx_desc->recv_bc = wc->byte_len;
	SPDK_TRACELOG(SPDK_TRACE_NVMF, "recv byte count %x\n", rx_desc->recv_bc);

	/* get a response buffer */
	if (STAILQ_EMPTY(&conn->qp_tx_desc)) {
		SPDK_ERRLOG("tx desc pool empty!\n");
		goto recv_error;
	}
	tx_desc = STAILQ_FIRST(&conn->qp_tx_desc);
	nvmf_active_tx_desc(tx_desc);
	tx_desc->rx_desc = rx_desc;

	req = &tx_desc->req_state;
	req->session = conn->sess;
	req->fabric_tx_ctx = tx_desc;
	req->fabric_rx_ctx = rx_desc;
	req->cb_fn = nvmf_process_async_completion;
	req->length = 0;
	req->cid = cap_hdr->cid;
	req->cmd = &rx_desc->msg_buf;

	nvmf_trace_command(cap_hdr, conn->type);

	if (cap_hdr->opcode == SPDK_NVMF_FABRIC_OPCODE) {
		ret = nvmf_process_fabrics_command(conn, tx_desc);
	} else if (conn->type == CONN_TYPE_AQ) {
		ret = nvmf_process_admin_command(conn, tx_desc);
	} else {
		ret = nvmf_process_io_command(conn, tx_desc);
	}

	if (ret < 0) {
		goto recv_error;
	}

	/* ret == 1: the command completed immediately, so re-post the
	 * rx_desc and requeue the tx_desc now; no posting is deferred
	 * for command processing.
	 */
	if (ret == 1) {
		tx_desc->rx_desc = NULL;
		nvmf_deactive_tx_desc(tx_desc);
		if (nvmf_post_rdma_recv(conn, rx_desc)) {
			SPDK_ERRLOG("Unable to re-post aq rx descriptor\n");
			goto recv_error;
		}
	}

drop_recv:
	return 0;

recv_error:
	/* recover the tx_desc */
	if (tx_desc != NULL) {
		tx_desc->rx_desc = NULL;
		nvmf_deactive_tx_desc(tx_desc);
	}
	return -1;
}
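
The sq_tail update near the top of nvmf_recv() is a wrap-around increment, and the overflow check fires when the tail catches up with the head. The same arithmetic in isolation, as a standalone sketch (the helper name is made up; SPDK does not provide it):

#include <stdbool.h>
#include <stdint.h>

/* Standalone sketch of the circular SQ tracking used above.
 * Advancing the tail onto the head means the initiator posted
 * more commands than the negotiated queue depth allows. */
static bool
sq_advance_tail_and_check_overflow(uint16_t *sq_tail, uint16_t sq_head,
				   uint16_t sq_depth)
{
	*sq_tail = (uint16_t)((*sq_tail + 1) % sq_depth); /* same effect as the if/else above */
	return *sq_tail == sq_head; /* true => overflow; set CSTS.CFS */
}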