Example #1
File: nvmf.c Project: spdk/spdk
int
spdk_nvmf_tgt_init(uint16_t max_queue_depth, uint16_t max_queues_per_sess,
		   uint32_t in_capsule_data_size, uint32_t max_io_size)
{
	int rc;

	g_nvmf_tgt.max_queues_per_session = max_queues_per_sess;
	g_nvmf_tgt.max_queue_depth = max_queue_depth;
	g_nvmf_tgt.in_capsule_data_size = in_capsule_data_size;
	g_nvmf_tgt.max_io_size = max_io_size;

	SPDK_TRACELOG(SPDK_TRACE_NVMF, "Max Queues Per Session: %d\n", max_queues_per_sess);
	SPDK_TRACELOG(SPDK_TRACE_NVMF, "Max Queue Depth: %d\n", max_queue_depth);
	SPDK_TRACELOG(SPDK_TRACE_NVMF, "Max In Capsule Data: %d bytes\n", in_capsule_data_size);
	SPDK_TRACELOG(SPDK_TRACE_NVMF, "Max I/O Size: %d bytes\n", max_io_size);

	rc = spdk_nvmf_transport_init();
	if (rc <= 0) {
		SPDK_ERRLOG("Transport initialization failed\n");
		return -1;
	}

	return 0;
}
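
A minimal caller sketch for the function above, assuming illustrative
configuration values (these are not SPDK's actual defaults):

/* Hypothetical caller; the values below are illustrative assumptions. */
static int
app_configure_nvmf_target(void)
{
	uint16_t max_queue_depth = 128;
	uint16_t max_queues_per_sess = 4;
	uint32_t in_capsule_data_size = 4096;
	uint32_t max_io_size = 131072;

	return spdk_nvmf_tgt_init(max_queue_depth, max_queues_per_sess,
				  in_capsule_data_size, max_io_size);
}
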
Example #2
static void
spdk_rpc_clear_trace_flag(struct spdk_jsonrpc_server_conn *conn,
			  const struct spdk_json_val *params,
			  const struct spdk_json_val *id)
{
	struct rpc_trace_flag req = {};
	struct spdk_json_write_ctx *w;

	if (spdk_json_decode_object(params, rpc_trace_flag_decoders,
				    sizeof(rpc_trace_flag_decoders) / sizeof(*rpc_trace_flag_decoders), &req)) {
		SPDK_TRACELOG(SPDK_TRACE_DEBUG, "spdk_json_decode_object failed\n");
		goto invalid;
	}

	if (req.flag == 0) {
		SPDK_TRACELOG(SPDK_TRACE_DEBUG, "flag was 0\n");
		goto invalid;
	}

	spdk_log_clear_trace_flag(req.flag);
	free_rpc_trace_flag(&req);

	if (id == NULL) {
		return;
	}

	w = spdk_jsonrpc_begin_result(conn, id);
	spdk_json_write_bool(w, true);
	spdk_jsonrpc_end_result(conn, w);
	return;

invalid:
	spdk_jsonrpc_send_error_response(conn, id, SPDK_JSONRPC_ERROR_INVALID_PARAMS, "Invalid parameters");
	free_rpc_trace_flag(&req);
}
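
The decoder table referenced above is defined elsewhere in the file; based on
how req.flag is used, it likely resembles the sketch below (the struct layout
and field name are assumptions):

/* Assumed shape of the request struct and its decoder table. */
struct rpc_trace_flag {
	char *flag;
};

static const struct spdk_json_object_decoder rpc_trace_flag_decoders[] = {
	{"flag", offsetof(struct rpc_trace_flag, flag), spdk_json_decode_string},
};
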
Example #3
static int
nvmf_process_property_set(struct spdk_nvmf_conn *conn,
			  struct nvme_qp_tx_desc *tx_desc)
{
	struct spdk_nvmf_fabric_prop_set_rsp *response;
	struct nvmf_request *req = &tx_desc->req_state;
	struct spdk_nvmf_fabric_prop_set_cmd *cmd;
	bool	shutdown = false;
	int	ret;

	cmd = &req->cmd->prop_set_cmd;
	response = &req->rsp->prop_set_rsp;

	nvmf_property_set(conn->sess, cmd, response, &shutdown);
	if (shutdown == true) {
		SPDK_TRACELOG(SPDK_TRACE_DEBUG, "Call to set properties has indicated shutdown\n");
		conn->state = CONN_STATE_FABRIC_DISCONNECT;
	}

	/* Send the NVMf response if it was set up by the NVMf library */
	SPDK_TRACELOG(SPDK_TRACE_DEBUG, "send property set capsule response\n");
	ret = spdk_nvmf_send_response(conn, req);
	if (ret) {
		SPDK_ERRLOG("Unable to send aq qp tx descriptor\n");
		return -1;
	}

	return 0;
}
Example #4
static void
spdk_rpc_kill_instance(struct spdk_jsonrpc_server_conn *conn,
		       const struct spdk_json_val *params,
		       const struct spdk_json_val *id)
{
	static const struct {
		const char	*signal_string;
		int32_t		signal;
	} signals[] = {
		{"SIGINT",	SIGINT},
		{"SIGTERM",	SIGTERM},
		{"SIGQUIT",	SIGQUIT},
		{"SIGHUP",	SIGHUP},
		{"SIGKILL",	SIGKILL},
	};
	size_t i, sig_count;
	int signal;
	struct rpc_kill_instance req = {};
	struct spdk_json_write_ctx *w;

	if (spdk_json_decode_object(params, rpc_kill_instance_decoders,
				    sizeof(rpc_kill_instance_decoders) / sizeof(*rpc_kill_instance_decoders),
				    &req)) {
		SPDK_TRACELOG(SPDK_TRACE_DEBUG, "spdk_json_decode_object failed\n");
		goto invalid;
	}

	sig_count = sizeof(signals) / sizeof(*signals);
	signal = atoi(req.sig_name);
	for (i = 0 ; i < sig_count; i++) {
		if (strcmp(req.sig_name, signals[i].signal_string) == 0 ||
		    signal == signals[i].signal) {
			break;
		}
	}

	if (i == sig_count) {
		goto invalid;
	}

	SPDK_TRACELOG(SPDK_TRACE_DEBUG, "sending signal %d\n", signals[i].signal);
	kill(getpid(), signals[i].signal);
	free_rpc_kill_instance(&req);

	if (id == NULL) {
		return;
	}

	w = spdk_jsonrpc_begin_result(conn, id);
	spdk_json_write_bool(w, true);
	spdk_jsonrpc_end_result(conn, w);
	return;

invalid:
	spdk_jsonrpc_send_error_response(conn, id, SPDK_JSONRPC_ERROR_INVALID_PARAMS, "Invalid parameters");
	free_rpc_kill_instance(&req);
}
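
As a usage note, a client request to this handler might look like the
following (the registered method name is assumed, and the parameter name is
inferred from the struct field):

	{"jsonrpc": "2.0", "method": "kill_instance", "params": {"sig_name": "SIGTERM"}, "id": 1}

Because the loop also compares atoi(req.sig_name) against each table entry, a
numeric string such as "15" would match SIGTERM as well.
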
Example #5
static int
nvmf_process_admin_command(struct spdk_nvmf_conn *conn,
			   struct nvme_qp_tx_desc *tx_desc)
{
	struct nvme_qp_rx_desc *rx_desc = tx_desc->rx_desc;
	struct nvmf_request *req;
	struct spdk_nvme_cmd *cmd;
	struct spdk_nvme_sgl_descriptor *sgl;
	struct spdk_nvmf_keyed_sgl_descriptor *keyed_sgl;
	void *buf = NULL;
	uint32_t len = 0;
	int	ret;

	req = &tx_desc->req_state;
	cmd = &req->cmd->nvme_cmd;
	sgl = (struct spdk_nvme_sgl_descriptor *)&cmd->dptr.sgl1;
	keyed_sgl = (struct spdk_nvmf_keyed_sgl_descriptor *)sgl;

	/*
	 * NVMf does not support in-capsule data for admin command or response
	 * capsules.  If the caller indicates an SGL for return RDMA data,
	 * verify the SGL and prepare the data buffer reference and length for
	 * the NVMf library.  Only keyed-type SGLs are supported for return
	 * data.
	 */
	if (sgl->type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK &&
	    (sgl->type_specific == SPDK_NVME_SGL_SUBTYPE_ADDRESS ||
	     sgl->type_specific == SPDK_NVME_SGL_SUBTYPE_INVALIDATE_KEY)) {
		buf = (void *)rx_desc->bb;
		len = rx_desc->bb_sgl.length;
		req->remote_addr = keyed_sgl->address;
		req->rkey = keyed_sgl->key;
		req->length = keyed_sgl->length;
	}

	SPDK_TRACELOG(SPDK_TRACE_DEBUG, "	tx_desc %p: req_state %p, rsp %p, addr %p\n",
		      tx_desc, req, (void *)req->rsp, (void *)tx_desc->send_sgl.addr);

	/* send to NVMf library for backend NVMe processing */
	ret = nvmf_process_admin_cmd(req->session, cmd, buf, len, req);
	if (ret) {
		/* The library failed the request and should have
		 * updated the response. */
		SPDK_TRACELOG(SPDK_TRACE_NVMF, "send nvme admin cmd capsule sync response\n");
		ret = spdk_nvmf_send_response(conn, req);
		if (ret) {
			SPDK_ERRLOG("Unable to send aq qp tx descriptor\n");
			goto command_fail;
		}
	}

	return 0;

command_fail:
	return -1;
}
Example #6
int
nvmf_tgt_init(uint16_t max_queue_depth, uint16_t max_queues_per_sess,
	      uint32_t in_capsule_data_size, uint32_t max_io_size)
{
	g_nvmf_tgt.max_queues_per_session = max_queues_per_sess;
	g_nvmf_tgt.max_queue_depth = max_queue_depth;
	g_nvmf_tgt.in_capsule_data_size = in_capsule_data_size;
	g_nvmf_tgt.max_io_size = max_io_size;

	SPDK_TRACELOG(SPDK_TRACE_NVMF, "Max Queues Per Session: %d\n", max_queues_per_sess);
	SPDK_TRACELOG(SPDK_TRACE_NVMF, "Max Queue Depth: %d\n", max_queue_depth);
	SPDK_TRACELOG(SPDK_TRACE_NVMF, "Max In Capsule Data: %d bytes\n", in_capsule_data_size);
	SPDK_TRACELOG(SPDK_TRACE_NVMF, "Max I/O Size: %d bytes\n", max_io_size);

	return 0;
}
Example #7
File: nvmf.c Project: famz/spdk
static int
spdk_nvmf_initialize_pools(void)
{
	SPDK_NOTICELOG("\n*** NVMf Pool Creation ***\n");

	g_num_requests = MAX_SUBSYSTEMS * g_nvmf_tgt.MaxConnectionsPerSession * g_nvmf_tgt.MaxQueueDepth;

	/* create NVMe backend request pool */
	request_mempool = rte_mempool_create("NVMe_Pool",
					     g_num_requests,
					     spdk_nvme_request_size(),
					     128, 0,
					     NULL, NULL, NULL, NULL,
					     SOCKET_ID_ANY, 0);
	if (!request_mempool) {
		SPDK_ERRLOG("create NVMe request pool failed\n");
		return -1;
	}

	SPDK_TRACELOG(SPDK_TRACE_DEBUG, "NVMe request_mempool %p, size %" PRIu64 " bytes\n",
		      request_mempool,
		      (uint64_t)g_num_requests * spdk_nvme_request_size());

	return 0;
}
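
Objects are drawn from this pool elsewhere with the standard DPDK mempool
calls; a minimal sketch (the helper name and error handling are assumptions):

/* Hypothetical allocation helper using the DPDK mempool API. */
static void *
alloc_nvmf_request(void)
{
	void *req_obj = NULL;

	if (rte_mempool_get(request_mempool, &req_obj) != 0) {
		SPDK_ERRLOG("NVMe request pool exhausted\n");
		return NULL;
	}

	return req_obj; /* release later with rte_mempool_put() */
}
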
Example #8
static int64_t
blockdev_aio_writev(struct file_disk *fdisk, struct spdk_io_channel *ch,
		    struct blockdev_aio_task *aio_task,
		    struct iovec *iov, int iovcnt, size_t len, uint64_t offset)
{
	struct iocb *iocb = &aio_task->iocb;
	struct blockdev_aio_io_channel *aio_ch = spdk_io_channel_get_ctx(ch);
	int rc;

	io_prep_pwritev(iocb, fdisk->fd, iov, iovcnt, offset);
	iocb->data = aio_task;
	aio_task->len = len;

	SPDK_TRACELOG(SPDK_TRACE_AIO, "write %d iovs size %lu from off: %#lx\n",
		      iovcnt, len, offset);

	rc = io_submit(aio_ch->io_ctx, 1, &iocb);
	if (rc < 0) {
		spdk_bdev_io_complete(spdk_bdev_io_from_ctx(aio_task), SPDK_BDEV_IO_STATUS_FAILED);
		SPDK_ERRLOG("%s: io_submit returned %d\n", __func__, rc);
		return -1;
	}

	return len;
}
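
Completions for the submitted iocb are reaped on the polling side; a sketch
using the libaio API (the poller name and the success/length check are
assumptions, not the file's actual code):

/* Hypothetical completion poller for the channel's io_context. */
static void
blockdev_aio_poll(struct blockdev_aio_io_channel *aio_ch)
{
	struct io_event events[64];
	int i, nr;

	/* min_nr = 0 returns immediately with whatever has completed */
	nr = io_getevents(aio_ch->io_ctx, 0, 64, events, NULL);
	for (i = 0; i < nr; i++) {
		struct blockdev_aio_task *task = events[i].data;
		enum spdk_bdev_io_status status =
			(events[i].res == task->len) ?
			SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;

		spdk_bdev_io_complete(spdk_bdev_io_from_ctx(task), status);
	}
}
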
Example #9
File: log_ut.c Project: spdk/spdk
static void
log_test(void)
{
	int rc = 0;

	rc = spdk_set_log_facility("test");
	CU_ASSERT(rc == -1);
	rc = spdk_set_log_facility("local7");
	CU_ASSERT(rc == 0);
	rc = spdk_set_log_priority("test");
	CU_ASSERT(rc == -1);
	rc = spdk_set_log_priority("debug");
	CU_ASSERT(rc == 0);

#ifdef DEBUG
	CU_ASSERT(spdk_log_get_trace_flag("debug") == false);

	spdk_log_set_trace_flag("debug");
	CU_ASSERT(spdk_log_get_trace_flag("debug") == true);

	spdk_log_clear_trace_flag("debug");
	CU_ASSERT(spdk_log_get_trace_flag("debug") == false);
#endif

	spdk_open_log();
	spdk_log_set_trace_flag("debug");
	SPDK_WARNLOG("log warning unit test\n");
	SPDK_TRACELOG(SPDK_TRACE_DEBUG, "log trace test\n");
	SPDK_TRACEDUMP(SPDK_TRACE_DEBUG, "log trace dump test:", "trace dump", 10);
	spdk_trace_dump("spdk dump test:", "spdk dump", 9);

	spdk_close_log();
}
Example #10
File: nvmf.c Project: famz/spdk
int
nvmf_tgt_init(int max_queue_depth, int max_conn_per_sess)
{
	int rc;

	if (max_queue_depth >= 1 &&
	    max_queue_depth <= SPDK_NVMF_DEFAULT_MAX_QUEUE_DEPTH) {
		g_nvmf_tgt.MaxQueueDepth = max_queue_depth;
		SPDK_TRACELOG(SPDK_TRACE_DEBUG, "MaxQueueDepth: %d\n",
			      g_nvmf_tgt.MaxQueueDepth);
	} else {
		SPDK_ERRLOG("Invalid MaxQueueDepth: %d\n", max_queue_depth);
		return -EINVAL;
	}

	if (max_conn_per_sess >= 1 &&
	    max_conn_per_sess <= SPDK_NVMF_DEFAULT_MAX_CONNECTIONS_PER_SESSION) {
		g_nvmf_tgt.MaxConnectionsPerSession = max_conn_per_sess;
		SPDK_TRACELOG(SPDK_TRACE_DEBUG, "MaxConnectionsPerSession: %d\n",
			      g_nvmf_tgt.MaxConnectionsPerSession);
	} else {
		SPDK_ERRLOG("Invalid MaxConnectionsPerSession: %d\n", max_conn_per_sess);
		return -EINVAL;
	}

	rc = pthread_mutex_init(&g_nvmf_tgt.mutex, NULL);
	if (rc != 0) {
		SPDK_ERRLOG("mutex_init() failed\n");
		return -1;
	}

	/* init nvmf specific config options */
	if (!g_nvmf_tgt.sin_port) {
		g_nvmf_tgt.sin_port = htons(SPDK_NVMF_DEFAULT_SIN_PORT);
	}

	rc = spdk_nvmf_initialize_pools();
	if (rc != 0) {
		SPDK_ERRLOG("spdk_nvmf_initialize_pools() failed\n");
		return rc;
	}

	return 0;
}
Example #11
static spdk_nvmf_request_exec_status
nvmf_process_discovery_cmd(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_session *session = req->conn->sess;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
	struct spdk_nvmf_discovery_log_page *log;

	/* pre-set response details for this command */
	response->status.sc = SPDK_NVME_SC_SUCCESS;

	if (req->data == NULL) {
		SPDK_ERRLOG("discovery command with no buffer\n");
		response->status.sc = SPDK_NVME_SC_INVALID_FIELD;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	switch (cmd->opc) {
	case SPDK_NVME_OPC_IDENTIFY:
		/* Only Identify Controller is supported */
		if ((cmd->cdw10 & 0xFF) == SPDK_NVME_IDENTIFY_CTRLR) {
			SPDK_TRACELOG(SPDK_TRACE_NVMF, "Identify Controller\n");
			memcpy(req->data, (char *)&session->vcdata, sizeof(struct spdk_nvme_ctrlr_data));
			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
		} else {
			SPDK_ERRLOG("Unsupported identify command\n");
			response->status.sc = SPDK_NVME_SC_INVALID_FIELD;
			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
		}
		break;
	case SPDK_NVME_OPC_GET_LOG_PAGE:
		if ((cmd->cdw10 & 0xFF) == SPDK_NVME_LOG_DISCOVERY) {
			log = (struct spdk_nvmf_discovery_log_page *)req->data;
			/*
			 * Changing discovery information at runtime
			 * is not currently supported.
			 */
			log->genctr = 0;
			log->numrec = 0;
			spdk_format_discovery_log(log, req->length);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
		} else {
			SPDK_ERRLOG("Unsupported log page %u\n", cmd->cdw10 & 0xFF);
			response->status.sc = SPDK_NVME_SC_INVALID_FIELD;
			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
		}
		break;
	default:
		SPDK_ERRLOG("Unsupported Opcode 0x%x for Discovery service\n", cmd->opc);
		response->status.sc = SPDK_NVME_SC_INVALID_OPCODE;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}
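
The header fields written here (genctr, numrec) follow the NVMe over Fabrics
discovery log page format, roughly the layout below (per the spec; SPDK's
actual definition may differ in naming):

/* Discovery log page header; log entries follow at byte offset 1024. */
struct discovery_log_page_hdr {
	uint64_t genctr;	/* generation counter */
	uint64_t numrec;	/* number of records */
	uint16_t recfmt;	/* record format */
	uint8_t  reserved[1006];
};
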
Example #12
static spdk_nvmf_request_exec_status
nvmf_process_fabrics_command(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_conn *conn = req->conn;
	struct spdk_nvmf_capsule_cmd *cap_hdr;

	cap_hdr = &req->cmd->nvmf_cmd;

	if (conn->sess == NULL) {
		/* No session established yet; the only valid command is Connect */
		if (cap_hdr->fctype == SPDK_NVMF_FABRIC_COMMAND_CONNECT) {
			return nvmf_process_connect(req);
		} else {
			SPDK_TRACELOG(SPDK_TRACE_NVMF, "Got fctype 0x%x, expected Connect\n",
				      cap_hdr->fctype);
			req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR;
			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
		}
	} else if (conn->type == CONN_TYPE_AQ) {
		/*
		 * Session is established, and this is an admin queue.
		 * Disallow Connect and allow other fabrics commands.
		 */
		switch (cap_hdr->fctype) {
		case SPDK_NVMF_FABRIC_COMMAND_PROPERTY_SET:
			return nvmf_process_property_set(req);
		case SPDK_NVMF_FABRIC_COMMAND_PROPERTY_GET:
			return nvmf_process_property_get(req);
		default:
			SPDK_TRACELOG(SPDK_TRACE_NVMF, "recv capsule header type invalid [%x]!\n",
				      cap_hdr->fctype);
			req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INVALID_OPCODE;
			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
		}
	} else {
		/* Session is established, and this is an I/O queue */
		/* For now, no I/O-specific Fabrics commands are implemented (other than Connect) */
		SPDK_TRACELOG(SPDK_TRACE_NVMF, "Unexpected I/O fctype 0x%x\n", cap_hdr->fctype);
		req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INVALID_OPCODE;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}
}
Example #13
static void spdk_nvmf_conn_destruct(struct spdk_nvmf_conn *conn)
{
	struct spdk_event *event;

	SPDK_TRACELOG(SPDK_TRACE_DEBUG, "conn %p\n", conn);
	conn->state = CONN_STATE_INVALID;

	event = spdk_event_allocate(rte_lcore_id(), _conn_destruct, conn, NULL, NULL);
	spdk_poller_unregister(&conn->poller, event);
	rte_atomic32_dec(&g_num_connections[rte_lcore_id()]);
}
Example #14
static int64_t
blockdev_malloc_read(struct malloc_disk *mdisk, struct spdk_io_channel *ch,
		     struct copy_task *copy_req,
		     void *buf, uint64_t nbytes, uint64_t offset)
{
	SPDK_TRACELOG(SPDK_TRACE_MALLOC, "read %lu bytes from offset %#lx to %p\n",
		      nbytes, offset, buf);

	return spdk_copy_submit(copy_req, ch, buf, mdisk->malloc_buf + offset,
				nbytes, malloc_done);
}
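
The malloc_done callback passed to spdk_copy_submit() completes the bdev I/O
once the copy engine finishes; a hedged sketch (the signature and status
mapping are assumptions, not the file's actual code):

/* Hypothetical completion callback for the copy engine. */
static void
malloc_done(void *ref, int status)
{
	struct copy_task *task = ref;

	spdk_bdev_io_complete(spdk_bdev_io_from_ctx(task),
			      status == 0 ? SPDK_BDEV_IO_STATUS_SUCCESS :
			      SPDK_BDEV_IO_STATUS_FAILED);
}
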
Example #15
static void
nvmf_process_async_completion(struct nvmf_request *req)
{
	struct nvme_qp_tx_desc *tx_desc = (struct nvme_qp_tx_desc *)req->fabric_tx_ctx;
	struct spdk_nvme_cpl *response;
	struct nvme_qp_rx_desc *rx_desc = tx_desc->rx_desc;
	int ret;

	response = &req->rsp->nvme_cpl;

	/* Was the command successful? */
	if ((response->status.sc == SPDK_NVME_SC_SUCCESS) && req->length > 0) {
		/* data to be copied to host via memory RDMA */
		if (req->length < rx_desc->bb_len) {
			/*
			 * Temporarily adjust the SGE to copy only what the
			 * host is prepared to receive.
			 */
			SPDK_TRACELOG(SPDK_TRACE_DEBUG, " *** modify sgl length from %x to %x\n",
				      rx_desc->bb_sgl.length, req->length);
			rx_desc->bb_sgl.length = req->length;
		}
		ret = nvmf_post_rdma_write(tx_desc->conn, tx_desc);
		if (ret) {
			SPDK_ERRLOG("Unable to post rdma write tx descriptor\n");
			goto command_fail;
		}
	}

	/* Now send back the response */
	SPDK_TRACELOG(SPDK_TRACE_DEBUG, "send nvme cmd capsule response\n");
	ret = spdk_nvmf_send_response(tx_desc->conn, req);
	if (ret) {
		SPDK_ERRLOG("Unable to send aq qp tx descriptor\n");
		goto command_fail;
	}

	return;

command_fail:
	nvmf_deactive_tx_desc(tx_desc);
}
Example #16
static int64_t
blockdev_malloc_writev(struct malloc_disk *mdisk, struct spdk_io_channel *ch,
		       struct copy_task *copy_req,
		       struct iovec *iov, int iovcnt, size_t len, uint64_t offset)
{
	if ((iovcnt != 1) || (iov->iov_len != len)) {
		return -1;
	}

	SPDK_TRACELOG(SPDK_TRACE_MALLOC, "wrote %lu bytes to offset %#lx from %p\n",
		      iov->iov_len, offset, iov->iov_base);

	return spdk_copy_submit(copy_req, ch, mdisk->malloc_buf + offset,
				iov->iov_base, len, malloc_done);
}
Example #17
/**

\brief This is the main routine for the nvmf connection work item.

Serves mainly as a wrapper for the nvmf_execute_conn() function, which
does the bulk of the work.  This function handles connection cleanup when
the NVMf application is exiting or there is an error on the connection.
It also drains the connection if the work item is being suspended to
move to a different reactor.

*/
static void
spdk_nvmf_conn_do_work(void *arg)
{
	struct spdk_nvmf_conn *conn = arg;
	int rc;

	rc = nvmf_execute_conn(conn);

	if (rc != 0 || conn->state == CONN_STATE_EXITING ||
	    conn->state == CONN_STATE_FABRIC_DISCONNECT) {
		SPDK_TRACELOG(SPDK_TRACE_DEBUG, "state exiting to shutdown\n");
		spdk_nvmf_conn_destruct(conn);
	}
}
Example #18
void
spdk_nvmf_handle_connect(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_fabric_connect_cmd *connect = &req->cmd->connect_cmd;
	struct spdk_nvmf_fabric_connect_data *connect_data = (struct spdk_nvmf_fabric_connect_data *)
			req->data;
	struct spdk_nvmf_fabric_connect_rsp *response = &req->rsp->connect_rsp;
	struct spdk_nvmf_conn *conn = req->conn;

	spdk_nvmf_session_connect(conn, connect, connect_data, response);

	SPDK_TRACELOG(SPDK_TRACE_NVMF, "connect capsule response: cntlid = 0x%04x\n",
		      response->status_code_specific.success.cntlid);

	spdk_nvmf_request_complete(req);
	return;
}
Example #19
static int
spdk_nvmf_send_response(struct spdk_nvmf_conn *conn, struct nvmf_request *req)
{
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;

	/* Zero out fields reserved in NVMf */
	rsp->sqid = 0;
	rsp->status.p = 0;

	rsp->sqhd = conn->sq_head;
	rsp->cid = req->cid;

	SPDK_TRACELOG(SPDK_TRACE_NVMF,
		      "cpl: cdw0=0x%x rsvd1=0x%x sqhd=0x%x sqid=0x%x cid=0x%x status=0x%x\n",
		      rsp->cdw0, rsp->rsvd1, rsp->sqhd, rsp->sqid, rsp->cid, *(uint16_t *)&rsp->status);

	return nvmf_post_rdma_send(conn, req->fabric_tx_ctx);
}
Example #20
/**

\brief Create an NVMf fabric connection from the given parameters and schedule it
       on a reactor thread.

\code

# identify the reactor where the new connection's work item will be scheduled
reactor = nvmf_allocate_reactor()
schedule fabric connection work item on reactor

\endcode

*/
int
spdk_nvmf_startup_conn(struct spdk_nvmf_conn *conn)
{
	int lcore;
	struct spdk_nvmf_conn *admin_conn;
	uint64_t nvmf_session_core = spdk_app_get_core_mask();

	/*
	 * If starting an I/O connection, determine the core allocated
	 * to the admin queue and use it to build the requested core
	 * mask.  We cannot assume the nvmf session has been created yet
	 * at the time of fabric connection setup, so rely on the fabric
	 * function to locate the matching controller session.
	 */
	if (conn->type == CONN_TYPE_IOQ && conn->cntlid != 0) {
		admin_conn = spdk_find_nvmf_conn_by_cntlid(conn->cntlid);
		if (admin_conn != NULL) {
			SPDK_TRACELOG(SPDK_TRACE_DEBUG, "Located admin conn session core %d\n",
				      admin_conn->poller.lcore);
			nvmf_session_core = 1ULL << admin_conn->poller.lcore;
		}
	}

	lcore = nvmf_allocate_reactor(nvmf_session_core);
	if (lcore < 0) {
		SPDK_ERRLOG("Unable to find core to launch connection.\n");
		goto err0;
	}

	conn->state = CONN_STATE_RUNNING;
	SPDK_NOTICELOG("Launching nvmf connection[qid=%d] on core: %d\n",
		       conn->qid, lcore);
	conn->poller.fn = spdk_nvmf_conn_do_work;
	conn->poller.arg = conn;

	rte_atomic32_inc(&g_num_connections[lcore]);
	spdk_poller_register(&conn->poller, lcore, NULL);

	return 0;
err0:
	free_conn(conn);
	return -1;
}
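
nvmf_allocate_reactor() is expected to pick a core from the supplied mask,
presumably preferring lightly loaded cores given the g_num_connections
accounting above; a hypothetical sketch of such a selection:

/* Hypothetical least-loaded core selection over a 64-bit core mask;
 * the real nvmf_allocate_reactor() may differ. */
static int
pick_least_loaded_core(uint64_t core_mask)
{
	int lcore, best = -1;
	int32_t load, best_load = INT32_MAX;

	for (lcore = 0; lcore < 64 && lcore < RTE_MAX_LCORE; lcore++) {
		if (!(core_mask & (1ULL << lcore))) {
			continue;
		}
		load = rte_atomic32_read(&g_num_connections[lcore]);
		if (load < best_load) {
			best_load = load;
			best = lcore;
		}
	}

	return best;
}
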
Example #21
void spdk_shutdown_nvmf_conns(void)
{
	struct spdk_nvmf_conn	*conn;
	int				i;

	pthread_mutex_lock(&g_conns_mutex);

	for (i = 0; i < g_max_conns; i++) {
		conn = &g_conns_array[i];
		if (!conn->is_valid) {
			continue;
		}
		SPDK_TRACELOG(SPDK_TRACE_DEBUG, "Set conn %d state to exiting\n", i);
		conn->state = CONN_STATE_EXITING;
	}

	pthread_mutex_unlock(&g_conns_mutex);
	rte_timer_init(&g_shutdown_timer);
	rte_timer_reset(&g_shutdown_timer, rte_get_timer_hz() / 1000, PERIODICAL,
			rte_get_master_lcore(), spdk_nvmf_conn_check_shutdown, NULL);
}
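
The periodic callback registered above presumably waits for the connections
to drain before final teardown; a hypothetical shape (the helper name is
assumed, not taken from the file):

/* Hypothetical periodic shutdown check. */
static void
spdk_nvmf_conn_check_shutdown(struct rte_timer *timer, void *arg)
{
	if (spdk_nvmf_get_active_conns() == 0) {
		rte_timer_stop(timer);
		/* ... final cleanup of the connection array ... */
	}
}
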
Example #22
static void
nvmf_trace_command(union nvmf_h2c_msg *h2c_msg, enum conn_type conn_type)
{
	struct spdk_nvmf_capsule_cmd *cap_hdr = &h2c_msg->nvmf_cmd;
	struct spdk_nvme_cmd *cmd = &h2c_msg->nvme_cmd;
	struct spdk_nvme_sgl_descriptor *sgl = &cmd->dptr.sgl1;
	uint8_t opc;

	if (cmd->opc == SPDK_NVME_OPC_FABRIC) {
		opc = cap_hdr->fctype;
		SPDK_TRACELOG(SPDK_TRACE_NVMF, "%s Fabrics cmd: fctype 0x%02x cid %u\n",
			      conn_type == CONN_TYPE_AQ ? "Admin" : "I/O",
			      cap_hdr->fctype, cap_hdr->cid);
	} else {
		opc = cmd->opc;
		SPDK_TRACELOG(SPDK_TRACE_NVMF, "%s cmd: opc 0x%02x fuse %u cid %u nsid %u cdw10 0x%08x\n",
			      conn_type == CONN_TYPE_AQ ? "Admin" : "I/O",
			      cmd->opc, cmd->fuse, cmd->cid, cmd->nsid, cmd->cdw10);
		if (cmd->mptr) {
			SPDK_TRACELOG(SPDK_TRACE_NVMF, "mptr 0x%" PRIx64 "\n", cmd->mptr);
		}
		if (cmd->psdt != SPDK_NVME_PSDT_SGL_MPTR_CONTIG &&
		    cmd->psdt != SPDK_NVME_PSDT_SGL_MPTR_SGL) {
			SPDK_TRACELOG(SPDK_TRACE_NVMF, "psdt %u\n", cmd->psdt);
		}
	}

	if (spdk_nvme_opc_get_data_transfer(opc) != SPDK_NVME_DATA_NONE) {
		if (sgl->generic.type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK) {
			SPDK_TRACELOG(SPDK_TRACE_NVMF,
				      "SGL: Keyed%s: addr 0x%" PRIx64 " key 0x%x len 0x%x\n",
				      sgl->generic.subtype == SPDK_NVME_SGL_SUBTYPE_INVALIDATE_KEY ? " (Inv)" : "",
				      sgl->address, sgl->keyed.key, sgl->keyed.length);
		} else if (sgl->generic.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK) {
			SPDK_TRACELOG(SPDK_TRACE_NVMF, "SGL: Data block: %s 0x%" PRIx64 " len 0x%x\n",
				      sgl->unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET ? "offs" : "addr",
				      sgl->address, sgl->unkeyed.length);
		} else {
			SPDK_TRACELOG(SPDK_TRACE_NVMF, "SGL type 0x%x subtype 0x%x\n",
				      sgl->generic.type, sgl->generic.subtype);
		}
	}
}
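
For reference, the keyed SGL descriptor decoded above packs into 16 bytes per
the NVMe over Fabrics spec; shown here as a standalone sketch (SPDK's union
definition differs in naming but matches this bit layout):

/* Keyed SGL data block descriptor (16 bytes). */
struct keyed_sgl_descriptor {
	uint64_t address;
	uint64_t length  : 24;
	uint64_t key     : 32;
	uint64_t subtype : 4;
	uint64_t type    : 4;
};
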
Example #23
static bool
nvmf_process_fabrics_command(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_capsule_cmd *cap_hdr;

	cap_hdr = &req->cmd->nvmf_cmd;

	switch (cap_hdr->fctype) {
	case SPDK_NVMF_FABRIC_COMMAND_PROPERTY_SET:
		return nvmf_process_property_set(req);
	case SPDK_NVMF_FABRIC_COMMAND_PROPERTY_GET:
		return nvmf_process_property_get(req);
	case SPDK_NVMF_FABRIC_COMMAND_CONNECT:
		return nvmf_process_connect(req);
	default:
		SPDK_TRACELOG(SPDK_TRACE_DEBUG, "recv capsule header type invalid [%x]!\n",
			      cap_hdr->fctype);
		req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INVALID_OPCODE;
		return true;
	}
}
Example #24
static int
nvmf_process_fabrics_command(struct spdk_nvmf_conn *conn, struct nvme_qp_tx_desc *tx_desc)
{
	struct nvme_qp_rx_desc *rx_desc = tx_desc->rx_desc;
	struct spdk_nvmf_capsule_cmd *cap_hdr;

	cap_hdr = (struct spdk_nvmf_capsule_cmd *)&rx_desc->msg_buf;

	switch (cap_hdr->fctype) {
	case SPDK_NVMF_FABRIC_COMMAND_PROPERTY_SET:
		return nvmf_process_property_set(conn, tx_desc);
	case SPDK_NVMF_FABRIC_COMMAND_PROPERTY_GET:
		return nvmf_process_property_get(conn, tx_desc);
	case SPDK_NVMF_FABRIC_COMMAND_CONNECT:
		return nvmf_process_connect(conn, tx_desc);
	default:
		SPDK_TRACELOG(SPDK_TRACE_DEBUG, "recv capsule header type invalid [%x]!\n",
			      cap_hdr->fctype);
		return 1; /* skip, do nothing */
	}
}
Example #25
int
spdk_nvmf_request_complete(struct spdk_nvmf_request *req)
{
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;

	response->sqid = 0;
	response->status.p = 0;
	response->cid = req->cmd->nvme_cmd.cid;

	SPDK_TRACELOG(SPDK_TRACE_NVMF,
		      "cpl: cid=%u cdw0=0x%08x rsvd1=%u status=0x%04x\n",
		      response->cid, response->cdw0, response->rsvd1,
		      *(uint16_t *)&response->status);

	if (req->conn->transport->req_complete(req)) {
		SPDK_ERRLOG("Transport request completion error!\n");
		return -1;
	}

	return 0;
}
Example #26
int spdk_initialize_nvmf_conns(int max_connections)
{
	size_t conns_size;
	int i, rc;

	SPDK_TRACELOG(SPDK_TRACE_DEBUG, "Enter\n");

	rc = pthread_mutex_init(&g_conns_mutex, NULL);
	if (rc != 0) {
		SPDK_ERRLOG("mutex_init() failed\n");
		return -1;
	}

	sprintf(g_shm_name, "nvmf_conns.%d", spdk_app_get_instance_id());
	g_conns_array_fd = shm_open(g_shm_name, O_RDWR | O_CREAT, 0600);
	if (g_conns_array_fd < 0) {
		SPDK_ERRLOG("could not shm_open %s\n", g_shm_name);
		return -1;
	}

	g_max_conns = max_connections;
	conns_size = sizeof(struct spdk_nvmf_conn) * g_max_conns;

	if (ftruncate(g_conns_array_fd, conns_size) != 0) {
		SPDK_ERRLOG("could not ftruncate\n");
		shm_unlink(g_shm_name);
		close(g_conns_array_fd);
		return -1;
	}
	g_conns_array = mmap(0, conns_size, PROT_READ | PROT_WRITE, MAP_SHARED,
			     g_conns_array_fd, 0);
	if (g_conns_array == MAP_FAILED) {
		SPDK_ERRLOG("could not mmap conns array\n");
		shm_unlink(g_shm_name);
		close(g_conns_array_fd);
		return -1;
	}

	memset(g_conns_array, 0, conns_size);

	for (i = 0; i < RTE_MAX_LCORE; i++) {
		rte_atomic32_set(&g_num_connections[i], 0);
	}

	return 0;
}
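
A matching teardown path would unmap and unlink the shared memory; a
hypothetical sketch (the function name is assumed, not taken from the file):

/* Hypothetical teardown counterpart to the init above. */
void
spdk_free_nvmf_conns(void)
{
	munmap(g_conns_array, sizeof(struct spdk_nvmf_conn) * g_max_conns);
	close(g_conns_array_fd);
	shm_unlink(g_shm_name);
}
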
Example #27
static int nvmf_execute_conn(struct spdk_nvmf_conn *conn)
{
	int		rc = 0;

	/* for an active session, process any pending NVMf completions */
	if (conn->sess) {
		if (conn->type == CONN_TYPE_AQ) {
			nvmf_check_admin_completions(conn->sess);
		} else {
			nvmf_check_io_completions(conn->sess);
		}
	}

	/* process all pending completions */
	rc = nvmf_cq_event_handler(conn);
	if (rc > 0) {
		SPDK_TRACELOG(SPDK_TRACE_RDMA, "CQ event handler, %d CQ completions\n", rc);
	} else if (rc < 0) {
		SPDK_ERRLOG("CQ event handler error!\n");
		return -1;
	}

	return 0;
}
Example #28
static int
nvmf_process_property_get(struct spdk_nvmf_conn *conn,
			  struct nvme_qp_tx_desc *tx_desc)
{
	struct spdk_nvmf_fabric_prop_get_rsp *response;
	struct nvmf_request *req = &tx_desc->req_state;
	struct spdk_nvmf_fabric_prop_get_cmd *cmd;
	int	ret;

	cmd = &req->cmd->prop_get_cmd;
	response = &req->rsp->prop_get_rsp;

	nvmf_property_get(conn->sess, cmd, response);

	/* Send the NVMf response if it was set up by the NVMf library */
	SPDK_TRACELOG(SPDK_TRACE_DEBUG, "send property get capsule response\n");
	ret = spdk_nvmf_send_response(conn, req);
	if (ret) {
		SPDK_ERRLOG("Unable to send aq qp tx descriptor\n");
		return -1;
	}

	return 0;
}
Example #29
static void
_conn_destruct(spdk_event_t event)
{
	struct spdk_nvmf_conn *conn = spdk_event_get_arg1(event);

	/*
	 * Notify NVMf library of the fabric connection
	 * going away.  If this is the AQ connection then
	 * set state for other connections to abort.
	 */
	nvmf_disconnect((void *)conn, conn->sess);

	if (conn->type == CONN_TYPE_AQ) {
		SPDK_TRACELOG(SPDK_TRACE_DEBUG, "AQ connection destruct, trigger session closure\n");
		/* Trigger all I/O connections to shutdown */
		conn->state = CONN_STATE_FABRIC_DISCONNECT;
	}

	nvmf_rdma_conn_cleanup(conn);

	pthread_mutex_lock(&g_conns_mutex);
	free_conn(conn);
	pthread_mutex_unlock(&g_conns_mutex);
}
Example #30
static int
nvmf_io_cmd_continue(struct spdk_nvmf_conn *conn, struct nvme_qp_tx_desc *tx_desc)
{
	struct nvme_qp_rx_desc *rx_desc;
	struct nvmf_request *req;
	struct spdk_nvme_cmd *cmd;
	int ret;

	rx_desc = tx_desc->rx_desc;
	if (rx_desc == NULL) {
		SPDK_ERRLOG(" rx_desc does not exist!\n");
		return -1;
	}

	req = &tx_desc->req_state;
	cmd = &req->cmd->nvme_cmd;
	req->fabric_rx_ctx = rx_desc;

	/* clear the SGL details for RDMA performed */
	req->length = 0;

	/* send to NVMf library for backend NVMe processing */
	ret = nvmf_process_io_cmd(req->session, cmd, (void *)rx_desc->bb, rx_desc->bb_sgl.length, req);
	if (ret) {
		/* The library failed the request and should have
		 * updated the response. */
		SPDK_TRACELOG(SPDK_TRACE_DEBUG, "send nvme io cmd capsule error response\n");
		ret = spdk_nvmf_send_response(conn, req);
		if (ret) {
			SPDK_ERRLOG("Unable to send aq qp tx descriptor\n");
			return -1;
		}
	}
	return 0;
}