Esempio n. 1
0
File: nvmf.c Progetto: famz/spdk
/*
 * Bring up the NVMf target subsystem: parse configuration, initialize the
 * transport layer, and register the discovery subsystem.
 *
 * Returns 0 on success; a negative value on any failure.
 */
static int
nvmf_tgt_subsystem_initialize(void)
{
	int rc;

	/* Read NVMf settings from the configuration file. */
	rc = spdk_nvmf_parse_conf();
	if (rc < 0) {
		SPDK_ERRLOG("spdk_nvmf_parse_conf() failed\n");
		return rc;
	}

	/* Start the transport layer; a non-positive return means no
	 * transport came up. */
	rc = spdk_nvmf_transport_init();
	if (rc <= 0) {
		SPDK_ERRLOG("Transport initialization failed\n");
		return -1;
	}

	/* Register the discovery subsystem last. */
	rc = spdk_add_nvmf_discovery_subsystem();
	if (rc < 0) {
		SPDK_ERRLOG("spdk_add_nvmf_discovery_subsystem failed\n");
	}

	return rc;
}
Esempio n. 2
0
/*
 * Create malloc-backed disks as described by the optional [Malloc]
 * configuration section.  Returns 0 on success (including the case where
 * no section exists), EINVAL on a bad configuration or creation failure.
 */
static int blockdev_malloc_initialize(void)
{
	struct spdk_conf_section *sp = spdk_conf_find_section(NULL, "Malloc");
	int NumberOfLuns, LunSizeInMB, BlockSize, i;
	uint64_t size;
	struct malloc_disk *mdisk;

	/* Absence of a [Malloc] section simply means no malloc disks. */
	if (sp == NULL) {
		return 0;
	}

	NumberOfLuns = spdk_conf_section_get_intval(sp, "NumberOfLuns");
	LunSizeInMB = spdk_conf_section_get_intval(sp, "LunSizeInMB");
	BlockSize = spdk_conf_section_get_intval(sp, "BlockSize");

	if (NumberOfLuns < 1 || LunSizeInMB < 1) {
		SPDK_ERRLOG("Malloc section present, but no devices specified\n");
		return EINVAL;
	}

	if (BlockSize < 1) {
		/* Default is 512 bytes */
		BlockSize = 512;
	}

	/* Per-LUN capacity in bytes. */
	size = (uint64_t)LunSizeInMB * 1024 * 1024;

	for (i = 0; i < NumberOfLuns; i++) {
		mdisk = create_malloc_disk(size / BlockSize, BlockSize);
		if (mdisk == NULL) {
			SPDK_ERRLOG("Could not create malloc disk\n");
			return EINVAL;
		}
	}

	return 0;
}
Esempio n. 3
0
/*
 * Create and register a malloc-backed block device.
 *
 * \param num_blocks Number of blocks; must be non-zero.
 * \param block_size Block size in bytes; must be a non-zero multiple of 512.
 * \return Pointer to the new disk, or NULL on failure.
 */
struct malloc_disk *create_malloc_disk(uint64_t num_blocks, uint32_t block_size)
{
	struct malloc_disk	*mdisk;

	/* Fixed: also reject block_size == 0 (0 % 512 == 0 previously let it
	 * through, leading to a zero-byte buffer and a divide-by-zero risk
	 * in callers). */
	if (block_size == 0 || block_size % 512 != 0) {
		SPDK_ERRLOG("Block size %u is not a multiple of 512.\n", block_size);
		return NULL;
	}

	if (num_blocks == 0) {
		SPDK_ERRLOG("Disk must be more than 0 blocks\n");
		return NULL;
	}

	/* Fixed: reject sizes whose byte count would overflow the uint64_t
	 * passed to rte_zmalloc() below. */
	if (num_blocks > UINT64_MAX / block_size) {
		SPDK_ERRLOG("Disk size overflows 64 bits: %" PRIu64 " blocks of %u bytes\n",
			    num_blocks, block_size);
		return NULL;
	}

	/* Fixed: rte_zmalloc() returns zeroed memory, replacing the previous
	 * rte_malloc() + memset() pair. */
	mdisk = rte_zmalloc(NULL, sizeof(*mdisk), 0);
	if (!mdisk) {
		perror("mdisk");
		return NULL;
	}

	/*
	 * Allocate the large backend memory buffer using rte_zmalloc(),
	 *  so that we guarantee it is allocated from hugepage memory.
	 *
	 * TODO: need to pass a hint so we know which socket to allocate
	 *  from on multi-socket systems.
	 */
	mdisk->malloc_buf = rte_zmalloc(NULL, num_blocks * block_size, 2 * 1024 * 1024);
	if (!mdisk->malloc_buf) {
		SPDK_ERRLOG("rte_zmalloc failed\n");
		rte_free(mdisk);
		return NULL;
	}

	/* Name disks sequentially: Malloc0, Malloc1, ... */
	snprintf(mdisk->disk.name, SPDK_BDEV_MAX_NAME_LENGTH, "Malloc%d", malloc_disk_count);
	snprintf(mdisk->disk.product_name, SPDK_BDEV_MAX_PRODUCT_NAME_LENGTH, "Malloc disk");
	malloc_disk_count++;

	mdisk->disk.write_cache = 1;
	mdisk->disk.blocklen = block_size;
	mdisk->disk.blockcnt = num_blocks;
	mdisk->disk.thin_provisioning = 1;
	mdisk->disk.max_unmap_bdesc_count = MALLOC_MAX_UNMAP_BDESC;

	mdisk->disk.ctxt = mdisk;
	mdisk->disk.fn_table = &malloc_fn_table;

	/* Hand the disk to the bdev layer, then link it into the module's
	 * global list. */
	spdk_bdev_register(&mdisk->disk);

	mdisk->next = g_malloc_disk_head;
	g_malloc_disk_head = mdisk;

	return mdisk;
}
Esempio n. 4
0
/*
 * Handle an admin command addressed to the discovery subsystem.
 *
 * Only Identify Controller and Get Log Page (discovery log) are supported;
 * anything else completes with an error status code.  Every path returns
 * SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE: discovery commands are always
 * handled synchronously here.
 */
static spdk_nvmf_request_exec_status
nvmf_process_discovery_cmd(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_session *session = req->conn->sess;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
	struct spdk_nvmf_discovery_log_page *log;

	/* pre-set response details for this command */
	response->status.sc = SPDK_NVME_SC_SUCCESS;

	/* Both supported opcodes transfer data, so a missing buffer is an
	 * error regardless of opcode. */
	if (req->data == NULL) {
		SPDK_ERRLOG("discovery command with no buffer\n");
		response->status.sc = SPDK_NVME_SC_INVALID_FIELD;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	switch (cmd->opc) {
	case SPDK_NVME_OPC_IDENTIFY:
		/* Only identify controller can be supported */
		/* CNS is the low byte of CDW10. */
		if ((cmd->cdw10 & 0xFF) == SPDK_NVME_IDENTIFY_CTRLR) {
			SPDK_TRACELOG(SPDK_TRACE_NVMF, "Identify Controller\n");
			/* NOTE(review): req->data is assumed to be at least
			 * sizeof(struct spdk_nvme_ctrlr_data) bytes - presumably
			 * guaranteed by the transport; confirm. */
			memcpy(req->data, (char *)&session->vcdata, sizeof(struct spdk_nvme_ctrlr_data));
			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
		} else {
			SPDK_ERRLOG("Unsupported identify command\n");
			response->status.sc = SPDK_NVME_SC_INVALID_FIELD;
			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
		}
		break;
	case SPDK_NVME_OPC_GET_LOG_PAGE:
		/* Log page ID is the low byte of CDW10. */
		if ((cmd->cdw10 & 0xFF) == SPDK_NVME_LOG_DISCOVERY) {
			log = (struct spdk_nvmf_discovery_log_page *)req->data;
			/*
			 * Does not support change discovery
			 *  information at runtime now.
			 */
			log->genctr = 0;
			log->numrec = 0;
			spdk_format_discovery_log(log, req->length);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
		} else {
			SPDK_ERRLOG("Unsupported log page %u\n", cmd->cdw10 & 0xFF);
			response->status.sc = SPDK_NVME_SC_INVALID_FIELD;
			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
		}
		break;
	default:
		SPDK_ERRLOG("Unsupported Opcode 0x%x for Discovery service\n", cmd->opc);
		response->status.sc = SPDK_NVME_SC_INVALID_OPCODE;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}
Esempio n. 5
0
/*
 * Construct a SCSI device from parallel arrays of bdev names and LUN IDs.
 *
 * \param name Device name (copied into the new device).
 * \param lun_name_list Array of num_luns bdev names; no entry may be NULL.
 * \param lun_id_list Array of num_luns LUN IDs; entry 0 must be LUN 0.
 * \param num_luns Number of LUNs; must be at least 1.
 * \return The new device, or NULL on any validation or construction failure.
 */
struct spdk_scsi_dev *
spdk_scsi_dev_construct(const char *name, char *lun_name_list[], int *lun_id_list, int num_luns)
{
	struct spdk_scsi_dev *dev;
	struct spdk_bdev *bdev;
	struct spdk_scsi_lun *lun;
	int i;

	if (num_luns == 0) {
		SPDK_ERRLOG("device %s: no LUNs specified\n", name);
		return NULL;
	}

	if (lun_id_list[0] != 0) {
		SPDK_ERRLOG("device %s: no LUN 0 specified\n", name);
		return NULL;
	}

	/* Validate all names up front so we fail before allocating. */
	for (i = 0; i < num_luns; i++) {
		if (lun_name_list[i] == NULL) {
			SPDK_ERRLOG("NULL spdk_scsi_lun for LUN %d\n",
				    lun_id_list[i]);
			return NULL;
		}
	}

	dev = allocate_dev();
	if (dev == NULL) {
		return NULL;
	}

	/* Fixed: snprintf always NUL-terminates; strncpy could leave
	 * dev->name unterminated when name is >= SPDK_SCSI_DEV_MAX_NAME. */
	snprintf(dev->name, SPDK_SCSI_DEV_MAX_NAME, "%s", name);

	dev->num_ports = 0;
	dev->maxlun = 0;

	for (i = 0; i < num_luns; i++) {
		bdev = spdk_bdev_get_by_name(lun_name_list[i]);
		if (bdev == NULL) {
			free_dev(dev);
			return NULL;
		}

		lun = spdk_scsi_lun_construct(bdev->name, bdev);
		if (lun == NULL) {
			free_dev(dev);
			return NULL;
		}

		spdk_scsi_dev_add_lun(dev, lun, lun_id_list[i]);
	}

	return dev;
}
Esempio n. 6
0
/*
 * Process a Fabrics Connect command capsule.
 *
 * Validates the record format, the capsule data length, and NUL termination
 * of both NQN fields, then looks up the requested subsystem and hands the
 * request to its connect callback.
 *
 * Returns COMPLETE when a response was filled in synchronously (all error
 * paths), or ASYNCHRONOUS once ownership passes to subsystem->connect_cb.
 */
static spdk_nvmf_request_exec_status
nvmf_process_connect(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_subsystem	*subsystem;
	struct spdk_nvmf_fabric_connect_data *data = (struct spdk_nvmf_fabric_connect_data *)
			req->data;
	struct spdk_nvmf_fabric_connect_cmd *cmd = &req->cmd->connect_cmd;
	struct spdk_nvmf_fabric_connect_rsp *rsp = &req->rsp->connect_rsp;
	void *end;

/* Fail the connect with the parameter offset pointing at 'field' within the
 * connect data.  NOTE(review): the literal 1 is presumably the "invalid
 * parameter in data" attribute flag - confirm against
 * invalid_connect_response(). */
#define INVALID_CONNECT_DATA(field) invalid_connect_response(rsp, 1, offsetof(struct spdk_nvmf_fabric_connect_data, field))

	if (cmd->recfmt != 0) {
		SPDK_ERRLOG("Connect command unsupported RECFMT %u\n", cmd->recfmt);
		rsp->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
		rsp->status.sc = SPDK_NVMF_FABRIC_SC_INCOMPATIBLE_FORMAT;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	/* The length check also guarantees the memchr() scans below stay
	 * within the capsule data. */
	if (req->length < sizeof(struct spdk_nvmf_fabric_connect_data)) {
		SPDK_ERRLOG("Connect command data length 0x%x too small\n", req->length);
		req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INVALID_FIELD;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	/* Ensure that subnqn and hostnqn are null terminated */
	end = memchr(data->subnqn, '\0', SPDK_NVMF_NQN_MAX_LEN);
	if (!end) {
		SPDK_ERRLOG("Connect SUBNQN is not null terminated\n");
		INVALID_CONNECT_DATA(subnqn);
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	end = memchr(data->hostnqn, '\0', SPDK_NVMF_NQN_MAX_LEN);
	if (!end) {
		SPDK_ERRLOG("Connect HOSTNQN is not null terminated\n");
		INVALID_CONNECT_DATA(hostnqn);
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}
	/* Look up the requested subsystem */
	subsystem = nvmf_find_subsystem(data->subnqn, data->hostnqn);
	if (subsystem == NULL) {
		SPDK_ERRLOG("Could not find subsystem '%s'\n", data->subnqn);
		INVALID_CONNECT_DATA(subnqn);
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	/* The subsystem completes the request asynchronously. */
	subsystem->connect_cb(subsystem->cb_ctx, req);

	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}
Esempio n. 7
0
/*
 * Top-level execution entry point for an NVMf request.
 *
 * Dispatches the command to the fabrics handler, the discovery/admin
 * handlers (admin queue), or the I/O handler, then acts on the returned
 * execution status.
 *
 * \return 0 on success or when completion is deferred, -1 on error.
 */
int
spdk_nvmf_request_exec(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_session *session = req->conn->sess;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	spdk_nvmf_request_exec_status status;

	nvmf_trace_command(req->cmd, req->conn->type);

	if (cmd->opc == SPDK_NVME_OPC_FABRIC) {
		status = nvmf_process_fabrics_command(req);
	} else if (session == NULL || !session->vcprop.cc.bits.en) {
		/* Only Fabric commands are allowed when the controller is disabled */
		SPDK_ERRLOG("Non-Fabric command sent to disabled controller\n");
		rsp->status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR;
		status = SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	} else if (req->conn->type == CONN_TYPE_AQ) {
		struct spdk_nvmf_subsystem *subsystem;

		subsystem = session->subsys;
		assert(subsystem != NULL);
		if (subsystem->subtype == SPDK_NVMF_SUBTYPE_DISCOVERY) {
			status = nvmf_process_discovery_cmd(req);
		} else {
			status = session->subsys->ops->process_admin_cmd(req);
		}
	} else {
		status = session->subsys->ops->process_io_cmd(req);
	}

	switch (status) {
	case SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE:
		return spdk_nvmf_request_complete(req);
	case SPDK_NVMF_REQUEST_EXEC_STATUS_RELEASE:
		if (req->conn->transport->req_release(req)) {
			SPDK_ERRLOG("Transport request release error!\n");
			return -1;
		}

		return 0;
	case SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS:
		return 0;
	default:
		SPDK_ERRLOG("Unknown request exec status: 0x%x\n", status);
		return -1;
	}
	/* Fixed: removed the unreachable trailing 'return 0;' - every case of
	 * the switch above (including default) returns. */
}
Esempio n. 8
0
/*
 * Poller: reap completed kernel-AIO events for this channel (non-blocking)
 * and complete the corresponding bdev I/Os.  An I/O succeeds only when the
 * reported result byte count matches the length the task submitted.
 */
static void
blockdev_aio_poll(void *arg)
{
	struct blockdev_aio_io_channel *ch = arg;
	struct blockdev_aio_task *aio_task;
	enum spdk_bdev_io_status status;
	struct timespec timeout = { .tv_sec = 0, .tv_nsec = 0 };
	int nr, i;

	/* Zero timeout => return immediately with whatever has completed. */
	nr = io_getevents(ch->io_ctx, 1, ch->queue_depth,
			  ch->events, &timeout);
	if (nr < 0) {
		SPDK_ERRLOG("%s: io_getevents returned %d\n", __func__, nr);
		return;
	}

	for (i = 0; i < nr; i++) {
		aio_task = ch->events[i].data;
		status = (ch->events[i].res == aio_task->len) ?
			 SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
		spdk_bdev_io_complete(spdk_bdev_io_from_ctx(aio_task), status);
	}
}
Esempio n. 9
0
File: nvmf.c Progetto: spdk/spdk
/*
 * Record global NVMf target limits and initialize the transport layer.
 *
 * \return 0 on success, -1 if transport initialization fails.
 */
int
spdk_nvmf_tgt_init(uint16_t max_queue_depth, uint16_t max_queues_per_sess,
		   uint32_t in_capsule_data_size, uint32_t max_io_size)
{
	int rc;

	g_nvmf_tgt.max_queues_per_session = max_queues_per_sess;
	g_nvmf_tgt.max_queue_depth = max_queue_depth;
	g_nvmf_tgt.in_capsule_data_size = in_capsule_data_size;
	g_nvmf_tgt.max_io_size = max_io_size;

	SPDK_TRACELOG(SPDK_TRACE_NVMF, "Max Queues Per Session: %d\n", max_queues_per_sess);
	SPDK_TRACELOG(SPDK_TRACE_NVMF, "Max Queue Depth: %d\n", max_queue_depth);
	SPDK_TRACELOG(SPDK_TRACE_NVMF, "Max In Capsule Data: %d bytes\n", in_capsule_data_size);
	SPDK_TRACELOG(SPDK_TRACE_NVMF, "Max I/O Size: %d bytes\n", max_io_size);

	/* Fixed: spdk_nvmf_transport_init() was previously called twice -
	 * once with the return value silently discarded.  Call it exactly
	 * once and check the result (non-positive = no transport came up). */
	rc = spdk_nvmf_transport_init();
	if (rc <= 0) {
		SPDK_ERRLOG("Transport initialization failed\n");
		return -1;
	}

	return 0;
}
Esempio n. 10
0
/*
 * Submit a vectored write to the kernel AIO context for this channel.
 *
 * On submission failure the bdev I/O is completed as FAILED and -1 is
 * returned; on success the submitted byte length is returned (completion
 * arrives later via blockdev_aio_poll()).
 */
static int64_t
blockdev_aio_writev(struct file_disk *fdisk, struct spdk_io_channel *ch,
		    struct blockdev_aio_task *aio_task,
		    struct iovec *iov, int iovcnt, size_t len, uint64_t offset)
{
	struct blockdev_aio_io_channel *aio_ch = spdk_io_channel_get_ctx(ch);
	struct iocb *iocb = &aio_task->iocb;
	int rc;

	/* Prepare the pwritev iocb and attach the task as completion context. */
	io_prep_pwritev(iocb, fdisk->fd, iov, iovcnt, offset);
	iocb->data = aio_task;
	aio_task->len = len;

	SPDK_TRACELOG(SPDK_TRACE_AIO, "write %d iovs size %lu from off: %#lx\n",
		      iovcnt, len, offset);

	rc = io_submit(aio_ch->io_ctx, 1, &iocb);
	if (rc < 0) {
		spdk_bdev_io_complete(spdk_bdev_io_from_ctx(aio_task), SPDK_BDEV_IO_STATUS_FAILED);
		SPDK_ERRLOG("%s: io_submit returned %d\n", __func__, rc);
		return -1;
	}

	return len;
}
Esempio n. 11
0
/*
 * Handle a Fabrics Property Set command: apply the property, transition the
 * connection toward disconnect if the set indicated shutdown, then send the
 * response capsule.  Returns 0 on success, -1 if the response send fails.
 */
static int
nvmf_process_property_set(struct spdk_nvmf_conn *conn,
			  struct nvme_qp_tx_desc *tx_desc)
{
	struct nvmf_request *req = &tx_desc->req_state;
	struct spdk_nvmf_fabric_prop_set_cmd *cmd = &req->cmd->prop_set_cmd;
	struct spdk_nvmf_fabric_prop_set_rsp *response = &req->rsp->prop_set_rsp;
	bool shutdown = false;

	nvmf_property_set(conn->sess, cmd, response, &shutdown);
	if (shutdown) {
		SPDK_TRACELOG(SPDK_TRACE_DEBUG, "Call to set properties has indicated shutdown\n");
		conn->state = CONN_STATE_FABRIC_DISCONNECT;
	}

	/* send the nvmf response if setup by NVMf library */
	SPDK_TRACELOG(SPDK_TRACE_DEBUG, "send property set capsule response\n");
	if (spdk_nvmf_send_response(conn, req)) {
		SPDK_ERRLOG("Unable to send aq qp tx descriptor\n");
		return -1;
	}

	return 0;
}
Esempio n. 12
0
File: nvmf.c Progetto: famz/spdk
/*
 * Create the NVMe backend request mempool, sized for the worst case of
 * every queue entry on every connection of every subsystem being in flight.
 * Returns 0 on success, -1 if pool creation fails.
 */
static int
spdk_nvmf_initialize_pools(void)
{
	SPDK_NOTICELOG("\n*** NVMf Pool Creation ***\n");

	g_num_requests = MAX_SUBSYSTEMS * g_nvmf_tgt.MaxConnectionsPerSession * g_nvmf_tgt.MaxQueueDepth;

	/* create NVMe backend request pool */
	request_mempool = rte_mempool_create("NVMe_Pool", g_num_requests,
					     spdk_nvme_request_size(), 128, 0,
					     NULL, NULL, NULL, NULL,
					     SOCKET_ID_ANY, 0);
	if (request_mempool == NULL) {
		SPDK_ERRLOG("create NVMe request pool failed\n");
		return -1;
	}

	SPDK_TRACELOG(SPDK_TRACE_DEBUG, "NVMe request_mempool %p, size %" PRIu64 " bytes\n",
		      request_mempool,
		      (uint64_t)g_num_requests * spdk_nvme_request_size());

	return 0;
}
Esempio n. 13
0
/*
 * Allocate a new NVMf connection object with pre-connect defaults: admin
 * queue type, no session, invalid state, empty submission queue.
 *
 * \return The new connection, or NULL on allocation failure.
 */
struct spdk_nvmf_conn *
spdk_nvmf_allocate_conn(void)
{
	struct spdk_nvmf_conn *conn;

	conn = allocate_conn();
	if (conn == NULL) {
		SPDK_ERRLOG("Could not allocate new connection.\n");
		/* Fixed: the single-use 'goto err0' label that only returned
		 * NULL has been replaced with a direct return. */
		return NULL;
	}

	/* all new connections initially default as AQ until nvmf connect */
	conn->type = CONN_TYPE_AQ;

	/* no session association until nvmf connect */
	conn->sess = NULL;

	conn->state = CONN_STATE_INVALID;
	conn->sq_head = conn->sq_tail = 0;

	return conn;
}
Esempio n. 14
0
File: nvmf.c Progetto: famz/spdk
/*
 * Validate and record the target-wide queue limits, then initialize the
 * target mutex, default listen port, and backing memory pools.
 *
 * \param max_queue_depth 1..SPDK_NVMF_DEFAULT_MAX_QUEUE_DEPTH
 * \param max_conn_per_sess 1..SPDK_NVMF_DEFAULT_MAX_CONNECTIONS_PER_SESSION
 * \return 0 on success, -EINVAL for out-of-range arguments, negative on
 *         other failures.
 */
int
nvmf_tgt_init(int max_queue_depth, int max_conn_per_sess)
{
	int rc;

	if (max_queue_depth >= 1 &&
	    max_queue_depth <= SPDK_NVMF_DEFAULT_MAX_QUEUE_DEPTH) {
		g_nvmf_tgt.MaxQueueDepth = max_queue_depth;
		SPDK_TRACELOG(SPDK_TRACE_DEBUG, "MaxQueueDepth: %d\n",
			      g_nvmf_tgt.MaxQueueDepth);
	} else {
		SPDK_ERRLOG("Invalid MaxQueueDepth: %d\n", max_queue_depth);
		return -EINVAL;
	}

	if (max_conn_per_sess >= 1 &&
	    max_conn_per_sess <= SPDK_NVMF_DEFAULT_MAX_CONNECTIONS_PER_SESSION) {
		g_nvmf_tgt.MaxConnectionsPerSession = max_conn_per_sess;
		SPDK_TRACELOG(SPDK_TRACE_DEBUG, "MaxConnectionsPerSession: %d\n",
			      g_nvmf_tgt.MaxConnectionsPerSession);
	} else {
		SPDK_ERRLOG("Invalid MaxConnectionsPerSession: %d\n", max_conn_per_sess);
		return -EINVAL;
	}

	rc = pthread_mutex_init(&g_nvmf_tgt.mutex, NULL);
	if (rc != 0) {
		SPDK_ERRLOG("mutex_init() failed\n");
		return -1;
	}

	/* init nvmf specific config options */
	if (!g_nvmf_tgt.sin_port) {
		g_nvmf_tgt.sin_port = htons(SPDK_NVMF_DEFAULT_SIN_PORT);
	}

	rc = spdk_nvmf_initialize_pools();
	if (rc != 0) {
		SPDK_ERRLOG("spdk_nvmf_initialize_pools() failed\n");
		/* Fixed: tear down the mutex initialized above so a failed
		 * init does not leak it. */
		pthread_mutex_destroy(&g_nvmf_tgt.mutex);
		return rc;
	}

	return 0;
}
Esempio n. 15
0
/*
 * Create and register an AIO-backed block device on top of file 'fname'.
 *
 * \param fname Path to the backing file; the pointer is stored, not copied.
 * \return The embedded spdk_bdev on success, NULL on failure.
 */
struct spdk_bdev *
create_aio_disk(char *fname)
{
	struct file_disk *fdisk;

	/* Fixed: calloc takes (nmemb, size); the arguments were swapped
	 * (harmless at runtime, but conventionally wrong). */
	fdisk = calloc(1, sizeof(*fdisk));
	if (!fdisk) {
		SPDK_ERRLOG("Unable to allocate enough memory for aio backend\n");
		return NULL;
	}

	fdisk->file = fname;
	if (blockdev_aio_open(fdisk)) {
		SPDK_ERRLOG("Unable to open file %s. fd: %d errno: %d\n", fname, fdisk->fd, errno);
		/* Fixed: previously this path also called blockdev_aio_close()
		 * even though the open failed, closing an invalid fd. */
		aio_free_disk(fdisk);
		return NULL;
	}

	fdisk->size = spdk_fd_get_size(fdisk->fd);

	TAILQ_INIT(&fdisk->sync_completion_list);
	snprintf(fdisk->disk.name, SPDK_BDEV_MAX_NAME_LENGTH, "AIO%d",
		 g_blockdev_count);
	snprintf(fdisk->disk.product_name, SPDK_BDEV_MAX_PRODUCT_NAME_LENGTH, "AIO disk");

	fdisk->disk.need_aligned_buffer = 1;
	fdisk->disk.write_cache = 1;
	/* NOTE(review): assumes spdk_fd_get_blocklen() returns non-zero;
	 * otherwise the blockcnt division below divides by zero - confirm. */
	fdisk->disk.blocklen = spdk_fd_get_blocklen(fdisk->fd);
	fdisk->disk.blockcnt = fdisk->size / fdisk->disk.blocklen;
	fdisk->disk.ctxt = fdisk;

	fdisk->disk.fn_table = &aio_fn_table;
	g_blockdev_count++;

	spdk_io_device_register(&fdisk->disk, blockdev_aio_create_cb, blockdev_aio_destroy_cb,
				sizeof(struct blockdev_aio_io_channel));
	spdk_bdev_register(&fdisk->disk);
	return &fdisk->disk;
}
Esempio n. 16
0
File: nvmf.c Progetto: famz/spdk
static int spdk_nvmf_check_pool(struct rte_mempool *pool, uint32_t count)
{
	if (rte_mempool_count(pool) != count) {
		SPDK_ERRLOG("rte_mempool_count(%s) == %d, should be %d\n",
			    pool->name, rte_mempool_count(pool), count);
		return -1;
	} else {
		return 0;
	}
}
Esempio n. 17
0
/*
 * Initialize the SCSI subsystem: set up the global mutex and read the
 * [Scsi] configuration parameters.
 *
 * \return 0 on success, -1 on failure.
 */
static int
spdk_scsi_subsystem_init(void)
{
	int rc;

	rc = pthread_mutex_init(&g_spdk_scsi.mutex, NULL);
	if (rc != 0) {
		SPDK_ERRLOG("mutex_init() failed\n");
		return -1;
	}

	rc = spdk_read_config_scsi_parameters();
	if (rc < 0) {
		/* Fixed: the message previously named a nonexistent
		 * "spdk_scsi_parameters()" function. */
		SPDK_ERRLOG("spdk_read_config_scsi_parameters() failed\n");
		return -1;
	}

	/* Fixed: return 0 explicitly instead of the leftover rc. */
	return 0;
}
Esempio n. 18
0
/*
 * Process an admin command capsule received over the RDMA transport.
 *
 * If the command carries a keyed SGL describing a host buffer, the bounce
 * buffer and RDMA parameters are recorded on the request before handing it
 * to the NVMf library.  If the library fails the request synchronously
 * (non-zero return), the already-populated response is sent back here.
 *
 * Returns 0 on success, -1 if the synchronous response could not be sent.
 */
static int
nvmf_process_admin_command(struct spdk_nvmf_conn *conn,
			   struct nvme_qp_tx_desc *tx_desc)
{
	struct nvme_qp_rx_desc *rx_desc = tx_desc->rx_desc;
	struct nvmf_request *req;
	struct spdk_nvme_cmd *cmd;
	struct spdk_nvme_sgl_descriptor *sgl;
	struct spdk_nvmf_keyed_sgl_descriptor *keyed_sgl;
	void *buf = NULL;
	uint32_t len = 0;
	int	ret;

	req = &tx_desc->req_state;
	cmd = &req->cmd->nvme_cmd;
	sgl = (struct spdk_nvme_sgl_descriptor *)&cmd->dptr.sgl1;
	/* The keyed view overlays the same descriptor bytes. */
	keyed_sgl = (struct spdk_nvmf_keyed_sgl_descriptor *)sgl;

	/*
	  NVMf does not support in-capsule data for admin command or response capsules.
	  If caller indicates SGL for return RDMA data, verify the SGL and prepare
	  data buffer reference and length for the NVMf library.  Only keyed type
	  SGLs are supported for return data
	 */
	if (sgl->type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK &&
	    (sgl->type_specific == SPDK_NVME_SGL_SUBTYPE_ADDRESS ||
	     sgl->type_specific == SPDK_NVME_SGL_SUBTYPE_INVALIDATE_KEY)) {
		/* Local bounce buffer plus remote address/key for the later
		 * RDMA write back to the host. */
		buf = (void *)rx_desc->bb;
		len = rx_desc->bb_sgl.length;
		req->remote_addr = keyed_sgl->address;
		req->rkey = keyed_sgl->key;
		req->length = keyed_sgl->length;
	}

	SPDK_TRACELOG(SPDK_TRACE_DEBUG, "	tx_desc %p: req_state %p, rsp %p, addr %p\n",
		      tx_desc, req, (void *)req->rsp, (void *)tx_desc->send_sgl.addr);

	/* send to NVMf library for backend NVMe processing */
	ret = nvmf_process_admin_cmd(req->session, cmd, buf, len, req);
	if (ret) {
		/* library failed the request and should have
		   Updated the response */
		SPDK_TRACELOG(SPDK_TRACE_NVMF, "send nvme admin cmd capsule sync response\n");
		ret = spdk_nvmf_send_response(conn, req);
		if (ret) {
			SPDK_ERRLOG("Unable to send aq qp tx descriptor\n");
			goto command_fail;
		}
	}

	return 0;

command_fail:
	return -1;
}
Esempio n. 19
0
/*
 * Completion path for a request the NVMf library finished asynchronously.
 *
 * If the command succeeded and produced data, the data is first pushed to
 * the host with an RDMA write; the response capsule is then sent.  If
 * either post fails, the tx descriptor is deactivated instead.
 */
static void
nvmf_process_async_completion(struct nvmf_request *req)
{
	struct nvme_qp_tx_desc *tx_desc = (struct nvme_qp_tx_desc *)req->fabric_tx_ctx;
	struct spdk_nvme_cpl *response;
	struct nvme_qp_rx_desc *rx_desc = tx_desc->rx_desc;
	int ret;

	response = &req->rsp->nvme_cpl;

	/* Was the command successful */
	if ((response->status.sc == SPDK_NVME_SC_SUCCESS) && req->length > 0) {
		/* data to be copied to host via memory RDMA */
		if (req->length < rx_desc->bb_len) {
			/* temporarily adjust SGE to only copy what the
				host is prepared to receive.
			*/
			SPDK_TRACELOG(SPDK_TRACE_DEBUG, " *** modify sgl length from %x to %x\n",
				      rx_desc->bb_sgl.length, req->length);
			rx_desc->bb_sgl.length = req->length;
		}
		ret = nvmf_post_rdma_write(tx_desc->conn, tx_desc);
		if (ret) {
			SPDK_ERRLOG("Unable to post rdma write tx descriptor\n");
			goto command_fail;
		}
	}

	/* Now send back the response */
	SPDK_TRACELOG(SPDK_TRACE_DEBUG, "send nvme cmd capsule response\n");
	ret = spdk_nvmf_send_response(tx_desc->conn, req);
	if (ret) {
		SPDK_ERRLOG("Unable to send aq qp tx descriptor\n");
		goto command_fail;
	}

	return;

command_fail:
	/* Posting failed; release the tx descriptor so it is not leaked. */
	nvmf_deactive_tx_desc(tx_desc);
}
Esempio n. 20
0
File: app.c Progetto: lkpdn/spdk
/*
 * Write the application's PID into the configured pidfile, holding a write
 * lock on it so a second instance using the same pidfile fails fast.
 *
 * \return 0 on success, -1 on any failure.
 */
static int
spdk_app_write_pidfile(void)
{
	FILE *fp;
	pid_t pid;
	struct flock lock = {
		.l_type = F_WRLCK,
		.l_whence = SEEK_SET,
		.l_start = 0,
		.l_len = 0,
	};

	fp = fopen(g_spdk_app.pidfile, "w");
	if (fp == NULL) {
		SPDK_ERRLOG("pidfile open error %d\n", errno);
		return -1;
	}

	if (fcntl(fileno(fp), F_SETLK, &lock) != 0) {
		/* Fixed: report the failure and return -1 instead of
		 * exit()ing the whole application, matching the error
		 * handling of the fopen() path above; also close fp. */
		fprintf(stderr, "Cannot create lock on file %s, probably you"
			" should use different instance id\n", g_spdk_app.pidfile);
		fclose(fp);
		return -1;
	}

	pid = getpid();
	fprintf(fp, "%d\n", (int)pid);
	fclose(fp);
	return 0;
}

/*
 * Remove the application's pidfile.  A failure is logged but otherwise
 * ignored - the file going missing is not fatal at shutdown.
 */
static void
spdk_app_remove_pidfile(void)
{
	if (remove(g_spdk_app.pidfile) != 0) {
		SPDK_ERRLOG("pidfile remove error %d\n", errno);
		/* ignore error */
	}
}
Esempio n. 21
0
/*
 * Create the shared-memory array that backs all NVMf connection objects and
 * reset the per-core connection counters.
 *
 * \param max_connections Capacity of the connection array.
 * \return 0 on success, -1 on any failure.
 */
int spdk_initialize_nvmf_conns(int max_connections)
{
	size_t conns_size;
	int i, rc;

	SPDK_TRACELOG(SPDK_TRACE_DEBUG, "Enter\n");

	rc = pthread_mutex_init(&g_conns_mutex, NULL);
	if (rc != 0) {
		SPDK_ERRLOG("mutex_init() failed\n");
		return -1;
	}

	/* Fixed: bounded snprintf instead of sprintf.
	 * NOTE(review): assumes g_shm_name is a char array - confirm. */
	snprintf(g_shm_name, sizeof(g_shm_name), "nvmf_conns.%d", spdk_app_get_instance_id());
	g_conns_array_fd = shm_open(g_shm_name, O_RDWR | O_CREAT, 0600);
	if (g_conns_array_fd < 0) {
		SPDK_ERRLOG("could not shm_open %s\n", g_shm_name);
		return -1;
	}

	g_max_conns = max_connections;
	conns_size = sizeof(struct spdk_nvmf_conn) * g_max_conns;

	if (ftruncate(g_conns_array_fd, conns_size) != 0) {
		SPDK_ERRLOG("could not ftruncate\n");
		shm_unlink(g_shm_name);
		close(g_conns_array_fd);
		return -1;
	}
	g_conns_array = mmap(0, conns_size, PROT_READ | PROT_WRITE, MAP_SHARED,
			     g_conns_array_fd, 0);
	/* Fixed: mmap() failure was previously unchecked, so the memset()
	 * below would write through MAP_FAILED. */
	if (g_conns_array == MAP_FAILED) {
		SPDK_ERRLOG("could not mmap conns array\n");
		shm_unlink(g_shm_name);
		close(g_conns_array_fd);
		return -1;
	}

	memset(g_conns_array, 0, conns_size);

	for (i = 0; i < RTE_MAX_LCORE; i++) {
		rte_atomic32_set(&g_num_connections[i], 0);
	}

	return 0;
}
Esempio n. 22
0
File: conf.c Progetto: 64116278/spdk
/*
 * Parse one "Port" configuration section: create the subsystem port and
 * attach one fabric interface per valid "FabricIntf" entry.  Invalid
 * entries are skipped; the port must end up with at least one interface.
 *
 * Returns 0 on success, -1 on failure.
 */
static int
spdk_nvmf_parse_port(struct spdk_conf_section *sp)
{
	struct spdk_nvmf_port		*port;
	struct spdk_nvmf_fabric_intf	*fabric_intf;
	char *listen_addr, *host, *listen_port;
	int i = 0, rc = 0;

	/* Create the Subsystem Port */
	port = spdk_nvmf_port_create(sp->num);
	if (!port) {
		SPDK_ERRLOG("Port create failed\n");
		return -1;
	}

	/* Loop over the fabric interfaces and add them to the port */
	for (i = 0; ; i++) {
		listen_addr = spdk_conf_section_get_nmval(sp, "FabricIntf", i, 0);
		if (listen_addr == NULL) {
			break;
		}
		/* Malformed addresses are skipped, not fatal. */
		rc = spdk_nvmf_parse_addr(listen_addr, &host, &listen_port);
		if (rc < 0) {
			continue;
		}
		fabric_intf = spdk_nvmf_fabric_intf_create(host, listen_port);
		if (!fabric_intf) {
			continue;
		}

		spdk_nvmf_port_add_fabric_intf(port, fabric_intf);
	}

	if (TAILQ_EMPTY(&port->head)) {
		SPDK_ERRLOG("No fabric interface found\n");
		/* NOTE(review): 'port' is not destroyed on this error path -
		 * possible leak unless spdk_nvmf_port_create() registered it
		 * in a global list cleaned up elsewhere; confirm. */
		return -1;
	}

	return 0;
}
Esempio n. 23
0
File: conf.c Progetto: 64116278/spdk
/*
 * Parse the mandatory [Nvmf] configuration section and initialize the
 * target with the configured (or default) limits.
 *
 * Returns the result of nvmf_tgt_init(), or -1 if the section is missing.
 */
static int
spdk_nvmf_parse_nvmf_tgt(void)
{
	struct spdk_conf_section *sp;
	char *nodebase;
	int max_in_capsule_data;
	int max_sessions_per_subsystem;
	int max_queue_depth;
	int max_conn_per_sess;
	int max_recv_seg_len;
	int listen_port;

	sp = spdk_conf_find_section(NULL, "Nvmf");
	if (sp == NULL) {
		SPDK_ERRLOG("No Nvmf section in configuration file.\n");
		return -1;
	}

	/* Each value falls back to its compile-time default when absent
	 * (get_intval returns a negative value for missing keys). */
	nodebase = spdk_conf_section_get_val(sp, "NodeBase");
	if (nodebase == NULL) {
		nodebase = SPDK_NVMF_DEFAULT_NODEBASE;
	}

	max_in_capsule_data = spdk_conf_section_get_intval(sp, "MaxInCapsuleData");
	if (max_in_capsule_data < 0) {
		max_in_capsule_data = SPDK_NVMF_DEFAULT_IN_CAPSULE_DATA_SIZE;
	}

	max_sessions_per_subsystem = spdk_conf_section_get_intval(sp, "MaxSessionsPerSubsystem");
	if (max_sessions_per_subsystem < 0) {
		max_sessions_per_subsystem = SPDK_NVMF_DEFAULT_MAX_SESSIONS_PER_SUBSYSTEM;
	}

	max_queue_depth = spdk_conf_section_get_intval(sp, "MaxQueueDepth");
	if (max_queue_depth < 0) {
		max_queue_depth = SPDK_NVMF_DEFAULT_MAX_QUEUE_DEPTH;
	}

	max_conn_per_sess = spdk_conf_section_get_intval(sp, "MaxConnectionsPerSession");
	if (max_conn_per_sess < 0) {
		max_conn_per_sess = SPDK_NVMF_DEFAULT_MAX_CONNECTIONS_PER_SESSION;
	}

	/* These two are not configurable. */
	max_recv_seg_len = SPDK_NVMF_MAX_RECV_DATA_TRANSFER_SIZE;
	listen_port = SPDK_NVMF_DEFAULT_SIN_PORT;

	return nvmf_tgt_init(nodebase, max_in_capsule_data, max_sessions_per_subsystem,
			     max_queue_depth, max_conn_per_sess, max_recv_seg_len, listen_port);
}
Esempio n. 24
0
/*
 * Destroy an AIO block device: close the backing file (logging but not
 * aborting on failure) and free the disk.  Returns the close() result.
 */
static int
blockdev_aio_destruct(struct spdk_bdev *bdev)
{
	struct file_disk *fdisk = (struct file_disk *)bdev;
	int rc;

	rc = blockdev_aio_close(fdisk);
	if (rc < 0) {
		SPDK_ERRLOG("blockdev_aio_close() failed\n");
	}

	aio_free_disk(fdisk);

	return rc;
}
Esempio n. 25
0
/*
 * Resume processing of an I/O command after transport-level RDMA has
 * completed, handing the bounce buffer to the NVMf library.  If the library
 * fails the request synchronously, the prepared error response is sent.
 *
 * Returns 0 on success, -1 on a missing rx descriptor or send failure.
 */
static int
nvmf_io_cmd_continue(struct spdk_nvmf_conn *conn, struct nvme_qp_tx_desc *tx_desc)
{
	struct nvme_qp_rx_desc *rx_desc = tx_desc->rx_desc;
	struct nvmf_request *req;
	struct spdk_nvme_cmd *cmd;

	if (rx_desc == NULL) {
		SPDK_ERRLOG(" rx_desc does not exist!\n");
		return -1;
	}

	req = &tx_desc->req_state;
	cmd = &req->cmd->nvme_cmd;
	req->fabric_rx_ctx = rx_desc;

	/* clear the SGL details for RDMA performed */
	req->length = 0;

	/* send to NVMf library for backend NVMe processing */
	if (nvmf_process_io_cmd(req->session, cmd, (void *)rx_desc->bb,
				rx_desc->bb_sgl.length, req) == 0) {
		return 0;
	}

	/* library failed the request and should have
	   Updated the response */
	SPDK_TRACELOG(SPDK_TRACE_DEBUG, " send nvme io cmd capsule error response\n");
	if (spdk_nvmf_send_response(conn, req)) {
		SPDK_ERRLOG("Unable to send aq qp tx descriptor\n");
		return -1;
	}

	return 0;
}
Esempio n. 26
0
/*
 * Finish initializing connection/session properties after a successful
 * Connect: record the controller ID, apply the target's connection limit,
 * then clamp the advertised MDTS and in-capsule data size to the
 * application's transfer limits.
 */
static void
nvmf_init_conn_properites(struct spdk_nvmf_conn *conn,
			  struct nvmf_session *session,
			  struct spdk_nvmf_fabric_connect_rsp *response)
{

	struct spdk_nvmf_extended_identify_ctrlr_data *lcdata;
	uint32_t mdts;

	conn->cntlid = response->status_code_specific.success.cntlid;
	session->max_connections_allowed = g_nvmf_tgt.MaxConnectionsPerSession;
	nvmf_init_session_properties(session, conn->sq_depth);

	/* Update the session logical controller data with any
	 * application fabric side limits
	 */
	/* reset mdts in vcdata to equal the application default maximum */
	/* Max transfer size expressed in minimum-page-size units
	 * (page size = 2^(12 + mpsmin) bytes). */
	mdts = SPDK_NVMF_MAX_RECV_DATA_TRANSFER_SIZE /
	       (1 << (12 + session->vcprop.cap_hi.bits.mpsmin));
	if (mdts == 0) {
		SPDK_ERRLOG("Min page size exceeds max transfer size!\n");
		SPDK_ERRLOG("Verify setting of SPDK_NVMF_MAX_RECV_DATA_TRANSFER_SIZE and mpsmin\n");
		session->vcdata.mdts = 1; /* Support single page for now */
	} else {
		/* set mdts as a power of 2 representing number of mpsmin units */
		session->vcdata.mdts = 0;
		while ((1ULL << session->vcdata.mdts) < mdts) {
			session->vcdata.mdts++;
		}
	}

	/* increase the I/O recv capsule size for in_capsule data */
	/* NOTE(review): offset 1088 into vcdata.reserved5 presumably locates
	 * the NVMe-oF extended identify fields (ioccsz etc.) within the 4KB
	 * identify page - confirm against the struct layout.  The divide by
	 * 16 matches ioccsz being expressed in 16-byte units. */
	lcdata = (struct spdk_nvmf_extended_identify_ctrlr_data *)&session->vcdata.reserved5[1088];
	lcdata->ioccsz += (g_nvmf_tgt.MaxInCapsuleData / 16);

}
Esempio n. 27
0
/* Read a numeric key from a config section, falling back to 'default_val'
 * when the key is absent.  Mirrors the original inline strtoul pattern. */
static unsigned long
spdk_scsi_conf_get_ul(struct spdk_conf_section *sp, const char *key,
		      unsigned long default_val)
{
	const char *val = spdk_conf_section_get_val(sp, key);

	return (val == NULL) ? default_val : strtoul(val, NULL, 10);
}

/*
 * Populate g_spdk_scsi.scsi_params from the optional [Scsi] configuration
 * section, using built-in defaults for any missing key.  Returns 0 on
 * success, -1 on an unrecognized Ugavalid value.
 */
static int
spdk_read_config_scsi_parameters(void)
{
	struct spdk_conf_section *sp;
	const char *val;

	sp = spdk_conf_find_section(NULL, "Scsi");
	if (sp == NULL) {
		spdk_set_default_scsi_parameters();
		return 0;
	}

	g_spdk_scsi.scsi_params.max_unmap_lba_count =
		spdk_scsi_conf_get_ul(sp, "MaxUnmapLbaCount", DEFAULT_MAX_UNMAP_LBA_COUNT);
	g_spdk_scsi.scsi_params.max_unmap_block_descriptor_count =
		spdk_scsi_conf_get_ul(sp, "MaxUnmapBlockDescriptorCount",
				      DEFAULT_MAX_UNMAP_BLOCK_DESCRIPTOR_COUNT);
	g_spdk_scsi.scsi_params.optimal_unmap_granularity =
		spdk_scsi_conf_get_ul(sp, "OptimalUnmapGranularity",
				      DEFAULT_OPTIMAL_UNMAP_GRANULARITY);
	g_spdk_scsi.scsi_params.unmap_granularity_alignment =
		spdk_scsi_conf_get_ul(sp, "UnmapGranularityAlignment",
				      DEFAULT_UNMAP_GRANULARITY_ALIGNMENT);

	/* Ugavalid is a yes/no flag rather than a number. */
	val = spdk_conf_section_get_val(sp, "Ugavalid");
	if (val == NULL) {
		g_spdk_scsi.scsi_params.ugavalid = DEFAULT_UGAVALID;
	} else if (strcasecmp(val, "Yes") == 0) {
		g_spdk_scsi.scsi_params.ugavalid = 1;
	} else if (strcasecmp(val, "No") == 0) {
		g_spdk_scsi.scsi_params.ugavalid = 0;
	} else {
		SPDK_ERRLOG("unknown value %s\n", val);
		return -1;
	}

	g_spdk_scsi.scsi_params.max_write_same_length =
		spdk_scsi_conf_get_ul(sp, "MaxWriteSameLength", DEFAULT_MAX_WRITE_SAME_LENGTH);

	return 0;
}
Esempio n. 28
0
/*
 * Initialize a per-channel kernel AIO context and its completion event
 * array.  Returns 0 on success, -1 on failure (with any partially created
 * context destroyed).
 */
static int
blockdev_aio_initialize_io_channel(struct blockdev_aio_io_channel *ch)
{
	/* Fixed queue depth for the AIO context and event array. */
	ch->queue_depth = 128;

	if (io_setup(ch->queue_depth, &ch->io_ctx) < 0) {
		SPDK_ERRLOG("async I/O context setup failure\n");
		return -1;
	}

	/* Fixed: calloc takes (nmemb, size); the arguments were swapped
	 * (harmless at runtime, but conventionally wrong). */
	ch->events = calloc(ch->queue_depth, sizeof(struct io_event));
	if (!ch->events) {
		/* Fixed: this failure was previously silent. */
		SPDK_ERRLOG("could not allocate I/O event array\n");
		io_destroy(ch->io_ctx);
		return -1;
	}

	return 0;
}
Esempio n. 29
0
/**

\brief Create an NVMf fabric connection from the given parameters and schedule it
       on a reactor thread.

\code

# identify reactor where the new connections work item will be scheduled
reactor = nvmf_allocate_reactor()
schedule fabric connection work item on reactor

\endcode

*/
int
spdk_nvmf_startup_conn(struct spdk_nvmf_conn *conn)
{
	uint64_t nvmf_session_core = spdk_app_get_core_mask();
	struct spdk_nvmf_conn *admin_conn;
	int lcore;

	/*
	 * if starting IO connection then determine core
	 * allocated to admin queue to request core mask.
	 * Can not assume nvmf session yet created at time
	 * of fabric connection setup.  Rely on fabric
	 * function to locate matching controller session.
	 */
	if (conn->type == CONN_TYPE_IOQ && conn->cntlid != 0) {
		admin_conn = spdk_find_nvmf_conn_by_cntlid(conn->cntlid);
		if (admin_conn != NULL) {
			SPDK_TRACELOG(SPDK_TRACE_DEBUG, "Located admin conn session core %d\n",
				      admin_conn->poller.lcore);
			/* Prefer the same core as the admin queue. */
			nvmf_session_core = 1ULL << admin_conn->poller.lcore;
		}
	}

	lcore = nvmf_allocate_reactor(nvmf_session_core);
	if (lcore < 0) {
		SPDK_ERRLOG("Unable to find core to launch connection.\n");
		free_conn(conn);
		return -1;
	}

	conn->state = CONN_STATE_RUNNING;
	SPDK_NOTICELOG("Launching nvmf connection[qid=%d] on core: %d\n",
		       conn->qid, lcore);
	conn->poller.fn = spdk_nvmf_conn_do_work;
	conn->poller.arg = conn;

	rte_atomic32_inc(&g_num_connections[lcore]);
	spdk_poller_register(&conn->poller, lcore, NULL);

	return 0;
}
Esempio n. 30
0
File: task.c Progetto: chennqqi/spdk
/*
 * Allocate and zero-initialize an iSCSI task from the global task pool and
 * construct its embedded SCSI task.  Pool exhaustion is treated as fatal
 * (abort) - the pool is expected to be sized for the worst case.
 */
struct spdk_iscsi_task *
spdk_iscsi_task_get(uint32_t *owner_task_ctr, struct spdk_iscsi_task *parent)
{
	struct spdk_iscsi_task *task = NULL;

	if (rte_mempool_get(g_spdk_iscsi.task_pool, (void **)&task) < 0 || task == NULL) {
		SPDK_ERRLOG("Unable to get task\n");
		abort();
	}

	memset(task, 0, sizeof(*task));
	spdk_scsi_task_construct((struct spdk_scsi_task *)task, owner_task_ctr,
				 (struct spdk_scsi_task *)parent);
	task->scsi.free_fn = spdk_iscsi_task_free;

	return task;
}