Example #1
static void
submit_single_io(struct ns_worker_ctx *ns_ctx)
{
	struct perf_task	*task = NULL;
	uint64_t		offset_in_ios;
	int			rc;
	struct ns_entry		*entry = ns_ctx->entry;

	if (rte_mempool_get(task_pool, (void **)&task) != 0) {
		fprintf(stderr, "task_pool rte_mempool_get failed\n");
		exit(1);
	}

	task->ns_ctx = ns_ctx;

	if (g_is_random) {
		offset_in_ios = rand_r(&seed) % entry->size_in_ios;
	} else {
		offset_in_ios = ns_ctx->offset_in_ios++;
		if (ns_ctx->offset_in_ios == entry->size_in_ios) {
			ns_ctx->offset_in_ios = 0;
		}
	}

	task->submit_tsc = rte_get_timer_cycles();

	if ((g_rw_percentage == 100) ||
	    (g_rw_percentage != 0 && ((rand_r(&seed) % 100) < g_rw_percentage))) {
#if HAVE_LIBAIO
		if (entry->type == ENTRY_TYPE_AIO_FILE) {
			rc = aio_submit(ns_ctx->u.aio.ctx, &task->iocb, entry->u.aio.fd, IO_CMD_PREAD, task->buf,
					g_io_size_bytes, offset_in_ios * g_io_size_bytes, task);
		} else
#endif
		{
			rc = spdk_nvme_ns_cmd_read(entry->u.nvme.ns, ns_ctx->u.nvme.qpair, task->buf,
						   offset_in_ios * entry->io_size_blocks,
						   entry->io_size_blocks, io_complete, task, 0);
		}
	} else {
#if HAVE_LIBAIO
		if (entry->type == ENTRY_TYPE_AIO_FILE) {
			rc = aio_submit(ns_ctx->u.aio.ctx, &task->iocb, entry->u.aio.fd, IO_CMD_PWRITE, task->buf,
					g_io_size_bytes, offset_in_ios * g_io_size_bytes, task);
		} else
#endif
		{
			rc = spdk_nvme_ns_cmd_write(entry->u.nvme.ns, ns_ctx->u.nvme.qpair, task->buf,
						    offset_in_ios * entry->io_size_blocks,
						    entry->io_size_blocks, io_complete, task, 0);
		}
	}

	if (rc != 0) {
		fprintf(stderr, "starting I/O failed\n");
	}

	ns_ctx->current_queue_depth++;
}
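The io_complete callback and the task_pool mempool referenced above are defined elsewhere in the benchmark. Below is a minimal sketch of such a completion handler, reusing the names from the example (perf_task, ns_worker_ctx, current_queue_depth, task_pool); the body is an assumption about how a perf-style tool typically handles completions, not the tool's actual code.

static void
io_complete(void *ctx, const struct spdk_nvme_cpl *cpl)
{
	struct perf_task	*task = ctx;
	struct ns_worker_ctx	*ns_ctx = task->ns_ctx;

	if (spdk_nvme_cpl_is_error(cpl)) {
		fprintf(stderr, "I/O completed with error\n");
	}

	ns_ctx->current_queue_depth--;

	/* Return the task to the pool; a real benchmark would normally
	 * submit a replacement I/O here to keep the queue depth constant. */
	rte_mempool_put(task_pool, task);
}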
Example #2
File: reset.c Project: ceph/spdk
static void
submit_single_io(struct ns_worker_ctx *ns_ctx)
{
	struct reset_task	*task = NULL;
	uint64_t		offset_in_ios;
	int			rc;
	struct ns_entry		*entry = ns_ctx->entry;

	if (rte_mempool_get(task_pool, (void **)&task) != 0) {
		fprintf(stderr, "task_pool rte_mempool_get failed\n");
		exit(1);
	}

	task->ns_ctx = ns_ctx;
	task->ns_ctx->io_submitted++;

	if (g_is_random) {
		offset_in_ios = rand_r(&seed) % entry->size_in_ios;
	} else {
		offset_in_ios = ns_ctx->offset_in_ios++;
		if (ns_ctx->offset_in_ios == entry->size_in_ios) {
			ns_ctx->offset_in_ios = 0;
		}
	}

	if ((g_rw_percentage == 100) ||
	    (g_rw_percentage != 0 && ((rand_r(&seed) % 100) < g_rw_percentage))) {
		rc = spdk_nvme_ns_cmd_read(entry->ns, task->buf, offset_in_ios * entry->io_size_blocks,
					   entry->io_size_blocks, io_complete, task, 0);
	} else {
		rc = spdk_nvme_ns_cmd_write(entry->ns, task->buf, offset_in_ios * entry->io_size_blocks,
					    entry->io_size_blocks, io_complete, task, 0);
	}

	if (rc != 0) {
		fprintf(stderr, "starting I/O failed\n");
	}

	ns_ctx->current_queue_depth++;
}
Example #3
static bool
nvmf_process_io_cmd(struct spdk_nvmf_request *req)
{
	struct nvmf_session *session = req->conn->sess;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *response;
	struct spdk_nvmf_subsystem *subsystem = session->subsys;
	struct spdk_nvmf_namespace *nvmf_ns;
	struct spdk_nvme_ctrlr *ctrlr = NULL;
	struct spdk_nvme_ns *ns = NULL;
	struct spdk_nvme_qpair *qpair;
	uint32_t nsid = 0;
	struct nvme_read_cdw12 *cdw12;
	uint64_t lba_address;
	uint32_t lba_count;
	uint32_t io_flags;
	int rc = 0;

	/* pre-set response details for this command */
	response = &req->rsp->nvme_cpl;
	response->status.sc = SPDK_NVME_SC_SUCCESS;
	response->cid = cmd->cid;

	/* verify subsystem */
	if (subsystem == NULL) {
		SPDK_ERRLOG("Subsystem Not Initialized!\n");
		response->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		return true;
	}

	/* verify that the controller is ready to process commands */
	if (session->vcprop.csts.bits.rdy == 0) {
		SPDK_ERRLOG("Subsystem Controller Not Ready!\n");
		response->status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
		return true;
	}

	/* verify namespace id */
	if (cmd->nsid == 0 || cmd->nsid > MAX_PER_SUBSYSTEM_NAMESPACES) {
		SPDK_ERRLOG("Invalid NS_ID %u\n", cmd->nsid);
		response->status.sc = SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT;
		return true;
	}

	nvmf_ns = &subsystem->ns_list_map[cmd->nsid - 1];
	ctrlr = nvmf_ns->ctrlr;
	nsid = nvmf_ns->nvme_ns_id;
	ns = nvmf_ns->ns;
	qpair = nvmf_ns->qpair;

	switch (cmd->opc) {
	case SPDK_NVME_OPC_READ:
	case SPDK_NVME_OPC_WRITE:
		cdw12 = (struct nvme_read_cdw12 *)&cmd->cdw12;
		/* The NVMe library read/write interface expects a 1-based lba_count, while NLB in CDW12 is 0-based */
		lba_count = cdw12->nlb + 1;
		lba_address = cmd->cdw11;
		lba_address = (lba_address << 32) + cmd->cdw10;
		io_flags = cmd->cdw12 & 0xFFFF0000U;

		if (cmd->opc == SPDK_NVME_OPC_READ) {
			SPDK_TRACELOG(SPDK_TRACE_NVMF, "Read LBA 0x%" PRIx64 ", 0x%x blocks\n",
				      lba_address, lba_count);
			spdk_trace_record(TRACE_NVMF_LIB_READ_START, 0, 0, (uint64_t)req, 0);
			rc = spdk_nvme_ns_cmd_read(ns, qpair,
						   req->data, lba_address, lba_count,
						   nvmf_complete_cmd,
						   req, io_flags);
		} else {
			SPDK_TRACELOG(SPDK_TRACE_NVMF, "Write LBA 0x%" PRIx64 ", 0x%x blocks\n",
				      lba_address, lba_count);
			spdk_trace_record(TRACE_NVMF_LIB_WRITE_START, 0, 0, (uint64_t)req, 0);
			rc = spdk_nvme_ns_cmd_write(ns, qpair,
						    req->data, lba_address, lba_count,
						    nvmf_complete_cmd,
						    req, io_flags);
		}
		break;
	default:
		SPDK_TRACELOG(SPDK_TRACE_NVMF, "io_cmd passthrough: opc 0x%02x\n", cmd->opc);
		cmd->nsid = nsid;
		rc = spdk_nvme_ctrlr_cmd_io_raw(ctrlr, qpair,
						cmd,
						req->data, req->length,
						nvmf_complete_cmd,
						req);
		break;
	}

	if (rc) {
		SPDK_ERRLOG("Failed to submit Opcode 0x%02x\n", cmd->opc);
		response->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		return true;
	}

	return false;
}
Example #4
static void
hello_world(void)
{
	struct ns_entry			*ns_entry;
	struct hello_world_sequence	sequence;
	int				rc;

	ns_entry = g_namespaces;
	while (ns_entry != NULL) {
		/*
		 * Allocate an I/O qpair that we can use to submit read/write requests
		 *  to namespaces on the controller.  NVMe controllers typically support
		 *  many qpairs per controller.  Any I/O qpair allocated for a controller
		 *  can submit I/O to any namespace on that controller.
		 *
		 * The SPDK NVMe driver provides no synchronization for qpair accesses -
		 *  the application must ensure only a single thread submits I/O to a
		 *  qpair, and that same thread must also check for completions on that
		 *  qpair.  This enables extremely efficient I/O processing by making all
		 *  I/O operations completely lockless.
		 */
		ns_entry->qpair = spdk_nvme_ctrlr_alloc_io_qpair(ns_entry->ctrlr, 0);
		if (ns_entry->qpair == NULL) {
			printf("ERROR: spdk_nvme_ctrlr_alloc_io_qpair() failed\n");
			return;
		}

		/*
		 * Use DPDK rte_zmalloc to allocate a 4KB zeroed buffer.  This memory
		 *  will be allocated from 2MB hugepages and will be pinned.  These are
		 *  both requirements for data buffers used for SPDK NVMe I/O operations.
		 */
		sequence.buf = rte_zmalloc(NULL, 0x1000, 0x1000);
		if (sequence.buf == NULL) {
			printf("ERROR: rte_zmalloc() failed\n");
			spdk_nvme_ctrlr_free_io_qpair(ns_entry->qpair);
			return;
		}
		sequence.is_completed = 0;
		sequence.ns_entry = ns_entry;

		/*
		 * Print "Hello world!" to sequence.buf.  We will write this data to LBA
		 *  0 on the namespace, and then later read it back into a separate buffer
		 *  to demonstrate the full I/O path.
		 */
		sprintf(sequence.buf, "Hello world!\n");

		/*
		 * Write the data buffer to LBA 0 of this namespace.  "write_complete" and
		 *  "&sequence" are specified as the completion callback function and
		 *  argument respectively.  write_complete() will be called with the
		 *  value of &sequence as a parameter when the write I/O is completed.
		 *  This allows users to potentially specify different completion
		 *  callback routines for each I/O, as well as pass a unique handle
		 *  as an argument so the application knows which I/O has completed.
		 *
		 * Note that the SPDK NVMe driver will only check for completions
		 *  when the application calls spdk_nvme_qpair_process_completions().
		 *  It is the responsibility of the application to trigger the polling
		 *  process.
		 */
		rc = spdk_nvme_ns_cmd_write(ns_entry->ns, ns_entry->qpair, sequence.buf,
					    0, /* LBA start */
					    1, /* number of LBAs */
					    write_complete, &sequence, 0);
		if (rc != 0) {
			fprintf(stderr, "starting write I/O failed\n");
			exit(1);
		}

		/*
		 * Poll for completions.  0 here means process all available completions.
		 *  In certain usage models, the caller may specify a positive integer
		 *  instead of 0 to signify the maximum number of completions it should
		 *  process.  This function will never block - if there are no
		 *  completions pending on the specified qpair, it will return immediately.
		 *
		 * When the write I/O completes, write_complete() will submit a new I/O
		 *  to read LBA 0 into a separate buffer, specifying read_complete() as its
		 *  completion routine.  When the read I/O completes, read_complete() will
		 *  print the buffer contents and set sequence.is_completed = 1.  That will
		 *  break this loop and then exit the program.
		 */
		while (!sequence.is_completed) {
			spdk_nvme_qpair_process_completions(ns_entry->qpair, 0);
		}

		/*
		 * Free the I/O qpair.  This typically is done when an application exits.
		 *  But SPDK does support freeing and then reallocating qpairs during
		 *  operation.  It is the responsibility of the caller to ensure all
		 *  pending I/O are completed before trying to free the qpair.
		 */
		spdk_nvme_ctrlr_free_io_qpair(ns_entry->qpair);
		ns_entry = ns_entry->next;
	}
}
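hello_world() relies on write_complete() and read_complete(), which are defined elsewhere in the example. A hedged sketch of how they could look, assuming the hello_world_sequence fields used above (buf, is_completed, ns_entry) and that ns_entry carries the ns and qpair handles; reusing the write buffer for the read-back is an assumption made to keep the sketch short.

static void
read_complete(void *arg, const struct spdk_nvme_cpl *completion)
{
	struct hello_world_sequence *sequence = arg;

	/* Print the data read back from LBA 0 and end the polling loop
	 *  in hello_world(). */
	printf("%s", sequence->buf);
	rte_free(sequence->buf);
	sequence->is_completed = 1;
}

static void
write_complete(void *arg, const struct spdk_nvme_cpl *completion)
{
	struct hello_world_sequence	*sequence = arg;
	struct ns_entry			*ns_entry = sequence->ns_entry;
	int				rc;

	/* The write to LBA 0 finished; read the same LBA back into the
	 *  same buffer and let read_complete() finish the sequence. */
	rc = spdk_nvme_ns_cmd_read(ns_entry->ns, ns_entry->qpair, sequence->buf,
				   0, /* LBA start */
				   1, /* number of LBAs */
				   read_complete, (void *)sequence, 0);
	if (rc != 0) {
		fprintf(stderr, "starting read I/O failed\n");
		exit(1);
	}
}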
Example #5
static int
write_read_e2e_dp_tests(struct dev *dev, nvme_build_io_req_fn_t build_io_fn, const char *test_name)
{
	int rc = 0;
	uint32_t lba_count;
	uint32_t io_flags = 0;

	struct io_request *req;
	struct spdk_nvme_ns *ns;
	struct spdk_nvme_qpair *qpair;
	const struct spdk_nvme_ns_data *nsdata;

	ns = spdk_nvme_ctrlr_get_ns(dev->ctrlr, 1);
	if (!ns) {
		fprintf(stderr, "Null namespace\n");
		return 0;
	}

	if (!(spdk_nvme_ns_get_flags(ns) & SPDK_NVME_NS_DPS_PI_SUPPORTED))
		return 0;

	nsdata = spdk_nvme_ns_get_data(ns);
	if (!nsdata || !spdk_nvme_ns_get_sector_size(ns)) {
		fprintf(stderr, "Empty nsdata or wrong sector size\n");
		return 0;
	}

	req = rte_zmalloc(NULL, sizeof(*req), 0);
	if (!req) {
		fprintf(stderr, "Allocate request failed\n");
		return 0;
	}

	/* Set up the I/O parameters */
	lba_count = build_io_fn(ns, req, &io_flags);

	if (!lba_count) {
		fprintf(stderr, "%s: %s bypass the test case\n", dev->name, test_name);
		free_req(req);
		return 0;
	}

	qpair = spdk_nvme_ctrlr_alloc_io_qpair(dev->ctrlr, 0);
	if (!qpair) {
		free_req(req);
		return -1;
	}

	ns_data_buffer_reset(ns, req, DATA_PATTERN);
	if (req->use_extended_lba)
		rc = spdk_nvme_ns_cmd_write(ns, qpair, req->contig, req->lba, lba_count,
					    io_complete, req, io_flags);
	else
		rc = spdk_nvme_ns_cmd_write_with_md(ns, qpair, req->contig, req->metadata, req->lba, lba_count,
						    io_complete, req, io_flags, req->apptag_mask, req->apptag);

	if (rc != 0) {
		fprintf(stderr, "%s: %s write submit failed\n", dev->name, test_name);
		spdk_nvme_ctrlr_free_io_qpair(qpair);
		free_req(req);
		return -1;
	}

	io_complete_flag = 0;

	while (!io_complete_flag)
		spdk_nvme_qpair_process_completions(qpair, 1);

	if (io_complete_flag != 1) {
		fprintf(stderr, "%s: %s write exec failed\n", dev->name, test_name);
		spdk_nvme_ctrlr_free_io_qpair(qpair);
		free_req(req);
		return -1;
	}

	/* reset completion flag */
	io_complete_flag = 0;

	ns_data_buffer_reset(ns, req, 0);
	if (req->use_extended_lba)
		rc = spdk_nvme_ns_cmd_read(ns, qpair, req->contig, req->lba, lba_count,
					   io_complete, req, io_flags);
	else
		rc = spdk_nvme_ns_cmd_read_with_md(ns, qpair, req->contig, req->metadata, req->lba, lba_count,
						   io_complete, req, io_flags, req->apptag_mask, req->apptag);

	if (rc != 0) {
		fprintf(stderr, "%s: %s read failed\n", dev->name, test_name);
		spdk_nvme_ctrlr_free_io_qpair(qpair);
		free_req(req);
		return -1;
	}

	while (!io_complete_flag)
		spdk_nvme_qpair_process_completions(qpair, 1);

	if (io_complete_flag != 1) {
		fprintf(stderr, "%s: %s read failed\n", dev->name, test_name);
		spdk_nvme_ctrlr_free_io_qpair(qpair);
		free_req(req);
		return -1;
	}

	rc = ns_data_buffer_compare(ns, req, DATA_PATTERN);
	if (rc < 0) {
		fprintf(stderr, "%s: %s write/read success, but memcmp Failed\n", dev->name, test_name);
		spdk_nvme_ctrlr_free_io_qpair(qpair);
		free_req(req);
		return -1;
	}

	fprintf(stdout, "%s: %s test passed\n", dev->name, test_name);
	spdk_nvme_ctrlr_free_io_qpair(qpair);
	free_req(req);
	return rc;
}
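The success checks above distinguish io_complete_flag == 1 from other non-zero values, which suggests the io_complete callback encodes errors in the flag. A sketch under that assumption (the value 2 for errors is hypothetical):

static void
io_complete(void *ctx, const struct spdk_nvme_cpl *cpl)
{
	/* Assumed convention: 1 = completed successfully, 2 = completed with
	 * an error, so the caller's "io_complete_flag != 1" check catches
	 * failed I/O as well as timeouts in the polling loop. */
	if (spdk_nvme_cpl_is_error(cpl)) {
		io_complete_flag = 2;
	} else {
		io_complete_flag = 1;
	}
}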
Example #6
static int
u2_lat_bench(void)
{
	int i, rc;
	//int rc;

	void *buf;

	uint32_t io_size_blocks;
	uint64_t offset_in_ios, size_in_ios;

	uint64_t tsc_rate;
	uint64_t tsc_start, tsc_elapsed;
	//uint64_t tsc_end;

	buf = rte_malloc(NULL, io_size, U2_BUFFER_ALIGN);
	if (buf == NULL) {
		fprintf(stderr, "failed to rte_malloc buffer!\n");
		return 1;
	}
	memset(buf, 0xff, io_size);

	//io_num = 0;
	//io_depth = 0;

	io_size_blocks = io_size / u2_ns_sector;
	offset_in_ios = -1;
	size_in_ios = u2_ns_size / io_size;

	tsc_rate = rte_get_tsc_hz();
	tsc_elapsed = 0;
	tsc_start = rte_get_timer_cycles();
	//tsc_end = rte_get_timer_cycles() + time_in_sec * tsc_rate;
	for (i = 0; i < io_num; i++) {
	//while (1) {
		if (is_random) {
			offset_in_ios = rand_r(&seed) % size_in_ios;
		} else {
			if (++offset_in_ios >= size_in_ios) {
				offset_in_ios = 0;
			}
		}

		if (is_rw) {
			rc = spdk_nvme_ns_cmd_read(u2_ns, u2_qpair, buf, offset_in_ios * io_size_blocks,
						   io_size_blocks, u2_io_complete, NULL, 0);
		} else {
			rc = spdk_nvme_ns_cmd_write(u2_ns, u2_qpair, buf, offset_in_ios * io_size_blocks,
						    io_size_blocks, u2_io_complete, NULL, 0);
		}
		if (rc) {
			fprintf(stderr, "failed to submit request %d!\n", i);
			rte_free(buf);
			return rc;
		}
		io_depth++;    // for latency benchmarking, queue depth stays at 1.

		while (io_depth > 0) {
			spdk_nvme_qpair_process_completions(u2_qpair, 0);
		}

		//if (rte_get_timer_cycles() > tsc_end) {
		//	break;
		//}
	}
	tsc_elapsed = rte_get_timer_cycles() - tsc_start;

	printf("\t\t%9.1f us", (float) (tsc_elapsed * 1000000) / (io_num * tsc_rate));
	printf("\t\t%10.1f s", (float) tsc_elapsed / tsc_rate);
	printf("\n");

	//printf("\t\t%9.1f us", (float) (time_in_sec * 1000000) / io_num);
	//printf("\t\t%12"PRIu64"\n", io_num);

	rte_free(buf);

	return 0;
}
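The loop above keeps the queue depth at one: it submits a single I/O, then polls until io_depth drops back to zero. u2_io_complete is not part of this snippet; a minimal sketch, assuming io_depth is the global counter the loop polls on:

static void
u2_io_complete(void *cb_arg, const struct spdk_nvme_cpl *cpl)
{
	if (spdk_nvme_cpl_is_error(cpl)) {
		fprintf(stderr, "I/O completed with error!\n");
	}

	/* Each completion lets the while (io_depth > 0) loop in
	 * u2_lat_bench() make progress. */
	io_depth--;
}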