static void submit_single_io(struct ns_worker_ctx *ns_ctx) { struct perf_task *task = NULL; uint64_t offset_in_ios; int rc; struct ns_entry *entry = ns_ctx->entry; if (rte_mempool_get(task_pool, (void **)&task) != 0) { fprintf(stderr, "task_pool rte_mempool_get failed\n"); exit(1); } task->ns_ctx = ns_ctx; if (g_is_random) { offset_in_ios = rand_r(&seed) % entry->size_in_ios; } else { offset_in_ios = ns_ctx->offset_in_ios++; if (ns_ctx->offset_in_ios == entry->size_in_ios) { ns_ctx->offset_in_ios = 0; } } task->submit_tsc = rte_get_timer_cycles(); if ((g_rw_percentage == 100) || (g_rw_percentage != 0 && ((rand_r(&seed) % 100) < g_rw_percentage))) { #if HAVE_LIBAIO if (entry->type == ENTRY_TYPE_AIO_FILE) { rc = aio_submit(ns_ctx->u.aio.ctx, &task->iocb, entry->u.aio.fd, IO_CMD_PREAD, task->buf, g_io_size_bytes, offset_in_ios * g_io_size_bytes, task); } else #endif { rc = spdk_nvme_ns_cmd_read(entry->u.nvme.ns, ns_ctx->u.nvme.qpair, task->buf, offset_in_ios * entry->io_size_blocks, entry->io_size_blocks, io_complete, task, 0); } } else { #if HAVE_LIBAIO if (entry->type == ENTRY_TYPE_AIO_FILE) { rc = aio_submit(ns_ctx->u.aio.ctx, &task->iocb, entry->u.aio.fd, IO_CMD_PWRITE, task->buf, g_io_size_bytes, offset_in_ios * g_io_size_bytes, task); } else #endif { rc = spdk_nvme_ns_cmd_write(entry->u.nvme.ns, ns_ctx->u.nvme.qpair, task->buf, offset_in_ios * entry->io_size_blocks, entry->io_size_blocks, io_complete, task, 0); } } if (rc != 0) { fprintf(stderr, "starting I/O failed\n"); } ns_ctx->current_queue_depth++; }
static void submit_single_io(void) { uint64_t offset_in_ios; uint64_t start; int rc; struct ns_entry *entry = g_ns; uint64_t tsc_submit; offset_in_ios = rand_r(&seed) % entry->size_in_ios; start = spdk_get_ticks(); spdk_mb(); #if HAVE_LIBAIO if (entry->type == ENTRY_TYPE_AIO_FILE) { rc = aio_submit(g_ns->u.aio.ctx, &g_task->iocb, entry->u.aio.fd, IO_CMD_PREAD, g_task->buf, g_io_size_bytes, offset_in_ios * g_io_size_bytes, g_task); } else #endif { rc = spdk_nvme_ns_cmd_read(entry->u.nvme.ns, g_ns->u.nvme.qpair, g_task->buf, offset_in_ios * entry->io_size_blocks, entry->io_size_blocks, io_complete, g_task, 0); } spdk_mb(); tsc_submit = spdk_get_ticks() - start; g_tsc_submit += tsc_submit; if (tsc_submit < g_tsc_submit_min) { g_tsc_submit_min = tsc_submit; } if (tsc_submit > g_tsc_submit_max) { g_tsc_submit_max = tsc_submit; } if (rc != 0) { fprintf(stderr, "starting I/O failed\n"); } g_ns->current_queue_depth++; }
static void submit_single_io(struct ns_worker_ctx *ns_ctx) { struct reset_task *task = NULL; uint64_t offset_in_ios; int rc; struct ns_entry *entry = ns_ctx->entry; if (rte_mempool_get(task_pool, (void **)&task) != 0) { fprintf(stderr, "task_pool rte_mempool_get failed\n"); exit(1); } task->ns_ctx = ns_ctx; task->ns_ctx->io_submitted++; if (g_is_random) { offset_in_ios = rand_r(&seed) % entry->size_in_ios; } else { offset_in_ios = ns_ctx->offset_in_ios++; if (ns_ctx->offset_in_ios == entry->size_in_ios) { ns_ctx->offset_in_ios = 0; } } if ((g_rw_percentage == 100) || (g_rw_percentage != 0 && ((rand_r(&seed) % 100) < g_rw_percentage))) { rc = spdk_nvme_ns_cmd_read(entry->ns, task->buf, offset_in_ios * entry->io_size_blocks, entry->io_size_blocks, io_complete, task, 0); } else { rc = spdk_nvme_ns_cmd_write(entry->ns, task->buf, offset_in_ios * entry->io_size_blocks, entry->io_size_blocks, io_complete, task, 0); } if (rc != 0) { fprintf(stderr, "starting I/O failed\n"); } ns_ctx->current_queue_depth++; }
static void write_complete(void *arg, const struct spdk_nvme_cpl *completion) { struct hello_world_sequence *sequence = arg; struct ns_entry *ns_entry = sequence->ns_entry; int rc; /* * The write I/O has completed. Free the buffer associated with * the write I/O and allocate a new zeroed buffer for reading * the data back from the NVMe namespace. */ rte_free(sequence->buf); sequence->buf = rte_zmalloc(NULL, 0x1000, 0x1000); rc = spdk_nvme_ns_cmd_read(ns_entry->ns, ns_entry->qpair, sequence->buf, 0, /* LBA start */ 1, /* number of LBAs */ read_complete, (void *)sequence, 0); if (rc != 0) { fprintf(stderr, "starting read I/O failed\n"); exit(1); } }
/*
 * Dispatch an NVMf I/O command to the backing NVMe namespace.
 *
 * READ/WRITE opcodes are translated into spdk_nvme_ns_cmd_read/write calls;
 * every other opcode is forwarded verbatim via spdk_nvme_ctrlr_cmd_io_raw.
 *
 * Returns true when the response is complete (validation or submission
 * failure, with response->status.sc already set); returns false when the
 * command was submitted asynchronously and nvmf_complete_cmd will finish it.
 */
static bool nvmf_process_io_cmd(struct spdk_nvmf_request *req)
{
	struct nvmf_session *session = req->conn->sess;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *response;
	struct spdk_nvmf_subsystem *subsystem = session->subsys;
	struct spdk_nvmf_namespace *nvmf_ns;
	struct spdk_nvme_ctrlr *ctrlr = NULL;
	struct spdk_nvme_ns *ns = NULL;
	struct spdk_nvme_qpair *qpair;
	uint32_t nsid = 0;
	struct nvme_read_cdw12 *cdw12;
	uint64_t lba_address;
	uint32_t lba_count;
	uint32_t io_flags;
	int rc = 0;

	/* pre-set response details for this command (optimistic success) */
	response = &req->rsp->nvme_cpl;
	response->status.sc = SPDK_NVME_SC_SUCCESS;
	response->cid = cmd->cid;

	/* verify subsystem */
	if (subsystem == NULL) {
		SPDK_ERRLOG("Subsystem Not Initialized!\n");
		response->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		return true;
	}

	/* verify that the controller is ready to process commands */
	if (session->vcprop.csts.bits.rdy == 0) {
		SPDK_ERRLOG("Subsystem Controller Not Ready!\n");
		response->status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
		return true;
	}

	/* verify namespace id (NSIDs are 1-based; 0 is invalid) */
	if (cmd->nsid == 0 || cmd->nsid > MAX_PER_SUBSYSTEM_NAMESPACES) {
		SPDK_ERRLOG("Invalid NS_ID %u\n", cmd->nsid);
		response->status.sc = SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT;
		return true;
	}

	/* map the NVMf NSID to the backing controller/namespace/qpair */
	nvmf_ns = &subsystem->ns_list_map[cmd->nsid - 1];
	ctrlr = nvmf_ns->ctrlr;
	nsid = nvmf_ns->nvme_ns_id;
	ns = nvmf_ns->ns;
	qpair = nvmf_ns->qpair;

	switch (cmd->opc) {
	case SPDK_NVME_OPC_READ:
	case SPDK_NVME_OPC_WRITE:
		cdw12 = (struct nvme_read_cdw12 *)&cmd->cdw12;
		/* NVMe library read/write interface expects non-0based lba_count value */
		lba_count = cdw12->nlb + 1;
		/* starting LBA is split across cdw11 (high 32 bits) and cdw10 (low 32 bits) */
		lba_address = cmd->cdw11;
		lba_address = (lba_address << 32) + cmd->cdw10;
		/* upper 16 bits of cdw12 carry the per-I/O flags */
		io_flags = cmd->cdw12 & 0xFFFF0000U;
		if (cmd->opc == SPDK_NVME_OPC_READ) {
			SPDK_TRACELOG(SPDK_TRACE_NVMF, "Read LBA 0x%" PRIx64 ", 0x%x blocks\n",
				      lba_address, lba_count);
			spdk_trace_record(TRACE_NVMF_LIB_READ_START, 0, 0, (uint64_t)req, 0);
			rc = spdk_nvme_ns_cmd_read(ns, qpair, req->data,
						   lba_address, lba_count, nvmf_complete_cmd,
						   req, io_flags);
		} else {
			SPDK_TRACELOG(SPDK_TRACE_NVMF, "Write LBA 0x%" PRIx64 ", 0x%x blocks\n",
				      lba_address, lba_count);
			spdk_trace_record(TRACE_NVMF_LIB_WRITE_START, 0, 0, (uint64_t)req, 0);
			rc = spdk_nvme_ns_cmd_write(ns, qpair, req->data,
						    lba_address, lba_count, nvmf_complete_cmd,
						    req, io_flags);
		}
		break;
	default:
		/* pass every other opcode straight through to the backing controller,
		 * rewriting the NSID to the backing device's namespace id */
		SPDK_TRACELOG(SPDK_TRACE_NVMF, "io_cmd passthrough: opc 0x%02x\n", cmd->opc);
		cmd->nsid = nsid;
		rc = spdk_nvme_ctrlr_cmd_io_raw(ctrlr, qpair, cmd, req->data,
						req->length, nvmf_complete_cmd, req);
		break;
	}

	if (rc) {
		SPDK_ERRLOG("Failed to submit Opcode 0x%02x\n", cmd->opc);
		response->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		return true;
	}

	/* submitted asynchronously; completion callback will build the response */
	return false;
}
/*
 * End-to-end data-protection write/read test.
 *
 * Writes a DATA_PATTERN-filled buffer built by build_io_fn to namespace 1,
 * reads it back, and compares the contents.  Skips (returns 0) when the
 * namespace does not support PI, when namespace data is unusable, or when
 * build_io_fn declines the case.
 *
 * \param dev         device under test
 * \param build_io_fn callback that sets up the io_request and returns the
 *                    LBA count (0 to bypass the test)
 * \param test_name   label used in status messages
 * \return ns_data_buffer_compare() result (>= 0) on success, -1 on failure,
 *         0 when the test is skipped.
 */
static int
write_read_e2e_dp_tests(struct dev *dev, nvme_build_io_req_fn_t build_io_fn,
			const char *test_name)
{
	int rc = 0;
	uint32_t lba_count;
	uint32_t io_flags = 0;
	struct io_request *req;
	struct spdk_nvme_ns *ns;
	struct spdk_nvme_qpair *qpair;
	const struct spdk_nvme_ns_data *nsdata;

	ns = spdk_nvme_ctrlr_get_ns(dev->ctrlr, 1);
	if (!ns) {
		fprintf(stderr, "Null namespace\n");
		return 0;
	}
	if (!(spdk_nvme_ns_get_flags(ns) & SPDK_NVME_NS_DPS_PI_SUPPORTED)) {
		return 0;
	}

	nsdata = spdk_nvme_ns_get_data(ns);
	if (!nsdata || !spdk_nvme_ns_get_sector_size(ns)) {
		fprintf(stderr, "Empty nsdata or wrong sector size\n");
		return 0;
	}

	req = rte_zmalloc(NULL, sizeof(*req), 0);
	if (!req) {
		fprintf(stderr, "Allocate request failed\n");
		return 0;
	}

	/* IO parameters setting */
	lba_count = build_io_fn(ns, req, &io_flags);
	if (!lba_count) {
		fprintf(stderr, "%s: %s bypass the test case\n", dev->name, test_name);
		free_req(req);
		return 0;
	}

	qpair = spdk_nvme_ctrlr_alloc_io_qpair(dev->ctrlr, 0);
	if (!qpair) {
		free_req(req);
		return -1;
	}

	/* Write phase: fill with the known pattern and submit. */
	ns_data_buffer_reset(ns, req, DATA_PATTERN);
	if (req->use_extended_lba) {
		/* extended LBA: metadata is interleaved in the data buffer */
		rc = spdk_nvme_ns_cmd_write(ns, qpair, req->contig, req->lba,
					    lba_count, io_complete, req, io_flags);
	} else {
		rc = spdk_nvme_ns_cmd_write_with_md(ns, qpair, req->contig,
						    req->metadata, req->lba,
						    lba_count, io_complete, req,
						    io_flags, req->apptag_mask,
						    req->apptag);
	}
	if (rc != 0) {
		fprintf(stderr, "%s: %s write submit failed\n", dev->name, test_name);
		goto err;
	}

	/* io_complete sets the flag: 1 on success, other non-zero on error */
	io_complete_flag = 0;
	while (!io_complete_flag) {
		spdk_nvme_qpair_process_completions(qpair, 1);
	}
	if (io_complete_flag != 1) {
		fprintf(stderr, "%s: %s write exec failed\n", dev->name, test_name);
		goto err;
	}

	/* Read phase: zero the buffer, read back, then compare. */
	io_complete_flag = 0;
	ns_data_buffer_reset(ns, req, 0);
	if (req->use_extended_lba) {
		rc = spdk_nvme_ns_cmd_read(ns, qpair, req->contig, req->lba,
					   lba_count, io_complete, req, io_flags);
	} else {
		rc = spdk_nvme_ns_cmd_read_with_md(ns, qpair, req->contig,
						   req->metadata, req->lba,
						   lba_count, io_complete, req,
						   io_flags, req->apptag_mask,
						   req->apptag);
	}
	if (rc != 0) {
		fprintf(stderr, "%s: %s read failed\n", dev->name, test_name);
		goto err;
	}

	while (!io_complete_flag) {
		spdk_nvme_qpair_process_completions(qpair, 1);
	}
	if (io_complete_flag != 1) {
		fprintf(stderr, "%s: %s read failed\n", dev->name, test_name);
		goto err;
	}

	rc = ns_data_buffer_compare(ns, req, DATA_PATTERN);
	if (rc < 0) {
		fprintf(stderr, "%s: %s write/read success, but memcmp Failed\n",
			dev->name, test_name);
		goto err;
	}

	fprintf(stdout, "%s: %s test passed\n", dev->name, test_name);
	spdk_nvme_ctrlr_free_io_qpair(qpair);
	free_req(req);
	return rc;

err:
	/* single cleanup path replaces six duplicated qpair/req release blocks */
	spdk_nvme_ctrlr_free_io_qpair(qpair);
	free_req(req);
	return -1;
}
static int u2_lat_bench(void) { int i, rc; //int rc; void *buf; uint32_t io_size_blocks; uint64_t offset_in_ios, size_in_ios; uint64_t tsc_rate; uint64_t tsc_start, tsc_elapsed; //uint64_t tsc_end; buf = rte_malloc(NULL, io_size, U2_BUFFER_ALIGN); if (buf == NULL) { fprintf(stderr, "failed to rte_malloc buffer!\n"); return 1; } memset(buf, 0xff, io_size); //io_num = 0; //io_depth = 0; io_size_blocks = io_size / u2_ns_sector; offset_in_ios = -1; size_in_ios = u2_ns_size / io_size; tsc_rate = rte_get_tsc_hz(); tsc_elapsed = 0; tsc_start = rte_get_timer_cycles(); //tsc_end = rte_get_timer_cycles() + time_in_sec * tsc_rate; for (i = 0; i < io_num; i++) { //while (1) { if (is_random) { offset_in_ios = rand_r(&seed) % size_in_ios; } else { if (++offset_in_ios >= size_in_ios) { offset_in_ios = 0; } } if (is_rw) { rc = spdk_nvme_ns_cmd_read (u2_ns, u2_qpair, buf, offset_in_ios * io_size_blocks, io_size_blocks, u2_io_complete, NULL, 0); } else { rc = spdk_nvme_ns_cmd_write(u2_ns, u2_qpair, buf, offset_in_ios * io_size_blocks, io_size_blocks, u2_io_complete, NULL, 0); } if (rc) { fprintf(stderr, "failed to submit request %d!\n", i); //fprintf(stderr, "failed to submit request %d!\n", io_num); return rc; } io_depth++; // for latency benchmarking, queue depth stays at 1. while (io_depth > 0) { spdk_nvme_qpair_process_completions(u2_qpair, 0); } //if (rte_get_timer_cycles() > tsc_end) { // break; //} } tsc_elapsed = rte_get_timer_cycles() - tsc_start; printf("\t\t%9.1f us", (float) (tsc_elapsed * 1000000) / (io_num * tsc_rate)); printf("\t\t%10.1f s", (float) tsc_elapsed / tsc_rate); printf("\n"); //printf("\t\t%9.1f us", (float) (time_in_sec * 1000000) / io_num); //printf("\t\t%12"PRIu64"\n", io_num); rte_free(buf); return 0; }