static void submit_single_io(struct ns_entry *entry) { struct perf_task *task = NULL; uint64_t offset_in_ios; int rc; rte_mempool_get(task_pool, (void **)&task); task->entry = entry; if (g_is_random) { offset_in_ios = rand_r(&seed) % entry->size_in_ios; } else { offset_in_ios = entry->offset_in_ios++; if (entry->offset_in_ios == entry->size_in_ios) { entry->offset_in_ios = 0; } } if ((g_rw_percentage == 100) || (g_rw_percentage != 0 && ((rand_r(&seed) % 100) < g_rw_percentage))) { rc = nvme_ns_cmd_read(entry->ns, task->buf, offset_in_ios * entry->io_size_blocks, entry->io_size_blocks, io_complete, task); } else { rc = nvme_ns_cmd_write(entry->ns, task->buf, offset_in_ios * entry->io_size_blocks, entry->io_size_blocks, io_complete, task); } if (rc != 0) { fprintf(stderr, "starting I/O failed\n"); } entry->current_queue_depth++; }
/*
 * Submit one I/O on behalf of the given per-worker namespace context.
 *
 * Chooses a random or sequential offset (g_is_random), then a read or a
 * write (g_rw_percentage = read percentage), and dispatches either through
 * libaio (for ENTRY_TYPE_AIO_FILE entries, when compiled with HAVE_LIBAIO)
 * or through the NVMe driver.  Completions arrive via io_complete() for the
 * NVMe path; the AIO path passes `task` through aio_submit().
 * Increments ns_ctx->current_queue_depth for the outstanding I/O.
 */
static void submit_single_io(struct ns_worker_ctx *ns_ctx)
{
	struct perf_task *task = NULL;
	uint64_t offset_in_ios;
	int rc;
	struct ns_entry *entry = ns_ctx->entry;

	/* Pool exhaustion is treated as fatal: the tool cannot make progress
	 * without a task descriptor. */
	if (rte_mempool_get(task_pool, (void **)&task) != 0) {
		fprintf(stderr, "task_pool rte_mempool_get failed\n");
		exit(1);
	}

	task->ns_ctx = ns_ctx;

	if (g_is_random) {
		/* NOTE(review): `seed` appears to be shared state; with
		 * multiple workers rand_r() on a shared seed is racy —
		 * confirm whether seed is per-thread. */
		offset_in_ios = rand_r(&seed) % entry->size_in_ios;
	} else {
		/* Sequential position is tracked per worker context and
		 * wraps at the end of the namespace. */
		offset_in_ios = ns_ctx->offset_in_ios++;
		if (ns_ctx->offset_in_ios == entry->size_in_ios) {
			ns_ctx->offset_in_ios = 0;
		}
	}

	/* 100 => always read; 0 => always write; otherwise read with
	 * probability g_rw_percentage / 100. */
	if ((g_rw_percentage == 100) ||
	    (g_rw_percentage != 0 && ((rand_r(&seed) % 100) < g_rw_percentage))) {
#if HAVE_LIBAIO
		if (entry->type == ENTRY_TYPE_AIO_FILE) {
			/* AIO path works in bytes, not blocks. */
			rc = aio_submit(ns_ctx->ctx, &task->iocb, entry->u.aio.fd,
					IO_CMD_PREAD, task->buf, g_io_size_bytes,
					offset_in_ios * g_io_size_bytes, task);
		} else
#endif
		{
			rc = nvme_ns_cmd_read(entry->u.nvme.ns, task->buf,
					      offset_in_ios * entry->io_size_blocks,
					      entry->io_size_blocks, io_complete, task);
		}
	} else {
#if HAVE_LIBAIO
		if (entry->type == ENTRY_TYPE_AIO_FILE) {
			rc = aio_submit(ns_ctx->ctx, &task->iocb, entry->u.aio.fd,
					IO_CMD_PWRITE, task->buf, g_io_size_bytes,
					offset_in_ios * g_io_size_bytes, task);
		} else
#endif
		{
			rc = nvme_ns_cmd_write(entry->u.nvme.ns, task->buf,
					       offset_in_ios * entry->io_size_blocks,
					       entry->io_size_blocks, io_complete, task);
		}
	}

	/* Submission failure is logged but the queue depth is still bumped;
	 * presumably the completion path reconciles this — verify. */
	if (rc != 0) {
		fprintf(stderr, "starting I/O failed\n");
	}

	ns_ctx->current_queue_depth++;
}
static void nvme_ns_io_test_cb(void *arg, const struct nvme_completion *cpl) { struct nvme_io_test_thread *tth = arg; struct timeval t; tth->io_completed++; if (nvme_completion_is_error(cpl)) { printf("%s: error occurred\n", __func__); wakeup_one(tth); return; } getmicrouptime(&t); timevalsub(&t, &tth->start); if (t.tv_sec >= tth->time) { wakeup_one(tth); return; } switch (tth->opc) { case NVME_OPC_WRITE: nvme_ns_cmd_write(tth->ns, tth->buf, tth->idx * 2048, tth->size/nvme_ns_get_sector_size(tth->ns), nvme_ns_io_test_cb, tth); break; case NVME_OPC_READ: nvme_ns_cmd_read(tth->ns, tth->buf, tth->idx * 2048, tth->size/nvme_ns_get_sector_size(tth->ns), nvme_ns_io_test_cb, tth); break; default: break; } }