/*
 * Prime the worker's queue: issue one I/O per requested queue slot.
 * Completions will keep the pipeline full by resubmitting from the
 * completion callback.
 */
static void
submit_io(struct ns_worker_ctx *ns_ctx, int queue_depth)
{
	int i;

	for (i = 0; i < queue_depth; i++) {
		submit_single_io(ns_ctx);
	}
}
/*
 * Completion handler for a reset-test task: account the result as a
 * success or an error, recycle the task object, and (unless the run is
 * draining) submit a replacement I/O to hold queue depth steady.
 */
static void
task_complete(struct reset_task *task, const struct spdk_nvme_cpl *completion)
{
	struct ns_worker_ctx *worker = task->ns_ctx;

	worker->current_queue_depth--;

	if (spdk_nvme_cpl_is_error(completion)) {
		worker->io_completed_error++;
	} else {
		worker->io_completed++;
	}

	rte_mempool_put(task_pool, task);

	/*
	 * is_draining is set once the timed portion of the test has expired;
	 * from then on we only wait for outstanding I/O to finish instead of
	 * replacing each completed I/O with a new submission.
	 */
	if (!worker->is_draining) {
		submit_single_io(worker);
	}
}
static void task_complete(struct perf_task *task) { struct ns_worker_ctx *ns_ctx; uint64_t tsc_diff; ns_ctx = task->ns_ctx; ns_ctx->current_queue_depth--; ns_ctx->io_completed++; tsc_diff = rte_get_timer_cycles() - task->submit_tsc; ns_ctx->total_tsc += tsc_diff; if (ns_ctx->min_tsc > tsc_diff) { ns_ctx->min_tsc = tsc_diff; } if (ns_ctx->max_tsc < tsc_diff) { ns_ctx->max_tsc = tsc_diff; } rte_mempool_put(task_pool, task); /* * is_draining indicates when time has expired for the test run * and we are just waiting for the previously submitted I/O * to complete. In this case, do not submit a new I/O to replace * the one just completed. */ if (!ns_ctx->is_draining) { submit_single_io(ns_ctx); } }
/*
 * Fill the namespace's queue to the requested depth with initial I/O;
 * each completion resubmits, keeping the depth constant thereafter.
 */
static void
submit_io(struct ns_entry *entry, int queue_depth)
{
	int remaining = queue_depth;

	while (remaining > 0) {
		submit_single_io(entry);
		remaining--;
	}
}
/*
 * Poll for I/O completions on the single global namespace and account
 * completion-side time (ticks spent between completions) into the
 * g_tsc_complete total and min/max watermarks.
 *
 * NOTE(review): this relies on global state (g_ns, g_complete_tsc_start,
 * g_tsc_complete*, g_io_completed) and appears to assume queue depth 1 —
 * "current_queue_depth == 1" seems to mean the polled I/O has not yet
 * completed; confirm against the submit path.
 */
static void check_io(void) { uint64_t end, tsc_complete; spdk_mb(); /* memory barrier before polling */ #if HAVE_LIBAIO if (g_ns->type == ENTRY_TYPE_AIO_FILE) { aio_check_io(); } else #endif { /* Non-blocking poll: 0 = process all available completions. */ spdk_nvme_qpair_process_completions(g_ns->u.nvme.qpair, 0); } spdk_mb(); end = spdk_get_ticks(); if (g_ns->current_queue_depth == 1) { /*
	 * Account for race condition in AIO case where interrupt occurs
	 * after checking for queue depth. If the timestamp capture
	 * is too big compared to the last capture, assume that an
	 * interrupt fired, and do not bump the start tsc forward. This
	 * will ensure this extra time is accounted for next time through
	 * when we see current_queue_depth drop to 0.
	 */ /* 500 ticks is the heuristic threshold for "an interrupt fired". */ if (g_ns->type == ENTRY_TYPE_NVME_NS || (end - g_complete_tsc_start) < 500) { g_complete_tsc_start = end; } } else { /* An I/O completed: charge the elapsed ticks to completion time. */ tsc_complete = end - g_complete_tsc_start; g_tsc_complete += tsc_complete; if (tsc_complete < g_tsc_complete_min) { g_tsc_complete_min = tsc_complete; } if (tsc_complete > g_tsc_complete_max) { g_tsc_complete_max = tsc_complete; } g_io_completed++; /* Replace the completed I/O unless the timed run is winding down. */ if (!g_ns->is_draining) { submit_single_io(); } /* Restart the completion-time clock after the (possible) resubmit. */ g_complete_tsc_start = spdk_get_ticks(); } }
/*
 * Basic perf-task completion handler (no latency tracking): bump the
 * completion counter, return the task to its pool, and keep the queue
 * depth constant by resubmitting unless the test is draining.
 */
static void
task_complete(struct perf_task *task)
{
	struct ns_worker_ctx *worker = task->ns_ctx;

	worker->current_queue_depth--;
	worker->io_completed++;

	rte_mempool_put(task_pool, task);

	/*
	 * Once is_draining is set the test duration has elapsed and we are
	 * only waiting for in-flight I/O to land, so do not submit a new I/O
	 * in place of the one that just completed.
	 */
	if (!worker->is_draining) {
		submit_single_io(worker);
	}
}
/*
 * Per-core worker entry point: set up the namespace queue pair, submit the
 * initial I/O, poll for completions until the configured test duration
 * elapses, then drain outstanding I/O and tear down.
 *
 * Returns 0 on success, 1 if worker-context initialization fails.
 */
static int
work_fn(void)
{
	uint64_t deadline;

	printf("Starting work_fn on core %u\n", rte_lcore_id());

	/* Allocate a queue pair for each namespace. */
	if (init_ns_worker_ctx() != 0) {
		printf("ERROR: init_ns_worker_ctx() failed\n");
		return 1;
	}

	deadline = spdk_get_ticks() + g_time_in_sec * g_tsc_rate;

	/* Submit initial I/O for each namespace. */
	submit_single_io();
	g_complete_tsc_start = spdk_get_ticks();

	/*
	 * Poll completions until the deadline; each completed I/O is
	 * replaced from within check_io()'s completion path.
	 */
	do {
		check_io();
	} while (spdk_get_ticks() <= deadline);

	drain_io();
	cleanup_ns_worker_ctx();

	return 0;
}
/*
 * NVMe completion callback: recover the perf task from the callback
 * context, account the completion against its namespace entry, recycle
 * the task, and resubmit to maintain queue depth unless draining.
 */
static void
io_complete(void *ctx, const struct nvme_completion *completion)
{
	struct perf_task *task = (struct perf_task *)ctx;
	struct ns_entry *ns = task->entry;

	ns->current_queue_depth--;
	ns->io_completed++;

	rte_mempool_put(task_pool, task);

	/*
	 * is_draining indicates the timed run is over and we are only
	 * waiting on previously submitted I/O; in that case do not replace
	 * the completed I/O with a new one.
	 */
	if (!ns->is_draining) {
		submit_single_io(ns);
	}
}