static int
work_fn(void *arg)
{
	uint64_t tsc_end = rte_get_timer_cycles() + g_time_in_sec * g_tsc_rate;
	struct worker_thread *worker = (struct worker_thread *)arg;
	struct ns_worker_ctx *ns_ctx = NULL;

	printf("Starting thread on core %u\n", worker->lcore);

	if (spdk_nvme_register_io_thread() != 0) {
		fprintf(stderr, "spdk_nvme_register_io_thread() failed on core %u\n", worker->lcore);
		return -1;
	}

	/* Submit initial I/O for each namespace. */
	ns_ctx = worker->ns_ctx;
	while (ns_ctx != NULL) {
		submit_io(ns_ctx, g_queue_depth);
		ns_ctx = ns_ctx->next;
	}

	while (1) {
		/*
		 * Check for completed I/O for each controller. A new
		 * I/O will be submitted in the io_complete callback
		 * to replace each I/O that is completed.
		 */
		ns_ctx = worker->ns_ctx;
		while (ns_ctx != NULL) {
			check_io(ns_ctx);
			ns_ctx = ns_ctx->next;
		}

		if (((tsc_end - rte_get_timer_cycles()) / g_tsc_rate) > (uint64_t)g_time_in_sec / 5 &&
		    ((tsc_end - rte_get_timer_cycles()) / g_tsc_rate) < (uint64_t)(g_time_in_sec / 5 + 10)) {
			ns_ctx = worker->ns_ctx;
			while (ns_ctx != NULL) {
				if (spdk_nvme_ctrlr_reset(ns_ctx->ctr_entry->ctrlr) < 0) {
					fprintf(stderr, "nvme reset failed.\n");
					return -1;
				}
				ns_ctx = ns_ctx->next;
			}
		}

		if (rte_get_timer_cycles() > tsc_end) {
			break;
		}
	}

	ns_ctx = worker->ns_ctx;
	while (ns_ctx != NULL) {
		drain_io(ns_ctx);
		ns_ctx = ns_ctx->next;
	}

	spdk_nvme_unregister_io_thread();

	return 0;
}
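/*
 * The reset trigger above is easier to read with the TSC arithmetic factored
 * out: (tsc_end - rte_get_timer_cycles()) / g_tsc_rate is the remaining run
 * time in whole seconds, so every controller is reset repeatedly while that
 * remainder sits in a roughly ten-second window late in the run. Below is a
 * minimal sketch of the same predicate; in_reset_window() is a hypothetical
 * helper for illustration, not part of the original source.
 */
#include <inttypes.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical helper mirroring work_fn()'s reset condition. remaining_sec
 * is the truncated count of whole seconds left; total_sec is g_time_in_sec.
 */
static bool
in_reset_window(uint64_t remaining_sec, uint64_t total_sec)
{
	return remaining_sec > total_sec / 5 &&
	       remaining_sec < total_sec / 5 + 10;
}

int
main(void)
{
	/* For a 100-second run this prints 21..29: the division truncates,
	 * so resets fire while 21 to 29 whole seconds remain.
	 */
	for (uint64_t remaining = 0; remaining <= 100; remaining++) {
		if (in_reset_window(remaining, 100)) {
			printf("reset window open at %" PRIu64 "s remaining\n", remaining);
		}
	}
	return 0;
}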
static int
work_fn(void *arg)
{
	uint64_t tsc_end;
	struct worker_thread *worker = (struct worker_thread *)arg;
	struct ns_worker_ctx *ns_ctx = NULL;

	printf("Starting thread on core %u\n", worker->lcore);

	/* Allocate a queue pair for each namespace. */
	ns_ctx = worker->ns_ctx;
	while (ns_ctx != NULL) {
		if (init_ns_worker_ctx(ns_ctx) != 0) {
			printf("ERROR: init_ns_worker_ctx() failed\n");
			return 1;
		}
		ns_ctx = ns_ctx->next;
	}

	tsc_end = rte_get_timer_cycles() + g_time_in_sec * g_tsc_rate;

	/* Submit initial I/O for each namespace. */
	ns_ctx = worker->ns_ctx;
	while (ns_ctx != NULL) {
		submit_io(ns_ctx, g_queue_depth);
		ns_ctx = ns_ctx->next;
	}

	while (1) {
		/*
		 * Check for completed I/O for each controller. A new
		 * I/O will be submitted in the io_complete callback
		 * to replace each I/O that is completed.
		 */
		ns_ctx = worker->ns_ctx;
		while (ns_ctx != NULL) {
			check_io(ns_ctx);
			ns_ctx = ns_ctx->next;
		}

		if (rte_get_timer_cycles() > tsc_end) {
			break;
		}
	}

	ns_ctx = worker->ns_ctx;
	while (ns_ctx != NULL) {
		drain_io(ns_ctx);
		cleanup_ns_worker_ctx(ns_ctx);
		ns_ctx = ns_ctx->next;
	}

	return 0;
}
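/*
 * Unlike the first variant, this one owns its queue-pair lifetime through
 * init_ns_worker_ctx() and cleanup_ns_worker_ctx(), so the thread body is
 * self-contained and can be handed straight to the DPDK launcher. A minimal
 * sketch of how such a worker function is typically dispatched across lcores
 * follows; the g_workers list, its field names, and launch_workers() are
 * assumptions for illustration, not the original source.
 */
#include <rte_launch.h>

struct ns_worker_ctx;			/* defined in the surrounding file */
static int work_fn(void *arg);		/* the function shown above */

/* Assumed worker list node: one entry per lcore, head on the main lcore. */
struct worker_thread {
	struct ns_worker_ctx	*ns_ctx;
	struct worker_thread	*next;
	unsigned		lcore;
};

static struct worker_thread *g_workers;	/* assumed global list head */

static int
launch_workers(void)
{
	struct worker_thread *worker = g_workers->next;
	int rc;

	/* Start work_fn() on every remote lcore... */
	while (worker != NULL) {
		rte_eal_remote_launch(work_fn, worker, worker->lcore);
		worker = worker->next;
	}

	/* ...run the main lcore's share in this thread, then reap the rest. */
	rc = work_fn(g_workers);
	rte_eal_mp_wait_lcore();
	return rc;
}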
static int
work_fn(void *arg)
{
	uint64_t tsc_end = rte_get_timer_cycles() + g_time_in_sec * g_tsc_rate;
	struct worker_thread *worker = (struct worker_thread *)arg;
	struct ns_entry *entry = NULL;

	printf("Starting thread on core %u\n", worker->lcore);

	nvme_register_io_thread();

	/* Submit initial I/O for each namespace. */
	entry = worker->namespaces;
	while (entry != NULL) {
		submit_io(entry, g_queue_depth);
		entry = entry->next;
	}

	while (1) {
		/*
		 * Check for completed I/O for each controller. A new
		 * I/O will be submitted in the io_complete callback
		 * to replace each I/O that is completed.
		 */
		entry = worker->namespaces;
		while (entry != NULL) {
			check_io(entry);
			entry = entry->next;
		}

		if (rte_get_timer_cycles() > tsc_end) {
			break;
		}
	}

	entry = worker->namespaces;
	while (entry != NULL) {
		drain_io(entry);
		entry = entry->next;
	}

	nvme_unregister_io_thread();

	return 0;
}
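/*
 * All three variants depend on the completion-driven resubmission pattern
 * named in the loop comment: the callback attached to each read or write
 * queues a replacement I/O as soon as one completes, which is what holds the
 * in-flight count at g_queue_depth. A minimal sketch of such a callback
 * against the current SPDK API follows; perf_task, submit_single_io(), and
 * the io_completed counter are assumed names, not the original source.
 */
#include <stdint.h>
#include <stdio.h>
#include "spdk/nvme.h"

/* Assumed per-namespace and per-I/O contexts, trimmed to what the callback
 * touches.
 */
struct ns_worker_ctx {
	uint64_t		io_completed;	/* assumed statistics counter */
};

struct perf_task {
	struct ns_worker_ctx	*ns_ctx;
	void			*buf;
};

static void submit_single_io(struct perf_task *task);	/* assumed helper */

/* Completion callback: record the finished I/O, then immediately resubmit
 * so the queue depth stays constant for the whole run.
 */
static void
io_complete(void *cb_arg, const struct spdk_nvme_cpl *cpl)
{
	struct perf_task *task = cb_arg;

	if (spdk_nvme_cpl_is_error(cpl)) {
		fprintf(stderr, "I/O failed\n");
	}

	task->ns_ctx->io_completed++;

	/* Replace the completed I/O to keep g_queue_depth I/Os in flight. */
	submit_single_io(task);
}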