static int work_fn(void *arg) { uint64_t tsc_end = rte_get_timer_cycles() + g_time_in_sec * g_tsc_rate; struct worker_thread *worker = (struct worker_thread *)arg; struct ns_worker_ctx *ns_ctx = NULL; printf("Starting thread on core %u\n", worker->lcore); if (nvme_register_io_thread() != 0) { fprintf(stderr, "nvme_register_io_thread() failed on core %u\n", worker->lcore); return -1; } /* Submit initial I/O for each namespace. */ ns_ctx = worker->ns_ctx; while (ns_ctx != NULL) { submit_io(ns_ctx, g_queue_depth); ns_ctx = ns_ctx->next; } while (1) { /* * Check for completed I/O for each controller. A new * I/O will be submitted in the io_complete callback * to replace each I/O that is completed. */ ns_ctx = worker->ns_ctx; while (ns_ctx != NULL) { check_io(ns_ctx); ns_ctx = ns_ctx->next; } if (rte_get_timer_cycles() > tsc_end) { break; } } ns_ctx = worker->ns_ctx; while (ns_ctx != NULL) { drain_io(ns_ctx); ns_ctx = ns_ctx->next; } nvme_unregister_io_thread(); return 0; }
static int work_fn(void *arg) { uint64_t tsc_end = rte_get_timer_cycles() + g_time_in_sec * g_tsc_rate; struct worker_thread *worker = (struct worker_thread *)arg; struct ns_entry *entry = NULL; printf("Starting thread on core %u\n", worker->lcore); nvme_register_io_thread(); /* Submit initial I/O for each namespace. */ entry = worker->namespaces; while (entry != NULL) { submit_io(entry, g_queue_depth); entry = entry->next; } while (1) { /* * Check for completed I/O for each controller. A new * I/O will be submitted in the io_complete callback * to replace each I/O that is completed. */ entry = worker->namespaces; while (entry != NULL) { check_io(entry); entry = entry->next; } if (rte_get_timer_cycles() > tsc_end) { break; } } entry = worker->namespaces; while (entry != NULL) { drain_io(entry); entry = entry->next; } nvme_unregister_io_thread(); return 0; }
/*
 * Tool entry point: initializes DPDK's EAL and the NVMe request mempool,
 * enumerates all NVMe-class PCI devices, attaches to each one that is bound
 * to a uio driver, exercises reservations on every attached controller, then
 * detaches and exits.
 *
 * Returns 0 on full success; 1 on fatal init failure (via exit); otherwise
 * the last sticky error code (e.g. 1 if any controller failed to attach,
 * or the nvme_register_io_thread() error).
 *
 * NOTE(review): argc/argv are unused — EAL args come from the static
 * ealargs table instead of the command line.
 */
int main(int argc, char **argv)
{
	struct pci_device_iterator	*pci_dev_iter;
	struct pci_device		*pci_dev;
	struct dev			*iter;
	struct pci_id_match		match;
	int				rc, i;

	/* Bring up DPDK with the canned argument list; fatal on failure. */
	rc = rte_eal_init(sizeof(ealargs) / sizeof(ealargs[0]),
			  (char **)(void *)(uintptr_t)ealargs);
	if (rc < 0) {
		fprintf(stderr, "could not initialize dpdk\n");
		exit(1);
	}

	/* Pool of 8192 nvme_request objects shared by the driver; 128-deep per-core cache. */
	request_mempool = rte_mempool_create("nvme_request", 8192,
					     nvme_request_size(), 128, 0,
					     NULL, NULL, NULL, NULL,
					     SOCKET_ID_ANY, 0);

	if (request_mempool == NULL) {
		fprintf(stderr, "could not initialize request mempool\n");
		exit(1);
	}

	pci_system_init();

	/* Match any vendor/device, filtered purely by the NVMe class code. */
	match.vendor_id =	PCI_MATCH_ANY;
	match.subvendor_id =	PCI_MATCH_ANY;
	match.subdevice_id =	PCI_MATCH_ANY;
	match.device_id =	PCI_MATCH_ANY;
	match.device_class =	NVME_CLASS_CODE;
	match.device_class_mask = 0xFFFFFF;

	pci_dev_iter = pci_id_match_iterator_create(&match);

	rc = 0;
	while ((pci_dev = pci_device_next(pci_dev_iter))) {
		struct nvme_controller *ctrlr;
		struct dev *dev;

		/* Skip devices still owned by a kernel driver — attaching would conflict. */
		if (pci_device_has_non_uio_driver(pci_dev)) {
			fprintf(stderr, "non-uio kernel driver attached to nvme\n");
			fprintf(stderr, " controller at pci bdf %d:%d:%d\n",
				pci_dev->bus, pci_dev->dev, pci_dev->func);
			fprintf(stderr, " skipping...\n");
			continue;
		}

		/* NOTE(review): pci_device_probe() return value is ignored — confirm intentional. */
		pci_device_probe(pci_dev);

		ctrlr = nvme_attach(pci_dev);
		if (ctrlr == NULL) {
			fprintf(stderr, "failed to attach to NVMe controller at PCI BDF %d:%d:%d\n",
				pci_dev->bus, pci_dev->dev, pci_dev->func);
			rc = 1;
			continue;
		}

		/* add to dev list */
		/* NOTE(review): no bounds check on num_devs — assumes devs[] is
		 * sized for the maximum controller count; verify its declaration. */
		dev = &devs[num_devs++];

		dev->pci_dev = pci_dev;
		dev->ctrlr = ctrlr;
	}

	pci_iterator_destroy(pci_dev_iter);

	if (num_devs) {
		rc = nvme_register_io_thread();
		if (rc != 0)
			return rc;
	}

	/* Run the reservation exercise on every attached controller. */
	foreach_dev(iter) {
		reserve_controller(iter->ctrlr, iter->pci_dev);
	}

	printf("Cleaning up...\n");

	for (i = 0; i < num_devs; i++) {
		struct dev *dev = &devs[i];

		nvme_detach(dev->ctrlr);
	}

	if (num_devs)
		nvme_unregister_io_thread();

	return rc;
}
// Master's main process. // Each time we take care of the pending // queue first, then we focus on the new cmd static void master_fn(void){ nvme_register_io_thread(); // Init pending task master_pending.head = NULL; master_pending.tail = NULL; master_pending.cnt = 0; // Init task buffer resource for (int i = 0; i < QUEUE_NUM; i++){ tasks[i] = rte_malloc(NULL, sizeof(struct perf_task), 0x200); tasks[i]->buf = rte_malloc(NULL, (f_maxsize+1000)*512, 0x200); for (int j = 0; j < ISSUE_BUF_SIZE; j++){ issue_buf[i].issue_queue[j].io_completed = 1; issue_buf[i].issue_queue[j].qid = i; } } //Begin timing. uint64_t tsc_start = rte_get_timer_cycles(); uint64_t pos = 0; while (pos < f_len){ //printf("%lu\n", pos); clear_issue(-1); clear_pending(); int target = scheduler(pos); if (target >= 0) master_issue(pos, target); if (target != -2){ pos += 1; if ((pos % 100000) == 0) printf("Master has (allocated && (issued || pending)) %lu commands\n", pos); } } printf("Master has issued all of the I/O commands\n"); // Clear all the pending instruction while (master_pending.cnt != 0) { clear_issue(-1); clear_pending(); } // Check out all the issued commands int flag = 1; while (flag){ flag = 0; clear_issue(-1); for (int i = 0; i < QUEUE_NUM; i++){ if (g_master->queue_depth[i]) flag = 1; } } //Stop timing. uint64_t tsc_end = rte_get_timer_cycles(); //Get the total time. double sec = (tsc_end - tsc_start) / (double)g_tsc_rate; printf("Stat of pending count: %lu\n", pending_count); //Output the result infomation. printf("Time: %lf seconds\n", sec); printf("Throughput: %lf MB/S\n", (double)f_totalblocks/2048/sec); printf("IOPS: %lf /S\n", (double)f_len/sec); for (int i = 0; i < QUEUE_NUM; i++){ rte_free(tasks[i]->buf); rte_free(tasks[i]); } nvme_unregister_io_thread(); }