/*
 * Verify that an I/O no larger than the controller's max transfer size
 * is submitted as a single request with no child requests.
 */
void
split_test(void)
{
	struct nvme_namespace	ns;
	struct nvme_controller	ctrlr;
	void			*buf;
	uint64_t		start_lba, actual_lba;
	uint32_t		block_count, actual_count;
	int			rc;

	/* 512-byte sectors, 128 KB max transfer, striping disabled. */
	prepare_for_test(&ns, &ctrlr, 512, 128 * 1024, 0);

	buf = malloc(512);
	start_lba = 0;
	block_count = 1;

	rc = nvme_ns_cmd_read(&ns, buf, start_lba, block_count, NULL, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT_FATAL(g_request != NULL);

	/* A single-sector read must not be split. */
	CU_ASSERT(g_request->num_children == 0);

	nvme_cmd_interpret_rw(&g_request->cmd, &actual_lba, &actual_count);
	CU_ASSERT(actual_lba == start_lba);
	CU_ASSERT(actual_count == block_count);

	free(buf);
	nvme_free_request(g_request);
}
static void submit_single_io(struct ns_entry *entry) { struct perf_task *task = NULL; uint64_t offset_in_ios; int rc; rte_mempool_get(task_pool, (void **)&task); task->entry = entry; if (g_is_random) { offset_in_ios = rand_r(&seed) % entry->size_in_ios; } else { offset_in_ios = entry->offset_in_ios++; if (entry->offset_in_ios == entry->size_in_ios) { entry->offset_in_ios = 0; } } if ((g_rw_percentage == 100) || (g_rw_percentage != 0 && ((rand_r(&seed) % 100) < g_rw_percentage))) { rc = nvme_ns_cmd_read(entry->ns, task->buf, offset_in_ios * entry->io_size_blocks, entry->io_size_blocks, io_complete, task); } else { rc = nvme_ns_cmd_write(entry->ns, task->buf, offset_in_ios * entry->io_size_blocks, entry->io_size_blocks, io_complete, task); } if (rc != 0) { fprintf(stderr, "starting I/O failed\n"); } entry->current_queue_depth++; }
/*
 * Submit one I/O on behalf of the given per-namespace worker context.
 * The target is either a libaio file descriptor or an NVMe namespace,
 * depending on the entry type; direction and offset follow the global
 * workload configuration.
 */
static void
submit_single_io(struct ns_worker_ctx *ns_ctx)
{
	struct perf_task *task = NULL;
	uint64_t offset_in_ios;
	int rc;
	struct ns_entry *entry = ns_ctx->entry;

	/* Task allocation failure is fatal for the benchmark. */
	if (rte_mempool_get(task_pool, (void **)&task) != 0) {
		fprintf(stderr, "task_pool rte_mempool_get failed\n");
		exit(1);
	}

	task->ns_ctx = ns_ctx;

	if (g_is_random) {
		offset_in_ios = rand_r(&seed) % entry->size_in_ios;
	} else {
		/* Sequential access: wrap around at end of namespace. */
		offset_in_ios = ns_ctx->offset_in_ios++;
		if (ns_ctx->offset_in_ios == entry->size_in_ios) {
			ns_ctx->offset_in_ios = 0;
		}
	}

	/* 100% reads, or a weighted coin flip when mixed. */
	if ((g_rw_percentage == 100) ||
	    (g_rw_percentage != 0 && ((rand_r(&seed) % 100) < g_rw_percentage))) {
#if HAVE_LIBAIO
		if (entry->type == ENTRY_TYPE_AIO_FILE) {
			rc = aio_submit(ns_ctx->ctx, &task->iocb, entry->u.aio.fd, IO_CMD_PREAD, task->buf,
					g_io_size_bytes, offset_in_ios * g_io_size_bytes, task);
		} else
#endif
		{
			rc = nvme_ns_cmd_read(entry->u.nvme.ns, task->buf, offset_in_ios * entry->io_size_blocks,
					      entry->io_size_blocks, io_complete, task);
		}
	} else {
#if HAVE_LIBAIO
		if (entry->type == ENTRY_TYPE_AIO_FILE) {
			rc = aio_submit(ns_ctx->ctx, &task->iocb, entry->u.aio.fd, IO_CMD_PWRITE, task->buf,
					g_io_size_bytes, offset_in_ios * g_io_size_bytes, task);
		} else
#endif
		{
			rc = nvme_ns_cmd_write(entry->u.nvme.ns, task->buf, offset_in_ios * entry->io_size_blocks,
					       entry->io_size_blocks, io_complete, task);
		}
	}

	if (rc != 0) {
		fprintf(stderr, "starting I/O failed\n");
	}

	ns_ctx->current_queue_depth++;
}
void split_test3(void) { struct nvme_namespace ns; struct nvme_controller ctrlr; struct nvme_request *child; void *payload; uint64_t lba, cmd_lba; uint32_t lba_count, cmd_lba_count; int rc; /* * Controller has max xfer of 128 KB (256 blocks). * Submit an I/O of 256 KB starting at LBA 10, which should be split * into two I/Os: * 1) LBA = 10, count = 256 blocks * 2) LBA = 266, count = 256 blocks */ prepare_for_test(&ns, &ctrlr, 512, 128 * 1024, 0); payload = malloc(256 * 1024); lba = 10; /* Start at an LBA that isn't aligned to the stripe size */ lba_count = (256 * 1024) / 512; rc = nvme_ns_cmd_read(&ns, payload, lba, lba_count, NULL, NULL); CU_ASSERT(rc == 0); CU_ASSERT_FATAL(g_request != NULL); CU_ASSERT_FATAL(g_request->num_children == 2); child = TAILQ_FIRST(&g_request->children); TAILQ_REMOVE(&g_request->children, child, child_tailq); nvme_cmd_interpret_rw(&child->cmd, &cmd_lba, &cmd_lba_count); CU_ASSERT(child->num_children == 0); CU_ASSERT(child->payload_size == 128 * 1024); CU_ASSERT(cmd_lba == 10); CU_ASSERT(cmd_lba_count == 256); child = TAILQ_FIRST(&g_request->children); TAILQ_REMOVE(&g_request->children, child, child_tailq); nvme_cmd_interpret_rw(&child->cmd, &cmd_lba, &cmd_lba_count); CU_ASSERT(child->num_children == 0); CU_ASSERT(child->payload_size == 128 * 1024); CU_ASSERT(cmd_lba == 266); CU_ASSERT(cmd_lba_count == 256); CU_ASSERT(TAILQ_EMPTY(&g_request->children)); free(payload); nvme_free_request(g_request); }
void split_test2(void) { struct nvme_namespace ns; struct nvme_controller ctrlr; struct nvme_request *child; void *payload; uint64_t lba, cmd_lba; uint32_t lba_count, cmd_lba_count; int rc; /* * Controller has max xfer of 128 KB (256 blocks). * Submit an I/O of 256 KB starting at LBA 0, which should be split * on the max I/O boundary into two I/Os of 128 KB. */ prepare_for_test(&ns, &ctrlr, 512, 128 * 1024, 0); payload = malloc(256 * 1024); lba = 0; lba_count = (256 * 1024) / 512; rc = nvme_ns_cmd_read(&ns, payload, lba, lba_count, NULL, NULL); CU_ASSERT(rc == 0); CU_ASSERT_FATAL(g_request != NULL); CU_ASSERT(g_request->num_children == 2); child = TAILQ_FIRST(&g_request->children); TAILQ_REMOVE(&g_request->children, child, child_tailq); nvme_cmd_interpret_rw(&child->cmd, &cmd_lba, &cmd_lba_count); CU_ASSERT(child->num_children == 0); CU_ASSERT(child->payload_size == 128 * 1024); CU_ASSERT(cmd_lba == 0); CU_ASSERT(cmd_lba_count == 256); /* 256 * 512 byte blocks = 128 KB */ child = TAILQ_FIRST(&g_request->children); TAILQ_REMOVE(&g_request->children, child, child_tailq); nvme_cmd_interpret_rw(&child->cmd, &cmd_lba, &cmd_lba_count); CU_ASSERT(child->num_children == 0); CU_ASSERT(child->payload_size == 128 * 1024); CU_ASSERT(cmd_lba == 256); CU_ASSERT(cmd_lba_count == 256); CU_ASSERT(TAILQ_EMPTY(&g_request->children)); free(payload); nvme_free_request(g_request); }
static void nvme_ns_io_test_cb(void *arg, const struct nvme_completion *cpl) { struct nvme_io_test_thread *tth = arg; struct timeval t; tth->io_completed++; if (nvme_completion_is_error(cpl)) { printf("%s: error occurred\n", __func__); wakeup_one(tth); return; } getmicrouptime(&t); timevalsub(&t, &tth->start); if (t.tv_sec >= tth->time) { wakeup_one(tth); return; } switch (tth->opc) { case NVME_OPC_WRITE: nvme_ns_cmd_write(tth->ns, tth->buf, tth->idx * 2048, tth->size/nvme_ns_get_sector_size(tth->ns), nvme_ns_io_test_cb, tth); break; case NVME_OPC_READ: nvme_ns_cmd_read(tth->ns, tth->buf, tth->idx * 2048, tth->size/nvme_ns_get_sector_size(tth->ns), nvme_ns_io_test_cb, tth); break; default: break; } }
void split_test4(void) { struct nvme_namespace ns; struct nvme_controller ctrlr; struct nvme_request *child; void *payload; uint64_t lba, cmd_lba; uint32_t lba_count, cmd_lba_count; int rc; /* * Controller has max xfer of 128 KB (256 blocks) and a stripe size of 128 KB. * (Same as split_test3 except with driver-assisted striping enabled.) * Submit an I/O of 256 KB starting at LBA 10, which should be split * into three I/Os: * 1) LBA = 10, count = 246 blocks (less than max I/O size to align to stripe size) * 2) LBA = 256, count = 256 blocks (aligned to stripe size and max I/O size) * 3) LBA = 512, count = 10 blocks (finish off the remaining I/O size) */ prepare_for_test(&ns, &ctrlr, 512, 128 * 1024, 128 * 1024); payload = malloc(256 * 1024); lba = 10; /* Start at an LBA that isn't aligned to the stripe size */ lba_count = (256 * 1024) / 512; rc = nvme_ns_cmd_read(&ns, payload, lba, lba_count, NULL, NULL); CU_ASSERT(rc == 0); CU_ASSERT_FATAL(g_request != NULL); CU_ASSERT_FATAL(g_request->num_children == 3); child = TAILQ_FIRST(&g_request->children); TAILQ_REMOVE(&g_request->children, child, child_tailq); nvme_cmd_interpret_rw(&child->cmd, &cmd_lba, &cmd_lba_count); CU_ASSERT(child->num_children == 0); CU_ASSERT(child->payload_size == (256 - 10) * 512); CU_ASSERT(cmd_lba == 10); CU_ASSERT(cmd_lba_count == 256 - 10); child = TAILQ_FIRST(&g_request->children); TAILQ_REMOVE(&g_request->children, child, child_tailq); nvme_cmd_interpret_rw(&child->cmd, &cmd_lba, &cmd_lba_count); CU_ASSERT(child->num_children == 0); CU_ASSERT(child->payload_size == 128 * 1024); CU_ASSERT(cmd_lba == 256); CU_ASSERT(cmd_lba_count == 256); child = TAILQ_FIRST(&g_request->children); TAILQ_REMOVE(&g_request->children, child, child_tailq); nvme_cmd_interpret_rw(&child->cmd, &cmd_lba, &cmd_lba_count); CU_ASSERT(child->num_children == 0); CU_ASSERT(child->payload_size == 10 * 512); CU_ASSERT(cmd_lba == 512); CU_ASSERT(cmd_lba_count == 10); CU_ASSERT(TAILQ_EMPTY(&g_request->children)); 
free(payload); nvme_free_request(g_request); }