/*
 * Release the per-namespace I/O context pointed to by the global g_ns.
 * AIO-file entries tear down their libaio context and event array (only
 * when built with libaio support); NVMe entries give back their qpair.
 */
static void
cleanup_ns_worker_ctx(void)
{
	if (g_ns->type != ENTRY_TYPE_AIO_FILE) {
		/* NVMe namespace: just release the I/O qpair. */
		spdk_nvme_ctrlr_free_io_qpair(g_ns->u.nvme.qpair);
		return;
	}

#ifdef HAVE_LIBAIO
	/* AIO-file namespace: destroy the kernel AIO context and the
	 * completion-event array allocated alongside it. */
	io_destroy(g_ns->u.aio.ctx);
	free(g_ns->u.aio.events);
#endif
}
/*
 * Tear down all global state set up by the u2 test: the I/O qpair, the
 * controller attachment, and the EAL argument strings that were
 * heap-allocated when a core mask and/or a memory-channel count were
 * supplied on the command line.
 *
 * Order matters: the qpair must be freed before the controller it
 * belongs to is detached.
 */
static void
u2_cleanup(void)
{
	if (u2_qpair != NULL) {
		spdk_nvme_ctrlr_free_io_qpair(u2_qpair);
	}

	if (u2_ctrlr != NULL) {
		spdk_nvme_detach(u2_ctrlr);
	}

	/* ealargs[1] was strdup'd only when a core mask was given. */
	if (core_mask != NULL) {
		free(ealargs[1]);
	}

	/* ealargs[2] was allocated only for a valid channel count (2..4). */
	if (mem_chn >= 2 && mem_chn <= 4) {
		free(ealargs[2]);
	}
}
static void hello_world(void) { struct ns_entry *ns_entry; struct hello_world_sequence sequence; int rc; ns_entry = g_namespaces; while (ns_entry != NULL) { /* * Allocate an I/O qpair that we can use to submit read/write requests * to namespaces on the controller. NVMe controllers typically support * many qpairs per controller. Any I/O qpair allocated for a controller * can submit I/O to any namespace on that controller. * * The SPDK NVMe driver provides no synchronization for qpair accesses - * the application must ensure only a single thread submits I/O to a * qpair, and that same thread must also check for completions on that * qpair. This enables extremely efficient I/O processing by making all * I/O operations completely lockless. */ ns_entry->qpair = spdk_nvme_ctrlr_alloc_io_qpair(ns_entry->ctrlr, 0); if (ns_entry->qpair == NULL) { printf("ERROR: init_ns_worker_ctx() failed\n"); return; } /* * Use DPDK rte_zmalloc to allocate a 4KB zeroed buffer. This memory * will be allocated from 2MB hugepages and will be pinned. These are * both requirements for data buffers used for SPDK NVMe I/O operations. */ sequence.buf = rte_zmalloc(NULL, 0x1000, 0x1000); sequence.is_completed = 0; sequence.ns_entry = ns_entry; /* * Print "Hello world!" to sequence.buf. We will write this data to LBA * 0 on the namespace, and then later read it back into a separate buffer * to demonstrate the full I/O path. */ sprintf(sequence.buf, "Hello world!\n"); /* * Write the data buffer to LBA 0 of this namespace. "write_complete" and * "&sequence" are specified as the completion callback function and * argument respectively. write_complete() will be called with the * value of &sequence as a parameter when the write I/O is completed. * This allows users to potentially specify different completion * callback routines for each I/O, as well as pass a unique handle * as an argument so the application knows which I/O has completed. 
* * Note that the SPDK NVMe driver will only check for completions * when the application calls spdk_nvme_qpair_process_completions(). * It is the responsibility of the application to trigger the polling * process. */ rc = spdk_nvme_ns_cmd_write(ns_entry->ns, ns_entry->qpair, sequence.buf, 0, /* LBA start */ 1, /* number of LBAs */ write_complete, &sequence, 0); if (rc != 0) { fprintf(stderr, "starting write I/O failed\n"); exit(1); } /* * Poll for completions. 0 here means process all available completions. * In certain usage models, the caller may specify a positive integer * instead of 0 to signify the maximum number of completions it should * process. This function will never block - if there are no * completions pending on the specified qpair, it will return immediately. * * When the write I/O completes, write_complete() will submit a new I/O * to read LBA 0 into a separate buffer, specifying read_complete() as its * completion routine. When the read I/O completes, read_complete() will * print the buffer contents and set sequence.is_completed = 1. That will * break this loop and then exit the program. */ while (!sequence.is_completed) { spdk_nvme_qpair_process_completions(ns_entry->qpair, 0); } /* * Free the I/O qpair. This typically is done when an application exits. * But SPDK does support freeing and then reallocating qpairs during * operation. It is the responsibility of the caller to ensure all * pending I/O are completed before trying to free the qpair. */ spdk_nvme_ctrlr_free_io_qpair(ns_entry->qpair); ns_entry = ns_entry->next; } }
/*
 * End-to-end data-protection test: write a patterned buffer to namespace 1
 * of @dev, read it back, and compare.  @build_io_fn fills in the request's
 * LBA range, PI flags, and metadata layout; a zero lba_count from it means
 * "this controller can't exercise this case" and the test is bypassed.
 *
 * Returns 0 on pass/bypass, -1 on any failure.
 *
 * Refactored to a single goto-based cleanup path (the qpair-free +
 * request-free pair was previously duplicated at six exit points), which
 * removes the copy/paste leak risk without changing any observable
 * behavior: every message, return value, and call order is preserved.
 */
static int
write_read_e2e_dp_tests(struct dev *dev, nvme_build_io_req_fn_t build_io_fn,
			const char *test_name)
{
	int rc = 0;
	uint32_t lba_count;
	uint32_t io_flags = 0;
	struct io_request *req;
	struct spdk_nvme_ns *ns;
	struct spdk_nvme_qpair *qpair;
	const struct spdk_nvme_ns_data *nsdata;

	ns = spdk_nvme_ctrlr_get_ns(dev->ctrlr, 1);
	if (!ns) {
		fprintf(stderr, "Null namespace\n");
		return 0;
	}

	/* Skip controllers without end-to-end protection-information support. */
	if (!(spdk_nvme_ns_get_flags(ns) & SPDK_NVME_NS_DPS_PI_SUPPORTED)) {
		return 0;
	}

	nsdata = spdk_nvme_ns_get_data(ns);
	if (!nsdata || !spdk_nvme_ns_get_sector_size(ns)) {
		fprintf(stderr, "Empty nsdata or wrong sector size\n");
		return 0;
	}

	req = rte_zmalloc(NULL, sizeof(*req), 0);
	if (!req) {
		fprintf(stderr, "Allocate request failed\n");
		return 0;
	}

	/* IO parameters setting; zero lba_count means the builder bypassed
	 * this case for the current controller/namespace. */
	lba_count = build_io_fn(ns, req, &io_flags);
	if (!lba_count) {
		fprintf(stderr, "%s: %s bypass the test case\n", dev->name, test_name);
		free_req(req);
		return 0;
	}

	qpair = spdk_nvme_ctrlr_alloc_io_qpair(dev->ctrlr, 0);
	if (!qpair) {
		free_req(req);
		return -1;
	}

	/* Fill the data buffer with the known pattern, then write it.
	 * Extended-LBA formats interleave metadata in the contig buffer;
	 * otherwise metadata travels in its own buffer via _with_md. */
	ns_data_buffer_reset(ns, req, DATA_PATTERN);
	if (req->use_extended_lba) {
		rc = spdk_nvme_ns_cmd_write(ns, qpair, req->contig, req->lba, lba_count,
					    io_complete, req, io_flags);
	} else {
		rc = spdk_nvme_ns_cmd_write_with_md(ns, qpair, req->contig, req->metadata,
						    req->lba, lba_count,
						    io_complete, req, io_flags,
						    req->apptag_mask, req->apptag);
	}
	if (rc != 0) {
		fprintf(stderr, "%s: %s write submit failed\n", dev->name, test_name);
		rc = -1;
		goto out;
	}

	/* Poll until io_complete() records a result: 1 = success, else error. */
	io_complete_flag = 0;
	while (!io_complete_flag) {
		spdk_nvme_qpair_process_completions(qpair, 1);
	}
	if (io_complete_flag != 1) {
		fprintf(stderr, "%s: %s write exec failed\n", dev->name, test_name);
		rc = -1;
		goto out;
	}

	/* reset completion flag */
	io_complete_flag = 0;

	/* Zero the buffer, then read the LBAs back into it. */
	ns_data_buffer_reset(ns, req, 0);
	if (req->use_extended_lba) {
		rc = spdk_nvme_ns_cmd_read(ns, qpair, req->contig, req->lba, lba_count,
					   io_complete, req, io_flags);
	} else {
		rc = spdk_nvme_ns_cmd_read_with_md(ns, qpair, req->contig, req->metadata,
						   req->lba, lba_count,
						   io_complete, req, io_flags,
						   req->apptag_mask, req->apptag);
	}
	if (rc != 0) {
		fprintf(stderr, "%s: %s read failed\n", dev->name, test_name);
		rc = -1;
		goto out;
	}

	while (!io_complete_flag) {
		spdk_nvme_qpair_process_completions(qpair, 1);
	}
	if (io_complete_flag != 1) {
		fprintf(stderr, "%s: %s read failed\n", dev->name, test_name);
		rc = -1;
		goto out;
	}

	/* The read data must match the pattern written above. */
	rc = ns_data_buffer_compare(ns, req, DATA_PATTERN);
	if (rc < 0) {
		fprintf(stderr, "%s: %s write/read success, but memcmp Failed\n",
			dev->name, test_name);
		rc = -1;
		goto out;
	}

	fprintf(stdout, "%s: %s test passed\n", dev->name, test_name);

out:
	spdk_nvme_ctrlr_free_io_qpair(qpair);
	free_req(req);
	return rc;
}