static void
test_ctrlr_failed(void)
{
        struct nvme_qpair qpair = {};
        struct nvme_request *req;
        struct spdk_nvme_ctrlr ctrlr = {};
        struct spdk_nvme_registers regs = {};
        char payload[4096];

        prepare_submit_request_test(&qpair, &ctrlr, &regs);

        req = nvme_allocate_request_contig(payload, sizeof(payload),
                                           expected_failure_callback, NULL);
        SPDK_CU_ASSERT_FATAL(req != NULL);

        /*
         * Disable the queue and set the controller to failed.
         * Set the controller to resetting so that the qpair won't get re-enabled.
         */
        qpair.is_enabled = false;
        ctrlr.is_failed = true;
        ctrlr.is_resetting = true;

        outbuf[0] = '\0';

        CU_ASSERT(qpair.sq_tail == 0);

        nvme_qpair_submit_request(&qpair, req);

        CU_ASSERT(qpair.sq_tail == 0);

        /* Assert that command/completion data was printed to log. */
        CU_ASSERT(strlen(outbuf) > 0);

        cleanup_submit_request_test(&qpair);
}
int
spdk_nvme_ctrlr_cmd_get_log_page(struct spdk_nvme_ctrlr *ctrlr, uint8_t log_page,
                                 uint32_t nsid, void *payload, uint32_t payload_size,
                                 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
        struct nvme_request *req;
        struct spdk_nvme_cmd *cmd;

        nvme_mutex_lock(&ctrlr->ctrlr_lock);
        req = nvme_allocate_request_contig(payload, payload_size, cb_fn, cb_arg);
        if (req == NULL) {
                nvme_mutex_unlock(&ctrlr->ctrlr_lock);
                return ENOMEM;
        }

        cmd = &req->cmd;
        cmd->opc = SPDK_NVME_OPC_GET_LOG_PAGE;
        cmd->nsid = nsid;
        /* NUMD: zero-based count of dwords to transfer, in bits 16 and up of CDW10. */
        cmd->cdw10 = ((payload_size / sizeof(uint32_t)) - 1) << 16;
        cmd->cdw10 |= log_page;

        nvme_ctrlr_submit_admin_request(ctrlr, req);

        nvme_mutex_unlock(&ctrlr->ctrlr_lock);
        return 0;
}
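/*
 * A minimal usage sketch, not part of the driver source: fetching the SMART /
 * health information log page for the whole controller. It assumes <stdio.h>
 * and the SPDK NVMe headers are in scope, `ctrlr` is an initialized controller
 * handle, and `health_log_done` / `g_health_page` / `fetch_health_log` are
 * hypothetical names introduced here.
 */
static struct spdk_nvme_health_information_page g_health_page;

static void
health_log_done(void *cb_arg, const struct spdk_nvme_cpl *cpl)
{
        struct spdk_nvme_health_information_page *page = cb_arg;

        if (!spdk_nvme_cpl_is_error(cpl)) {
                /* page->temperature is the composite temperature, in Kelvin. */
                printf("composite temperature: %u K\n", page->temperature);
        }
}

static int
fetch_health_log(struct spdk_nvme_ctrlr *ctrlr)
{
        /* SPDK_NVME_GLOBAL_NS_TAG (0xFFFFFFFF) selects the controller-wide log. */
        return spdk_nvme_ctrlr_cmd_get_log_page(ctrlr, SPDK_NVME_LOG_HEALTH_INFORMATION,
                                                SPDK_NVME_GLOBAL_NS_TAG, &g_health_page,
                                                sizeof(g_health_page), health_log_done,
                                                &g_health_page);
}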
int
spdk_nvme_ns_cmd_reservation_acquire(struct spdk_nvme_ns *ns,
                                     struct spdk_nvme_reservation_acquire_data *payload,
                                     bool ignore_key,
                                     enum spdk_nvme_reservation_acquire_action action,
                                     enum spdk_nvme_reservation_type type,
                                     spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
        struct nvme_request *req;
        struct spdk_nvme_cmd *cmd;

        req = nvme_allocate_request_contig(payload,
                                           sizeof(struct spdk_nvme_reservation_acquire_data),
                                           cb_fn, cb_arg);
        if (req == NULL) {
                return ENOMEM;
        }

        cmd = &req->cmd;
        cmd->opc = SPDK_NVME_OPC_RESERVATION_ACQUIRE;
        cmd->nsid = ns->id;

        /* Bits 0-2 */
        cmd->cdw10 = action;
        /* Bit 3 */
        cmd->cdw10 |= ignore_key ? 1 << 3 : 0;
        /* Bits 8-15 */
        cmd->cdw10 |= (uint32_t)type << 8;

        nvme_ctrlr_submit_io_request(ns->ctrlr, req);

        return 0;
}
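/*
 * A minimal usage sketch, not part of the driver source: acquiring a
 * write-exclusive reservation. Assumes `ns` is an active namespace handle,
 * `resv_done` is a caller-supplied completion callback, and the host's current
 * reservation key is 0xABCD; `acquire_write_exclusive` is a hypothetical
 * helper. The acquire data must stay valid until the command completes, hence
 * the static storage.
 */
static int
acquire_write_exclusive(struct spdk_nvme_ns *ns, spdk_nvme_cmd_cb resv_done)
{
        static struct spdk_nvme_reservation_acquire_data acquire_data;

        acquire_data.crkey = 0xABCD;    /* current reservation key */
        acquire_data.prkey = 0;         /* preempt key is unused for a plain acquire */

        return spdk_nvme_ns_cmd_reservation_acquire(ns, &acquire_data,
                                                    false, /* do not ignore the key */
                                                    SPDK_NVME_RESERVE_ACQUIRE,
                                                    SPDK_NVME_RESERVE_WRITE_EXCLUSIVE,
                                                    resv_done, NULL);
}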
static void
test4(void)
{
        struct nvme_qpair qpair = {};
        struct nvme_request *req;
        struct spdk_nvme_ctrlr ctrlr = {};
        struct spdk_nvme_registers regs = {};
        char payload[4096];

        prepare_submit_request_test(&qpair, &ctrlr, &regs);

        req = nvme_allocate_request_contig(payload, sizeof(payload),
                                           expected_failure_callback, NULL);
        SPDK_CU_ASSERT_FATAL(req != NULL);

        /*
         * Force vtophys to return a failure. This should
         * result in the nvme_qpair manually failing
         * the request with error status to signify
         * a bad payload buffer.
         */
        fail_vtophys = true;
        outbuf[0] = '\0';

        CU_ASSERT(qpair.sq_tail == 0);

        nvme_qpair_submit_request(&qpair, req);

        CU_ASSERT(qpair.sq_tail == 0);

        /* Assert that command/completion data was printed to log. */
        CU_ASSERT(strlen(outbuf) > 0);

        cleanup_submit_request_test(&qpair);
}
int
spdk_nvme_ns_cmd_reservation_report(struct spdk_nvme_ns *ns, void *payload,
                                    uint32_t len, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
        uint32_t num_dwords;
        struct nvme_request *req;
        struct spdk_nvme_cmd *cmd;

        if (len == 0 || len % 4) {
                return EINVAL;
        }
        num_dwords = len / 4;

        req = nvme_allocate_request_contig(payload, len, cb_fn, cb_arg);
        if (req == NULL) {
                return ENOMEM;
        }

        cmd = &req->cmd;
        cmd->opc = SPDK_NVME_OPC_RESERVATION_REPORT;
        cmd->nsid = ns->id;

        /* NUMD is a zero-based dword count per the NVMe spec. */
        cmd->cdw10 = num_dwords - 1;

        nvme_ctrlr_submit_io_request(ns->ctrlr, req);

        return 0;
}
int
spdk_nvme_ns_cmd_deallocate(struct spdk_nvme_ns *ns, void *payload,
                            uint16_t num_ranges, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
        struct nvme_request *req;
        struct spdk_nvme_cmd *cmd;

        if (num_ranges == 0 || num_ranges > SPDK_NVME_DATASET_MANAGEMENT_MAX_RANGES) {
                return EINVAL;
        }

        req = nvme_allocate_request_contig(payload,
                                           num_ranges * sizeof(struct spdk_nvme_dsm_range),
                                           cb_fn, cb_arg);
        if (req == NULL) {
                return ENOMEM;
        }

        cmd = &req->cmd;
        cmd->opc = SPDK_NVME_OPC_DATASET_MANAGEMENT;
        cmd->nsid = ns->id;

        /* TODO: create a delete command data structure */
        cmd->cdw10 = num_ranges - 1;
        cmd->cdw11 = SPDK_NVME_DSM_ATTR_DEALLOCATE;

        nvme_ctrlr_submit_io_request(ns->ctrlr, req);

        return 0;
}
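/*
 * A minimal usage sketch, not part of the driver source: deallocating
 * (trimming) a single LBA range. Assumes <string.h> and the SPDK NVMe headers
 * are in scope, `ns` is an active namespace, and `dealloc_done` is a
 * caller-supplied completion callback; `trim_first_1024_blocks` is a
 * hypothetical helper. The range array must remain valid until the command
 * completes, so it is kept in static storage here.
 */
static int
trim_first_1024_blocks(struct spdk_nvme_ns *ns, spdk_nvme_cmd_cb dealloc_done)
{
        static struct spdk_nvme_dsm_range range;

        memset(&range, 0, sizeof(range));
        range.starting_lba = 0;  /* first LBA of the range */
        range.length = 1024;     /* range length, in logical blocks */

        return spdk_nvme_ns_cmd_deallocate(ns, &range, 1, dealloc_done, NULL);
}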
int
spdk_nvme_ns_cmd_reservation_register(struct spdk_nvme_ns *ns,
                                      struct spdk_nvme_reservation_register_data *payload,
                                      bool ignore_key,
                                      enum spdk_nvme_reservation_register_action action,
                                      enum spdk_nvme_reservation_register_cptpl cptpl,
                                      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
        struct nvme_request *req;
        struct spdk_nvme_cmd *cmd;

        req = nvme_allocate_request_contig(payload,
                                           sizeof(struct spdk_nvme_reservation_register_data),
                                           cb_fn, cb_arg);
        if (req == NULL) {
                return ENOMEM;
        }

        cmd = &req->cmd;
        cmd->opc = SPDK_NVME_OPC_RESERVATION_REGISTER;
        cmd->nsid = ns->id;

        /* Bits 0-2 */
        cmd->cdw10 = action;
        /* Bit 3 */
        cmd->cdw10 |= ignore_key ? 1 << 3 : 0;
        /* Bits 30-31 */
        cmd->cdw10 |= (uint32_t)cptpl << 30;

        nvme_ctrlr_submit_io_request(ns->ctrlr, req);

        return 0;
}
struct nvme_request *
nvme_allocate_request(const struct nvme_payload *payload, uint32_t payload_size,
                      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
        struct nvme_request *req;

        req = spdk_mempool_get(_g_nvme_driver.request_mempool);
        if (req == NULL) {
                return NULL;
        }

        /*
         * Only memset up to (but not including) the children
         * TAILQ_ENTRY. children, and following members, are
         * only used as part of I/O splitting so we avoid
         * memsetting them until it is actually needed.
         * They will be initialized in nvme_request_add_child()
         * if the request is split.
         */
        memset(req, 0, offsetof(struct nvme_request, children));
        req->cb_fn = cb_fn;
        req->cb_arg = cb_arg;
        req->payload = *payload;
        req->payload_size = payload_size;
        req->pid = getpid();

        return req;
}

struct nvme_request *
nvme_allocate_request_contig(void *buffer, uint32_t payload_size,
                             spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
        struct nvme_payload payload;

        payload.type = NVME_PAYLOAD_TYPE_CONTIG;
        payload.u.contig = buffer;

        return nvme_allocate_request(&payload, payload_size, cb_fn, cb_arg);
}

struct nvme_request *
nvme_allocate_request_null(spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
        return nvme_allocate_request_contig(NULL, 0, cb_fn, cb_arg);
}

void
nvme_free_request(struct nvme_request *req)
{
        spdk_mempool_put(_g_nvme_driver.request_mempool, req);
}

void
nvme_request_remove_child(struct nvme_request *parent, struct nvme_request *child)
{
        parent->num_children--;
        TAILQ_REMOVE(&parent->children, child, child_tailq);
}

int
nvme_transport_qpair_enable(struct spdk_nvme_qpair *qpair)
{
        return 0;
}

int
nvme_transport_qpair_disable(struct spdk_nvme_qpair *qpair)
{
        return 0;
}

int
nvme_transport_qpair_fail(struct spdk_nvme_qpair *qpair)
{
        return 0;
}

int
nvme_transport_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
{
        // TODO
        return 0;
}

int32_t
nvme_transport_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
{
        // TODO
        return 0;
}

static void
prepare_submit_request_test(struct spdk_nvme_qpair *qpair, struct spdk_nvme_ctrlr *ctrlr)
{
        memset(ctrlr, 0, sizeof(*ctrlr));
        ctrlr->free_io_qids = NULL;
        TAILQ_INIT(&ctrlr->active_io_qpairs);
        TAILQ_INIT(&ctrlr->active_procs);
        nvme_qpair_construct(qpair, 1, 128, ctrlr, 0);

        ut_fail_vtophys = false;
}

static void
cleanup_submit_request_test(struct spdk_nvme_qpair *qpair)
{
}

#if 0 /* TODO: move to PCIe-specific unit test */
static void
ut_insert_cq_entry(struct spdk_nvme_qpair *qpair, uint32_t slot)
{
        struct nvme_request *req;
        struct nvme_tracker *tr;
        struct spdk_nvme_cpl *cpl;

        req = spdk_mempool_get(_g_nvme_driver.request_mempool);
        SPDK_CU_ASSERT_FATAL(req != NULL);
        memset(req, 0, sizeof(*req));

        tr = TAILQ_FIRST(&qpair->free_tr);
        TAILQ_REMOVE(&qpair->free_tr, tr, tq_list); /* remove tr from free_tr */
        TAILQ_INSERT_HEAD(&qpair->outstanding_tr, tr, tq_list);
        req->cmd.cid = tr->cid;
        tr->req = req;
        qpair->tr[tr->cid].active = true;

        cpl = &qpair->cpl[slot];
        cpl->status.p = qpair->phase;
        cpl->cid = tr->cid;
}
#endif

static void
expected_success_callback(void *arg, const struct spdk_nvme_cpl *cpl)
{
        CU_ASSERT(!spdk_nvme_cpl_is_error(cpl));
}

static void
expected_failure_callback(void *arg, const struct spdk_nvme_cpl *cpl)
{
        CU_ASSERT(spdk_nvme_cpl_is_error(cpl));
}

static void
test3(void)
{
        struct spdk_nvme_qpair qpair = {};
        struct nvme_request *req;
        struct spdk_nvme_ctrlr ctrlr = {};

        prepare_submit_request_test(&qpair, &ctrlr);

        req = nvme_allocate_request_null(expected_success_callback, NULL);
        SPDK_CU_ASSERT_FATAL(req != NULL);

        CU_ASSERT(nvme_qpair_submit_request(&qpair, req) == 0);

        nvme_free_request(req);
        cleanup_submit_request_test(&qpair);
}

#if 0 /* TODO: move to PCIe-specific unit test */
static void
test4(void)
{
        struct spdk_nvme_qpair qpair = {};
        struct nvme_request *req;
        struct spdk_nvme_ctrlr ctrlr = {};
        char payload[4096];

        prepare_submit_request_test(&qpair, &ctrlr);

        req = nvme_allocate_request_contig(payload, sizeof(payload),
                                           expected_failure_callback, NULL);
        SPDK_CU_ASSERT_FATAL(req != NULL);

        /*
         * Force vtophys to return a failure. This should
         * result in the nvme_qpair manually failing
         * the request with error status to signify
         * a bad payload buffer.
         */
        ut_fail_vtophys = true;

        CU_ASSERT(qpair.sq_tail == 0);

        CU_ASSERT(nvme_qpair_submit_request(&qpair, req) != 0);

        CU_ASSERT(qpair.sq_tail == 0);

        cleanup_submit_request_test(&qpair);
}
#endif
/**
 * Allocate a request as well as a physically contiguous buffer to copy to/from the user's buffer.
 *
 * This is intended for use in non-fast-path functions (admin commands, reservations, etc.)
 * where the overhead of a copy is not a problem.
 */
struct nvme_request *
nvme_allocate_request_user_copy(void *buffer, uint32_t payload_size,
                                spdk_nvme_cmd_cb cb_fn, void *cb_arg, bool host_to_controller)
{
        struct nvme_request *req;
        void *contig_buffer = NULL;
        uint64_t phys_addr;

        if (buffer && payload_size) {
                contig_buffer = nvme_malloc(payload_size, 4096, &phys_addr);
                if (!contig_buffer) {
                        return NULL;
                }

                if (host_to_controller) {
                        memcpy(contig_buffer, buffer, payload_size);
                }
        }

        req = nvme_allocate_request_contig(contig_buffer, payload_size,
                                           nvme_user_copy_cmd_complete, NULL);
        if (!req) {
                /* Free the bounce buffer we allocated, not the caller's buffer. */
                nvme_free(contig_buffer);
                return NULL;
        }

        req->user_cb_fn = cb_fn;
        req->user_cb_arg = cb_arg;
        req->user_buffer = buffer;
        req->cb_arg = req;

        return req;
}
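/*
 * A minimal usage sketch, not part of the driver source, assuming
 * nvme_user_copy_cmd_complete behaves as wired up above: since req->cb_arg
 * points back at the request itself, the completion handler can recover the
 * bounce buffer from req->payload, copy it into req->user_buffer for
 * controller-to-host transfers, free it, and only then invoke the user's
 * original user_cb_fn/user_cb_arg pair. `read_error_log_into_user_buffer` is a
 * hypothetical helper; the driver's own admin helpers also take
 * ctrlr->ctrlr_lock, omitted here for brevity.
 */
static int
read_error_log_into_user_buffer(struct spdk_nvme_ctrlr *ctrlr, void *buffer, uint32_t size,
                                spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
        struct nvme_request *req;

        /* false = controller-to-host transfer: skip the initial copy-in. */
        req = nvme_allocate_request_user_copy(buffer, size, cb_fn, cb_arg, false);
        if (req == NULL) {
                return ENOMEM;
        }

        req->cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
        req->cmd.cdw10 = ((size / 4) - 1) << 16 | SPDK_NVME_LOG_ERROR;

        nvme_ctrlr_submit_admin_request(ctrlr, req);
        return 0;
}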
int
nvme_ctrlr_cmd_identify_controller(struct spdk_nvme_ctrlr *ctrlr, void *payload,
                                   spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
        struct nvme_request *req;
        struct spdk_nvme_cmd *cmd;

        req = nvme_allocate_request_contig(payload, sizeof(struct spdk_nvme_ctrlr_data),
                                           cb_fn, cb_arg);
        if (req == NULL) {
                return ENOMEM;
        }

        cmd = &req->cmd;
        cmd->opc = SPDK_NVME_OPC_IDENTIFY;

        /*
         * TODO: create an identify command data structure, which
         * includes this CNS bit in cdw10.
         */
        cmd->cdw10 = 1;

        nvme_ctrlr_submit_admin_request(ctrlr, req);

        return 0;
}
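/*
 * A minimal usage sketch, not part of the driver source: CNS=1 in cdw10 above
 * selects the controller data structure, so the payload must be a full
 * spdk_nvme_ctrlr_data and must stay valid until completion. `g_cdata`,
 * `identify_done`, and `fetch_controller_data` are hypothetical names.
 */
static struct spdk_nvme_ctrlr_data g_cdata;

static int
fetch_controller_data(struct spdk_nvme_ctrlr *ctrlr, spdk_nvme_cmd_cb identify_done)
{
        return nvme_ctrlr_cmd_identify_controller(ctrlr, &g_cdata, identify_done, &g_cdata);
}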
int
nvme_ctrlr_cmd_detach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
                         struct spdk_nvme_ctrlr_list *payload,
                         spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
        struct nvme_request *req;
        struct spdk_nvme_cmd *cmd;

        nvme_mutex_lock(&ctrlr->ctrlr_lock);
        req = nvme_allocate_request_contig(payload, sizeof(struct spdk_nvme_ctrlr_list),
                                           cb_fn, cb_arg);
        if (req == NULL) {
                nvme_mutex_unlock(&ctrlr->ctrlr_lock);
                return ENOMEM;
        }

        cmd = &req->cmd;
        cmd->opc = SPDK_NVME_OPC_NS_ATTACHMENT;
        cmd->nsid = nsid;
        cmd->cdw10 = SPDK_NVME_NS_CTRLR_DETACH;

        nvme_ctrlr_submit_admin_request(ctrlr, req);

        nvme_mutex_unlock(&ctrlr->ctrlr_lock);

        return 0;
}
int
nvme_ctrlr_cmd_identify_namespace(struct spdk_nvme_ctrlr *ctrlr, uint16_t nsid,
                                  void *payload, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
        struct nvme_request *req;
        struct spdk_nvme_cmd *cmd;

        req = nvme_allocate_request_contig(payload, sizeof(struct spdk_nvme_ns_data),
                                           cb_fn, cb_arg);
        if (req == NULL) {
                return ENOMEM;
        }

        cmd = &req->cmd;
        cmd->opc = SPDK_NVME_OPC_IDENTIFY;

        /*
         * TODO: create an identify command data structure
         */
        cmd->nsid = nsid;

        nvme_ctrlr_submit_admin_request(ctrlr, req);

        return 0;
}
int
spdk_nvme_ctrlr_cmd_io_raw(struct spdk_nvme_ctrlr *ctrlr,
                           struct spdk_nvme_cmd *cmd,
                           void *buf, uint32_t len,
                           spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
        struct nvme_request *req;

        req = nvme_allocate_request_contig(buf, len, cb_fn, cb_arg);
        if (req == NULL) {
                return ENOMEM;
        }

        memcpy(&req->cmd, cmd, sizeof(req->cmd));

        nvme_ctrlr_submit_io_request(ctrlr, req);

        return 0;
}
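/*
 * A minimal usage sketch, not part of the driver source: issuing a Flush
 * through the raw I/O interface by filling in the NVMe command directly.
 * Flush carries no data, so the buffer is NULL and the length is 0. Assumes
 * <string.h> is in scope, `ns` is an active namespace, and `flush_done` is a
 * caller-supplied completion callback; `raw_flush` is a hypothetical helper.
 */
static int
raw_flush(struct spdk_nvme_ns *ns, spdk_nvme_cmd_cb flush_done)
{
        struct spdk_nvme_cmd cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.opc = SPDK_NVME_OPC_FLUSH;
        cmd.nsid = ns->id;

        return spdk_nvme_ctrlr_cmd_io_raw(ns->ctrlr, &cmd, NULL, 0, flush_done, NULL);
}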
static void
test_ctrlr_failed(void)
{
        struct spdk_nvme_qpair qpair = {};
        struct nvme_request *req;
        struct spdk_nvme_ctrlr ctrlr = {};
        char payload[4096];

        prepare_submit_request_test(&qpair, &ctrlr);

        req = nvme_allocate_request_contig(payload, sizeof(payload),
                                           expected_failure_callback, NULL);
        SPDK_CU_ASSERT_FATAL(req != NULL);

        /*
         * Set the controller to failed.
         * Set the controller to resetting so that the qpair won't get re-enabled.
         */
        ctrlr.is_failed = true;
        ctrlr.is_resetting = true;

        CU_ASSERT(nvme_qpair_submit_request(&qpair, req) != 0);

        cleanup_submit_request_test(&qpair);
}
int
nvme_ctrlr_cmd_create_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns_data *payload,
                         spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
        struct nvme_request *req;
        struct spdk_nvme_cmd *cmd;

        nvme_mutex_lock(&ctrlr->ctrlr_lock);
        req = nvme_allocate_request_contig(payload, sizeof(struct spdk_nvme_ns_data),
                                           cb_fn, cb_arg);
        if (req == NULL) {
                nvme_mutex_unlock(&ctrlr->ctrlr_lock);
                return ENOMEM;
        }

        cmd = &req->cmd;
        cmd->opc = SPDK_NVME_OPC_NS_MANAGEMENT;
        cmd->cdw10 = SPDK_NVME_NS_MANAGEMENT_CREATE;

        nvme_ctrlr_submit_admin_request(ctrlr, req);

        nvme_mutex_unlock(&ctrlr->ctrlr_lock);

        return 0;
}
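/*
 * A minimal usage sketch, not part of the driver source: only the size,
 * capacity, and format fields of the namespace data need to be filled in for
 * a create; per the NVMe spec, the controller returns the new namespace ID in
 * cdw0 of the completion. Assumes <string.h> is in scope and `ctrlr` is
 * initialized; `g_ns_data`, `create_done`, and `create_1gib_ns` are
 * hypothetical names.
 */
static struct spdk_nvme_ns_data g_ns_data;

static int
create_1gib_ns(struct spdk_nvme_ctrlr *ctrlr, spdk_nvme_cmd_cb create_done)
{
        memset(&g_ns_data, 0, sizeof(g_ns_data));
        g_ns_data.nsze = 262144;     /* namespace size, in logical blocks (1 GiB at 4 KiB) */
        g_ns_data.ncap = 262144;     /* namespace capacity, in logical blocks */
        g_ns_data.flbas.format = 0;  /* use LBA format 0 */

        return nvme_ctrlr_cmd_create_ns(ctrlr, &g_ns_data, create_done, NULL);
}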