/**
 * Submit a Set Features admin command with no data payload.
 *
 * \param ctrlr controller to send the command to
 * \param feature feature identifier (placed in cdw10)
 * \param cdw11 feature-specific dword 11
 * \param cdw12 feature-specific dword 12
 * \param payload unused by this variant (command carries no data)
 * \param payload_size unused by this variant
 * \param cb_fn callback invoked on command completion
 * \param cb_arg argument passed to cb_fn
 *
 * \return 0 on success, ENOMEM if no request object could be allocated.
 */
int
spdk_nvme_ctrlr_cmd_set_feature(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
				uint32_t cdw11, uint32_t cdw12, void *payload,
				uint32_t payload_size, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	int rc = 0;

	nvme_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_null(cb_fn, cb_arg);
	if (req == NULL) {
		rc = ENOMEM;
	} else {
		struct spdk_nvme_cmd *cmd = &req->cmd;

		cmd->opc = SPDK_NVME_OPC_SET_FEATURES;
		cmd->cdw10 = feature;
		cmd->cdw11 = cdw11;
		cmd->cdw12 = cdw12;
		nvme_ctrlr_submit_admin_request(ctrlr, req);
	}
	nvme_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}
/**
 * Submit a Namespace Attachment admin command that detaches namespace
 * \p nsid from the controllers listed in \p payload.
 *
 * \param ctrlr controller to send the command to
 * \param nsid namespace to detach
 * \param payload controller list describing which controllers to detach
 * \param cb_fn callback invoked on command completion
 * \param cb_arg argument passed to cb_fn
 *
 * \return 0 on success, ENOMEM if no request object could be allocated.
 */
int
nvme_ctrlr_cmd_detach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
			 struct spdk_nvme_ctrlr_list *payload,
			 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	int rc = 0;

	nvme_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_contig(payload, sizeof(struct spdk_nvme_ctrlr_list),
					   cb_fn, cb_arg);
	if (req == NULL) {
		rc = ENOMEM;
	} else {
		struct spdk_nvme_cmd *cmd = &req->cmd;

		cmd->opc = SPDK_NVME_OPC_NS_ATTACHMENT;
		cmd->nsid = nsid;
		cmd->cdw10 = SPDK_NVME_NS_CTRLR_DETACH;
		nvme_ctrlr_submit_admin_request(ctrlr, req);
	}
	nvme_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}
static int nvme_allocate_ioq_index(void) { struct nvme_driver *driver = &g_nvme_driver; uint32_t i; nvme_mutex_lock(&driver->lock); if (driver->ioq_index_pool == NULL) { driver->ioq_index_pool = calloc(driver->max_io_queues, sizeof(*driver->ioq_index_pool)); if (driver->ioq_index_pool) { for (i = 0; i < driver->max_io_queues; i++) { driver->ioq_index_pool[i] = i; } } else { nvme_mutex_unlock(&driver->lock); return -1; } driver->ioq_index_pool_next = 0; } if (driver->ioq_index_pool_next < driver->max_io_queues) { nvme_thread_ioq_index = driver->ioq_index_pool[driver->ioq_index_pool_next]; driver->ioq_index_pool[driver->ioq_index_pool_next] = -1; driver->ioq_index_pool_next++; } else { nvme_thread_ioq_index = -1; } nvme_mutex_unlock(&driver->lock); return 0; }
/*
 * Return the calling thread's I/O queue index (if it holds one) to the
 * driver-global pool and clear the thread-local nvme_thread_ioq_index.
 * A thread with no index (nvme_thread_ioq_index < 0) is a no-op.
 */
static void
nvme_free_ioq_index(void)
{
	struct nvme_driver *driver = &g_nvme_driver;

	nvme_mutex_lock(&driver->lock);
	if (nvme_thread_ioq_index >= 0) {
		/* Push the index back onto the pool stack. */
		driver->ioq_index_pool[--driver->ioq_index_pool_next] = nvme_thread_ioq_index;
		nvme_thread_ioq_index = -1;
	}
	nvme_mutex_unlock(&driver->lock);
}
/**
 * Submit a caller-constructed admin command to the controller.
 *
 * The caller's \p cmd is copied verbatim into the request; \p buf/\p len
 * describe an optional contiguous data buffer for the transfer.
 *
 * \param ctrlr controller to send the command to
 * \param cmd fully-built admin command to submit
 * \param buf data buffer for the command (may describe no data)
 * \param len size of \p buf in bytes
 * \param cb_fn callback invoked on command completion
 * \param cb_arg argument passed to cb_fn
 *
 * \return 0 on success, ENOMEM if no request object could be allocated.
 */
int
spdk_nvme_ctrlr_cmd_admin_raw(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_cmd *cmd,
			      void *buf, uint32_t len, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	int rc = 0;

	nvme_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_contig(buf, len, cb_fn, cb_arg);
	if (req == NULL) {
		rc = ENOMEM;
	} else {
		memcpy(&req->cmd, cmd, sizeof(req->cmd));
		nvme_ctrlr_submit_admin_request(ctrlr, req);
	}
	nvme_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}
/**
 * Submit a Namespace Management admin command that deletes namespace
 * \p nsid.  The command carries no data payload.
 *
 * \param ctrlr controller to send the command to
 * \param nsid namespace to delete
 * \param cb_fn callback invoked on command completion
 * \param cb_arg argument passed to cb_fn
 *
 * \return 0 on success, ENOMEM if no request object could be allocated.
 */
int
nvme_ctrlr_cmd_delete_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
			 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	int rc = 0;

	nvme_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_null(cb_fn, cb_arg);
	if (req == NULL) {
		rc = ENOMEM;
	} else {
		struct spdk_nvme_cmd *cmd = &req->cmd;

		cmd->opc = SPDK_NVME_OPC_NS_MANAGEMENT;
		cmd->cdw10 = SPDK_NVME_NS_MANAGEMENT_DELETE;
		cmd->nsid = nsid;
		nvme_ctrlr_submit_admin_request(ctrlr, req);
	}
	nvme_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}
/**
 * Submit a Namespace Management admin command that creates a namespace
 * described by \p payload.  No nsid is set on the command; the controller
 * assigns the new namespace ID and reports it in the completion.
 *
 * \param ctrlr controller to send the command to
 * \param payload namespace data structure describing the namespace to create
 * \param cb_fn callback invoked on command completion
 * \param cb_arg argument passed to cb_fn
 *
 * \return 0 on success, ENOMEM if no request object could be allocated.
 */
int
nvme_ctrlr_cmd_create_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns_data *payload,
			 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	int rc = 0;

	nvme_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_contig(payload, sizeof(struct spdk_nvme_ns_data),
					   cb_fn, cb_arg);
	if (req == NULL) {
		rc = ENOMEM;
	} else {
		struct spdk_nvme_cmd *cmd = &req->cmd;

		cmd->opc = SPDK_NVME_OPC_NS_MANAGEMENT;
		cmd->cdw10 = SPDK_NVME_NS_MANAGEMENT_CREATE;
		nvme_ctrlr_submit_admin_request(ctrlr, req);
	}
	nvme_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}