/*
 * Exercise the NVMe reservation command set against one controller.
 *
 * Prints a banner identifying the controller's PCI location, reports
 * whether the controller advertises reservation support (ONCS bit), and
 * if supported, sets/reads the host identifier and then runs a
 * register/acquire/report/release cycle against namespace 1 only.
 */
static void
reserve_controller(struct nvme_controller *ctrlr, struct pci_device *pci_dev)
{
	const struct nvme_controller_data *cdata = nvme_ctrlr_get_data(ctrlr);
	bool supported = cdata->oncs.reservations;

	printf("=====================================================\n");
	printf("NVMe Controller at PCI bus %d, device %d, function %d\n",
	       pci_dev->bus, pci_dev->dev, pci_dev->func);
	printf("=====================================================\n");

	printf("Reservations: %s\n", supported ? "Supported" : "Not Supported");

	if (!supported) {
		return;
	}

	set_host_identifier(ctrlr);
	get_host_identifier(ctrlr);

	/* Only namespace 1 is exercised here. */
	reservation_ns_register(ctrlr, 1);
	reservation_ns_acquire(ctrlr, 1);
	reservation_ns_report(ctrlr, 1);
	reservation_ns_release(ctrlr, 1);
}
static void register_ns(struct nvme_controller *ctrlr, struct pci_device *pci_dev, struct nvme_namespace *ns) { struct ns_entry *entry; const struct nvme_controller_data *cdata; entry = malloc(sizeof(struct ns_entry)); if (entry == NULL) { perror("ns_entry malloc"); exit(1); } cdata = nvme_ctrlr_get_data(ctrlr); entry->type = ENTRY_TYPE_NVME_NS; entry->u.nvme.ctrlr = ctrlr; entry->u.nvme.ns = ns; entry->size_in_ios = nvme_ns_get_size(ns) / g_io_size_bytes; entry->io_size_blocks = g_io_size_bytes / nvme_ns_get_sector_size(ns); snprintf(entry->name, 44, "%-20.20s (%-20.20s)", cdata->mn, cdata->sn); g_num_namespaces++; entry->next = g_namespaces; g_namespaces = entry; }
static void register_ns(struct nvme_controller *ctrlr, struct pci_device *pci_dev, struct nvme_namespace *ns) { struct worker_thread *worker; struct ns_entry *entry = malloc(sizeof(struct ns_entry)); const struct nvme_controller_data *cdata = nvme_ctrlr_get_data(ctrlr); worker = g_current_worker; entry->ctrlr = ctrlr; entry->ns = ns; entry->next = worker->namespaces; entry->io_completed = 0; entry->current_queue_depth = 0; entry->offset_in_ios = 0; entry->size_in_ios = nvme_ns_get_size(ns) / g_io_size_bytes; entry->io_size_blocks = g_io_size_bytes / nvme_ns_get_sector_size(ns); entry->is_draining = false; snprintf(entry->name, sizeof(cdata->mn), "%s", cdata->mn); printf("Assigning namespace %s to lcore %u\n", entry->name, worker->lcore); worker->namespaces = entry; if (worker->next == NULL) { g_current_worker = g_workers; } else { g_current_worker = worker->next; } }
/*
 * CAM callback invoked when a new NVMe namespace appears.
 *
 * Registers a CAM bus for the namespace's sim, creates a path to it,
 * attaches the namespace/controller identify data to the path's device,
 * and triggers a rescan — all under the controller lock. Returns ns on
 * success, NULL on failure (after unwinding whatever stages completed).
 */
static void *
nvme_sim_new_ns(struct nvme_namespace *ns, void *sc_arg)
{
	struct nvme_sim_softc *sc = sc_arg;
	struct nvme_controller *ctrlr = sc->s_ctrlr;
	int i;	/* counts completed setup stages; drives error unwind below */

	sc->s_ns = ns;

	/*
	 * XXX this is creating one bus per ns, but it should be one
	 * XXX target per controller, and one LUN per namespace.
	 * XXX Current drives only support one NS, so there's time
	 * XXX to fix it later when new drives arrive.
	 *
	 * XXX I'm pretty sure the xpt_bus_register() call below is
	 * XXX like super lame and it really belongs in the sim_new_ctrlr
	 * XXX callback. Then the create_path below would be pretty close
	 * XXX to being right. Except we should be per-ns not per-ctrlr
	 * XXX data.
	 */
	mtx_lock(&ctrlr->lock);

	/* Create bus */

	/*
	 * XXX do I need to lock ctrlr->lock ?
	 * XXX do I need to lock the path?
	 * ata and scsi seem to in their code, but their discovery is
	 * somewhat more asynchronous. We're only every called one at a
	 * time, and nothing is in parallel.
	 */
	i = 0;
	/* Stage 0: register the bus for this sim. */
	if (xpt_bus_register(sc->s_sim, ctrlr->dev, 0) != CAM_SUCCESS)
		goto error;
	i++;
	/* Stage 1: create a path to target 1 / the namespace's LUN id. */
	if (xpt_create_path(&sc->s_path, /*periph*/NULL,
	    cam_sim_path(sc->s_sim), 1, ns->id) != CAM_REQ_CMP)
		goto error;
	i++;

	/* Attach identify data so CAM consumers can see ns/ctrlr info. */
	sc->s_path->device->nvme_data = nvme_ns_get_data(ns);
	sc->s_path->device->nvme_cdata = nvme_ctrlr_get_data(ns->ctrlr);

	/* Scan bus */
	nvme_sim_rescan_target(ctrlr, sc->s_path);

	mtx_unlock(&ctrlr->lock);

	return ns;

error:
	/*
	 * Unwind in reverse order of the stages counted by i; each case
	 * deliberately falls through to tear down the earlier stages too.
	 */
	switch (i) {
	case 2:
		xpt_free_path(sc->s_path);
		/* FALLTHROUGH */
	case 1:
		xpt_bus_deregister(cam_sim_path(sc->s_sim));
		/* FALLTHROUGH */
	case 0:
		cam_sim_free(sc->s_sim, /*free_devq*/TRUE);
	}
	mtx_unlock(&ctrlr->lock);
	return NULL;
}