static int
nvme_attach(device_t dev)
{
	struct nvme_controller	*ctrlr = DEVICE2SOFTC(dev);
	int			status;

	status = nvme_ctrlr_construct(ctrlr, dev);
	if (status != 0) {
		nvme_ctrlr_destruct(ctrlr, dev);
		return (status);
	}

	/*
	 * Reset controller twice to ensure we do a transition from cc.en==1
	 * to cc.en==0.  This is because we don't really know what status
	 * the controller was left in when boot handed off to OS.
	 */
	status = nvme_ctrlr_hw_reset(ctrlr);
	if (status != 0) {
		nvme_ctrlr_destruct(ctrlr, dev);
		return (status);
	}

	status = nvme_ctrlr_hw_reset(ctrlr);
	if (status != 0) {
		nvme_ctrlr_destruct(ctrlr, dev);
		return (status);
	}

	nvme_sysctl_initialize_ctrlr(ctrlr);

	pci_enable_busmaster(dev);

	ctrlr->config_hook.ich_func = nvme_ctrlr_start_config_hook;
	ctrlr->config_hook.ich_arg = ctrlr;

	config_intrhook_establish(&ctrlr->config_hook);

	return (0);
}
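/*
 * DEVICE2SOFTC appears throughout these routines but is not defined in this
 * excerpt.  A minimal sketch, assuming it is simply a convenience wrapper
 * around the standard newbus softc accessor:
 */
#define DEVICE2SOFTC(dev)	device_get_softc(dev)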
static int
ntb_detach(device_t device)
{
	struct ntb_softc *ntb;

	ntb = DEVICE2SOFTC(device);
	callout_drain(&ntb->heartbeat_timer);
	callout_drain(&ntb->lr_timer);
	if (ntb->type == NTB_XEON)
		ntb_teardown_xeon(ntb);
	ntb_teardown_interrupts(ntb);

	/*
	 * Redetect total MWs so we unmap properly -- in case we lowered the
	 * maximum to work around Xeon errata.
	 */
	ntb_detect_max_mw(ntb);
	ntb_unmap_pci_bar(ntb);

	return (0);
}
static int
ioat_detach(device_t device)
{
	struct ioat_softc *ioat;

	ioat = DEVICE2SOFTC(device);

	ioat_test_detach();

	mtx_lock(IOAT_REFLK);
	ioat->quiescing = TRUE;
	ioat_channel[ioat->chan_idx] = NULL;

	ioat_drain_locked(ioat);
	mtx_unlock(IOAT_REFLK);

	ioat_teardown_intr(ioat);
	callout_drain(&ioat->timer);

	pci_disable_busmaster(device);

	if (ioat->pci_resource != NULL)
		bus_release_resource(device, SYS_RES_MEMORY,
		    ioat->pci_resource_id, ioat->pci_resource);

	if (ioat->ring != NULL)
		ioat_free_ring(ioat, 1 << ioat->ring_size_order, ioat->ring);

	if (ioat->comp_update != NULL) {
		bus_dmamap_unload(ioat->comp_update_tag,
		    ioat->comp_update_map);
		bus_dmamem_free(ioat->comp_update_tag, ioat->comp_update,
		    ioat->comp_update_map);
		bus_dma_tag_destroy(ioat->comp_update_tag);
	}

	bus_dma_tag_destroy(ioat->hw_desc_tag);

	return (0);
}
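/*
 * For context: attach/detach routines like ioat_detach() above are hooked
 * into newbus through a device method table.  A minimal sketch -- ioat_probe
 * and ioat_attach are assumed counterparts that are not shown in this
 * excerpt:
 */
static device_method_t ioat_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		ioat_probe),
	DEVMETHOD(device_attach,	ioat_attach),
	DEVMETHOD(device_detach,	ioat_detach),
	DEVMETHOD_END
};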
static void
nvme_notify_consumer(struct nvme_consumer *cons)
{
	device_t		*devlist;
	struct nvme_controller	*ctrlr;
	struct nvme_namespace	*ns;
	void			*ctrlr_cookie;
	int			dev_idx, ns_idx, devcount;

	if (devclass_get_devices(nvme_devclass, &devlist, &devcount))
		return;

	for (dev_idx = 0; dev_idx < devcount; dev_idx++) {
		ctrlr = DEVICE2SOFTC(devlist[dev_idx]);
		if (cons->ctrlr_fn != NULL)
			ctrlr_cookie = (*cons->ctrlr_fn)(ctrlr);
		else
			ctrlr_cookie = NULL;
		ctrlr->cons_cookie[cons->id] = ctrlr_cookie;
		if (ctrlr->is_failed) {
			if (cons->fail_fn != NULL)
				(*cons->fail_fn)(ctrlr_cookie);
			/*
			 * Do not notify consumers about the namespaces of a
			 * failed controller.
			 */
			continue;
		}
		for (ns_idx = 0; ns_idx < ctrlr->cdata.nn; ns_idx++) {
			ns = &ctrlr->ns[ns_idx];
			if (cons->ns_fn != NULL)
				ns->cons_cookie[cons->id] =
				    (*cons->ns_fn)(ns, ctrlr_cookie);
		}
	}

	free(devlist, M_TEMP);
}
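/*
 * The nvme_consumer layout is not shown in this excerpt.  A sketch
 * consistent with the fields dereferenced in nvme_notify_consumer() above;
 * the exact member types (and the type of "id") are assumptions derived
 * from the call sites:
 */
struct nvme_consumer {
	uint32_t	id;
	void		*(*ctrlr_fn)(struct nvme_controller *);
	void		*(*ns_fn)(struct nvme_namespace *, void *);
	void		(*fail_fn)(void *);
};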
static int
ntb_attach(device_t device)
{
	struct ntb_softc *ntb = DEVICE2SOFTC(device);
	struct ntb_hw_info *p = ntb_get_device_info(pci_get_devid(device));
	int error;

	ntb->device = device;
	ntb->type = p->type;
	ntb->features = p->features;

	/* Heartbeat timer for NTB_SOC since there is no link interrupt */
	callout_init(&ntb->heartbeat_timer, CALLOUT_MPSAFE);
	callout_init(&ntb->lr_timer, CALLOUT_MPSAFE);

	DETACH_ON_ERROR(ntb_map_pci_bars(ntb));
	DETACH_ON_ERROR(ntb_initialize_hw(ntb));
	DETACH_ON_ERROR(ntb_setup_interrupts(ntb));

	pci_enable_busmaster(ntb->device);

	return (error);
}
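/*
 * DETACH_ON_ERROR is not defined in this excerpt.  A minimal sketch
 * consistent with its use in ntb_attach() above: run a setup step, and on
 * failure tear the device back down and bail out.  This also explains the
 * final "return (error)" -- the macro leaves "error" holding the result of
 * the last step:
 */
#define DETACH_ON_ERROR(func)		\
	error = func;			\
	if (error != 0) {		\
		ntb_detach(device);	\
		return (error);		\
	}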
static int
isci_attach(device_t device)
{
	int error;
	struct isci_softc *isci = DEVICE2SOFTC(device);

	g_isci = isci;
	isci->device = device;

	isci_allocate_pci_memory(isci);

	error = isci_initialize(isci);
	if (error) {
		isci_detach(device);
		return (error);
	}

	isci_interrupt_setup(isci);
	isci_sysctl_initialize(isci);

	return (0);
}
static int
isci_detach(device_t device)
{
	struct isci_softc *isci = DEVICE2SOFTC(device);
	int i, phy;

	for (i = 0; i < isci->controller_count; i++) {
		struct ISCI_CONTROLLER *controller = &isci->controllers[i];
		SCI_STATUS status;
		void *unmap_buffer;

		if (controller->scif_controller_handle != NULL) {
			scic_controller_disable_interrupts(
			    scif_controller_get_scic_handle(
			    controller->scif_controller_handle));

			mtx_lock(&controller->lock);
			status = scif_controller_stop(
			    controller->scif_controller_handle, 0);
			mtx_unlock(&controller->lock);

			while (controller->is_started == TRUE) {
				/*
				 * Now poll for interrupts until the
				 * controller stop complete callback is
				 * received.
				 */
				mtx_lock(&controller->lock);
				isci_interrupt_poll_handler(controller);
				mtx_unlock(&controller->lock);
				pause("isci", 1);
			}

			if (controller->sim != NULL) {
				mtx_lock(&controller->lock);
				xpt_free_path(controller->path);
				xpt_bus_deregister(
				    cam_sim_path(controller->sim));
				cam_sim_free(controller->sim, TRUE);
				mtx_unlock(&controller->lock);
			}
		}

		if (controller->timer_memory != NULL)
			free(controller->timer_memory, M_ISCI);

		if (controller->remote_device_memory != NULL)
			free(controller->remote_device_memory, M_ISCI);

		for (phy = 0; phy < SCI_MAX_PHYS; phy++) {
			if (controller->phys[phy].cdev_fault)
				led_destroy(controller->phys[phy].cdev_fault);

			if (controller->phys[phy].cdev_locate)
				led_destroy(controller->phys[phy].cdev_locate);
		}

		while (1) {
			sci_pool_get(controller->unmap_buffer_pool,
			    unmap_buffer);
			if (unmap_buffer == NULL)
				break;
			contigfree(unmap_buffer, PAGE_SIZE, M_ISCI);
		}
	}

	/*
	 * The SCIF controllers have been stopped, so we can now
	 * free the SCI library memory.
	 */
	if (isci->sci_library_memory != NULL)
		free(isci->sci_library_memory, M_ISCI);

	for (i = 0; i < ISCI_NUM_PCI_BARS; i++) {
		struct ISCI_PCI_BAR *pci_bar = &isci->pci_bar[i];

		if (pci_bar->resource != NULL)
			bus_release_resource(device, SYS_RES_MEMORY,
			    pci_bar->resource_id, pci_bar->resource);
	}

	for (i = 0; i < isci->num_interrupts; i++) {
		struct ISCI_INTERRUPT_INFO *interrupt_info;

		interrupt_info = &isci->interrupt_info[i];

		if (interrupt_info->tag != NULL)
			bus_teardown_intr(device, interrupt_info->res,
			    interrupt_info->tag);

		if (interrupt_info->res != NULL)
			bus_release_resource(device, SYS_RES_IRQ,
			    rman_get_rid(interrupt_info->res),
			    interrupt_info->res);

		pci_release_msi(device);
	}

	pci_disable_busmaster(device);

	return (0);
}
/*
 * Initialize Hardware
 */
static int
ioat3_attach(device_t device)
{
	struct ioat_softc *ioat;
	struct ioat_descriptor **ring;
	struct ioat_descriptor *next;
	struct ioat_dma_hw_descriptor *dma_hw_desc;
	int i, num_descriptors;
	int error;
	uint8_t xfercap;

	error = 0;
	ioat = DEVICE2SOFTC(device);
	ioat->capabilities = ioat_read_dmacapability(ioat);

	ioat_log_message(1, "Capabilities: %b\n", (int)ioat->capabilities,
	    IOAT_DMACAP_STR);

	xfercap = ioat_read_xfercap(ioat);
	ioat->max_xfer_size = 1 << xfercap;

	ioat->intrdelay_supported = (ioat_read_2(ioat, IOAT_INTRDELAY_OFFSET) &
	    IOAT_INTRDELAY_SUPPORTED) != 0;
	if (ioat->intrdelay_supported)
		ioat->intrdelay_max = IOAT_INTRDELAY_US_MASK;

	/* TODO: need to check DCA here if we ever do XOR/PQ */

	mtx_init(&ioat->submit_lock, "ioat_submit", NULL, MTX_DEF);
	mtx_init(&ioat->cleanup_lock, "ioat_cleanup", NULL, MTX_DEF);
	callout_init(&ioat->timer, 1);

	/* Establish lock order for Witness */
	mtx_lock(&ioat->submit_lock);
	mtx_lock(&ioat->cleanup_lock);
	mtx_unlock(&ioat->cleanup_lock);
	mtx_unlock(&ioat->submit_lock);

	ioat->is_resize_pending = FALSE;
	ioat->is_completion_pending = FALSE;
	ioat->is_reset_pending = FALSE;
	ioat->is_channel_running = FALSE;

	bus_dma_tag_create(bus_get_dma_tag(ioat->device), sizeof(uint64_t),
	    0x0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(uint64_t), 1, sizeof(uint64_t), 0, NULL, NULL,
	    &ioat->comp_update_tag);

	error = bus_dmamem_alloc(ioat->comp_update_tag,
	    (void **)&ioat->comp_update, BUS_DMA_ZERO,
	    &ioat->comp_update_map);
	if (ioat->comp_update == NULL)
		return (ENOMEM);

	error = bus_dmamap_load(ioat->comp_update_tag, ioat->comp_update_map,
	    ioat->comp_update, sizeof(uint64_t), ioat_comp_update_map, ioat,
	    0);
	if (error != 0)
		return (error);

	ioat->ring_size_order = IOAT_MIN_ORDER;

	num_descriptors = 1 << ioat->ring_size_order;

	bus_dma_tag_create(bus_get_dma_tag(ioat->device), 0x40, 0x0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(struct ioat_dma_hw_descriptor), 1,
	    sizeof(struct ioat_dma_hw_descriptor), 0, NULL, NULL,
	    &ioat->hw_desc_tag);

	ioat->ring = malloc(num_descriptors * sizeof(*ring), M_IOAT,
	    M_ZERO | M_WAITOK);
	if (ioat->ring == NULL)
		return (ENOMEM);

	ring = ioat->ring;
	for (i = 0; i < num_descriptors; i++) {
		ring[i] = ioat_alloc_ring_entry(ioat, M_WAITOK);
		if (ring[i] == NULL)
			return (ENOMEM);
		ring[i]->id = i;
	}

	for (i = 0; i < num_descriptors - 1; i++) {
		next = ring[i + 1];
		dma_hw_desc = ring[i]->u.dma;
		dma_hw_desc->next = next->hw_desc_bus_addr;
	}

	ring[i]->u.dma->next = ring[0]->hw_desc_bus_addr;

	ioat->head = ioat->hw_head = 0;
	ioat->tail = 0;
	ioat->last_seen = 0;
	return (0);
}
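/*
 * ioat_comp_update_map() is the bus_dmamap_load() callback passed above.
 * A minimal sketch, assuming the softc has a comp_update_bus_addr field
 * (an assumed name, not shown in this excerpt) recording where the
 * hardware should DMA its completion writes:
 */
static void
ioat_comp_update_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
	struct ioat_softc *ioat = arg;

	if (error != 0)
		return;
	/* Remember the bus address of the completion update area. */
	ioat->comp_update_bus_addr = seg[0].ds_addr;
}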
static void
ioat_setup_sysctl(device_t device)
{
	struct sysctl_oid_list *par, *statpar, *state, *hammer;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree, *tmp;
	struct ioat_softc *ioat;

	ioat = DEVICE2SOFTC(device);
	ctx = device_get_sysctl_ctx(device);
	tree = device_get_sysctl_tree(device);
	par = SYSCTL_CHILDREN(tree);

	SYSCTL_ADD_INT(ctx, par, OID_AUTO, "version", CTLFLAG_RD,
	    &ioat->version, 0, "HW version (0xMM form)");
	SYSCTL_ADD_UINT(ctx, par, OID_AUTO, "max_xfer_size", CTLFLAG_RD,
	    &ioat->max_xfer_size, 0, "HW maximum transfer size");
	SYSCTL_ADD_INT(ctx, par, OID_AUTO, "intrdelay_supported", CTLFLAG_RD,
	    &ioat->intrdelay_supported, 0, "Is INTRDELAY supported");
	SYSCTL_ADD_U16(ctx, par, OID_AUTO, "intrdelay_max", CTLFLAG_RD,
	    &ioat->intrdelay_max, 0,
	    "Maximum configurable INTRDELAY on this channel (microseconds)");

	tmp = SYSCTL_ADD_NODE(ctx, par, OID_AUTO, "state", CTLFLAG_RD, NULL,
	    "IOAT channel internal state");
	state = SYSCTL_CHILDREN(tmp);

	SYSCTL_ADD_UINT(ctx, state, OID_AUTO, "ring_size_order", CTLFLAG_RD,
	    &ioat->ring_size_order, 0, "SW descriptor ring size order");
	SYSCTL_ADD_UINT(ctx, state, OID_AUTO, "head", CTLFLAG_RD, &ioat->head,
	    0, "SW descriptor head pointer index");
	SYSCTL_ADD_UINT(ctx, state, OID_AUTO, "tail", CTLFLAG_RD, &ioat->tail,
	    0, "SW descriptor tail pointer index");
	SYSCTL_ADD_UINT(ctx, state, OID_AUTO, "hw_head", CTLFLAG_RD,
	    &ioat->hw_head, 0, "HW DMACOUNT");

	SYSCTL_ADD_UQUAD(ctx, state, OID_AUTO, "last_completion", CTLFLAG_RD,
	    ioat->comp_update, "HW addr of last completion");

	SYSCTL_ADD_INT(ctx, state, OID_AUTO, "is_resize_pending", CTLFLAG_RD,
	    &ioat->is_resize_pending, 0, "resize pending");
	SYSCTL_ADD_INT(ctx, state, OID_AUTO, "is_completion_pending",
	    CTLFLAG_RD, &ioat->is_completion_pending, 0,
	    "completion pending");
	SYSCTL_ADD_INT(ctx, state, OID_AUTO, "is_reset_pending", CTLFLAG_RD,
	    &ioat->is_reset_pending, 0, "reset pending");
	SYSCTL_ADD_INT(ctx, state, OID_AUTO, "is_channel_running", CTLFLAG_RD,
	    &ioat->is_channel_running, 0, "channel running");

	SYSCTL_ADD_PROC(ctx, state, OID_AUTO, "chansts",
	    CTLTYPE_STRING | CTLFLAG_RD, ioat, 0, sysctl_handle_chansts, "A",
	    "String of the channel status");

	SYSCTL_ADD_U16(ctx, state, OID_AUTO, "intrdelay", CTLFLAG_RD,
	    &ioat->cached_intrdelay, 0,
	    "Current INTRDELAY on this channel (cached, microseconds)");

	tmp = SYSCTL_ADD_NODE(ctx, par, OID_AUTO, "hammer", CTLFLAG_RD, NULL,
	    "Big hammers (mostly for testing)");
	hammer = SYSCTL_CHILDREN(tmp);

	SYSCTL_ADD_PROC(ctx, hammer, OID_AUTO, "force_hw_reset",
	    CTLTYPE_INT | CTLFLAG_RW, ioat, 0, sysctl_handle_reset, "I",
	    "Set to non-zero to reset the hardware");
	SYSCTL_ADD_PROC(ctx, hammer, OID_AUTO, "force_hw_error",
	    CTLTYPE_INT | CTLFLAG_RW, ioat, 0, sysctl_handle_error, "I",
	    "Set to non-zero to inject a recoverable hardware error");

	tmp = SYSCTL_ADD_NODE(ctx, par, OID_AUTO, "stats", CTLFLAG_RD, NULL,
	    "IOAT channel statistics");
	statpar = SYSCTL_CHILDREN(tmp);

	SYSCTL_ADD_UQUAD(ctx, statpar, OID_AUTO, "interrupts", CTLFLAG_RW,
	    &ioat->stats.interrupts,
	    "Number of interrupts processed on this channel");
	SYSCTL_ADD_UQUAD(ctx, statpar, OID_AUTO, "descriptors", CTLFLAG_RW,
	    &ioat->stats.descriptors_processed,
	    "Number of descriptors processed on this channel");
	SYSCTL_ADD_UQUAD(ctx, statpar, OID_AUTO, "submitted", CTLFLAG_RW,
	    &ioat->stats.descriptors_submitted,
	    "Number of descriptors submitted to this channel");
	SYSCTL_ADD_UQUAD(ctx, statpar, OID_AUTO, "errored", CTLFLAG_RW,
	    &ioat->stats.descriptors_error,
	    "Number of descriptors failed by channel errors");
	SYSCTL_ADD_U32(ctx, statpar, OID_AUTO, "halts", CTLFLAG_RW,
	    &ioat->stats.channel_halts, 0,
	    "Number of times the channel has halted");
	SYSCTL_ADD_U32(ctx, statpar, OID_AUTO, "last_halt_chanerr",
	    CTLFLAG_RW, &ioat->stats.last_halt_chanerr, 0,
	    "The raw CHANERR when the channel was last halted");

	SYSCTL_ADD_PROC(ctx, statpar, OID_AUTO, "desc_per_interrupt",
	    CTLTYPE_STRING | CTLFLAG_RD, ioat, 0, sysctl_handle_dpi, "A",
	    "Descriptors per interrupt");
}