void isci_interrupt_msix_handler(void *arg) { struct ISCI_INTERRUPT_INFO *interrupt_info = (struct ISCI_INTERRUPT_INFO *)arg; struct ISCI_CONTROLLER *controller = (struct ISCI_CONTROLLER *)interrupt_info->interrupt_target_handle; SCIC_CONTROLLER_INTERRUPT_HANDLER interrupt_handler; SCIC_CONTROLLER_COMPLETION_HANDLER completion_handler; interrupt_handler = interrupt_info->handlers->interrupt_handler; completion_handler = interrupt_info->handlers->completion_handler; SCI_CONTROLLER_HANDLE_T scic_controller_handle; scic_controller_handle = scif_controller_get_scic_handle( controller->scif_controller_handle); if (interrupt_handler(scic_controller_handle)) { mtx_lock(&controller->lock); completion_handler(scic_controller_handle); /* * isci_controller_release_queued_ccb() is a relatively * expensive routine, so we don't call it until the controller * level flag is set to TRUE. */ if (controller->release_queued_ccbs == TRUE) isci_controller_release_queued_ccbs(controller); mtx_unlock(&controller->lock); } }
void isci_interrupt_legacy_handler(void *arg) { struct ISCI_INTERRUPT_INFO *interrupt_info = (struct ISCI_INTERRUPT_INFO *)arg; struct isci_softc *isci = (struct isci_softc *)interrupt_info->interrupt_target_handle; SCIC_CONTROLLER_INTERRUPT_HANDLER interrupt_handler; SCIC_CONTROLLER_COMPLETION_HANDLER completion_handler; int index; interrupt_handler = interrupt_info->handlers->interrupt_handler; completion_handler = interrupt_info->handlers->completion_handler; for (index = 0; index < isci->controller_count; index++) { struct ISCI_CONTROLLER *controller = &isci->controllers[index]; /* If controller_count > 0, we will get interrupts here for * controller 0 before controller 1 has even started. So * we need to make sure we don't call the completion handler * for a non-started controller. */ if (controller->is_started == TRUE) { SCI_CONTROLLER_HANDLE_T scic_controller_handle = scif_controller_get_scic_handle( controller->scif_controller_handle); if (interrupt_handler(scic_controller_handle)) { mtx_lock(&controller->lock); completion_handler(scic_controller_handle); mtx_unlock(&controller->lock); } } } }
static void isci_sysctl_stop(struct ISCI_CONTROLLER *controller, uint32_t phy_to_be_stopped) { SCI_PHY_HANDLE_T phy_handle = NULL; scic_controller_get_phy_handle( scif_controller_get_scic_handle(controller->scif_controller_handle), phy_to_be_stopped, &phy_handle); scic_phy_stop(phy_handle); }
/*
 * Kick off the SCIF controller start sequence using the framework's
 * suggested timeout, then enable interrupt delivery at the SCIC layer.
 */
void isci_controller_start(void *controller_handle)
{
	struct ISCI_CONTROLLER *controller =
	    (struct ISCI_CONTROLLER *)controller_handle;
	SCI_CONTROLLER_HANDLE_T scif_handle =
	    controller->scif_controller_handle;
	uint32_t timeout =
	    scif_controller_get_suggested_start_timeout(scif_handle);

	scif_controller_start(scif_handle, timeout);
	scic_controller_enable_interrupts(
	    scif_controller_get_scic_handle(scif_handle));
}
void isci_interrupt_poll_handler(struct ISCI_CONTROLLER *controller) { SCI_CONTROLLER_HANDLE_T scic_controller = scif_controller_get_scic_handle(controller->scif_controller_handle); SCIC_CONTROLLER_HANDLER_METHODS_T handlers; scic_controller_get_handler_methods(SCIC_NO_INTERRUPTS, 0x0, &handlers); if(handlers.interrupt_handler(scic_controller) == TRUE) { /* Do not acquire controller lock in this path. xpt * poll routine will get called with this lock already * held, so we can't acquire it again here. Other users * of this function must acquire the lock explicitly * before calling this handler. */ handlers.completion_handler(scic_controller); } }
uint32_t isci_remote_device_get_bitrate(struct ISCI_REMOTE_DEVICE *remote_device) { struct ISCI_DOMAIN *domain = remote_device->domain; struct ISCI_CONTROLLER *controller = domain->controller; SCI_PORT_HANDLE_T port_handle; SCIC_PORT_PROPERTIES_T port_properties; uint8_t phy_index; SCI_PHY_HANDLE_T phy_handle; SCIC_PHY_PROPERTIES_T phy_properties; /* get a handle to the port associated with this remote device's * domain */ port_handle = scif_domain_get_scic_port_handle(domain->sci_object); scic_port_get_properties(port_handle, &port_properties); /* get the lowest numbered phy in the port */ phy_index = 0; while ((port_properties.phy_mask != 0) && !(port_properties.phy_mask & 0x1)) { phy_index++; port_properties.phy_mask >>= 1; } /* get the properties for the lowest numbered phy */ scic_controller_get_phy_handle( scif_controller_get_scic_handle(controller->scif_controller_handle), phy_index, &phy_handle); scic_phy_get_properties(phy_handle, &phy_properties); switch (phy_properties.negotiated_link_rate) { case SCI_SAS_150_GB: return (150000); case SCI_SAS_300_GB: return (300000); case SCI_SAS_600_GB: return (600000); default: return (0); } }
/*
 * MSI-X interrupt handler.  Each MSI-X vector is bound to a single
 * controller, so interrupt_info->interrupt_target_handle is that
 * controller.  When the SCIC interrupt handler reports pending work,
 * run the completion handler under the controller lock.
 *
 * NOTE(review): unlike the other MSI-X handler variant in this file,
 * this one does not check controller->release_queued_ccbs -- confirm
 * which revision is intended to be built.
 */
void isci_interrupt_msix_handler(void *arg)
{
	struct ISCI_INTERRUPT_INFO *interrupt_info =
	    (struct ISCI_INTERRUPT_INFO *)arg;
	struct ISCI_CONTROLLER *controller =
	    (struct ISCI_CONTROLLER *)interrupt_info->interrupt_target_handle;
	SCIC_CONTROLLER_INTERRUPT_HANDLER interrupt_handler;
	SCIC_CONTROLLER_COMPLETION_HANDLER completion_handler;

	interrupt_handler = interrupt_info->handlers->interrupt_handler;
	completion_handler = interrupt_info->handlers->completion_handler;

	SCI_CONTROLLER_HANDLE_T scic_controller_handle;

	/* Translate the SCIF controller handle to the SCIC core handle
	 * that the interrupt/completion handlers operate on. */
	scic_controller_handle = scif_controller_get_scic_handle(
	    controller->scif_controller_handle);

	/* Only take the lock and run completions if the SCI core says
	 * this controller actually has pending interrupt work. */
	if (interrupt_handler(scic_controller_handle)) {
		mtx_lock(&controller->lock);
		completion_handler(scic_controller_handle);
		mtx_unlock(&controller->lock);
	}
}
/*
 * Apply OEM parameters and loader tunables to the SCIC user parameters,
 * size the controller and SIM queue depths, then hand off to SCIF for
 * the final controller initialization.
 *
 * Returns the SCI_STATUS from scif_controller_initialize().
 */
SCI_STATUS isci_controller_initialize(struct ISCI_CONTROLLER *controller)
{
	SCIC_USER_PARAMETERS_T scic_user_parameters;
	SCI_CONTROLLER_HANDLE_T scic_controller_handle;
	unsigned long tunable;
	int i;

	scic_controller_handle =
	    scif_controller_get_scic_handle(controller->scif_controller_handle);

	/* Push OEM parameters down to the SCI core if the attach code
	 * found a valid OEM parameter block. */
	if (controller->isci->oem_parameters_found == TRUE) {
		scic_oem_parameters_set(
		    scic_controller_handle,
		    &controller->oem_parameters,
		    (uint8_t)(controller->oem_parameters_version));
	}

	/* Start from the current SCIC user parameters, then overlay any
	 * hw.isci.* loader tunables the administrator has set. */
	scic_user_parameters_get(scic_controller_handle, &scic_user_parameters);

	if (TUNABLE_ULONG_FETCH("hw.isci.no_outbound_task_timeout", &tunable))
		scic_user_parameters.sds1.no_outbound_task_timeout =
		    (uint8_t)tunable;

	if (TUNABLE_ULONG_FETCH("hw.isci.ssp_max_occupancy_timeout", &tunable))
		scic_user_parameters.sds1.ssp_max_occupancy_timeout =
		    (uint16_t)tunable;

	if (TUNABLE_ULONG_FETCH("hw.isci.stp_max_occupancy_timeout", &tunable))
		scic_user_parameters.sds1.stp_max_occupancy_timeout =
		    (uint16_t)tunable;

	if (TUNABLE_ULONG_FETCH("hw.isci.ssp_inactivity_timeout", &tunable))
		scic_user_parameters.sds1.ssp_inactivity_timeout =
		    (uint16_t)tunable;

	if (TUNABLE_ULONG_FETCH("hw.isci.stp_inactivity_timeout", &tunable))
		scic_user_parameters.sds1.stp_inactivity_timeout =
		    (uint16_t)tunable;

	if (TUNABLE_ULONG_FETCH("hw.isci.max_speed_generation", &tunable))
		for (i = 0; i < SCI_MAX_PHYS; i++)
			scic_user_parameters.sds1.phys[i].max_speed_generation =
			    (uint8_t)tunable;

	scic_user_parameters_set(scic_controller_handle, &scic_user_parameters);

	/* Scheduler bug in SCU requires SCIL to reserve some task contexts
	 * as a workaround - one per domain.
	 */
	controller->queue_depth = SCI_MAX_IO_REQUESTS - SCI_MAX_DOMAINS;

	/* Allow an explicit queue-depth tunable, clamped to [1, max]. */
	if (TUNABLE_INT_FETCH("hw.isci.controller_queue_depth",
	    &controller->queue_depth)) {
		controller->queue_depth = max(1, min(controller->queue_depth,
		    SCI_MAX_IO_REQUESTS - SCI_MAX_DOMAINS));
	}

	/* Reserve one request so that we can ensure we have one available TC
	 * to do internal device resets.
	 */
	controller->sim_queue_depth = controller->queue_depth - 1;

	/* Although we save one TC to do internal device resets, it is
	 * possible we could end up using several TCs for simultaneous
	 * device resets while at the same time having CAM fill our
	 * controller queue.  To simulate this condition, and how our
	 * driver handles it, we can set this io_shortage parameter, which
	 * will tell CAM that we have a larger queue depth than we really do.
	 */
	uint32_t io_shortage = 0;
	TUNABLE_INT_FETCH("hw.isci.io_shortage", &io_shortage);
	controller->sim_queue_depth += io_shortage;

	return (scif_controller_initialize(controller->scif_controller_handle));
}
/*
 * Device detach: stop each SCIF controller (polling until the stop
 * completes), tear down CAM state, free per-controller memory and LED
 * devices, drain the unmap buffer pool, then release the SCI library
 * memory, PCI BARs, interrupt resources, and bus mastering.
 *
 * Always returns 0.
 */
static int isci_detach(device_t device)
{
	struct isci_softc *isci = DEVICE2SOFTC(device);
	int i, phy;

	for (i = 0; i < isci->controller_count; i++) {
		struct ISCI_CONTROLLER *controller = &isci->controllers[i];
		SCI_STATUS status;
		void *unmap_buffer;

		if (controller->scif_controller_handle != NULL) {
			/* Quiesce interrupt delivery before stopping. */
			scic_controller_disable_interrupts(
			    scif_controller_get_scic_handle(controller->scif_controller_handle));

			mtx_lock(&controller->lock);
			/* NOTE(review): status is captured but never
			 * examined -- confirm whether the stop result
			 * should be checked or logged. */
			status = scif_controller_stop(controller->scif_controller_handle, 0);
			mtx_unlock(&controller->lock);

			while (controller->is_started == TRUE) {
				/* Now poll for interrupts until the
				 * controller stop complete callback is
				 * received (it clears is_started). */
				mtx_lock(&controller->lock);
				isci_interrupt_poll_handler(controller);
				mtx_unlock(&controller->lock);
				pause("isci", 1);
			}

			/* Tear down CAM registration under the lock. */
			if(controller->sim != NULL) {
				mtx_lock(&controller->lock);
				xpt_free_path(controller->path);
				xpt_bus_deregister(cam_sim_path(controller->sim));
				cam_sim_free(controller->sim, TRUE);
				mtx_unlock(&controller->lock);
			}
		}

		if (controller->timer_memory != NULL)
			free(controller->timer_memory, M_ISCI);

		if (controller->remote_device_memory != NULL)
			free(controller->remote_device_memory, M_ISCI);

		/* Destroy the per-phy fault/locate LED devices. */
		for (phy = 0; phy < SCI_MAX_PHYS; phy++) {
			if (controller->phys[phy].cdev_fault)
				led_destroy(controller->phys[phy].cdev_fault);

			if (controller->phys[phy].cdev_locate)
				led_destroy(controller->phys[phy].cdev_locate);
		}

		/* Drain the unmap buffer pool; sci_pool_get yields NULL
		 * once the pool is empty. */
		while (1) {
			sci_pool_get(controller->unmap_buffer_pool, unmap_buffer);
			if (unmap_buffer == NULL)
				break;
			contigfree(unmap_buffer, PAGE_SIZE, M_ISCI);
		}
	}

	/* The SCIF controllers have been stopped, so we can now
	 * free the SCI library memory.
	 */
	if (isci->sci_library_memory != NULL)
		free(isci->sci_library_memory, M_ISCI);

	for (i = 0; i < ISCI_NUM_PCI_BARS; i++) {
		struct ISCI_PCI_BAR *pci_bar = &isci->pci_bar[i];

		if (pci_bar->resource != NULL)
			bus_release_resource(device, SYS_RES_MEMORY,
			    pci_bar->resource_id, pci_bar->resource);
	}

	for (i = 0; i < isci->num_interrupts; i++) {
		struct ISCI_INTERRUPT_INFO *interrupt_info;

		interrupt_info = &isci->interrupt_info[i];

		if(interrupt_info->tag != NULL)
			bus_teardown_intr(device, interrupt_info->res,
			    interrupt_info->tag);

		if(interrupt_info->res != NULL)
			bus_release_resource(device, SYS_RES_IRQ,
			    rman_get_rid(interrupt_info->res),
			    interrupt_info->res);

		/* NOTE(review): pci_release_msi() is invoked once per
		 * interrupt rather than once per device -- presumably
		 * harmless on repeat calls, but confirm intent. */
		pci_release_msi(device);
	}

	pci_disable_busmaster(device);

	return (0);
}
SCI_STATUS isci_controller_initialize(struct ISCI_CONTROLLER *controller) { SCIC_USER_PARAMETERS_T scic_user_parameters; SCI_CONTROLLER_HANDLE_T scic_controller_handle; char led_name[64]; unsigned long tunable; uint32_t io_shortage; uint32_t fail_on_timeout; int i; scic_controller_handle = scif_controller_get_scic_handle(controller->scif_controller_handle); if (controller->isci->oem_parameters_found == TRUE) { scic_oem_parameters_set( scic_controller_handle, &controller->oem_parameters, (uint8_t)(controller->oem_parameters_version)); } scic_user_parameters_get(scic_controller_handle, &scic_user_parameters); if (TUNABLE_ULONG_FETCH("hw.isci.no_outbound_task_timeout", &tunable)) scic_user_parameters.sds1.no_outbound_task_timeout = (uint8_t)tunable; if (TUNABLE_ULONG_FETCH("hw.isci.ssp_max_occupancy_timeout", &tunable)) scic_user_parameters.sds1.ssp_max_occupancy_timeout = (uint16_t)tunable; if (TUNABLE_ULONG_FETCH("hw.isci.stp_max_occupancy_timeout", &tunable)) scic_user_parameters.sds1.stp_max_occupancy_timeout = (uint16_t)tunable; if (TUNABLE_ULONG_FETCH("hw.isci.ssp_inactivity_timeout", &tunable)) scic_user_parameters.sds1.ssp_inactivity_timeout = (uint16_t)tunable; if (TUNABLE_ULONG_FETCH("hw.isci.stp_inactivity_timeout", &tunable)) scic_user_parameters.sds1.stp_inactivity_timeout = (uint16_t)tunable; if (TUNABLE_ULONG_FETCH("hw.isci.max_speed_generation", &tunable)) for (i = 0; i < SCI_MAX_PHYS; i++) scic_user_parameters.sds1.phys[i].max_speed_generation = (uint8_t)tunable; scic_user_parameters_set(scic_controller_handle, &scic_user_parameters); /* Scheduler bug in SCU requires SCIL to reserve some task contexts as a * a workaround - one per domain. 
*/ controller->queue_depth = SCI_MAX_IO_REQUESTS - SCI_MAX_DOMAINS; if (TUNABLE_INT_FETCH("hw.isci.controller_queue_depth", &controller->queue_depth)) { controller->queue_depth = max(1, min(controller->queue_depth, SCI_MAX_IO_REQUESTS - SCI_MAX_DOMAINS)); } /* Reserve one request so that we can ensure we have one available TC * to do internal device resets. */ controller->sim_queue_depth = controller->queue_depth - 1; /* Although we save one TC to do internal device resets, it is possible * we could end up using several TCs for simultaneous device resets * while at the same time having CAM fill our controller queue. To * simulate this condition, and how our driver handles it, we can set * this io_shortage parameter, which will tell CAM that we have a * large queue depth than we really do. */ io_shortage = 0; TUNABLE_INT_FETCH("hw.isci.io_shortage", &io_shortage); controller->sim_queue_depth += io_shortage; fail_on_timeout = 1; TUNABLE_INT_FETCH("hw.isci.fail_on_task_timeout", &fail_on_timeout); controller->fail_on_task_timeout = fail_on_timeout; /* Attach to CAM using xpt_bus_register now, then immediately freeze * the simq. It will get released later when initial domain discovery * is complete. 
*/ controller->has_been_scanned = FALSE; mtx_lock(&controller->lock); isci_controller_attach_to_cam(controller); xpt_freeze_simq(controller->sim, 1); mtx_unlock(&controller->lock); for (i = 0; i < SCI_MAX_PHYS; i++) { controller->phys[i].handle = scic_controller_handle; controller->phys[i].index = i; /* fault */ controller->phys[i].led_fault = 0; sprintf(led_name, "isci.bus%d.port%d.fault", controller->index, i); controller->phys[i].cdev_fault = led_create(isci_led_fault_func, &controller->phys[i], led_name); /* locate */ controller->phys[i].led_locate = 0; sprintf(led_name, "isci.bus%d.port%d.locate", controller->index, i); controller->phys[i].cdev_locate = led_create(isci_led_locate_func, &controller->phys[i], led_name); } return (scif_controller_initialize(controller->scif_controller_handle)); }