/* Clear the SATA hang-detection timeout when leaving the await-suspend state. */
static void sci_remote_node_context_await_suspend_state_exit(
	struct sci_base_state_machine *sm)
{
	struct sci_remote_node_context *rnc
		= container_of(sm, typeof(*rnc), sm);
	struct isci_remote_device *idev = rnc_to_dev(rnc);

	if (dev_is_sata(idev->domain_dev))
		isci_dev_set_hang_detection_timeout(idev, 0);
}
static void sci_remote_node_context_construct_buffer(struct sci_remote_node_context *sci_rnc)
{
	struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
	struct domain_device *dev = idev->domain_dev;
	int rni = sci_rnc->remote_node_index;
	union scu_remote_node_context *rnc;
	struct isci_host *ihost;
	__le64 sas_addr;

	ihost = idev->owning_port->owning_controller;
	rnc = sci_rnc_by_id(ihost, rni);

	memset(rnc, 0, sizeof(union scu_remote_node_context)
		* sci_remote_device_node_count(idev));

	rnc->ssp.remote_node_index = rni;
	rnc->ssp.remote_node_port_width = idev->device_port_width;
	rnc->ssp.logical_port_index = idev->owning_port->physical_port_index;

	/* sas address is __be64, context ram format is __le64 */
	sas_addr = cpu_to_le64(SAS_ADDR(dev->sas_addr));
	rnc->ssp.remote_sas_address_hi = upper_32_bits(sas_addr);
	rnc->ssp.remote_sas_address_lo = lower_32_bits(sas_addr);

	rnc->ssp.nexus_loss_timer_enable = true;
	rnc->ssp.check_bit = false;
	rnc->ssp.is_valid = false;
	rnc->ssp.is_remote_node_context = true;
	rnc->ssp.function_number = 0;

	rnc->ssp.arbitration_wait_time = 0;

	if (dev_is_sata(dev)) {
		rnc->ssp.connection_occupancy_timeout =
			ihost->user_parameters.stp_max_occupancy_timeout;
		rnc->ssp.connection_inactivity_timeout =
			ihost->user_parameters.stp_inactivity_timeout;
	} else {
		rnc->ssp.connection_occupancy_timeout =
			ihost->user_parameters.ssp_max_occupancy_timeout;
		rnc->ssp.connection_inactivity_timeout =
			ihost->user_parameters.ssp_inactivity_timeout;
	}

	rnc->ssp.initial_arbitration_wait_time = 0;

	/* Open Address Frame Parameters */
	rnc->ssp.oaf_connection_rate = idev->connection_rate;
	rnc->ssp.oaf_features = 0;
	rnc->ssp.oaf_source_zone_group = 0;
	rnc->ssp.oaf_more_compatibility_features = 0;
}
int pm8001_slave_alloc(struct scsi_device *scsi_dev)
{
	struct domain_device *dev = sdev_to_domain_dev(scsi_dev);

	if (dev_is_sata(dev)) {
		/* We don't need to rescan targets
		 * if the REPORT_LUNS request fails.
		 */
		if (scsi_dev->lun > 0)
			return -ENXIO;
		scsi_dev->tagged_supported = 1;
	}
	return sas_slave_alloc(scsi_dev);
}
static int isci_reset_device(struct isci_host *ihost,
			     struct domain_device *dev,
			     struct isci_remote_device *idev)
{
	int rc;
	unsigned long flags;
	enum sci_status status;
	struct sas_phy *phy = sas_get_local_phy(dev);
	struct isci_port *iport = dev->port->lldd_port;

	dev_dbg(&ihost->pdev->dev, "%s: idev %p\n", __func__, idev);

	spin_lock_irqsave(&ihost->scic_lock, flags);
	status = sci_remote_device_reset(idev);
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	if (status != SCI_SUCCESS) {
		dev_dbg(&ihost->pdev->dev,
			"%s: sci_remote_device_reset(%p) returned %d!\n",
			__func__, idev, status);
		rc = TMF_RESP_FUNC_FAILED;
		goto out;
	}

	if (scsi_is_sas_phy_local(phy)) {
		struct isci_phy *iphy = &ihost->phys[phy->number];

		rc = isci_port_perform_hard_reset(ihost, iport, iphy);
	} else
		rc = sas_phy_reset(phy, !dev_is_sata(dev));

	/* Terminate in-progress I/O now. */
	isci_remote_device_nuke_requests(ihost, idev);

	/* Since all pending TCs have been cleaned, resume the RNC. */
	spin_lock_irqsave(&ihost->scic_lock, flags);
	status = sci_remote_device_reset_complete(idev);
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	if (status != SCI_SUCCESS) {
		dev_dbg(&ihost->pdev->dev,
			"%s: sci_remote_device_reset_complete(%p) "
			"returned %d!\n",
			__func__, idev, status);
	}

	dev_dbg(&ihost->pdev->dev, "%s: idev %p complete.\n", __func__, idev);
 out:
	sas_put_local_phy(phy);
	return rc;
}
int pm8001_slave_configure(struct scsi_device *sdev)
{
	struct domain_device *dev = sdev_to_domain_dev(sdev);
	int ret = sas_slave_configure(sdev);

	if (ret)
		return ret;
	if (dev_is_sata(dev)) {
#ifdef PM8001_DISABLE_NCQ
		struct ata_port *ap = dev->sata_dev.ap;
		struct ata_device *adev = ap->link.device;

		adev->flags |= ATA_DFLAG_NCQ_OFF;
		scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, 1);
#endif
	}
	return 0;
}
int isci_task_lu_reset(struct domain_device *dev, u8 *lun)
{
	struct isci_host *ihost = dev_to_ihost(dev);
	struct isci_remote_device *idev;
	unsigned long flags;
	int ret = TMF_RESP_FUNC_COMPLETE;

	spin_lock_irqsave(&ihost->scic_lock, flags);
	idev = isci_get_device(dev->lldd_dev);
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	dev_dbg(&ihost->pdev->dev,
		"%s: domain_device=%p, isci_host=%p; isci_device=%p\n",
		__func__, dev, ihost, idev);

	if (!idev) {
		/* If the device is gone, escalate to I_T_Nexus_Reset. */
		dev_dbg(&ihost->pdev->dev, "%s: No dev\n", __func__);

		ret = TMF_RESP_FUNC_FAILED;
		goto out;
	}

	/* Suspend the RNC, kill all TCs */
	if (isci_remote_device_suspend_terminate(ihost, idev, NULL)
	    != SCI_SUCCESS) {
		/* The suspend/terminate only fails if isci_get_device fails */
		ret = TMF_RESP_FUNC_FAILED;
		goto out;
	}
	/* All pending I/Os have been terminated and cleaned up. */
	if (!test_bit(IDEV_GONE, &idev->flags)) {
		if (dev_is_sata(dev))
			sas_ata_schedule_reset(dev);
		else
			/* Send the task management part of the reset. */
			ret = isci_task_send_lu_reset_sas(ihost, idev, lun);
	}
 out:
	isci_put_device(idev);
	return ret;
}
static void sci_remote_node_context_resuming_state_enter(struct sci_base_state_machine *sm)
{
	struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
	struct isci_remote_device *idev;
	struct domain_device *dev;

	idev = rnc_to_dev(rnc);
	dev = idev->domain_dev;

	/*
	 * For direct attached SATA devices we need to clear the TLCR
	 * NCQ to TCi tag mapping on the phy and in cases where we
	 * resume because of a target reset we also need to update
	 * the STPTLDARNI register with the RNi of the device
	 */
	if (dev_is_sata(dev) && !dev->parent)
		sci_port_setup_transports(idev->owning_port,
					  rnc->remote_node_index);

	sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_RESUME);
}
/* Mark the remote node context buffer valid and post it to the hardware:
 * expander-attached SATA devices use the RNC_96 post, all other devices the
 * RNC_32 post; direct-attached devices also get their port transports set up.
 */
static void sci_remote_node_context_validate_context_buffer(struct sci_remote_node_context *sci_rnc)
{
	union scu_remote_node_context *rnc_buffer;
	struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
	struct domain_device *dev = idev->domain_dev;
	struct isci_host *ihost = idev->owning_port->owning_controller;

	rnc_buffer = sci_rnc_by_id(ihost, sci_rnc->remote_node_index);

	rnc_buffer->ssp.is_valid = true;

	if (dev_is_sata(dev) && dev->parent) {
		sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_96);
	} else {
		sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_32);

		if (!dev->parent)
			sci_port_setup_transports(idev->owning_port,
						  sci_rnc->remote_node_index);
	}
}
static int isci_reset_device(struct isci_host *ihost,
			     struct domain_device *dev,
			     struct isci_remote_device *idev)
{
	int rc = TMF_RESP_FUNC_COMPLETE, reset_stat = -1;
	struct sas_phy *phy = sas_get_local_phy(dev);
	struct isci_port *iport = dev->port->lldd_port;

	dev_dbg(&ihost->pdev->dev, "%s: idev %p\n", __func__, idev);

	/* Suspend the RNC, terminate all outstanding TCs. */
	if (isci_remote_device_suspend_terminate(ihost, idev, NULL)
	    != SCI_SUCCESS) {
		rc = TMF_RESP_FUNC_FAILED;
		goto out;
	}
	/* Note that since the termination for outstanding requests succeeded,
	 * this function will return success.  This is because the resets will
	 * only fail if the device has been removed (i.e. hotplug), and the
	 * primary duty of this function is to clean up tasks, so that is the
	 * relevant status.
	 */
	if (!test_bit(IDEV_GONE, &idev->flags)) {
		if (scsi_is_sas_phy_local(phy)) {
			struct isci_phy *iphy = &ihost->phys[phy->number];

			reset_stat = isci_port_perform_hard_reset(ihost, iport,
								  iphy);
		} else
			reset_stat = sas_phy_reset(phy, !dev_is_sata(dev));
	}
	/* Explicitly resume the RNC here, since there was no task sent. */
	isci_remote_device_resume_from_abort(ihost, idev);

	dev_dbg(&ihost->pdev->dev, "%s: idev %p complete, reset_stat=%d.\n",
		__func__, idev, reset_stat);
 out:
	sas_put_local_phy(phy);
	return rc;
}
int isci_task_lu_reset(struct domain_device *dev, u8 *lun)
{
	struct isci_host *isci_host = dev_to_ihost(dev);
	struct isci_remote_device *isci_device;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&isci_host->scic_lock, flags);
	isci_device = isci_lookup_device(dev);
	spin_unlock_irqrestore(&isci_host->scic_lock, flags);

	dev_dbg(&isci_host->pdev->dev,
		"%s: domain_device=%p, isci_host=%p; isci_device=%p\n",
		__func__, dev, isci_host, isci_device);

	if (!isci_device) {
		/* If the device is gone, stop the escalations. */
		dev_dbg(&isci_host->pdev->dev, "%s: No dev\n", __func__);

		ret = TMF_RESP_FUNC_COMPLETE;
		goto out;
	}

	/* Send the task management part of the reset. */
	if (dev_is_sata(dev)) {
		sas_ata_schedule_reset(dev);
		ret = TMF_RESP_FUNC_COMPLETE;
	} else
		ret = isci_task_send_lu_reset_sas(isci_host, isci_device, lun);

	/* If the LUN reset worked, all the I/O can now be terminated. */
	if (ret == TMF_RESP_FUNC_COMPLETE)
		/* Terminate all I/O now. */
		isci_terminate_pending_requests(isci_host, isci_device);

 out:
	isci_put_device(isci_device);
	return ret;
}
/**
 * isci_task_abort_task() - This function is one of the SAS Domain Template
 *    functions. This function is called by libsas to abort a specified task.
 * @task: This parameter specifies the SAS task to abort.
 *
 * status, zero indicates success.
 *
 * (A sketch of how this handler is registered with libsas follows the
 * function body.)
 */
int isci_task_abort_task(struct sas_task *task)
{
	struct isci_host *ihost = dev_to_ihost(task->dev);
	DECLARE_COMPLETION_ONSTACK(aborted_io_completion);
	struct isci_request *old_request = NULL;
	struct isci_remote_device *idev = NULL;
	struct isci_tmf tmf;
	int ret = TMF_RESP_FUNC_FAILED;
	unsigned long flags;
	int target_done_already = 0;

	/* Get the isci_request reference from the task.  Note that
	 * this check does not depend on the pending request list
	 * in the device, because tasks driving resets may land here
	 * after completion in the core.
	 */
	spin_lock_irqsave(&ihost->scic_lock, flags);
	spin_lock(&task->task_state_lock);

	old_request = task->lldd_task;

	/* If task is already done, the request isn't valid */
	if (!(task->task_state_flags & SAS_TASK_STATE_DONE) &&
	    (task->task_state_flags & SAS_TASK_AT_INITIATOR) &&
	    old_request) {
		idev = isci_get_device(task->dev->lldd_dev);
		target_done_already = test_bit(IREQ_COMPLETE_IN_TARGET,
					       &old_request->flags);
	}
	spin_unlock(&task->task_state_lock);
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	dev_warn(&ihost->pdev->dev,
		 "%s: dev = %p (%s%s), task = %p, old_request == %p\n",
		 __func__, idev,
		 (dev_is_sata(task->dev) ? "STP/SATA"
					 : ((dev_is_expander(task->dev))
						? "SMP" : "SSP")),
		 ((idev) ? ((test_bit(IDEV_GONE, &idev->flags))
			    ? " IDEV_GONE" : "")
			 : " <NULL>"),
		 task, old_request);

	/* Device reset conditions signalled in task_state_flags are the
	 * responsibility of libsas to observe at the start of the error
	 * handler thread.
	 */
	if (!idev || !old_request) {
		/* The request has already completed and there
		 * is nothing to do here other than to set the task
		 * done bit, and indicate that the task abort function
		 * was successful.
		 */
		spin_lock_irqsave(&task->task_state_lock, flags);
		task->task_state_flags |= SAS_TASK_STATE_DONE;
		task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
					    SAS_TASK_STATE_PENDING);
		spin_unlock_irqrestore(&task->task_state_lock, flags);

		ret = TMF_RESP_FUNC_COMPLETE;

		dev_warn(&ihost->pdev->dev,
			 "%s: abort task not needed for %p\n",
			 __func__, task);
		goto out;
	}
	/* Suspend the RNC, kill the TC */
	if (isci_remote_device_suspend_terminate(ihost, idev, old_request)
	    != SCI_SUCCESS) {
		dev_warn(&ihost->pdev->dev,
			 "%s: isci_remote_device_reset_terminate(dev=%p, "
			 "req=%p, task=%p) failed\n",
			 __func__, idev, old_request, task);
		ret = TMF_RESP_FUNC_FAILED;
		goto out;
	}
	spin_lock_irqsave(&ihost->scic_lock, flags);

	if (task->task_proto == SAS_PROTOCOL_SMP ||
	    sas_protocol_ata(task->task_proto) ||
	    target_done_already ||
	    test_bit(IDEV_GONE, &idev->flags)) {

		spin_unlock_irqrestore(&ihost->scic_lock, flags);

		/* No task to send, so explicitly resume the device here */
		isci_remote_device_resume_from_abort(ihost, idev);

		dev_warn(&ihost->pdev->dev,
			 "%s: %s request"
			 " or complete_in_target (%d), "
			 "or IDEV_GONE (%d), thus no TMF\n",
			 __func__,
			 ((task->task_proto == SAS_PROTOCOL_SMP)
			  ? "SMP"
			  : (sas_protocol_ata(task->task_proto)
				? "SATA/STP" : "<other>")),
			 test_bit(IREQ_COMPLETE_IN_TARGET,
				  &old_request->flags),
			 test_bit(IDEV_GONE, &idev->flags));

		spin_lock_irqsave(&task->task_state_lock, flags);
		task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
					    SAS_TASK_STATE_PENDING);
		task->task_state_flags |= SAS_TASK_STATE_DONE;
		spin_unlock_irqrestore(&task->task_state_lock, flags);

		ret = TMF_RESP_FUNC_COMPLETE;
	} else {
		/* Fill in the tmf structure */
		isci_task_build_abort_task_tmf(&tmf, isci_tmf_ssp_task_abort,
					       old_request);

		spin_unlock_irqrestore(&ihost->scic_lock, flags);

		/* Send the task management request. */
		#define ISCI_ABORT_TASK_TIMEOUT_MS 500 /* 1/2 second timeout */
		ret = isci_task_execute_tmf(ihost, idev, &tmf,
					    ISCI_ABORT_TASK_TIMEOUT_MS);
	}
 out:
	dev_warn(&ihost->pdev->dev,
		 "%s: Done; dev = %p, task = %p , old_request == %p\n",
		 __func__, idev, task, old_request);
	isci_put_device(idev);
	return ret;
}
enum sci_status sci_remote_node_context_resume(struct sci_remote_node_context *sci_rnc,
					       scics_sds_remote_node_context_callback cb_fn,
					       void *cb_p)
{
	enum scis_sds_remote_node_context_states state;
	struct isci_remote_device *idev = rnc_to_dev(sci_rnc);

	state = sci_rnc->sm.current_state_id;
	dev_dbg(scirdev_to_dev(idev),
		"%s: state %s, cb_fn = %p, cb_p = %p; dest_state = %d; "
		"dev resume path %s\n",
		__func__, rnc_state_name(state), cb_fn, cb_p,
		sci_rnc->destination_state,
		test_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags)
			? "<abort active>" : "<normal>");

	switch (state) {
	case SCI_RNC_INITIAL:
		if (sci_rnc->remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
			return SCI_FAILURE_INVALID_STATE;

		sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p,
							RNC_DEST_READY);
		if (!test_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags)) {
			sci_remote_node_context_construct_buffer(sci_rnc);
			sci_change_state(&sci_rnc->sm, SCI_RNC_POSTING);
		}
		return SCI_SUCCESS;

	case SCI_RNC_POSTING:
	case SCI_RNC_INVALIDATING:
	case SCI_RNC_RESUMING:
		/* We are still waiting to post when a resume was
		 * requested.
		 */
		switch (sci_rnc->destination_state) {
		case RNC_DEST_SUSPENDED:
		case RNC_DEST_SUSPENDED_RESUME:
			/* Previously waiting to suspend after posting.
			 * Now continue onto resumption.
			 */
			sci_remote_node_context_setup_to_resume(
				sci_rnc, cb_fn, cb_p,
				RNC_DEST_SUSPENDED_RESUME);
			break;
		default:
			sci_remote_node_context_setup_to_resume(
				sci_rnc, cb_fn, cb_p,
				RNC_DEST_READY);
			break;
		}
		return SCI_SUCCESS;

	case SCI_RNC_TX_SUSPENDED:
	case SCI_RNC_TX_RX_SUSPENDED: {
		struct domain_device *dev = idev->domain_dev;

		/* If this is an expander attached SATA device we must
		 * invalidate and repost the RNC since this is the only
		 * way to clear the TCi to NCQ tag mapping table for
		 * the RNi.  All other device types we can just resume.
		 */
		sci_remote_node_context_setup_to_resume(
			sci_rnc, cb_fn, cb_p, RNC_DEST_READY);

		if (!test_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags)) {
			if ((dev_is_sata(dev) && dev->parent) ||
			    (sci_rnc->destination_state == RNC_DEST_FINAL))
				sci_change_state(&sci_rnc->sm,
						 SCI_RNC_INVALIDATING);
			else
				sci_change_state(&sci_rnc->sm,
						 SCI_RNC_RESUMING);
		}
	}
		return SCI_SUCCESS;

	case SCI_RNC_AWAIT_SUSPENSION:
		sci_remote_node_context_setup_to_resume(
			sci_rnc, cb_fn, cb_p, RNC_DEST_SUSPENDED_RESUME);
		return SCI_SUCCESS;

	default:
		dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
			 "%s: invalid state %s\n",
			 __func__, rnc_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}
}