/**
 * isci_terminate_request_core() - This function will terminate the given
 *    request, and wait for it to complete.  This function must only be called
 *    from a thread that can wait.  Note that the request is terminated and
 *    completed (back to the host, if started there).
 * @ihost: This SCU.
 * @idev: The target.
 * @isci_request: The I/O request to be terminated.
 */
static void isci_terminate_request_core(struct isci_host *ihost,
					struct isci_remote_device *idev,
					struct isci_request *isci_request)
{
	enum sci_status status = SCI_SUCCESS;
	bool was_terminated = false;
	bool needs_cleanup_handling = false;
	unsigned long flags;
	/* Non-zero means the termination wait below either succeeded or
	 * was never needed; zero means the wait timed out.
	 */
	unsigned long termination_completed = 1;
	struct completion *io_request_completion;

	dev_dbg(&ihost->pdev->dev, "%s: device = %p; request = %p\n",
		__func__, idev, isci_request);

	spin_lock_irqsave(&ihost->scic_lock, flags);

	/* Snapshot the completion pointer under the lock: isci_request may
	 * be marked zombie below and become unsafe to dereference, but the
	 * on-stack completion it pointed to is still valid to wait on.
	 */
	io_request_completion = isci_request->io_request_completion;

	/* Note that we are not going to control
	 * the target to abort the request.
	 */
	set_bit(IREQ_COMPLETE_IN_TARGET, &isci_request->flags);

	/* Make sure the request wasn't just sitting around signalling
	 * device condition (if the request handle is NULL, then the
	 * request completed but needed additional handling here).
	 */
	if (!test_bit(IREQ_TERMINATED, &isci_request->flags)) {
		was_terminated = true;
		needs_cleanup_handling = true;
		status = sci_controller_terminate_request(ihost, idev,
							  isci_request);
	}
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	/*
	 * The only time the request to terminate will
	 * fail is when the io request is completed and
	 * being aborted.
	 */
	if (status != SCI_SUCCESS) {
		dev_dbg(&ihost->pdev->dev,
			"%s: sci_controller_terminate_request"
			" returned = 0x%x\n",
			__func__, status);

		isci_request->io_request_completion = NULL;

	} else {
		if (was_terminated) {
			dev_dbg(&ihost->pdev->dev,
				"%s: before completion wait (%p/%p)\n",
				__func__, isci_request,
				io_request_completion);

			/* Wait here for the request to complete. */
			termination_completed
				= wait_for_completion_timeout(
				   io_request_completion,
				   msecs_to_jiffies(ISCI_TERMINATION_TIMEOUT_MSEC));

			if (!termination_completed) {

				/* The request to terminate has timed out. */
				spin_lock_irqsave(&ihost->scic_lock, flags);

				/* Check for state changes: the request may
				 * have terminated between the timeout and
				 * taking the lock.
				 */
				if (!test_bit(IREQ_TERMINATED,
					      &isci_request->flags)) {

					/* The best we can do is to have the
					 * request die a silent death if it
					 * ever really completes.
					 */
					isci_request_mark_zombie(ihost,
								 isci_request);
					needs_cleanup_handling = true;
				} else
					termination_completed = 1;

				spin_unlock_irqrestore(&ihost->scic_lock,
						       flags);

				if (!termination_completed) {

					dev_dbg(&ihost->pdev->dev,
						"%s: *** Timeout waiting for "
						"termination(%p/%p)\n",
						__func__,
						io_request_completion,
						isci_request);

					/* The request can no longer be
					 * referenced safely since it may go
					 * away if the termination ever really
					 * does complete.
					 */
					isci_request = NULL;
				}
			}
			if (termination_completed)
				dev_dbg(&ihost->pdev->dev,
					"%s: after completion wait (%p/%p)\n",
					__func__, isci_request,
					io_request_completion);
		}

		if (termination_completed) {

			/* Safe: isci_request is only NULLed when
			 * termination_completed is zero.
			 */
			isci_request->io_request_completion = NULL;

			/* Peek at the status of the request.  This will tell
			 * us if there was special handling on the request such that it
			 * needs to be detached and freed here.
			 */
			spin_lock_irqsave(&isci_request->state_lock, flags);

			needs_cleanup_handling
				= isci_request_is_dealloc_managed(
					isci_request->status);

			spin_unlock_irqrestore(&isci_request->state_lock,
					       flags);

		}
		if (needs_cleanup_handling) {

			dev_dbg(&ihost->pdev->dev,
				"%s: cleanup isci_device=%p, request=%p\n",
				__func__, idev, isci_request);

			/* isci_request may have been NULLed above on the
			 * unresolved-timeout (zombie) path.
			 */
			if (isci_request != NULL) {
				spin_lock_irqsave(&ihost->scic_lock, flags);
				isci_free_tag(ihost, isci_request->io_tag);
				isci_request_change_state(isci_request, unallocated);
				list_del_init(&isci_request->dev_node);
				spin_unlock_irqrestore(&ihost->scic_lock, flags);
			}
		}
	}
}
/**
 * isci_terminate_request_core() - Terminate @isci_request and wait for the
 *    termination to be signalled back.  May sleep; must be called from a
 *    context that can wait.
 * @ihost: The SCU host owning the request.
 * @idev: The remote device the request targets.
 * @isci_request: The I/O request to terminate.
 */
static void isci_terminate_request_core(struct isci_host *ihost,
					struct isci_remote_device *idev,
					struct isci_request *isci_request)
{
	enum sci_status status = SCI_SUCCESS;
	bool was_terminated = false;
	bool needs_cleanup_handling = false;
	unsigned long flags;
	/* Stays non-zero unless the termination wait times out unresolved. */
	unsigned long termination_completed = 1;
	struct completion *io_request_completion;

	dev_dbg(&ihost->pdev->dev, "%s: device = %p; request = %p\n",
		__func__, idev, isci_request);

	spin_lock_irqsave(&ihost->scic_lock, flags);

	/* Capture the completion while the request pointer is still known
	 * good; isci_request may be zombified below.
	 */
	io_request_completion = isci_request->io_request_completion;

	/* Flag that the target will not be asked to abort this request. */
	set_bit(IREQ_COMPLETE_IN_TARGET, &isci_request->flags);

	/* Only issue the terminate if the request has not already been
	 * terminated (it may have completed but still need handling here).
	 */
	if (!test_bit(IREQ_TERMINATED, &isci_request->flags)) {
		was_terminated = true;
		needs_cleanup_handling = true;
		status = sci_controller_terminate_request(ihost, idev,
							  isci_request);
	}
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	/* Termination only fails when the I/O request is already completed
	 * and being aborted.
	 */
	if (status != SCI_SUCCESS) {
		dev_dbg(&ihost->pdev->dev,
			"%s: sci_controller_terminate_request"
			" returned = 0x%x\n",
			__func__, status);

		isci_request->io_request_completion = NULL;

	} else {
		if (was_terminated) {
			dev_dbg(&ihost->pdev->dev,
				"%s: before completion wait (%p/%p)\n",
				__func__, isci_request,
				io_request_completion);

			/* Block until the termination is signalled or the
			 * timeout elapses.
			 */
			termination_completed
				= wait_for_completion_timeout(
				   io_request_completion,
				   msecs_to_jiffies(ISCI_TERMINATION_TIMEOUT_MSEC));

			if (!termination_completed) {

				/* Timed out; recheck state under the lock. */
				spin_lock_irqsave(&ihost->scic_lock, flags);

				if (!test_bit(IREQ_TERMINATED,
					      &isci_request->flags)) {

					/* Still outstanding: let the request
					 * die silently if it ever completes.
					 */
					isci_request_mark_zombie(ihost,
								 isci_request);
					needs_cleanup_handling = true;
				} else
					/* Raced to completion after the
					 * timeout fired - treat as done.
					 */
					termination_completed = 1;

				spin_unlock_irqrestore(&ihost->scic_lock,
						       flags);

				if (!termination_completed) {

					dev_dbg(&ihost->pdev->dev,
						"%s: *** Timeout waiting for "
						"termination(%p/%p)\n",
						__func__,
						io_request_completion,
						isci_request);

					/* Drop the pointer: the zombie
					 * request may vanish if termination
					 * eventually completes.
					 */
					isci_request = NULL;
				}
			}
			if (termination_completed)
				dev_dbg(&ihost->pdev->dev,
					"%s: after completion wait (%p/%p)\n",
					__func__, isci_request,
					io_request_completion);
		}

		if (termination_completed) {

			/* isci_request is non-NULL on this path by
			 * construction.
			 */
			isci_request->io_request_completion = NULL;

			/* Check whether the request's status requires it to
			 * be detached and freed here.
			 */
			spin_lock_irqsave(&isci_request->state_lock, flags);

			needs_cleanup_handling
				= isci_request_is_dealloc_managed(
					isci_request->status);

			spin_unlock_irqrestore(&isci_request->state_lock,
					       flags);

		}
		if (needs_cleanup_handling) {

			dev_dbg(&ihost->pdev->dev,
				"%s: cleanup isci_device=%p, request=%p\n",
				__func__, idev, isci_request);

			/* NULL here means the zombie path above ran. */
			if (isci_request != NULL) {
				spin_lock_irqsave(&ihost->scic_lock, flags);
				isci_free_tag(ihost, isci_request->io_tag);
				isci_request_change_state(isci_request, unallocated);
				list_del_init(&isci_request->dev_node);
				spin_unlock_irqrestore(&ihost->scic_lock, flags);
			}
		}
	}
}
/**
 * isci_task_execute_tmf() - Build, start, and wait for a task management
 *    function (TMF) request.  Sleeps; must be called from a context that can
 *    wait.
 * @ihost: The SCU host to issue the TMF on.
 * @idev: The remote device targeted by the TMF.
 * @tmf: The TMF descriptor; @tmf->complete and @tmf->status are written here.
 * @timeout_ms: How long to wait for the TMF to complete, in milliseconds.
 *
 * Return: TMF_RESP_FUNC_COMPLETE on success (or a valid IO response),
 *    otherwise TMF_RESP_FUNC_FAILED.
 */
static int isci_task_execute_tmf(struct isci_host *ihost,
				 struct isci_remote_device *idev,
				 struct isci_tmf *tmf, unsigned long timeout_ms)
{
	DECLARE_COMPLETION_ONSTACK(completion);
	enum sci_task_status status = SCI_TASK_FAILURE;
	struct isci_request *ireq;
	int ret = TMF_RESP_FUNC_FAILED;
	unsigned long flags;
	unsigned long timeleft;
	u16 tag;

	spin_lock_irqsave(&ihost->scic_lock, flags);
	tag = isci_alloc_tag(ihost);
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	if (tag == SCI_CONTROLLER_INVALID_IO_TAG)
		return ret;

	/* sanity check, return TMF_RESP_FUNC_FAILED
	 * if the device is not there and ready.
	 */
	if (!idev ||
	    (!test_bit(IDEV_IO_READY, &idev->flags) &&
	     !test_bit(IDEV_IO_NCQERROR, &idev->flags))) {
		dev_dbg(&ihost->pdev->dev,
			"%s: idev = %p not ready (%#lx)\n",
			__func__,
			idev, idev ? idev->flags : 0);
		goto err_tci;
	} else
		dev_dbg(&ihost->pdev->dev,
			"%s: idev = %p\n",
			__func__, idev);

	/* Assign the pointer to the TMF's completion kernel wait structure. */
	tmf->complete = &completion;
	tmf->status = SCI_FAILURE_TIMEOUT;

	ireq = isci_task_request_build(ihost, idev, tag, tmf);
	if (!ireq)
		goto err_tci;

	spin_lock_irqsave(&ihost->scic_lock, flags);

	/* start the TMF io. */
	status = sci_controller_start_task(ihost, idev, ireq);

	if (status != SCI_TASK_SUCCESS) {
		dev_dbg(&ihost->pdev->dev,
			"%s: start_io failed - status = 0x%x, request = %p\n",
			__func__,
			status,
			ireq);
		spin_unlock_irqrestore(&ihost->scic_lock, flags);
		goto err_tci;
	}

	/* Notify the caller's state callback, still under scic_lock. */
	if (tmf->cb_state_func != NULL)
		tmf->cb_state_func(isci_tmf_started, tmf, tmf->cb_data);

	isci_request_change_state(ireq, started);

	/* add the request to the remote device request list. */
	list_add(&ireq->dev_node, &idev->reqs_in_process);

	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	/* Wait for the TMF to complete, or a timeout. */
	timeleft = wait_for_completion_timeout(&completion,
					       msecs_to_jiffies(timeout_ms));

	if (timeleft == 0) {
		/* The TMF did not complete - this could be because
		 * of an unplug.  Terminate the TMF request now.
		 */
		spin_lock_irqsave(&ihost->scic_lock, flags);

		if (tmf->cb_state_func != NULL)
			tmf->cb_state_func(isci_tmf_timed_out, tmf,
					   tmf->cb_data);

		sci_controller_terminate_request(ihost, idev, ireq);

		spin_unlock_irqrestore(&ihost->scic_lock, flags);

		/* Second, shorter wait for the termination itself. */
		timeleft = wait_for_completion_timeout(
			&completion,
			msecs_to_jiffies(ISCI_TERMINATION_TIMEOUT_MSEC));

		if (!timeleft) {
			/* Strange condition - the termination of the TMF
			 * request timed-out.
			 */
			spin_lock_irqsave(&ihost->scic_lock, flags);

			/* If the TMF status has not changed, kill it. */
			if (tmf->status == SCI_FAILURE_TIMEOUT)
				isci_request_mark_zombie(ihost, ireq);

			spin_unlock_irqrestore(&ihost->scic_lock, flags);
		}
	}

	isci_print_tmf(ihost, tmf);

	if (tmf->status == SCI_SUCCESS)
		ret = TMF_RESP_FUNC_COMPLETE;
	else if (tmf->status == SCI_FAILURE_IO_RESPONSE_VALID) {
		dev_dbg(&ihost->pdev->dev,
			"%s: tmf.status == "
			"SCI_FAILURE_IO_RESPONSE_VALID\n",
			__func__);
		ret = TMF_RESP_FUNC_COMPLETE;
	}
	/* Else - leave the default "failed" status alone. */

	dev_dbg(&ihost->pdev->dev,
		"%s: completed request = %p\n",
		__func__,
		ireq);

	return ret;

err_tci:
	/* Release the tag allocated at entry. */
	spin_lock_irqsave(&ihost->scic_lock, flags);
	isci_tci_free(ihost, ISCI_TAG_TCI(tag));
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	return ret;
}
/**
 * isci_task_execute_tmf() - Issue a task management function and wait for
 *    its completion (or time out and attempt termination).  May sleep.
 * @ihost: SCU host instance.
 * @idev: Target remote device; checked for readiness before issuing.
 * @tmf: TMF descriptor; its @complete and @status fields are set here.
 * @timeout_ms: Milliseconds to wait for the TMF before terminating it.
 *
 * Return: TMF_RESP_FUNC_COMPLETE when @tmf->status ends up SCI_SUCCESS or
 *    SCI_FAILURE_IO_RESPONSE_VALID; TMF_RESP_FUNC_FAILED otherwise.
 */
static int isci_task_execute_tmf(struct isci_host *ihost,
				 struct isci_remote_device *idev,
				 struct isci_tmf *tmf, unsigned long timeout_ms)
{
	DECLARE_COMPLETION_ONSTACK(completion);
	enum sci_task_status status = SCI_TASK_FAILURE;
	struct isci_request *ireq;
	int ret = TMF_RESP_FUNC_FAILED;
	unsigned long flags;
	unsigned long timeleft;
	u16 tag;

	/* Reserve a task context tag up front; bail if none available. */
	spin_lock_irqsave(&ihost->scic_lock, flags);
	tag = isci_alloc_tag(ihost);
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	if (tag == SCI_CONTROLLER_INVALID_IO_TAG)
		return ret;

	/* Fail unless the device exists and is ready for I/O (or is in
	 * NCQ-error recovery).
	 */
	if (!idev ||
	    (!test_bit(IDEV_IO_READY, &idev->flags) &&
	     !test_bit(IDEV_IO_NCQERROR, &idev->flags))) {
		dev_dbg(&ihost->pdev->dev,
			"%s: idev = %p not ready (%#lx)\n",
			__func__,
			idev, idev ? idev->flags : 0);
		goto err_tci;
	} else
		dev_dbg(&ihost->pdev->dev,
			"%s: idev = %p\n",
			__func__, idev);

	/* Hook the on-stack completion into the TMF and preset the status
	 * so an untouched TMF reads as a timeout.
	 */
	tmf->complete = &completion;
	tmf->status = SCI_FAILURE_TIMEOUT;

	ireq = isci_task_request_build(ihost, idev, tag, tmf);
	if (!ireq)
		goto err_tci;

	spin_lock_irqsave(&ihost->scic_lock, flags);

	/* Start the TMF I/O on the controller. */
	status = sci_controller_start_task(ihost, idev, ireq);

	if (status != SCI_TASK_SUCCESS) {
		dev_dbg(&ihost->pdev->dev,
			"%s: start_io failed - status = 0x%x, request = %p\n",
			__func__,
			status,
			ireq);
		spin_unlock_irqrestore(&ihost->scic_lock, flags);
		goto err_tci;
	}

	/* Optional caller notification that the TMF is in flight. */
	if (tmf->cb_state_func != NULL)
		tmf->cb_state_func(isci_tmf_started, tmf, tmf->cb_data);

	isci_request_change_state(ireq, started);

	/* Track the request on the remote device's in-process list. */
	list_add(&ireq->dev_node, &idev->reqs_in_process);

	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	/* Sleep until the TMF completes or the caller's timeout expires. */
	timeleft = wait_for_completion_timeout(&completion,
					       msecs_to_jiffies(timeout_ms));

	if (timeleft == 0) {
		/* No completion within @timeout_ms (possibly an unplug):
		 * terminate the TMF request now.
		 */
		spin_lock_irqsave(&ihost->scic_lock, flags);

		if (tmf->cb_state_func != NULL)
			tmf->cb_state_func(isci_tmf_timed_out, tmf,
					   tmf->cb_data);

		sci_controller_terminate_request(ihost, idev, ireq);

		spin_unlock_irqrestore(&ihost->scic_lock, flags);

		/* Give the termination itself a bounded wait. */
		timeleft = wait_for_completion_timeout(
			&completion,
			msecs_to_jiffies(ISCI_TERMINATION_TIMEOUT_MSEC));

		if (!timeleft) {
			/* Even the termination timed out; if the TMF status
			 * is still the preset timeout value, zombie the
			 * request so a late completion dies silently.
			 */
			spin_lock_irqsave(&ihost->scic_lock,
					  flags);

			if (tmf->status == SCI_FAILURE_TIMEOUT)
				isci_request_mark_zombie(ihost, ireq);

			spin_unlock_irqrestore(&ihost->scic_lock, flags);
		}
	}

	isci_print_tmf(ihost, tmf);

	/* Map the TMF status onto the SAS-layer response code. */
	if (tmf->status == SCI_SUCCESS)
		ret = TMF_RESP_FUNC_COMPLETE;
	else if (tmf->status == SCI_FAILURE_IO_RESPONSE_VALID) {
		dev_dbg(&ihost->pdev->dev,
			"%s: tmf.status == "
			"SCI_FAILURE_IO_RESPONSE_VALID\n",
			__func__);
		ret = TMF_RESP_FUNC_COMPLETE;
	}
	/* Otherwise ret keeps its default "failed" value. */

	dev_dbg(&ihost->pdev->dev,
		"%s: completed request = %p\n",
		__func__,
		ireq);

	return ret;

err_tci:
	/* Return the unused task context tag to the pool. */
	spin_lock_irqsave(&ihost->scic_lock, flags);
	isci_tci_free(ihost, ISCI_TAG_TCI(tag));
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	return ret;
}