/** * isci_request_mark_zombie() - This function must be called with scic_lock held. */ static void isci_request_mark_zombie(struct isci_host *ihost, struct isci_request *ireq) { struct completion *tmf_completion = NULL; struct completion *req_completion; /* Set the request state to "dead". */ ireq->status = dead; req_completion = ireq->io_request_completion; ireq->io_request_completion = NULL; if (test_bit(IREQ_TMF, &ireq->flags)) { /* Break links with the TMF request. */ struct isci_tmf *tmf = isci_request_access_tmf(ireq); /* In the case where a task request is dying, * the thread waiting on the complete will sit and * timeout unless we wake it now. Since the TMF * has a default error status, complete it here * to wake the waiting thread. */ if (tmf) { tmf_completion = tmf->complete; tmf->complete = NULL; } ireq->ttype_ptr.tmf_task_ptr = NULL; dev_dbg(&ihost->pdev->dev, "%s: tmf_code %d, managed tag %#x\n", __func__, tmf->tmf_code, tmf->io_tag); } else { /* Break links with the sas_task - the callback is done * elsewhere. */ struct sas_task *task = isci_request_access_task(ireq); if (task) task->lldd_task = NULL; ireq->ttype_ptr.io_task_ptr = NULL; } dev_warn(&ihost->pdev->dev, "task context unrecoverable (tag: %#x)\n", ireq->io_tag); /* Don't force waiting threads to timeout. */ if (req_completion) complete(req_completion); if (tmf_completion != NULL) complete(tmf_completion); }
/* Terminate every pending request on @idev, waiting for each to complete.
 * Splices the device's reqs_in_process list onto a private list under
 * scic_lock, then terminates each eligible request with the lock dropped.
 * Must be called from a context that can sleep (waits inside
 * isci_terminate_request_core — TODO confirm against that function).
 */
void isci_terminate_pending_requests(struct isci_host *ihost, struct isci_remote_device *idev)
{
	struct completion request_completion;
	enum isci_request_status old_state;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&ihost->scic_lock, flags);
	/* Take ownership of the whole pending list in one shot. */
	list_splice_init(&idev->reqs_in_process, &list);

	while (!list_empty(&list)) {
		struct isci_request *ireq = list_entry(list.next, typeof(*ireq), dev_node);

		/* Atomically (under scic_lock) move an eligible request to
		 * the "terminating" state, recording its prior state.
		 */
		old_state = isci_request_change_started_to_newstate(ireq, &request_completion, terminating);
		switch (old_state) {
		case started:
		case completed:
		case aborting:
			/* Eligible for termination below. */
			break;
		default:
			/* Termination already in progress or otherwise
			 * dispositioned: put the request back on the
			 * device list and skip it.
			 */
			list_move(&ireq->dev_node, &idev->reqs_in_process);
			ireq = NULL;
			break;
		}

		if (!ireq)
			continue;
		/* Drop the lock before the (potentially sleeping)
		 * termination; reacquire it before the next iteration.
		 */
		spin_unlock_irqrestore(&ihost->scic_lock, flags);

		/* Re-arm the completion for this request. */
		init_completion(&request_completion);

		dev_dbg(&ihost->pdev->dev, "%s: idev=%p request=%p; task=%p old_state=%d\n", __func__, idev, ireq, (!test_bit(IREQ_TMF, &ireq->flags) ? isci_request_access_task(ireq) : NULL), old_state);

		/* NOTE(review): assumes isci_terminate_request_core removes
		 * ireq from 'list' — confirm; otherwise the loop would
		 * revisit the same entry.
		 */
		isci_terminate_request_core(ihost, idev, ireq);
		spin_lock_irqsave(&ihost->scic_lock, flags);
	}
	spin_unlock_irqrestore(&ihost->scic_lock, flags);
}
static void isci_request_mark_zombie(struct isci_host *ihost, struct isci_request *ireq) { struct completion *tmf_completion = NULL; struct completion *req_completion; ireq->status = dead; req_completion = ireq->io_request_completion; ireq->io_request_completion = NULL; if (test_bit(IREQ_TMF, &ireq->flags)) { struct isci_tmf *tmf = isci_request_access_tmf(ireq); if (tmf) { tmf_completion = tmf->complete; tmf->complete = NULL; } ireq->ttype_ptr.tmf_task_ptr = NULL; dev_dbg(&ihost->pdev->dev, "%s: tmf_code %d, managed tag %#x\n", __func__, tmf->tmf_code, tmf->io_tag); } else { struct sas_task *task = isci_request_access_task(ireq); if (task) task->lldd_task = NULL; ireq->ttype_ptr.io_task_ptr = NULL; } dev_warn(&ihost->pdev->dev, "task context unrecoverable (tag: %#x)\n", ireq->io_tag); if (req_completion) complete(req_completion); if (tmf_completion != NULL) complete(tmf_completion); }
/**
 * isci_terminate_pending_requests() - This function will change the all of the
 *    requests on the given device's state to "aborting", will terminate the
 *    requests, and wait for them to complete.  This function must only be
 *    called from a thread that can wait.  Note that the requests are all
 *    terminated and completed (back to the host, if started there).
 * @ihost: This parameter specifies SCU.
 * @idev: This parameter specifies the target.
 */
void isci_terminate_pending_requests(struct isci_host *ihost, struct isci_remote_device *idev)
{
	struct completion request_completion;
	enum isci_request_status old_state;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&ihost->scic_lock, flags);
	list_splice_init(&idev->reqs_in_process, &list);

	/* assumes that isci_terminate_request_core deletes from the list */
	while (!list_empty(&list)) {
		struct isci_request *ireq = list_entry(list.next, typeof(*ireq), dev_node);

		/* Change state to "terminating" if it is currently
		 * "started".
		 */
		old_state = isci_request_change_started_to_newstate(ireq, &request_completion, terminating);
		switch (old_state) {
		case started:
		case completed:
		case aborting:
			break;
		default:
			/* termination in progress, or otherwise dispositioned.
			 * We know the request was on 'list' so should be safe
			 * to move it back to reqs_in_process
			 */
			list_move(&ireq->dev_node, &idev->reqs_in_process);
			ireq = NULL;
			break;
		}

		if (!ireq)
			continue;
		spin_unlock_irqrestore(&ihost->scic_lock, flags);

		init_completion(&request_completion);

		dev_dbg(&ihost->pdev->dev, "%s: idev=%p request=%p; task=%p old_state=%d\n", __func__, idev, ireq, (!test_bit(IREQ_TMF, &ireq->flags) ? isci_request_access_task(ireq) : NULL), old_state);

		/* If the old_state is started:
		 * This request was not already being aborted.  If it had been,
		 * then the aborting I/O (ie. the TMF request) would not be in
		 * the aborting state, and thus would be terminated here.  Note
		 * that since the TMF completion's call to the kernel function
		 * "complete()" does not happen until the pending I/O request
		 * terminate fully completes, we do not have to implement a
		 * special wait here for already aborting requests - the
		 * termination of the TMF request will force the request
		 * to finish it's already started terminate.
		 *
		 * If old_state == completed:
		 * This request completed from the SCU hardware perspective
		 * and now just needs cleaning up in terms of freeing the
		 * request and potentially calling up to libsas.
		 *
		 * If old_state == aborting:
		 * This request has already gone through a TMF timeout, but may
		 * not have been terminated; needs cleaning up at least.
		 */
		isci_terminate_request_core(ihost, idev, ireq);
		spin_lock_irqsave(&ihost->scic_lock, flags);
	}
	spin_unlock_irqrestore(&ihost->scic_lock, flags);
}