/**
 * isci_task_request_complete() - This function is called by the sci core when
 *    a task request completes.
 * @ihost: This parameter specifies the ISCI host object
 * @ireq: This parameter is the completed isci_request object.
 * @completion_status: This parameter specifies the completion status from the
 *    sci core.
 *
 * none.
 */
void isci_task_request_complete(struct isci_host *ihost,
				struct isci_request *ireq,
				enum sci_task_status completion_status)
{
	struct isci_tmf *tmf = isci_request_access_tmf(ireq);
	struct completion *tmf_complete = NULL;
	/* Snapshot the terminator's completion before the request can be
	 * recycled below.
	 */
	struct completion *request_complete = ireq->io_request_completion;

	dev_dbg(&ihost->pdev->dev,
		"%s: request = %p, status=%d\n",
		__func__, ireq, completion_status);

	isci_request_change_state(ireq, completed);

	set_bit(IREQ_COMPLETE_IN_TARGET, &ireq->flags);

	if (tmf) {
		tmf->status = completion_status;

		/* Copy the protocol-specific response payload out of the
		 * request before it is handed back to the core.
		 */
		if (tmf->proto == SAS_PROTOCOL_SSP) {
			memcpy(&tmf->resp.resp_iu,
			       &ireq->ssp.rsp,
			       SSP_RESP_IU_MAX_SIZE);
		} else if (tmf->proto == SAS_PROTOCOL_SATA) {
			memcpy(&tmf->resp.d2h_fis,
			       &ireq->stp.rsp,
			       sizeof(struct dev_to_host_fis));
		}

		tmf_complete = tmf->complete;
	}
	sci_controller_complete_io(ihost, ireq->target_device, ireq);
	/* set the 'terminated' flag handle to make sure it cannot be terminated
	 * or completed again.
	 */
	set_bit(IREQ_TERMINATED, &ireq->flags);

	/* As soon as something is in the terminate path, deallocation is
	 * managed there.  Note that the final non-managed state of a task
	 * request is "completed".
	 */
	if ((ireq->status == completed) ||
	    !isci_request_is_dealloc_managed(ireq->status)) {
		isci_request_change_state(ireq, unallocated);
		isci_free_tag(ihost, ireq->io_tag);
		list_del_init(&ireq->dev_node);
	}

	/* "request_complete" is set if the task was being terminated. */
	if (request_complete)
		complete(request_complete);

	/* The task management part completes last. */
	if (tmf_complete)
		complete(tmf_complete);
}
void isci_task_request_complete(struct isci_host *ihost, struct isci_request *ireq, enum sci_task_status completion_status) { struct isci_tmf *tmf = isci_request_access_tmf(ireq); struct completion *tmf_complete = NULL; struct completion *request_complete = ireq->io_request_completion; dev_dbg(&ihost->pdev->dev, "%s: request = %p, status=%d\n", __func__, ireq, completion_status); isci_request_change_state(ireq, completed); set_bit(IREQ_COMPLETE_IN_TARGET, &ireq->flags); if (tmf) { tmf->status = completion_status; if (tmf->proto == SAS_PROTOCOL_SSP) { memcpy(&tmf->resp.resp_iu, &ireq->ssp.rsp, SSP_RESP_IU_MAX_SIZE); } else if (tmf->proto == SAS_PROTOCOL_SATA) { memcpy(&tmf->resp.d2h_fis, &ireq->stp.rsp, sizeof(struct dev_to_host_fis)); } tmf_complete = tmf->complete; } sci_controller_complete_io(ihost, ireq->target_device, ireq); set_bit(IREQ_TERMINATED, &ireq->flags); if ((ireq->status == completed) || !isci_request_is_dealloc_managed(ireq->status)) { isci_request_change_state(ireq, unallocated); isci_free_tag(ihost, ireq->io_tag); list_del_init(&ireq->dev_node); } if (request_complete) complete(request_complete); if (tmf_complete) complete(tmf_complete); }
static void isci_abort_task_process_cb( enum isci_tmf_cb_state cb_state, struct isci_tmf *tmf, void *cb_data) { struct isci_request *old_request; old_request = (struct isci_request *)cb_data; dev_dbg(&old_request->isci_host->pdev->dev, "%s: tmf=%p, old_request=%p\n", __func__, tmf, old_request); switch (cb_state) { case isci_tmf_started: if ((old_request->status != aborted) && (old_request->status != completed)) dev_dbg(&old_request->isci_host->pdev->dev, "%s: Bad request status (%d): tmf=%p, old_request=%p\n", __func__, old_request->status, tmf, old_request); break; case isci_tmf_timed_out: isci_request_change_state(old_request, aborting); break; default: dev_dbg(&old_request->isci_host->pdev->dev, "%s: Bad cb_state (%d): tmf=%p, old_request=%p\n", __func__, cb_state, tmf, old_request); break; } }
/**
 * isci_abort_task_process_cb() - This is a helper function for the abort task
 *    TMF command.  It manages the request state with respect to the successful
 *    transmission / completion of the abort task request.
 * @cb_state: This parameter specifies when this function was called - after
 *    the TMF request has been started and after it has timed-out.
 * @tmf: This parameter specifies the TMF in progress.
 * @cb_data: This parameter is the isci_request being aborted, passed through
 *    as the callback cookie.
 */
static void isci_abort_task_process_cb(
	enum isci_tmf_cb_state cb_state,
	struct isci_tmf *tmf,
	void *cb_data)
{
	struct isci_request *old_request;

	old_request = (struct isci_request *)cb_data;

	dev_dbg(&old_request->isci_host->pdev->dev,
		"%s: tmf=%p, old_request=%p\n",
		__func__, tmf, old_request);

	switch (cb_state) {

	case isci_tmf_started:
		/* The TMF has been started.  Nothing to do here, since the
		 * request state was already set to "aborted" by the abort
		 * task function.
		 */
		if ((old_request->status != aborted)
			&& (old_request->status != completed))
			dev_dbg(&old_request->isci_host->pdev->dev,
				"%s: Bad request status (%d): tmf=%p, old_request=%p\n",
				__func__, old_request->status, tmf,
				old_request);
		break;

	case isci_tmf_timed_out:
		/* Set the task's state to "aborting", since the abort task
		 * function thread set it to "aborted" (above) in anticipation
		 * of the task management request working correctly.  Since the
		 * timeout has now fired, the TMF request failed.  We set the
		 * state such that the request completion will indicate the
		 * device is no longer present.
		 */
		isci_request_change_state(old_request, aborting);
		break;

	default:
		dev_dbg(&old_request->isci_host->pdev->dev,
			"%s: Bad cb_state (%d): tmf=%p, old_request=%p\n",
			__func__, cb_state, tmf, old_request);
		break;
	}
}
/**
 * isci_terminate_request_core() - This function will terminate the given
 *    request, and wait for it to complete.  This function must only be called
 *    from a thread that can wait.  Note that the request is terminated and
 *    completed (back to the host, if started there).
 * @ihost: This SCU.
 * @idev: The target.
 * @isci_request: The I/O request to be terminated.
 */
static void isci_terminate_request_core(struct isci_host *ihost,
					struct isci_remote_device *idev,
					struct isci_request *isci_request)
{
	enum sci_status status = SCI_SUCCESS;
	bool was_terminated = false;
	bool needs_cleanup_handling = false;
	unsigned long flags;
	unsigned long termination_completed = 1;
	struct completion *io_request_completion;

	dev_dbg(&ihost->pdev->dev,
		"%s: device = %p; request = %p\n",
		__func__, idev, isci_request);

	spin_lock_irqsave(&ihost->scic_lock, flags);

	io_request_completion = isci_request->io_request_completion;

	/* Note that we are not going to control
	 * the target to abort the request.
	 */
	set_bit(IREQ_COMPLETE_IN_TARGET, &isci_request->flags);

	/* Make sure the request wasn't just sitting around signalling
	 * device condition (if the request handle is NULL, then the
	 * request completed but needed additional handling here).
	 */
	if (!test_bit(IREQ_TERMINATED, &isci_request->flags)) {
		was_terminated = true;
		needs_cleanup_handling = true;
		status = sci_controller_terminate_request(ihost, idev,
							  isci_request);
	}
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	/*
	 * The only time the request to terminate will
	 * fail is when the io request is completed and
	 * being aborted.
	 */
	if (status != SCI_SUCCESS) {
		dev_dbg(&ihost->pdev->dev,
			"%s: sci_controller_terminate_request"
			" returned = 0x%x\n",
			__func__, status);

		isci_request->io_request_completion = NULL;

	} else {
		if (was_terminated) {
			dev_dbg(&ihost->pdev->dev,
				"%s: before completion wait (%p/%p)\n",
				__func__, isci_request,
				io_request_completion);

			/* Wait here for the request to complete. */
			termination_completed =
				wait_for_completion_timeout(
					io_request_completion,
					msecs_to_jiffies(ISCI_TERMINATION_TIMEOUT_MSEC));

			if (!termination_completed) {

				/* The request to terminate has timed out. */
				spin_lock_irqsave(&ihost->scic_lock, flags);

				/* Check for state changes. */
				if (!test_bit(IREQ_TERMINATED,
					      &isci_request->flags)) {

					/* The best we can do is to have the
					 * request die a silent death if it
					 * ever really completes.
					 */
					isci_request_mark_zombie(ihost,
								 isci_request);
					needs_cleanup_handling = true;
				} else
					termination_completed = 1;

				spin_unlock_irqrestore(&ihost->scic_lock,
						       flags);

				if (!termination_completed) {

					dev_dbg(&ihost->pdev->dev,
						"%s: *** Timeout waiting for "
						"termination(%p/%p)\n",
						__func__, io_request_completion,
						isci_request);

					/* The request can no longer be
					 * referenced safely since it may go
					 * away if the termination ever really
					 * does complete.
					 */
					isci_request = NULL;
				}
			}
			if (termination_completed)
				dev_dbg(&ihost->pdev->dev,
					"%s: after completion wait (%p/%p)\n",
					__func__, isci_request,
					io_request_completion);
		}

		if (termination_completed) {

			isci_request->io_request_completion = NULL;

			/* Peek at the status of the request.  This will tell
			 * us if there was special handling on the request such
			 * that it needs to be detached and freed here.
			 */
			spin_lock_irqsave(&isci_request->state_lock, flags);

			needs_cleanup_handling =
				isci_request_is_dealloc_managed(
					isci_request->status);

			spin_unlock_irqrestore(&isci_request->state_lock,
					       flags);

		}
		if (needs_cleanup_handling) {

			dev_dbg(&ihost->pdev->dev,
				"%s: cleanup isci_device=%p, request=%p\n",
				__func__, idev, isci_request);

			/* isci_request may have been NULLed above on the
			 * un-terminated timeout path.
			 */
			if (isci_request != NULL) {
				spin_lock_irqsave(&ihost->scic_lock, flags);
				isci_free_tag(ihost, isci_request->io_tag);
				isci_request_change_state(isci_request,
							  unallocated);
				list_del_init(&isci_request->dev_node);
				spin_unlock_irqrestore(&ihost->scic_lock,
						       flags);
			}
		}
	}
}
/**
 * isci_task_execute_tmf() - Build, start and wait for a task management
 *    function (TMF) request on the given remote device.
 * @ihost: host on which to run the TMF.
 * @idev: target remote device; must be I/O-ready (or in NCQ-error recovery).
 * @tmf: TMF descriptor; tmf->status receives the sci completion status and
 *    tmf->complete is pointed at an on-stack completion for the wait.
 * @timeout_ms: time in milliseconds to wait for the TMF to complete.
 *
 * Return: TMF_RESP_FUNC_COMPLETE when the TMF succeeded (or produced a valid
 *    response IU), otherwise TMF_RESP_FUNC_FAILED.
 */
static int isci_task_execute_tmf(struct isci_host *ihost,
				 struct isci_remote_device *idev,
				 struct isci_tmf *tmf, unsigned long timeout_ms)
{
	DECLARE_COMPLETION_ONSTACK(completion);
	enum sci_task_status status = SCI_TASK_FAILURE;
	struct isci_request *ireq;
	int ret = TMF_RESP_FUNC_FAILED;
	unsigned long flags;
	unsigned long timeleft;
	u16 tag;

	spin_lock_irqsave(&ihost->scic_lock, flags);
	tag = isci_alloc_tag(ihost);
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	if (tag == SCI_CONTROLLER_INVALID_IO_TAG)
		return ret;

	/* sanity check, return TMF_RESP_FUNC_FAILED
	 * if the device is not there and ready.
	 */
	if (!idev ||
	    (!test_bit(IDEV_IO_READY, &idev->flags) &&
	     !test_bit(IDEV_IO_NCQERROR, &idev->flags))) {
		dev_dbg(&ihost->pdev->dev,
			"%s: idev = %p not ready (%#lx)\n",
			__func__,
			idev, idev ? idev->flags : 0);
		goto err_tci;
	} else
		dev_dbg(&ihost->pdev->dev,
			"%s: idev = %p\n",
			__func__, idev);

	/* Assign the pointer to the TMF's completion kernel wait structure. */
	tmf->complete = &completion;
	tmf->status = SCI_FAILURE_TIMEOUT;

	ireq = isci_task_request_build(ihost, idev, tag, tmf);
	if (!ireq)
		goto err_tci;

	spin_lock_irqsave(&ihost->scic_lock, flags);

	/* start the TMF io. */
	status = sci_controller_start_task(ihost, idev, ireq);

	if (status != SCI_TASK_SUCCESS) {
		dev_dbg(&ihost->pdev->dev,
			"%s: start_io failed - status = 0x%x, request = %p\n",
			__func__,
			status,
			ireq);
		spin_unlock_irqrestore(&ihost->scic_lock, flags);
		goto err_tci;
	}

	if (tmf->cb_state_func != NULL)
		tmf->cb_state_func(isci_tmf_started, tmf, tmf->cb_data);

	isci_request_change_state(ireq, started);

	/* add the request to the remote device request list. */
	list_add(&ireq->dev_node, &idev->reqs_in_process);

	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	/* Wait for the TMF to complete, or a timeout. */
	timeleft = wait_for_completion_timeout(&completion,
					       msecs_to_jiffies(timeout_ms));

	if (timeleft == 0) {
		/* The TMF did not complete - this could be because
		 * of an unplug.  Terminate the TMF request now.
		 */
		spin_lock_irqsave(&ihost->scic_lock, flags);

		if (tmf->cb_state_func != NULL)
			tmf->cb_state_func(isci_tmf_timed_out, tmf,
					   tmf->cb_data);

		sci_controller_terminate_request(ihost, idev, ireq);

		spin_unlock_irqrestore(&ihost->scic_lock, flags);

		timeleft = wait_for_completion_timeout(
			&completion,
			msecs_to_jiffies(ISCI_TERMINATION_TIMEOUT_MSEC));

		if (!timeleft) {
			/* Strange condition - the termination of the TMF
			 * request timed-out.
			 */
			spin_lock_irqsave(&ihost->scic_lock, flags);

			/* If the TMF status has not changed, kill it. */
			if (tmf->status == SCI_FAILURE_TIMEOUT)
				isci_request_mark_zombie(ihost, ireq);

			spin_unlock_irqrestore(&ihost->scic_lock, flags);
		}
	}

	isci_print_tmf(ihost, tmf);

	if (tmf->status == SCI_SUCCESS)
		ret = TMF_RESP_FUNC_COMPLETE;
	else if (tmf->status == SCI_FAILURE_IO_RESPONSE_VALID) {
		dev_dbg(&ihost->pdev->dev,
			"%s: tmf.status == "
			"SCI_FAILURE_IO_RESPONSE_VALID\n",
			__func__);
		ret = TMF_RESP_FUNC_COMPLETE;
	}
	/* Else - leave the default "failed" status alone. */

	dev_dbg(&ihost->pdev->dev,
		"%s: completed request = %p\n",
		__func__,
		ireq);

	return ret;

err_tci:
	/* Release the tag allocated at the top on all early-exit paths. */
	spin_lock_irqsave(&ihost->scic_lock, flags);
	isci_tci_free(ihost, ISCI_TAG_TCI(tag));
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	return ret;
}
/**
 * isci_terminate_request_core() - Terminate @isci_request and wait (with a
 *    timeout) for the termination to complete; may sleep, so it must only be
 *    called from a context that can wait.
 * @ihost: the owning host.
 * @idev: the target remote device.
 * @isci_request: the I/O request to be terminated.
 */
static void isci_terminate_request_core(struct isci_host *ihost,
					struct isci_remote_device *idev,
					struct isci_request *isci_request)
{
	enum sci_status status = SCI_SUCCESS;
	bool was_terminated = false;
	bool needs_cleanup_handling = false;
	unsigned long flags;
	unsigned long termination_completed = 1;
	struct completion *io_request_completion;

	dev_dbg(&ihost->pdev->dev,
		"%s: device = %p; request = %p\n",
		__func__, idev, isci_request);

	spin_lock_irqsave(&ihost->scic_lock, flags);

	io_request_completion = isci_request->io_request_completion;

	/* The request is completed in the target, not aborted there. */
	set_bit(IREQ_COMPLETE_IN_TARGET, &isci_request->flags);

	/* Only issue a terminate if the request has not already been
	 * terminated/completed.
	 */
	if (!test_bit(IREQ_TERMINATED, &isci_request->flags)) {
		was_terminated = true;
		needs_cleanup_handling = true;
		status = sci_controller_terminate_request(ihost, idev,
							  isci_request);
	}
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	/* NOTE(review): per the commented twin of this function, terminate
	 * only fails when the request is already completing/being aborted.
	 */
	if (status != SCI_SUCCESS) {
		dev_dbg(&ihost->pdev->dev,
			"%s: sci_controller_terminate_request"
			" returned = 0x%x\n",
			__func__, status);

		isci_request->io_request_completion = NULL;

	} else {
		if (was_terminated) {
			dev_dbg(&ihost->pdev->dev,
				"%s: before completion wait (%p/%p)\n",
				__func__, isci_request,
				io_request_completion);

			/* Wait (bounded) for the termination to complete. */
			termination_completed =
				wait_for_completion_timeout(
					io_request_completion,
					msecs_to_jiffies(ISCI_TERMINATION_TIMEOUT_MSEC));

			if (!termination_completed) {

				/* Timed out - re-check state under the lock. */
				spin_lock_irqsave(&ihost->scic_lock, flags);

				if (!test_bit(IREQ_TERMINATED,
					      &isci_request->flags)) {

					/* Still not terminated: mark it a
					 * zombie so a late completion dies
					 * silently.
					 */
					isci_request_mark_zombie(ihost,
								 isci_request);
					needs_cleanup_handling = true;
				} else
					termination_completed = 1;

				spin_unlock_irqrestore(&ihost->scic_lock,
						       flags);

				if (!termination_completed) {

					dev_dbg(&ihost->pdev->dev,
						"%s: *** Timeout waiting for "
						"termination(%p/%p)\n",
						__func__, io_request_completion,
						isci_request);

					/* The zombie request may go away at
					 * any time - stop referencing it.
					 */
					isci_request = NULL;
				}
			}
			if (termination_completed)
				dev_dbg(&ihost->pdev->dev,
					"%s: after completion wait (%p/%p)\n",
					__func__, isci_request,
					io_request_completion);
		}

		if (termination_completed) {

			isci_request->io_request_completion = NULL;

			/* Check whether the request status requires detach
			 * and free here rather than in the completion path.
			 */
			spin_lock_irqsave(&isci_request->state_lock, flags);

			needs_cleanup_handling =
				isci_request_is_dealloc_managed(
					isci_request->status);

			spin_unlock_irqrestore(&isci_request->state_lock,
					       flags);

		}
		if (needs_cleanup_handling) {

			dev_dbg(&ihost->pdev->dev,
				"%s: cleanup isci_device=%p, request=%p\n",
				__func__, idev, isci_request);

			/* isci_request is NULL when the zombie path above
			 * gave up the reference.
			 */
			if (isci_request != NULL) {
				spin_lock_irqsave(&ihost->scic_lock, flags);
				isci_free_tag(ihost, isci_request->io_tag);
				isci_request_change_state(isci_request,
							  unallocated);
				list_del_init(&isci_request->dev_node);
				spin_unlock_irqrestore(&ihost->scic_lock,
						       flags);
			}
		}
	}
}
/**
 * isci_task_execute_tmf() - Allocate a tag, build and start a task management
 *    function request, then wait for completion or timeout.
 * @ihost: host on which to run the TMF.
 * @idev: target remote device; must be I/O-ready (or in NCQ-error state).
 * @tmf: TMF descriptor; tmf->status receives the sci completion status and
 *    tmf->complete is pointed at an on-stack completion used for the wait.
 * @timeout_ms: milliseconds to wait for the TMF to complete.
 *
 * Return: TMF_RESP_FUNC_COMPLETE on success or valid response IU, otherwise
 *    TMF_RESP_FUNC_FAILED.
 */
static int isci_task_execute_tmf(struct isci_host *ihost,
				 struct isci_remote_device *idev,
				 struct isci_tmf *tmf, unsigned long timeout_ms)
{
	DECLARE_COMPLETION_ONSTACK(completion);
	enum sci_task_status status = SCI_TASK_FAILURE;
	struct isci_request *ireq;
	int ret = TMF_RESP_FUNC_FAILED;
	unsigned long flags;
	unsigned long timeleft;
	u16 tag;

	spin_lock_irqsave(&ihost->scic_lock, flags);
	tag = isci_alloc_tag(ihost);
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	if (tag == SCI_CONTROLLER_INVALID_IO_TAG)
		return ret;

	/* Fail the TMF if the device is absent or not ready for I/O. */
	if (!idev ||
	    (!test_bit(IDEV_IO_READY, &idev->flags) &&
	     !test_bit(IDEV_IO_NCQERROR, &idev->flags))) {
		dev_dbg(&ihost->pdev->dev,
			"%s: idev = %p not ready (%#lx)\n",
			__func__,
			idev, idev ? idev->flags : 0);
		goto err_tci;
	} else
		dev_dbg(&ihost->pdev->dev,
			"%s: idev = %p\n",
			__func__, idev);

	/* Arm the on-stack completion and assume timeout until proven
	 * otherwise.
	 */
	tmf->complete = &completion;
	tmf->status = SCI_FAILURE_TIMEOUT;

	ireq = isci_task_request_build(ihost, idev, tag, tmf);
	if (!ireq)
		goto err_tci;

	spin_lock_irqsave(&ihost->scic_lock, flags);

	/* Start the TMF I/O with the core. */
	status = sci_controller_start_task(ihost, idev, ireq);

	if (status != SCI_TASK_SUCCESS) {
		dev_dbg(&ihost->pdev->dev,
			"%s: start_io failed - status = 0x%x, request = %p\n",
			__func__,
			status,
			ireq);
		spin_unlock_irqrestore(&ihost->scic_lock, flags);
		goto err_tci;
	}

	/* Notify the caller-supplied state callback that the TMF started. */
	if (tmf->cb_state_func != NULL)
		tmf->cb_state_func(isci_tmf_started, tmf, tmf->cb_data);

	isci_request_change_state(ireq, started);

	/* Track the request on the remote device's in-process list. */
	list_add(&ireq->dev_node, &idev->reqs_in_process);

	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	/* Wait for the TMF to complete, or a timeout. */
	timeleft = wait_for_completion_timeout(&completion,
					       msecs_to_jiffies(timeout_ms));

	if (timeleft == 0) {
		/* The TMF did not complete (possibly an unplug) - notify the
		 * callback and terminate the TMF request.
		 */
		spin_lock_irqsave(&ihost->scic_lock, flags);

		if (tmf->cb_state_func != NULL)
			tmf->cb_state_func(isci_tmf_timed_out, tmf,
					   tmf->cb_data);

		sci_controller_terminate_request(ihost, idev, ireq);

		spin_unlock_irqrestore(&ihost->scic_lock, flags);

		/* Wait again, bounded, for the termination itself. */
		timeleft = wait_for_completion_timeout(
			&completion,
			msecs_to_jiffies(ISCI_TERMINATION_TIMEOUT_MSEC));

		if (!timeleft) {
			/* Even the termination timed out; if the TMF status
			 * never changed, zombie the request.
			 */
			spin_lock_irqsave(&ihost->scic_lock, flags);

			if (tmf->status == SCI_FAILURE_TIMEOUT)
				isci_request_mark_zombie(ihost, ireq);

			spin_unlock_irqrestore(&ihost->scic_lock, flags);
		}
	}

	isci_print_tmf(ihost, tmf);

	/* A valid response IU also counts as success; otherwise keep the
	 * default "failed" return.
	 */
	if (tmf->status == SCI_SUCCESS)
		ret = TMF_RESP_FUNC_COMPLETE;
	else if (tmf->status == SCI_FAILURE_IO_RESPONSE_VALID) {
		dev_dbg(&ihost->pdev->dev,
			"%s: tmf.status == "
			"SCI_FAILURE_IO_RESPONSE_VALID\n",
			__func__);
		ret = TMF_RESP_FUNC_COMPLETE;
	}

	dev_dbg(&ihost->pdev->dev,
		"%s: completed request = %p\n",
		__func__,
		ireq);

	return ret;

err_tci:
	/* Release the tag allocated at the top on all early-exit paths. */
	spin_lock_irqsave(&ihost->scic_lock, flags);
	isci_tci_free(ihost, ISCI_TAG_TCI(tag));
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	return ret;
}