void
tws_reset(void *arg)
{
    struct tws_softc *sc = (struct tws_softc *)arg;

    mtx_lock(&sc->gen_lock);
    if (tws_get_state(sc) == TWS_RESET) {
        mtx_unlock(&sc->gen_lock);
        return;
    }

    tws_teardown_intr(sc);
    xpt_freeze_simq(sc->sim, 1);

    tws_send_event(sc, TWS_RESET_START);

    device_printf(sc->tws_dev, "Resetting controller\n");

    tws_assert_soft_reset(sc);
    tws_turn_off_interrupts(sc);
    tws_reset_cb((void *)sc);
    tws_reinit((void *)sc);

    /* device_printf(sc->tws_dev, "Controller Reset complete!\n"); */
    tws_send_event(sc, TWS_RESET_COMPLETE);
    mtx_unlock(&sc->gen_lock);

    xpt_release_simq(sc->sim, 1);
    tws_setup_intr(sc, sc->irqs);
}
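/*
 * A minimal sketch (not part of the tws driver) of the simq freeze/release
 * pairing that tws_reset() and tws_timeout() are built around: freeze the
 * SIM queue so CAM stops dispatching CCBs during the reset, then release it
 * once the controller is back.  Assumes the usual CAM headers
 * (cam/cam_sim.h, cam/cam_xpt_sim.h); the softc layout and the reset body
 * are hypothetical.
 */
struct xx_softc {
    struct cam_sim  *sim;       /* hypothetical SIM handle */
    struct mtx       lock;
};

static void
xx_controller_reset(struct xx_softc *sc)
{
    mtx_lock(&sc->lock);
    xpt_freeze_simq(sc->sim, 1);    /* hold new CCBs during the reset */
    /* ... hardware reset and reinitialization would go here ... */
    mtx_unlock(&sc->lock);
    xpt_release_simq(sc->sim, 1);   /* resume dispatch, run the queue */
}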
void
isci_controller_domain_discovery_complete(
    struct ISCI_CONTROLLER *isci_controller, struct ISCI_DOMAIN *isci_domain)
{
    if (!isci_controller->has_been_scanned) {
        /* Controller has not been scanned yet.  We'll clear the
         * discovery bit for this domain, then check if all bits are
         * now clear.  That would indicate that all domains are done
         * with discovery and we can then proceed with the initial
         * scan.
         */
        isci_controller->initial_discovery_mask &=
            ~(1 << isci_domain->index);

        if (isci_controller->initial_discovery_mask == 0) {
            struct isci_softc *driver = isci_controller->isci;
            uint8_t next_index = isci_controller->index + 1;

            isci_controller->has_been_scanned = TRUE;

            /* Unfreeze simq to allow initial scan to proceed. */
            xpt_release_simq(isci_controller->sim, TRUE);

#if __FreeBSD_version < 800000
            /* When the driver is loaded after boot, we need to
             * explicitly rescan here for versions <8.0, because
             * CAM only automatically scans new buses at boot
             * time.
             */
            union ccb *ccb = xpt_alloc_ccb_nowait();

            xpt_create_path(&ccb->ccb_h.path, xpt_periph,
                cam_sim_path(isci_controller->sim),
                CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);

            xpt_rescan(ccb);
#endif

            if (next_index < driver->controller_count) {
                /* There are more controllers that need to
                 * start, so start the next one.
                 */
                isci_controller_start(
                    &driver->controllers[next_index]);
            } else {
                /* All controllers have been started and have
                 * completed discovery.  Disestablish the
                 * config hook, which will signal to the kernel
                 * during boot that it is safe to try to find
                 * and mount the root partition.
                 */
                config_intrhook_disestablish(
                    &driver->config_hook);
            }
        }
    }
}
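/*
 * The discovery bookkeeping above is a bitmask countdown: one bit per
 * domain is set before discovery starts, each completion clears its bit,
 * and the mask reaching zero triggers the one-time "all done" action.
 * A small, self-contained illustration (all names invented):
 */
#include <stdio.h>

#define XX_DOMAIN_COUNT 4

static unsigned xx_discovery_mask = (1u << XX_DOMAIN_COUNT) - 1;

static void
xx_domain_discovery_done(unsigned index)
{
    xx_discovery_mask &= ~(1u << index);
    if (xx_discovery_mask == 0) {
        /* Last domain finished; safe to start the initial scan. */
        printf("all %d domains discovered\n", XX_DOMAIN_COUNT);
    }
}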
static int
ahci_em_resume(device_t dev)
{
    struct ahci_enclosure *enc = device_get_softc(dev);

    mtx_lock(&enc->mtx);
    ahci_em_reset(dev);
    xpt_release_simq(enc->sim, TRUE);
    mtx_unlock(&enc->mtx);
    return (0);
}
void
tws_timeout(void *arg)
{
    struct tws_request *req = (struct tws_request *)arg;
    struct tws_softc *sc = req->sc;

    if (req->error_code == TWS_REQ_RET_RESET)
        return;

    mtx_lock(&sc->gen_lock);
    /* Re-check under the lock: the request may have been claimed by a
     * reset while we waited for gen_lock. */
    if (req->error_code == TWS_REQ_RET_RESET) {
        mtx_unlock(&sc->gen_lock);
        return;
    }

    if (tws_get_state(sc) == TWS_RESET) {
        mtx_unlock(&sc->gen_lock);
        return;
    }

    tws_teardown_intr(sc);
    xpt_freeze_simq(sc->sim, 1);

    tws_send_event(sc, TWS_RESET_START);

    if (req->type == TWS_REQ_TYPE_SCSI_IO) {
        device_printf(sc->tws_dev,
            "I/O Request timed out... Resetting controller\n");
    } else if (req->type == TWS_REQ_TYPE_PASSTHRU) {
        device_printf(sc->tws_dev,
            "IOCTL Request timed out... Resetting controller\n");
    } else {
        device_printf(sc->tws_dev,
            "Internal Request timed out... Resetting controller\n");
    }

    tws_assert_soft_reset(sc);
    tws_turn_off_interrupts(sc);
    tws_reset_cb((void *)sc);
    tws_reinit((void *)sc);

    /* device_printf(sc->tws_dev, "Controller Reset complete!\n"); */
    tws_send_event(sc, TWS_RESET_COMPLETE);
    mtx_unlock(&sc->gen_lock);

    xpt_release_simq(sc->sim, 1);
    tws_setup_intr(sc, sc->irqs);
}
int
ata_resume(device_t dev)
{
    struct ata_channel *ch;
    int error;

    /* Check for a valid device. */
    if (!dev || !(ch = device_get_softc(dev)))
        return (ENXIO);

    mtx_lock(&ch->state_mtx);
    error = ata_reinit(dev);
    xpt_release_simq(ch->sim, TRUE);
    mtx_unlock(&ch->state_mtx);
    if (ch->flags & ATA_PERIODIC_POLL)
        callout_reset(&ch->poll_callout, hz, ata_periodic_poll, ch);
    return (error);
}
static void
aac_cam_event(struct aac_softc *sc, struct aac_event *event, void *arg)
{
    union ccb *ccb;
    struct aac_cam *camsc;

    switch (event->ev_type) {
    case AAC_EVENT_CMFREE:
        ccb = arg;
        camsc = ccb->ccb_h.sim_priv.entries[0].ptr;
        free(event, M_AACCAM);
        xpt_release_simq(camsc->sim, 1);
        ccb->ccb_h.status = CAM_REQUEUE_REQ;
        xpt_done(ccb);
        break;
    default:
        device_printf(sc->aac_dev, "unknown event %d in aac_cam\n",
            event->ev_type);
        break;
    }

    return;
}
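/*
 * aac_cam_event() above is the release half of a resource-shortage
 * handshake: the simq was frozen when no command structure was available,
 * and the "command freed" event releases it and requeues the CCB.  A
 * hedged sketch of what the freeze half might look like in the action
 * routine; xx_get_command() and xx_register_cmfree_event() are invented
 * stand-ins, not the aac driver's real helpers:
 */
static void
xx_action_start_io(struct xx_softc *sc, struct xx_cam *camsc, union ccb *ccb)
{
    if (xx_get_command(sc) == NULL) {
        /* Out of command slots: stall the SIM queue and stash the
         * SIM softc where the event handler expects to find it. */
        xpt_freeze_simq(camsc->sim, 1);
        ccb->ccb_h.sim_priv.entries[0].ptr = camsc;
        xx_register_cmfree_event(sc, ccb);  /* fires like aac_cam_event() */
        return;
    }
    /* ... otherwise build and submit the command ... */
}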
int
ata_reinit(device_t dev)
{
    struct ata_channel *ch = device_get_softc(dev);
    struct ata_request *request;

    xpt_freeze_simq(ch->sim, 1);
    if ((request = ch->running)) {
        ch->running = NULL;
        if (ch->state == ATA_ACTIVE)
            ch->state = ATA_IDLE;
        callout_stop(&request->callout);
        if (ch->dma.unload)
            ch->dma.unload(request);
        request->result = ERESTART;
        ata_cam_end_transaction(dev, request);
    }

    /* Reset the controller HW, the channel and device(s). */
    ATA_RESET(dev);

    /* Tell the XPT about the event. */
    xpt_async(AC_BUS_RESET, ch->path, NULL);
    xpt_release_simq(ch->sim, TRUE);
    return (0);
}
void
isp_done(struct ccb_scsiio *sccb)
{
    struct ispsoftc *isp = XS_ISP(sccb);

    if (XS_NOERR(sccb))
        XS_SETERR(sccb, CAM_REQ_CMP);
    sccb->ccb_h.status &= ~CAM_STATUS_MASK;
    sccb->ccb_h.status |= sccb->ccb_h.spriv_field0;
    if ((sccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP &&
        (sccb->scsi_status != SCSI_STATUS_OK)) {
        sccb->ccb_h.status &= ~CAM_STATUS_MASK;
        sccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
    }
    if ((sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
        if ((sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
            IDPRINTF(3, ("%s: freeze devq %d.%d ccbstat 0x%x\n",
                isp->isp_name, sccb->ccb_h.target_id,
                sccb->ccb_h.target_lun, sccb->ccb_h.status));
            xpt_freeze_devq(sccb->ccb_h.path, 1);
            sccb->ccb_h.status |= CAM_DEV_QFRZN;
        }
    }
    if (isp->isp_osinfo.simqfrozen & SIMQFRZ_RESOURCE) {
        isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_RESOURCE;
        sccb->ccb_h.status |= CAM_RELEASE_SIMQ;
        xpt_release_simq(isp->isp_sim, 1);
    }
    sccb->ccb_h.status &= ~CAM_SIM_QUEUED;
    if (CAM_DEBUGGED(sccb->ccb_h.path, ISPDDB) &&
        (sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
        xpt_print_path(sccb->ccb_h.path);
        printf("cam completion status 0x%x\n", sccb->ccb_h.status);
    }
    xpt_done((union ccb *)sccb);
}
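/*
 * isp_done() also shows the CAM_RELEASE_SIMQ status flag: a SIM that froze
 * its queue for a resource shortage can release it at completion time and
 * set the flag so the upper layers know the queue was released with this
 * CCB.  A hedged sketch of just that idiom (resource_frozen and the softc
 * are invented stand-ins):
 */
static void
xx_done(struct xx_softc *sc, union ccb *ccb)
{
    if (sc->resource_frozen) {
        sc->resource_frozen = 0;
        ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
        xpt_release_simq(sc->sim, 1);   /* run the queue again */
    }
    ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
    xpt_done(ccb);
}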
static void
wds_done(struct wds *wp, struct wds_req *r, u_int8_t stat)
{
    struct ccb_hdr *ccb_h;
    struct ccb_scsiio *csio;
    int status;

    smallog('d');
    if (r->flags & WR_DONE) {
        device_printf(wp->dev, "request %d reported done twice\n",
            r->id);
        smallog2('x', r->id + '0');
        return;
    }
    smallog(r->id + '0');
    ccb_h = &r->ccb->ccb_h;
    csio = &r->ccb->csio;
    status = CAM_REQ_CMP_ERR;
    DBG(DBX "wds%d: %s stat=0x%x c->stat=0x%x c->venderr=0x%x\n",
        wp->unit, r->flags & WR_SENSE ? "(sense)" : "", stat,
        r->cmd.stat, r->cmd.venderr);
    if (r->flags & WR_SENSE) {
        if (stat == ICMB_OK ||
            (stat == ICMB_OKERR && r->cmd.stat == 0)) {
            DBG(DBX "wds%d: sense 0x%x\n", wp->unit, r->buf[0]);
            /* Same size today; copy defensively in case the
             * sizes ever diverge. */
            bcopy(r->buf, &csio->sense_data,
                sizeof(struct scsi_sense_data) > csio->sense_len ?
                csio->sense_len : sizeof(struct scsi_sense_data));
            if (sizeof(struct scsi_sense_data) >= csio->sense_len)
                csio->sense_resid = 0;
            else
                csio->sense_resid = csio->sense_len -
                    sizeof(struct scsi_sense_data);
            status = CAM_AUTOSNS_VALID | CAM_SCSI_STATUS_ERROR;
        } else {
            status = CAM_AUTOSENSE_FAIL;
        }
    } else {
        switch (stat) {
        case ICMB_OK:
            if (ccb_h) {
                csio->resid = 0;
                csio->scsi_status = r->cmd.stat;
                status = CAM_REQ_CMP;
            }
            break;
        case ICMB_OKERR:
            if (ccb_h) {
                csio->scsi_status = r->cmd.stat;
                if (r->cmd.stat) {
                    if (ccb_h->flags & CAM_DIS_AUTOSENSE)
                        status = CAM_SCSI_STATUS_ERROR;
                    else {
                        if (wds_runsense(wp, r) ==
                            CAM_REQ_CMP)
                            return;
                        /* On error, fall through and
                         * free the CCB. */
                    }
                } else {
                    csio->resid = 0;
                    status = CAM_REQ_CMP;
                }
            }
            break;
        case ICMB_ETIME:
            if (ccb_h)
                status = CAM_SEL_TIMEOUT;
            break;
        case ICMB_ERESET:
        case ICMB_ETARCMD:
        case ICMB_ERESEL:
        case ICMB_ESEL:
        case ICMB_EABORT:
        case ICMB_ESRESET:
        case ICMB_EHRESET:
            if (ccb_h)
                status = CAM_REQ_CMP_ERR;
            break;
        }
        if (ccb_h && (ccb_h->flags & CAM_DIR_MASK) == CAM_DIR_IN) {
            /* We accept only virtual addresses in wds_action(). */
            bcopy(r->buf, csio->data_ptr, csio->dxfer_len);
        }
    }
    r->flags |= WR_DONE;
    wp->dx->ombs[r->ombn].stat = 0;
    if (ccb_h) {
        wdsr_ccb_done(wp, r, r->ccb, status);
        smallog3('-', ccb_h->target_id + '0',
            ccb_h->target_lun + '0');
    } else {
        frag_free(wp, r->mask);
        if (wp->want_wdsr) {
            wp->want_wdsr = 0;
            xpt_release_simq(wp->sim, /* run queue */ 1);
        }
        wp->wdsr_free |= (1 << r->id);
    }
    DBG(DBX "wds%d: request %p done\n", wp->unit, r);
}
void
mrsas_xpt_release(struct mrsas_softc *sc)
{
    xpt_release_simq(sc->sim_0, 1);
    xpt_release_simq(sc->sim_1, 1);
}
void
isci_io_request_complete(SCI_CONTROLLER_HANDLE_T scif_controller,
    SCI_REMOTE_DEVICE_HANDLE_T remote_device,
    struct ISCI_IO_REQUEST *isci_request, SCI_IO_STATUS completion_status)
{
    struct ISCI_CONTROLLER *isci_controller;
    struct ISCI_REMOTE_DEVICE *isci_remote_device;
    union ccb *ccb;
    BOOL complete_ccb;

    complete_ccb = TRUE;
    isci_controller = (struct ISCI_CONTROLLER *)
        sci_object_get_association(scif_controller);
    isci_remote_device = (struct ISCI_REMOTE_DEVICE *)
        sci_object_get_association(remote_device);

    ccb = isci_request->ccb;
    ccb->ccb_h.status &= ~CAM_STATUS_MASK;

    switch (completion_status) {
    case SCI_IO_SUCCESS:
    case SCI_IO_SUCCESS_COMPLETE_BEFORE_START:
#if __FreeBSD_version >= 900026
        if (ccb->ccb_h.func_code == XPT_SMP_IO) {
            void *smp_response =
                scif_io_request_get_response_iu_address(
                    isci_request->sci_object);

            memcpy(ccb->smpio.smp_response, smp_response,
                ccb->smpio.smp_response_len);
        }
#endif
        ccb->ccb_h.status |= CAM_REQ_CMP;
        break;

    case SCI_IO_SUCCESS_IO_DONE_EARLY:
        ccb->ccb_h.status |= CAM_REQ_CMP;
        ccb->csio.resid = ccb->csio.dxfer_len -
            scif_io_request_get_number_of_bytes_transferred(
                isci_request->sci_object);
        break;

    case SCI_IO_FAILURE_RESPONSE_VALID:
    {
        SCI_SSP_RESPONSE_IU_T *response_buffer;
        uint32_t sense_length;
        int error_code, sense_key, asc, ascq;
        struct ccb_scsiio *csio = &ccb->csio;

        response_buffer = (SCI_SSP_RESPONSE_IU_T *)
            scif_io_request_get_response_iu_address(
                isci_request->sci_object);

        sense_length = sci_ssp_get_sense_data_length(
            response_buffer->sense_data_length);
        sense_length = MIN(csio->sense_len, sense_length);
        memcpy(&csio->sense_data, response_buffer->data,
            sense_length);
        csio->sense_resid = csio->sense_len - sense_length;
        csio->scsi_status = response_buffer->status;
        ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
        ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
        scsi_extract_sense(&csio->sense_data, &error_code,
            &sense_key, &asc, &ascq);
        isci_log_message(1, "ISCI",
            "isci: bus=%x target=%x lun=%x cdb[0]=%x status=%x key=%x asc=%x ascq=%x\n",
            ccb->ccb_h.path_id, ccb->ccb_h.target_id,
            ccb->ccb_h.target_lun, csio->cdb_io.cdb_bytes[0],
            csio->scsi_status, sense_key, asc, ascq);
        break;
    }

    case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED:
        isci_remote_device_reset(isci_remote_device, NULL);
        /* drop through */
    case SCI_IO_FAILURE_TERMINATED:
        ccb->ccb_h.status |= CAM_REQ_TERMIO;
        isci_log_message(1, "ISCI",
            "isci: bus=%x target=%x lun=%x cdb[0]=%x terminated\n",
            ccb->ccb_h.path_id, ccb->ccb_h.target_id,
            ccb->ccb_h.target_lun, ccb->csio.cdb_io.cdb_bytes[0]);
        break;

    case SCI_IO_FAILURE_INVALID_STATE:
    case SCI_IO_FAILURE_INSUFFICIENT_RESOURCES:
        complete_ccb = FALSE;
        break;

    case SCI_IO_FAILURE_INVALID_REMOTE_DEVICE:
        ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
        break;

    case SCI_IO_FAILURE_NO_NCQ_TAG_AVAILABLE:
    {
        struct ccb_relsim ccb_relsim;
        struct cam_path *path;

        xpt_create_path(&path, NULL,
            cam_sim_path(isci_controller->sim),
            isci_remote_device->index, 0);

        xpt_setup_ccb(&ccb_relsim.ccb_h, path, 5);
        ccb_relsim.ccb_h.func_code = XPT_REL_SIMQ;
        ccb_relsim.ccb_h.flags = CAM_DEV_QFREEZE;
        ccb_relsim.release_flags = RELSIM_ADJUST_OPENINGS;
        ccb_relsim.openings =
            scif_remote_device_get_max_queue_depth(remote_device);
        xpt_action((union ccb *)&ccb_relsim);
        xpt_free_path(path);
        complete_ccb = FALSE;
        break;
    }

    case SCI_IO_FAILURE:
    case SCI_IO_FAILURE_REQUIRES_SCSI_ABORT:
    case SCI_IO_FAILURE_UNSUPPORTED_PROTOCOL:
    case SCI_IO_FAILURE_PROTOCOL_VIOLATION:
    case SCI_IO_FAILURE_INVALID_PARAMETER_VALUE:
    case SCI_IO_FAILURE_CONTROLLER_SPECIFIC_ERR:
    default:
        isci_log_message(1, "ISCI",
            "isci: bus=%x target=%x lun=%x cdb[0]=%x completion status=%x\n",
            ccb->ccb_h.path_id, ccb->ccb_h.target_id,
            ccb->ccb_h.target_lun, ccb->csio.cdb_io.cdb_bytes[0],
            completion_status);
        ccb->ccb_h.status |= CAM_REQ_CMP_ERR;
        break;
    }

    callout_stop(&isci_request->parent.timer);
    bus_dmamap_sync(isci_request->parent.dma_tag,
        isci_request->parent.dma_map,
        BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

    bus_dmamap_unload(isci_request->parent.dma_tag,
        isci_request->parent.dma_map);

    isci_request->ccb = NULL;

    sci_pool_put(isci_controller->request_pool,
        (struct ISCI_REQUEST *)isci_request);

    if (complete_ccb) {
        if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
            /* The CCB will be completed with some type of
             * non-success status.  So temporarily freeze the
             * queue until the upper layers can act on the
             * status.  The CAM_DEV_QFRZN flag will then release
             * the queue after the status is acted upon.
             */
            ccb->ccb_h.status |= CAM_DEV_QFRZN;
            xpt_freeze_devq(ccb->ccb_h.path, 1);
        }

        if (ccb->ccb_h.status & CAM_SIM_QUEUED) {
            KASSERT(ccb == isci_remote_device->queued_ccb_in_progress,
                ("multiple internally queued ccbs in flight"));

            TAILQ_REMOVE(&isci_remote_device->queued_ccbs,
                &ccb->ccb_h, sim_links.tqe);

            ccb->ccb_h.status &= ~CAM_SIM_QUEUED;

            /*
             * This CCB that was in the queue was completed, so
             * set the in_progress pointer to NULL denoting that
             * we can retry another CCB from the queue.  We only
             * allow one CCB at a time from the queue to be
             * in progress so that we can effectively maintain
             * ordering.
             */
            isci_remote_device->queued_ccb_in_progress = NULL;
        }

        if (isci_remote_device->frozen_lun_mask != 0) {
            isci_remote_device_release_device_queue(isci_remote_device);
        }

        xpt_done(ccb);

        if (isci_controller->is_frozen == TRUE) {
            isci_controller->is_frozen = FALSE;
            xpt_release_simq(isci_controller->sim, TRUE);
        }
    } else {
        isci_remote_device_freeze_lun_queue(isci_remote_device,
            ccb->ccb_h.target_lun);

        if (ccb->ccb_h.status & CAM_SIM_QUEUED) {
            KASSERT(ccb == isci_remote_device->queued_ccb_in_progress,
                ("multiple internally queued ccbs in flight"));

            /*
             * Do nothing, the CCB is already on the device's
             * queue.  We leave it there, to be retried again
             * the next time a CCB on this device completes, or
             * when we get a ready notification for this device.
             */
            isci_log_message(1, "ISCI", "already queued %p %x\n",
                ccb, ccb->csio.cdb_io.cdb_bytes[0]);

            isci_remote_device->queued_ccb_in_progress = NULL;
        } else {
            isci_log_message(1, "ISCI", "queue %p %x\n", ccb,
                ccb->csio.cdb_io.cdb_bytes[0]);
            ccb->ccb_h.status |= CAM_SIM_QUEUED;

            TAILQ_INSERT_TAIL(&isci_remote_device->queued_ccbs,
                &ccb->ccb_h, sim_links.tqe);
        }
    }
}
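/*
 * The error path above follows the standard CAM contract for failed CCBs:
 * freeze the device queue once, mark the CCB with CAM_DEV_QFRZN so the
 * peripheral driver releases the queue after acting on the error, then
 * complete the CCB.  A minimal sketch of just that idiom:
 */
static void
xx_complete_with_error(union ccb *ccb, uint32_t status)
{
    ccb->ccb_h.status &= ~CAM_STATUS_MASK;
    ccb->ccb_h.status |= status;
    if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
        /* Freeze once; the QFRZN flag tells the upper layers to
         * release the device queue after error recovery. */
        xpt_freeze_devq(ccb->ccb_h.path, 1);
        ccb->ccb_h.status |= CAM_DEV_QFRZN;
    }
    xpt_done(ccb);
}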
void
isci_task_request_complete(SCI_CONTROLLER_HANDLE_T scif_controller,
    SCI_REMOTE_DEVICE_HANDLE_T remote_device,
    SCI_TASK_REQUEST_HANDLE_T task_request, SCI_TASK_STATUS completion_status)
{
    struct ISCI_TASK_REQUEST *isci_task_request =
        (struct ISCI_TASK_REQUEST *)sci_object_get_association(task_request);
    struct ISCI_CONTROLLER *isci_controller =
        (struct ISCI_CONTROLLER *)sci_object_get_association(scif_controller);
    struct ISCI_REMOTE_DEVICE *isci_remote_device =
        (struct ISCI_REMOTE_DEVICE *)sci_object_get_association(remote_device);
    struct ISCI_REMOTE_DEVICE *pending_remote_device;
    BOOL retry_task = FALSE;
    union ccb *ccb = isci_task_request->ccb;

    isci_remote_device->is_resetting = FALSE;

    switch ((int)completion_status) {
    case SCI_TASK_SUCCESS:
    case SCI_TASK_FAILURE_RESPONSE_VALID:
        break;

    case SCI_TASK_FAILURE_INVALID_STATE:
        retry_task = TRUE;
        isci_log_message(0, "ISCI",
            "task failure (invalid state) - retrying\n");
        break;

    case SCI_TASK_FAILURE_INSUFFICIENT_RESOURCES:
        retry_task = TRUE;
        isci_log_message(0, "ISCI",
            "task failure (insufficient resources) - retrying\n");
        break;

    case SCI_FAILURE_TIMEOUT:
        if (isci_controller->fail_on_task_timeout) {
            retry_task = FALSE;
            isci_log_message(0, "ISCI",
                "task timeout - not retrying\n");
            scif_cb_domain_device_removed(isci_controller,
                isci_remote_device->domain, isci_remote_device);
        } else {
            retry_task = TRUE;
            isci_log_message(0, "ISCI",
                "task timeout - retrying\n");
        }
        break;

    case SCI_TASK_FAILURE:
    case SCI_TASK_FAILURE_UNSUPPORTED_PROTOCOL:
    case SCI_TASK_FAILURE_INVALID_TAG:
    case SCI_TASK_FAILURE_CONTROLLER_SPECIFIC_ERR:
    case SCI_TASK_FAILURE_TERMINATED:
    case SCI_TASK_FAILURE_INVALID_PARAMETER_VALUE:
    default:
        isci_log_message(0, "ISCI",
            "unhandled task completion code 0x%x\n",
            completion_status);
        break;
    }

    if (isci_controller->is_frozen == TRUE) {
        isci_controller->is_frozen = FALSE;
        xpt_release_simq(isci_controller->sim, TRUE);
    }

    sci_pool_put(isci_controller->request_pool,
        (struct ISCI_REQUEST *)isci_task_request);

    /* Make sure we release the device queue, since it may have been
     * frozen if someone tried to start an I/O while the task was in
     * progress.
     */
    isci_remote_device_release_device_queue(isci_remote_device);

    if (retry_task == TRUE) {
        isci_remote_device_reset(isci_remote_device, ccb);
    } else {
        pending_remote_device = sci_fast_list_remove_head(
            &isci_controller->pending_device_reset_list);

        if (pending_remote_device != NULL) {
            /* Any resets that were triggered from an
             * XPT_RESET_DEV CCB are never put in the pending
             * list if the request pool is empty - they are
             * given back to CAM to be requeued.  So we will
             * always pass NULL here, denoting that there is no
             * CCB associated with the device reset.
             */
            isci_remote_device_reset(pending_remote_device, NULL);
        } else if (ccb != NULL) {
            /* There was a CCB associated with this reset, so
             * mark it complete and return it to CAM.
             */
            ccb->ccb_h.status &= ~CAM_STATUS_MASK;
            ccb->ccb_h.status |= CAM_REQ_CMP;
            xpt_done(ccb);
        }
    }
}
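/*
 * The tail of isci_task_request_complete() serializes device resets: when
 * one reset finishes, the next pending device (if any) is popped and its
 * reset started.  A hedged sketch of that handoff using a plain TAILQ in
 * place of the SCI fast list; struct xx_device and xx_start_reset() are
 * invented:
 */
#include <sys/queue.h>

struct xx_device {
    TAILQ_ENTRY(xx_device) pending_link;
};

static TAILQ_HEAD(, xx_device) xx_pending_resets =
    TAILQ_HEAD_INITIALIZER(xx_pending_resets);

static void
xx_reset_complete(void)
{
    struct xx_device *next = TAILQ_FIRST(&xx_pending_resets);

    if (next != NULL) {
        /* Only one reset is in flight at a time; start the next. */
        TAILQ_REMOVE(&xx_pending_resets, next, pending_link);
        xx_start_reset(next);
    }
}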
void
isci_io_request_complete(SCI_CONTROLLER_HANDLE_T scif_controller,
    SCI_REMOTE_DEVICE_HANDLE_T remote_device,
    struct ISCI_IO_REQUEST *isci_request, SCI_IO_STATUS completion_status)
{
    struct ISCI_CONTROLLER *isci_controller;
    struct ISCI_REMOTE_DEVICE *isci_remote_device;
    union ccb *ccb;

    isci_controller = (struct ISCI_CONTROLLER *)
        sci_object_get_association(scif_controller);
    isci_remote_device = (struct ISCI_REMOTE_DEVICE *)
        sci_object_get_association(remote_device);

    ccb = isci_request->ccb;
    ccb->ccb_h.status &= ~CAM_STATUS_MASK;

    switch (completion_status) {
    case SCI_IO_SUCCESS:
    case SCI_IO_SUCCESS_COMPLETE_BEFORE_START:
#if __FreeBSD_version >= 900026
        if (ccb->ccb_h.func_code == XPT_SMP_IO) {
            void *smp_response =
                scif_io_request_get_response_iu_address(
                    isci_request->sci_object);

            memcpy(ccb->smpio.smp_response, smp_response,
                ccb->smpio.smp_response_len);
        }
#endif
        ccb->ccb_h.status |= CAM_REQ_CMP;
        break;

    case SCI_IO_SUCCESS_IO_DONE_EARLY:
        ccb->ccb_h.status |= CAM_REQ_CMP;
        ccb->csio.resid = ccb->csio.dxfer_len -
            scif_io_request_get_number_of_bytes_transferred(
                isci_request->sci_object);
        break;

    case SCI_IO_FAILURE_RESPONSE_VALID:
    {
        SCI_SSP_RESPONSE_IU_T *response_buffer;
        uint32_t sense_length;
        int error_code, sense_key, asc, ascq;
        struct ccb_scsiio *csio = &ccb->csio;

        response_buffer = (SCI_SSP_RESPONSE_IU_T *)
            scif_io_request_get_response_iu_address(
                isci_request->sci_object);

        sense_length = sci_ssp_get_sense_data_length(
            response_buffer->sense_data_length);
        sense_length = MIN(csio->sense_len, sense_length);
        memcpy(&csio->sense_data, response_buffer->data,
            sense_length);
        csio->sense_resid = csio->sense_len - sense_length;
        csio->scsi_status = response_buffer->status;
        ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
        ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
        scsi_extract_sense(&csio->sense_data, &error_code,
            &sense_key, &asc, &ascq);
        isci_log_message(1, "ISCI",
            "isci: bus=%x target=%x lun=%x cdb[0]=%x status=%x key=%x asc=%x ascq=%x\n",
            ccb->ccb_h.path_id, ccb->ccb_h.target_id,
            ccb->ccb_h.target_lun, csio->cdb_io.cdb_bytes[0],
            csio->scsi_status, sense_key, asc, ascq);
        break;
    }

    case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED:
        isci_remote_device_reset(isci_remote_device, NULL);
        /* drop through */
    case SCI_IO_FAILURE_TERMINATED:
        ccb->ccb_h.status |= CAM_REQ_TERMIO;
        isci_log_message(1, "ISCI",
            "isci: bus=%x target=%x lun=%x cdb[0]=%x terminated\n",
            ccb->ccb_h.path_id, ccb->ccb_h.target_id,
            ccb->ccb_h.target_lun, ccb->csio.cdb_io.cdb_bytes[0]);
        break;

    case SCI_IO_FAILURE_INVALID_STATE:
    case SCI_IO_FAILURE_INSUFFICIENT_RESOURCES:
        ccb->ccb_h.status |= CAM_REQUEUE_REQ;
        isci_remote_device_freeze_lun_queue(isci_remote_device,
            ccb->ccb_h.target_lun);
        break;

    case SCI_IO_FAILURE_INVALID_REMOTE_DEVICE:
        ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
        break;

    case SCI_IO_FAILURE_NO_NCQ_TAG_AVAILABLE:
    {
        struct ccb_relsim ccb_relsim;
        struct cam_path *path;

        xpt_create_path(&path, NULL,
            cam_sim_path(isci_controller->sim),
            isci_remote_device->index, 0);

        xpt_setup_ccb(&ccb_relsim.ccb_h, path, 5);
        ccb_relsim.ccb_h.func_code = XPT_REL_SIMQ;
        ccb_relsim.ccb_h.flags = CAM_DEV_QFREEZE;
        ccb_relsim.release_flags = RELSIM_ADJUST_OPENINGS;
        ccb_relsim.openings =
            scif_remote_device_get_max_queue_depth(remote_device);
        xpt_action((union ccb *)&ccb_relsim);
        xpt_free_path(path);
        ccb->ccb_h.status |= CAM_REQUEUE_REQ;
        break;
    }

    case SCI_IO_FAILURE:
    case SCI_IO_FAILURE_REQUIRES_SCSI_ABORT:
    case SCI_IO_FAILURE_UNSUPPORTED_PROTOCOL:
    case SCI_IO_FAILURE_PROTOCOL_VIOLATION:
    case SCI_IO_FAILURE_INVALID_PARAMETER_VALUE:
    case SCI_IO_FAILURE_CONTROLLER_SPECIFIC_ERR:
    default:
        isci_log_message(1, "ISCI",
            "isci: bus=%x target=%x lun=%x cdb[0]=%x completion status=%x\n",
            ccb->ccb_h.path_id, ccb->ccb_h.target_id,
            ccb->ccb_h.target_lun, ccb->csio.cdb_io.cdb_bytes[0],
            completion_status);
        ccb->ccb_h.status |= CAM_REQ_CMP_ERR;
        break;
    }

    if (ccb->ccb_h.status != CAM_REQ_CMP) {
        /* The CCB will be completed with some type of non-success
         * status.  So temporarily freeze the queue until the upper
         * layers can act on the status.  The CAM_DEV_QFRZN flag
         * will then release the queue after the status is acted
         * upon.
         */
        ccb->ccb_h.status |= CAM_DEV_QFRZN;
        xpt_freeze_devq(ccb->ccb_h.path, 1);
    }

    callout_stop(&isci_request->parent.timer);
    bus_dmamap_sync(isci_request->parent.dma_tag,
        isci_request->parent.dma_map,
        BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

    bus_dmamap_unload(isci_request->parent.dma_tag,
        isci_request->parent.dma_map);

    if (isci_remote_device->frozen_lun_mask != 0 &&
        !(ccb->ccb_h.status & CAM_REQUEUE_REQ))
        isci_remote_device_release_device_queue(isci_remote_device);

    xpt_done(ccb);
    isci_request->ccb = NULL;

    if (isci_controller->is_frozen == TRUE) {
        isci_controller->is_frozen = FALSE;
        xpt_release_simq(isci_controller->sim, TRUE);
    }

    sci_pool_put(isci_controller->request_pool,
        (struct ISCI_REQUEST *)isci_request);
}