/* * mrsas_bus_scan: Perform bus scan * input: Adapter instance soft state * * This mrsas_bus_scan function is needed for FreeBSD 7.x. Also, it should * not be called in FreeBSD 8.x and later versions, where the bus scan is * automatic. */ int mrsas_bus_scan(struct mrsas_softc *sc) { union ccb *ccb_0; union ccb *ccb_1; lockmgr(&sc->sim_lock, LK_EXCLUSIVE); if ((ccb_0 = xpt_alloc_ccb()) == NULL) { lockmgr(&sc->sim_lock, LK_RELEASE); return(ENOMEM); } if ((ccb_1 = xpt_alloc_ccb()) == NULL) { xpt_free_ccb(ccb_0); lockmgr(&sc->sim_lock, LK_RELEASE); return(ENOMEM); } if (xpt_create_path(&ccb_0->ccb_h.path, xpt_periph, cam_sim_path(sc->sim_0), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP){ xpt_free_ccb(ccb_0); xpt_free_ccb(ccb_1); lockmgr(&sc->sim_lock, LK_RELEASE); return(EIO); } if (xpt_create_path(&ccb_1->ccb_h.path, xpt_periph, cam_sim_path(sc->sim_1), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP){ xpt_free_ccb(ccb_0); xpt_free_ccb(ccb_1); lockmgr(&sc->sim_lock, LK_RELEASE); return(EIO); } xpt_setup_ccb(&ccb_0->ccb_h, ccb_0->ccb_h.path, 5/*priority (low)*/); ccb_0->ccb_h.func_code = XPT_SCAN_BUS; ccb_0->ccb_h.cbfcnp = mrsas_rescan_callback; ccb_0->crcn.flags = CAM_FLAG_NONE; xpt_action(ccb_0); /* scan is now in progress */ xpt_setup_ccb(&ccb_1->ccb_h, ccb_1->ccb_h.path, 5/*priority (low)*/); ccb_1->ccb_h.func_code = XPT_SCAN_BUS; ccb_1->ccb_h.cbfcnp = mrsas_rescan_callback; ccb_1->crcn.flags = CAM_FLAG_NONE; xpt_action(ccb_1); /* scan is now in progress */ lockmgr(&sc->sim_lock, LK_RELEASE); return(0); }
static void ptinit(void) { cam_status status; struct cam_path *path; /* * Install a global async callback. This callback will * receive async callbacks like "new device found". */ status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); if (status == CAM_REQ_CMP) { struct ccb_setasync csa; xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5); csa.ccb_h.func_code = XPT_SASYNC_CB; csa.event_enable = AC_FOUND_DEVICE; csa.callback = ptasync; csa.callback_arg = NULL; xpt_action((union ccb *)&csa); status = csa.ccb_h.status; xpt_free_path(path); } if (status != CAM_REQ_CMP) { printf("pt: Failed to attach master async callback " "due to status 0x%x!\n", status); } }
/*
 * tws_bus_scan:  Ask CAM to scan the bus behind this controller's SIM.
 *
 * Uses the pre-allocated sc->scan_ccb; completion is reported through
 * tws_bus_scan_cb.  Returns 0 on success, EIO if the wildcard path
 * cannot be created.  The caller must hold sc->sim_lock (asserted).
 * NOTE(review): the created path is not freed here — presumably the
 * completion callback owns it; confirm against tws_bus_scan_cb.
 */
int
tws_bus_scan(struct tws_softc *sc)
{
	struct cam_path *path;
	union ccb       *ccb;

	TWS_TRACE_DEBUG(sc, "entry", sc, 0);
	KASSERT(sc->sim, ("sim not allocated"));
	/* The SIM lock must be held by the calling thread. */
	KKASSERT(lockstatus(&sc->sim_lock, curthread) != 0);

	ccb = sc->scan_ccb;	/* pre-allocated scan CCB */
	if (xpt_create_path(&path, xpt_periph, cam_sim_path(sc->sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		/* lockmgr(&sc->sim_lock, LK_RELEASE); */
		return(EIO);
	}
	xpt_setup_ccb(&ccb->ccb_h, path, 5);	/* 5 = low priority */
	ccb->ccb_h.func_code = XPT_SCAN_BUS;
	ccb->ccb_h.cbfcnp = tws_bus_scan_cb;
	ccb->crcn.flags = CAM_FLAG_NONE;
	xpt_action(ccb);	/* scan proceeds asynchronously */

	return(0);
}
/*
 * ahci_cam_rescan:  Schedule an asynchronous rescan of all targets on
 * this AHCI port's SIM.
 *
 * If a scan is already running, only record that another one was
 * requested (AP_F_SCAN_REQUESTED) and return; the running scan is
 * expected to re-trigger.
 */
static void
ahci_cam_rescan(struct ahci_port *ap)
{
	struct cam_path *path;
	union ccb *ccb;
	int status;
	int i;

	if (ap->ap_flags & AP_F_SCAN_RUNNING) {
		ap->ap_flags |= AP_F_SCAN_REQUESTED;
		return;
	}
	ap->ap_flags |= AP_F_SCAN_RUNNING;

	/* Flag every port-multiplier target for rescan. */
	for (i = 0; i < AHCI_MAX_PMPORTS; ++i) {
		ap->ap_ata[i]->at_features |= ATA_PORT_F_RESCAN;
	}

	status = xpt_create_path(&path, xpt_periph, cam_sim_path(ap->ap_sim),
				 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
	if (status != CAM_REQ_CMP)
		return;

	ccb = xpt_alloc_ccb();
	xpt_setup_ccb(&ccb->ccb_h, path, 5);	/* 5 = low priority */
	/*
	 * NOTE(review): XPT_ENG_EXEC (not XPT_SCAN_BUS) appears deliberate
	 * here; the callback presumably drives the actual scan — confirm
	 * against ahci_cam_rescan_callback.
	 */
	ccb->ccb_h.func_code = XPT_ENG_EXEC;
	ccb->ccb_h.cbfcnp = ahci_cam_rescan_callback;
	ccb->ccb_h.sim_priv.entries[0].ptr = ap;	/* port back-pointer */
	ccb->crcn.flags = CAM_FLAG_NONE;
	xpt_action_async(ccb);
}
/* * Function name: tw_osli_request_bus_scan * Description: Requests CAM for a scan of the bus. * * Input: sc -- ptr to per ctlr structure * Output: None * Return value: 0 -- success * non-zero-- failure */ TW_INT32 tw_osli_request_bus_scan(struct twa_softc *sc) { union ccb *ccb; tw_osli_dbg_dprintf(3, sc, "entering"); /* If we get here before sc->sim is initialized, return an error. */ if (!(sc->sim)) return(ENXIO); if ((ccb = xpt_alloc_ccb()) == NULL) return(ENOMEM); lockmgr(sc->sim_lock, LK_EXCLUSIVE); if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, cam_sim_path(sc->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_free_ccb(ccb); lockmgr(sc->sim_lock, LK_RELEASE); return(EIO); } xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, 5/*priority (low)*/); ccb->ccb_h.func_code = XPT_SCAN_BUS; ccb->ccb_h.cbfcnp = twa_bus_scan_cb; ccb->crcn.flags = CAM_FLAG_NONE; xpt_action(ccb); lockmgr(sc->sim_lock, LK_RELEASE); return(0); }
/*
 * ptoninvalidate:  Invalidate a pt peripheral whose device has gone away.
 *
 * De-registers the async callback, marks the softc invalid, and fails
 * every queued buffer with ENXIO.
 */
static void
ptoninvalidate(struct cam_periph *periph)
{
	int s;
	struct pt_softc *softc;
	struct buf *q_bp;
	struct ccb_setasync csa;

	softc = (struct pt_softc *)periph->softc;

	/*
	 * De-register any async callbacks.
	 */
	xpt_setup_ccb(&csa.ccb_h, periph->path, /* priority */ 5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = 0;	/* empty event mask disables the callback */
	csa.callback = ptasync;
	csa.callback_arg = periph;
	xpt_action((union ccb *)&csa);
	softc->flags |= PT_FLAG_DEVICE_INVALID;

	/*
	 * Although the oninvalidate() routines are always called at
	 * splsoftcam, we need to be at splbio() here to keep the buffer
	 * queue from being modified while we traverse it.
	 */
	s = splbio();

	/*
	 * Return all queued I/O with ENXIO.
	 * XXX Handle any transactions queued to the card
	 *     with XPT_ABORT_CCB.
	 */
	while ((q_bp = bufq_first(&softc->buf_queue)) != NULL){
		bufq_remove(&softc->buf_queue, q_bp);
		q_bp->b_resid = q_bp->b_bcount;	/* nothing was transferred */
		q_bp->b_error = ENXIO;
		q_bp->b_flags |= B_ERROR;
		biodone(q_bp);
	}
	splx(s);

	xpt_print_path(periph->path);
	printf("lost device\n");
}
/*
 * ic_scan:  Start a CAM bus scan for an iSCSI session and wait for it
 * to finish.
 *
 * Returns 0 if at least one LUN was found, ENOMEM/ENODEV otherwise.
 * Completion is signalled by scan_callback clearing ISC_SCANWAIT.
 * NOTE(review): the CCB is not freed here on the success path —
 * presumably scan_callback releases it; confirm.
 */
static int
ic_scan(isc_session_t *sp)
{
	union ccb		*ccb;

	debug_called(8);
	sdebug(2, "scanning sid=%d", sp->sid);

	/* M_WAITOK allocations do not fail; the check is kept for safety. */
	if((ccb = malloc(sizeof(union ccb), M_TEMP, M_WAITOK | M_ZERO)) == NULL) {
		xdebug("scan failed (can't allocate CCB)");
		return ENOMEM; // XXX
	}

	sp->flags &= ~ISC_CAMDEVS;
	sp->flags |= ISC_SCANWAIT;	/* cleared by scan_callback */

	CAM_LOCK(sp);
	if(xpt_create_path(&sp->cam_path, NULL, cam_sim_path(sp->cam_sim),
			   0, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		xdebug("can't create cam path");
		CAM_UNLOCK(sp);
		free(ccb, M_TEMP);
		return ENODEV; // XXX
	}
	xpt_setup_ccb(&ccb->ccb_h, sp->cam_path, 5/*priority (low)*/);
	ccb->ccb_h.func_code	= XPT_SCAN_BUS;
	ccb->ccb_h.cbfcnp	= scan_callback;
	ccb->crcn.flags		= CAM_FLAG_NONE;
	ccb->ccb_h.spriv_ptr0	= sp;	/* session back-pointer for callback */

	xpt_action(ccb);
	CAM_UNLOCK(sp);

	/* Sleep until the callback clears ISC_SCANWAIT. */
	while(sp->flags & ISC_SCANWAIT)
		tsleep(sp, PRIBIO, "ffp", 5*hz); // the timeout time should
						 // be configurable

	sdebug(2, "# of luns=%d", sp->target_nluns);

	if(sp->target_nluns > 0) {
		sp->flags |= ISC_CAMDEVS;
		return 0;
	}

	return ENODEV;
}
/* * Function name: twa_request_bus_scan * Description: Requests CAM for a scan of the bus. * * Input: sc -- ptr to per ctlr structure * Output: None * Return value: None */ void twa_request_bus_scan(struct twa_softc *sc) { struct cam_path *path; union ccb *ccb; if ((ccb = malloc(sizeof(union ccb), M_TEMP, M_WAITOK)) == NULL) return; bzero(ccb, sizeof(union ccb)); if (xpt_create_path(&path, xpt_periph, cam_sim_path(sc->twa_sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) return; xpt_setup_ccb(&ccb->ccb_h, path, 5); ccb->ccb_h.func_code = XPT_SCAN_BUS; ccb->ccb_h.cbfcnp = twa_bus_scan_cb; ccb->crcn.flags = CAM_FLAG_NONE; xpt_action(ccb); }
static void ahci_xpt_rescan(struct ahci_port *ap) { struct cam_path *path; union ccb *ccb; int status; status = xpt_create_path(&path, xpt_periph, cam_sim_path(ap->ap_sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); if (status != CAM_REQ_CMP) return; ccb = xpt_alloc_ccb(); xpt_setup_ccb(&ccb->ccb_h, path, 5); /* 5 = low priority */ ccb->ccb_h.func_code = XPT_SCAN_BUS; ccb->ccb_h.cbfcnp = ahci_cam_rescan_callback; ccb->ccb_h.sim_priv.entries[0].ptr = ap; ccb->crcn.flags = CAM_FLAG_NONE; xpt_action_async(ccb); }
static void vpo_cam_rescan(struct vpo_data *vpo) { struct cam_path *path; union ccb *ccb = malloc(sizeof(union ccb), M_TEMP, M_WAITOK | M_ZERO); if (xpt_create_path(&path, xpt_periph, cam_sim_path(vpo->sim), 0, 0) != CAM_REQ_CMP) { /* A failure is benign as the user can do a manual rescan */ return; } xpt_setup_ccb(&ccb->ccb_h, path, 5/*priority (low)*/); ccb->ccb_h.func_code = XPT_SCAN_BUS; ccb->ccb_h.cbfcnp = vpo_cam_rescan_callback; ccb->crcn.flags = CAM_FLAG_NONE; xpt_action(ccb); /* The scan is in progress now. */ }
/*
 * cfcs_init:  Register the CAM-sim frontend with CTL and attach it to
 * CAM: allocate a devq and SIM, register the bus, create a wildcard
 * path, and install an async callback for lost devices.
 *
 * Returns 0 on success (or when CTL is disabled), non-zero on failure.
 */
int
cfcs_init(void)
{
	struct cfcs_softc *softc;
	struct ccb_setasync csa;
	struct ctl_frontend *fe;
#ifdef NEEDTOPORT
	char wwnn[8];
#endif
	int retval;

	/* Don't continue if CTL is disabled */
	if (ctl_disable != 0)
		return (0);

	softc = &cfcs_softc;
	retval = 0;
	bzero(softc, sizeof(*softc));
	sprintf(softc->lock_desc, "ctl2cam");
	mtx_init(&softc->lock, softc->lock_desc, NULL, MTX_DEF);
	fe = &softc->fe;

	fe->port_type = CTL_PORT_INTERNAL;
	/* XXX KDM what should the real number be here? */
	fe->num_requested_ctl_io = 4096;
	snprintf(softc->port_name, sizeof(softc->port_name), "ctl2cam");
	fe->port_name = softc->port_name;
	fe->port_online = cfcs_online;
	fe->port_offline = cfcs_offline;
	fe->onoff_arg = softc;
	fe->targ_enable = cfcs_targ_enable;
	fe->targ_disable = cfcs_targ_disable;
	fe->lun_enable = cfcs_lun_enable;
	fe->lun_disable = cfcs_lun_disable;
	fe->targ_lun_arg = softc;
	fe->fe_datamove = cfcs_datamove;
	fe->fe_done = cfcs_done;
	/* XXX KDM what should we report here? */
	/* XXX These should probably be fetched from CTL. */
	fe->max_targets = 1;
	fe->max_target_id = 15;

	retval = ctl_frontend_register(fe, /*master_SC*/ 1);
	if (retval != 0) {
		printf("%s: ctl_frontend_register() failed with error %d!\n",
		       __func__, retval);
		mtx_destroy(&softc->lock);
		return (1);
	}

	/*
	 * Get the WWNN out of the database, and create a WWPN as well.
	 */
#ifdef NEEDTOPORT
	ddb_GetWWNN((char *)wwnn);
	softc->wwnn = be64dec(wwnn);
	softc->wwpn = softc->wwnn + (softc->fe.targ_port & 0xff);
#endif

	/*
	 * If the CTL frontend didn't tell us what our WWNN/WWPN is, go
	 * ahead and set something random.
	 */
	if (fe->wwnn == 0) {
		uint64_t random_bits;

		arc4rand(&random_bits, sizeof(random_bits), 0);
		softc->wwnn = (random_bits &
			       0x0000000fffffff00ULL) |
			/* Company ID */ 0x5000000000000000ULL |
			/* NL-Port */    0x0300;
		softc->wwpn = softc->wwnn + fe->targ_port + 1;
		fe->wwnn = softc->wwnn;
		fe->wwpn = softc->wwpn;
	} else {
		softc->wwnn = fe->wwnn;
		softc->wwpn = fe->wwpn;
	}

	mtx_lock(&softc->lock);
	softc->devq = cam_simq_alloc(fe->num_requested_ctl_io);
	if (softc->devq == NULL) {
		printf("%s: error allocating devq\n", __func__);
		retval = ENOMEM;
		goto bailout;
	}

	softc->sim = cam_sim_alloc(cfcs_action, cfcs_poll, softc->port_name,
				   softc, /*unit*/ 0, &softc->lock, 1,
				   fe->num_requested_ctl_io, softc->devq);
	if (softc->sim == NULL) {
		printf("%s: error allocating SIM\n", __func__);
		retval = ENOMEM;
		goto bailout;
	}

	if (xpt_bus_register(softc->sim, NULL, 0) != CAM_SUCCESS) {
		printf("%s: error registering SIM\n", __func__);
		retval = ENOMEM;
		goto bailout;
	}

	if (xpt_create_path(&softc->path, /*periph*/NULL,
			    cam_sim_path(softc->sim),
			    CAM_TARGET_WILDCARD,
			    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		printf("%s: error creating path\n", __func__);
		xpt_bus_deregister(cam_sim_path(softc->sim));
		retval = 1;
		goto bailout;
	}

	/* Ask CAM to notify us when a device goes away. */
	xpt_setup_ccb(&csa.ccb_h, softc->path, CAM_PRIORITY_NONE);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_LOST_DEVICE;
	csa.callback = cfcs_async;
	csa.callback_arg = softc->sim;
	xpt_action((union ccb *)&csa);

	mtx_unlock(&softc->lock);

	return (retval);

bailout:
	/* Unwind in reverse order; freeing the SIM also frees the devq. */
	if (softc->sim)
		cam_sim_free(softc->sim, /*free_devq*/ TRUE);
	else if (softc->devq)
		cam_simq_free(softc->devq);

	mtx_unlock(&softc->lock);
	mtx_destroy(&softc->lock);

	return (retval);
}
/*
 * cfcs_init:  Register the "camsim" CTL port and attach it to CAM:
 * allocate a devq and SIM, register the bus, create a wildcard path,
 * and install an async callback for lost devices.
 *
 * Returns 0 on success, an errno-style value on failure.
 */
int
cfcs_init(void)
{
	struct cfcs_softc *softc;
	struct ccb_setasync csa;
	struct ctl_port *port;
	int retval;

	softc = &cfcs_softc;
	bzero(softc, sizeof(*softc));
	mtx_init(&softc->lock, "ctl2cam", NULL, MTX_DEF);
	port = &softc->port;

	port->frontend = &cfcs_frontend;
	port->port_type = CTL_PORT_INTERNAL;
	/* XXX KDM what should the real number be here? */
	port->num_requested_ctl_io = 4096;
	snprintf(softc->port_name, sizeof(softc->port_name), "camsim");
	port->port_name = softc->port_name;
	port->port_online = cfcs_online;
	port->port_offline = cfcs_offline;
	port->onoff_arg = softc;
	port->fe_datamove = cfcs_datamove;
	port->fe_done = cfcs_done;
	/* XXX KDM what should we report here? */
	/* XXX These should probably be fetched from CTL. */
	port->max_targets = 1;
	port->max_target_id = 15;
	port->targ_port = -1;	/* CTL assigns the real port number */

	retval = ctl_port_register(port);
	if (retval != 0) {
		printf("%s: ctl_port_register() failed with error %d!\n",
		       __func__, retval);
		mtx_destroy(&softc->lock);
		return (retval);
	}

	/*
	 * If the CTL frontend didn't tell us what our WWNN/WWPN is, go
	 * ahead and set something random.
	 */
	if (port->wwnn == 0) {
		uint64_t random_bits;

		arc4rand(&random_bits, sizeof(random_bits), 0);
		softc->wwnn = (random_bits &
			       0x0000000fffffff00ULL) |
			/* Company ID */ 0x5000000000000000ULL |
			/* NL-Port */    0x0300;
		softc->wwpn = softc->wwnn + port->targ_port + 1;
		ctl_port_set_wwns(port, true, softc->wwnn, true, softc->wwpn);
	} else {
		softc->wwnn = port->wwnn;
		softc->wwpn = port->wwpn;
	}

	mtx_lock(&softc->lock);
	softc->devq = cam_simq_alloc(port->num_requested_ctl_io);
	if (softc->devq == NULL) {
		printf("%s: error allocating devq\n", __func__);
		retval = ENOMEM;
		goto bailout;
	}

	softc->sim = cam_sim_alloc(cfcs_action, cfcs_poll, softc->port_name,
				   softc, /*unit*/ 0, &softc->lock, 1,
				   port->num_requested_ctl_io, softc->devq);
	if (softc->sim == NULL) {
		printf("%s: error allocating SIM\n", __func__);
		retval = ENOMEM;
		goto bailout;
	}

	if (xpt_bus_register(softc->sim, NULL, 0) != CAM_SUCCESS) {
		printf("%s: error registering SIM\n", __func__);
		retval = ENOMEM;
		goto bailout;
	}

	if (xpt_create_path(&softc->path, /*periph*/NULL,
			    cam_sim_path(softc->sim),
			    CAM_TARGET_WILDCARD,
			    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		printf("%s: error creating path\n", __func__);
		xpt_bus_deregister(cam_sim_path(softc->sim));
		retval = EINVAL;
		goto bailout;
	}

	/* Ask CAM to notify us when a device goes away. */
	xpt_setup_ccb(&csa.ccb_h, softc->path, CAM_PRIORITY_NONE);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_LOST_DEVICE;
	csa.callback = cfcs_async;
	csa.callback_arg = softc->sim;
	xpt_action((union ccb *)&csa);

	mtx_unlock(&softc->lock);

	return (retval);

bailout:
	/* Unwind in reverse order; freeing the SIM also frees the devq. */
	if (softc->sim)
		cam_sim_free(softc->sim, /*free_devq*/ TRUE);
	else if (softc->devq)
		cam_simq_free(softc->devq);

	mtx_unlock(&softc->lock);
	mtx_destroy(&softc->lock);

	return (retval);
}
/* * Function name: twa_cam_setup * Description: Attaches the driver to CAM. * * Input: sc -- ptr to per ctlr structure * Output: None * Return value: 0 -- success * non-zero-- failure */ int twa_cam_setup(struct twa_softc *sc) { struct cam_devq *devq; struct ccb_setasync csa; twa_dbg_dprint(3, sc, "sc = %p", sc); /* * Create the device queue for our SIM. */ devq = cam_simq_alloc(TWA_Q_LENGTH); if (devq == NULL) return(ENOMEM); /* * Create a SIM entry. Though we can support TWA_Q_LENGTH simultaneous * requests, we claim to be able to handle only (TWA_Q_LENGTH - 1), so * that we always have a request packet available to service attention * interrupts. */ twa_dbg_dprint(3, sc, "Calling cam_sim_alloc"); sc->twa_sim = cam_sim_alloc(twa_action, twa_poll, "twa", sc, device_get_unit(sc->twa_bus_dev), TWA_Q_LENGTH - 1, 1, devq); if (sc->twa_sim == NULL) { cam_simq_free(devq); return(ENOMEM); } /* * Register the bus. */ twa_dbg_dprint(3, sc, "Calling xpt_bus_register"); if (xpt_bus_register(sc->twa_sim, 0) != CAM_SUCCESS) { cam_sim_free(sc->twa_sim, TRUE); sc->twa_sim = NULL; /* so twa_cam_detach will not try to free it */ return(ENXIO); } twa_dbg_dprint(3, sc, "Calling xpt_create_path"); if (xpt_create_path(&sc->twa_path, NULL, cam_sim_path(sc->twa_sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_bus_deregister(cam_sim_path (sc->twa_sim)); cam_sim_free(sc->twa_sim, TRUE); /* passing TRUE will free the devq as well */ return(ENXIO); } twa_dbg_dprint(3, sc, "Calling xpt_setup_ccb"); xpt_setup_ccb(&csa.ccb_h, sc->twa_path, 5); csa.ccb_h.func_code = XPT_SASYNC_CB; csa.event_enable = AC_FOUND_DEVICE | AC_LOST_DEVICE; csa.callback = twa_async; csa.callback_arg = sc; xpt_action((union ccb *)&csa); twa_dbg_dprint(3, sc, "Calling twa_request_bus_scan"); /* * Request a bus scan, so that CAM gets to know of * the logical units that we control. */ twa_request_bus_scan(sc); twa_dbg_dprint(3, sc, "Exiting"); return(0); }
/*
 * isci_io_request_complete:  I/O completion handler called by the SCI
 * framework.  Translates the SCI completion status into a CAM status,
 * recycles the request object, and either completes the CCB back to
 * CAM or leaves it queued on the remote device for a later retry.
 */
void
isci_io_request_complete(SCI_CONTROLLER_HANDLE_T scif_controller,
    SCI_REMOTE_DEVICE_HANDLE_T remote_device,
    struct ISCI_IO_REQUEST *isci_request, SCI_IO_STATUS completion_status)
{
	struct ISCI_CONTROLLER *isci_controller;
	struct ISCI_REMOTE_DEVICE *isci_remote_device;
	union ccb *ccb;
	BOOL complete_ccb;

	complete_ccb = TRUE;
	isci_controller = (struct ISCI_CONTROLLER *)
	    sci_object_get_association(scif_controller);
	isci_remote_device = (struct ISCI_REMOTE_DEVICE *)
	    sci_object_get_association(remote_device);

	ccb = isci_request->ccb;
	/* Clear the old status bits before merging in the new result. */
	ccb->ccb_h.status &= ~CAM_STATUS_MASK;

	switch (completion_status) {
	case SCI_IO_SUCCESS:
	case SCI_IO_SUCCESS_COMPLETE_BEFORE_START:
#if __FreeBSD_version >= 900026
		if (ccb->ccb_h.func_code == XPT_SMP_IO) {
			void *smp_response =
			    scif_io_request_get_response_iu_address(
				isci_request->sci_object);

			memcpy(ccb->smpio.smp_response, smp_response,
			    ccb->smpio.smp_response_len);
		}
#endif
		ccb->ccb_h.status |= CAM_REQ_CMP;
		break;

	case SCI_IO_SUCCESS_IO_DONE_EARLY:
		/* Short transfer: report the untransferred residual. */
		ccb->ccb_h.status |= CAM_REQ_CMP;
		ccb->csio.resid = ccb->csio.dxfer_len -
		    scif_io_request_get_number_of_bytes_transferred(
			isci_request->sci_object);
		break;

	case SCI_IO_FAILURE_RESPONSE_VALID:
	{
		/* SCSI check condition: copy out sense data for CAM. */
		SCI_SSP_RESPONSE_IU_T * response_buffer;
		uint32_t sense_length;
		int error_code, sense_key, asc, ascq;
		struct ccb_scsiio *csio = &ccb->csio;

		response_buffer = (SCI_SSP_RESPONSE_IU_T *)
		    scif_io_request_get_response_iu_address(
			isci_request->sci_object);

		sense_length = sci_ssp_get_sense_data_length(
		    response_buffer->sense_data_length);
		sense_length = MIN(csio->sense_len, sense_length);
		memcpy(&csio->sense_data, response_buffer->data, sense_length);
		csio->sense_resid = csio->sense_len - sense_length;
		csio->scsi_status = response_buffer->status;
		ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
		ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
		scsi_extract_sense( &csio->sense_data, &error_code, &sense_key,
		    &asc, &ascq );
		isci_log_message(1, "ISCI",
		    "isci: bus=%x target=%x lun=%x cdb[0]=%x status=%x key=%x asc=%x ascq=%x\n",
		    ccb->ccb_h.path_id, ccb->ccb_h.target_id,
		    ccb->ccb_h.target_lun, csio->cdb_io.cdb_bytes[0],
		    csio->scsi_status, sense_key, asc, ascq);
		break;
	}

	case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED:
		isci_remote_device_reset(isci_remote_device, NULL);
		/* drop through */
	case SCI_IO_FAILURE_TERMINATED:
		ccb->ccb_h.status |= CAM_REQ_TERMIO;
		isci_log_message(1, "ISCI",
		    "isci: bus=%x target=%x lun=%x cdb[0]=%x terminated\n",
		    ccb->ccb_h.path_id, ccb->ccb_h.target_id,
		    ccb->ccb_h.target_lun, ccb->csio.cdb_io.cdb_bytes[0]);
		break;

	case SCI_IO_FAILURE_INVALID_STATE:
	case SCI_IO_FAILURE_INSUFFICIENT_RESOURCES:
		/* Retry later; the CCB stays queued on the device. */
		complete_ccb = FALSE;
		break;

	case SCI_IO_FAILURE_INVALID_REMOTE_DEVICE:
		ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
		break;

	case SCI_IO_FAILURE_NO_NCQ_TAG_AVAILABLE:
	{
		/*
		 * Out of NCQ tags: shrink the number of openings CAM may
		 * have outstanding to the device's real queue depth.
		 */
		struct ccb_relsim ccb_relsim;
		struct cam_path *path;

		xpt_create_path(&path, NULL,
		    cam_sim_path(isci_controller->sim),
		    isci_remote_device->index, 0);

		xpt_setup_ccb(&ccb_relsim.ccb_h, path, 5);
		ccb_relsim.ccb_h.func_code = XPT_REL_SIMQ;
		ccb_relsim.ccb_h.flags = CAM_DEV_QFREEZE;
		ccb_relsim.release_flags = RELSIM_ADJUST_OPENINGS;
		ccb_relsim.openings =
		    scif_remote_device_get_max_queue_depth(remote_device);
		xpt_action((union ccb *)&ccb_relsim);
		xpt_free_path(path);
		complete_ccb = FALSE;
	}
	break;

	case SCI_IO_FAILURE:
	case SCI_IO_FAILURE_REQUIRES_SCSI_ABORT:
	case SCI_IO_FAILURE_UNSUPPORTED_PROTOCOL:
	case SCI_IO_FAILURE_PROTOCOL_VIOLATION:
	case SCI_IO_FAILURE_INVALID_PARAMETER_VALUE:
	case SCI_IO_FAILURE_CONTROLLER_SPECIFIC_ERR:
	default:
		isci_log_message(1, "ISCI",
		    "isci: bus=%x target=%x lun=%x cdb[0]=%x completion status=%x\n",
		    ccb->ccb_h.path_id, ccb->ccb_h.target_id,
		    ccb->ccb_h.target_lun, ccb->csio.cdb_io.cdb_bytes[0],
		    completion_status);
		ccb->ccb_h.status |= CAM_REQ_CMP_ERR;
		break;
	}

	/* Tear down the request's timeout and DMA state, then recycle it. */
	callout_stop(&isci_request->parent.timer);
	bus_dmamap_sync(isci_request->parent.dma_tag,
	    isci_request->parent.dma_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	bus_dmamap_unload(isci_request->parent.dma_tag,
	    isci_request->parent.dma_map);

	isci_request->ccb = NULL;

	sci_pool_put(isci_controller->request_pool,
	    (struct ISCI_REQUEST *)isci_request);

	if (complete_ccb) {
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
			/* ccb will be completed with some type of non-success
			 *  status.  So temporarily freeze the queue until the
			 *  upper layers can act on the status.  The
			 *  CAM_DEV_QFRZN flag will then release the queue
			 *  after the status is acted upon.
			 */
			ccb->ccb_h.status |= CAM_DEV_QFRZN;
			xpt_freeze_devq(ccb->ccb_h.path, 1);
		}

		if (ccb->ccb_h.status & CAM_SIM_QUEUED) {

			KASSERT(ccb == isci_remote_device->queued_ccb_in_progress,
			    ("multiple internally queued ccbs in flight"));

			TAILQ_REMOVE(&isci_remote_device->queued_ccbs,
			    &ccb->ccb_h, sim_links.tqe);
			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;

			/*
			 * This CCB that was in the queue was completed, so
			 *  set the in_progress pointer to NULL denoting that
			 *  we can retry another CCB from the queue.  We only
			 *  allow one CCB at a time from the queue to be
			 *  in progress so that we can effectively maintain
			 *  ordering.
			 */
			isci_remote_device->queued_ccb_in_progress = NULL;
		}

		if (isci_remote_device->frozen_lun_mask != 0) {
			isci_remote_device_release_device_queue(isci_remote_device);
		}

		xpt_done(ccb);

		if (isci_controller->is_frozen == TRUE) {
			isci_controller->is_frozen = FALSE;
			xpt_release_simq(isci_controller->sim, TRUE);
		}
	} else {
		/* Not completing: freeze the LUN and (re)queue the CCB. */
		isci_remote_device_freeze_lun_queue(isci_remote_device,
		    ccb->ccb_h.target_lun);

		if (ccb->ccb_h.status & CAM_SIM_QUEUED) {

			KASSERT(ccb == isci_remote_device->queued_ccb_in_progress,
			    ("multiple internally queued ccbs in flight"));

			/*
			 * Do nothing, CCB is already on the device's queue.
			 *  We leave it on the queue, to be retried again
			 *  next time a CCB on this device completes, or we
			 *  get a ready notification for this device.
			 */
			isci_log_message(1, "ISCI", "already queued %p %x\n",
			    ccb, ccb->csio.cdb_io.cdb_bytes[0]);

			isci_remote_device->queued_ccb_in_progress = NULL;

		} else {
			isci_log_message(1, "ISCI", "queue %p %x\n", ccb,
			    ccb->csio.cdb_io.cdb_bytes[0]);
			ccb->ccb_h.status |= CAM_SIM_QUEUED;

			TAILQ_INSERT_TAIL(&isci_remote_device->queued_ccbs,
			    &ccb->ccb_h, sim_links.tqe);
		}
	}
}
/*
 * Attach all the sub-devices we can find.
 *
 * Sets up per-controller DMA tags and sense buffers, starts the board
 * processor, then allocates and registers the CAM SIM/path and installs
 * an async callback.  adv->init_level tracks how far setup progressed
 * so the detach path can unwind partial initialization.
 * Returns 0 on success, ENXIO/ENOMEM on failure.
 */
int
adv_attach(struct adv_softc *adv)
{
	struct ccb_setasync *csa;
	int max_sg;

	/*
	 * Allocate an array of ccb mapping structures.  We put the
	 * index of the ccb_info structure into the queue representing
	 * a transaction and use it for mapping the queue to the
	 * upper level SCSI transaction it represents.
	 */
	adv->ccb_infos = kmalloc(sizeof(*adv->ccb_infos) * adv->max_openings,
				 M_DEVBUF, M_WAITOK);
	adv->init_level++;

	/*
	 * Create our DMA tags.  These tags define the kinds of device
	 * accessible memory allocations and memory mappings we will
	 * need to perform during normal operation.
	 *
	 * Unless we need to further restrict the allocation, we rely
	 * on the restrictions of the parent dmat, hence the common
	 * use of MAXADDR and MAXSIZE.
	 *
	 * The ASC boards use chains of "queues" (the transactional
	 * resources on the board) to represent long S/G lists.
	 * The first queue represents the command and holds a
	 * single address and data pair.  The queues that follow
	 * can each hold ADV_SG_LIST_PER_Q entries.  Given the
	 * total number of queues, we can express the largest
	 * transaction we can map.  We reserve a few queues for
	 * error recovery.  Take those into account as well.
	 *
	 * There is a way to take an interrupt to download the
	 * next batch of S/G entries if there are more than 255
	 * of them (the counter in the queue structure is a u_int8_t).
	 * We don't use this feature, so limit the S/G list size
	 * accordingly.
	 */
	max_sg = (adv->max_openings - ADV_MIN_FREE_Q - 1) * ADV_SG_LIST_PER_Q;
	if (max_sg > 255)
		max_sg = 255;

	/* DMA tag for mapping buffers into device visible space. */
	if (bus_dma_tag_create(adv->parent_dmat,
			       /*alignment*/1,
			       /*boundary*/0,
			       /*lowaddr*/BUS_SPACE_MAXADDR,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL,
			       /*filterarg*/NULL,
			       /*maxsize*/MAXPHYS,
			       /*nsegments*/max_sg,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/BUS_DMA_ALLOCNOW,
			       &adv->buffer_dmat) != 0) {
		return (ENXIO);
	}

	adv->init_level++;

	/* DMA tag for our sense buffers */
	if (bus_dma_tag_create(adv->parent_dmat,
			       /*alignment*/1,
			       /*boundary*/0,
			       /*lowaddr*/BUS_SPACE_MAXADDR,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL,
			       /*filterarg*/NULL,
			       sizeof(struct scsi_sense_data)*adv->max_openings,
			       /*nsegments*/1,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/0,
			       &adv->sense_dmat) != 0) {
		return (ENXIO);
	}

	adv->init_level++;

	/* Allocation for our sense buffers */
	if (bus_dmamem_alloc(adv->sense_dmat, (void *)&adv->sense_buffers,
			     BUS_DMA_NOWAIT, &adv->sense_dmamap) != 0) {
		return (ENOMEM);
	}

	adv->init_level++;

	/* And permanently map them */
	bus_dmamap_load(adv->sense_dmat, adv->sense_dmamap,
			adv->sense_buffers,
			sizeof(struct scsi_sense_data)*adv->max_openings,
			adv_map, &adv->sense_physbase, /*flags*/0);

	adv->init_level++;

	/*
	 * Fire up the chip
	 */
	if (adv_start_chip(adv) != 1) {
		kprintf("adv%d: Unable to start on board processor. Aborting.\n",
			adv->unit);
		return (ENXIO);
	}

	/*
	 * Construct our SIM entry.
	 */
	adv->sim = cam_sim_alloc(adv_action, adv_poll, "adv", adv, adv->unit,
				 &sim_mplock, 1, adv->max_openings, NULL);
	if (adv->sim == NULL)
		return (ENOMEM);

	/*
	 * Register the bus.
	 *
	 * XXX Twin Channel EISA Cards???
	 */
	if (xpt_bus_register(adv->sim, 0) != CAM_SUCCESS) {
		cam_sim_free(adv->sim);
		return (ENXIO);
	}

	if (xpt_create_path(&adv->path, /*periph*/NULL, cam_sim_path(adv->sim),
			    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD)
	    != CAM_REQ_CMP) {
		xpt_bus_deregister(cam_sim_path(adv->sim));
		cam_sim_free(adv->sim);
		return (ENXIO);
	}

	/* Install an async callback for device arrival/departure events. */
	csa = &xpt_alloc_ccb()->csa;
	xpt_setup_ccb(&csa->ccb_h, adv->path, /*priority*/5);
	csa->ccb_h.func_code = XPT_SASYNC_CB;
	csa->event_enable = AC_FOUND_DEVICE|AC_LOST_DEVICE;
	csa->callback = advasync;
	csa->callback_arg = adv;
	xpt_action((union ccb *)csa);
	xpt_free_ccb(&csa->ccb_h);

	return (0);
}
/*
 * pmpdone:  Completion handler for the SATA port-multiplier (PMP)
 * probe/configuration state machine.  Each completed command advances
 * softc->state / softc->pm_step and re-schedules the periph.
 *
 * NOTE(review): this function is truncated in the source as provided —
 * the text ends mid-way through the PMP_STATE_CHECK retry branch.
 */
static void
pmpdone(struct cam_periph *periph, union ccb *done_ccb)
{
	struct ccb_trans_settings cts;
	struct pmp_softc *softc;
	struct ccb_ataio *ataio;
	struct cam_path *dpath;
	u_int32_t priority, res;
	int i;

	softc = (struct pmp_softc *)periph->softc;
	ataio = &done_ccb->ataio;

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("pmpdone\n"));

	priority = done_ccb->ccb_h.pinfo.priority;
	if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		/* Let the generic error handler decide whether to retry. */
		if (cam_periph_error(done_ccb, 0, 0, NULL) == ERESTART) {
			return;
		} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
			cam_release_devq(done_ccb->ccb_h.path,
			    /*relsim_flags*/0,
			    /*reduction*/0,
			    /*timeout*/0,
			    /*getcount_only*/0);
		}
		goto done;
	}

	if (softc->restart) {
		/* A restart was requested: rewind the state machine. */
		softc->restart = 0;
		xpt_release_ccb(done_ccb);
		softc->state = min(softc->state, PMP_STATE_PRECONFIG);
		xpt_schedule(periph, priority);
		return;
	}

	switch (softc->state) {
	case PMP_STATE_PORTS:
		/* Port count is returned spread across the ATA result regs. */
		softc->pm_ports = (ataio->res.lba_high << 24) +
		    (ataio->res.lba_mid << 16) +
		    (ataio->res.lba_low << 8) +
		    ataio->res.sector_count;
		if (pmp_hide_special) {
			/*
			 * This PMP declares 6 ports, while only 5 of them
			 * are real. Port 5 is a SEMB port, probing which
			 * causes timeouts if external SEP is not connected
			 * to PMP over I2C.
			 */
			if (softc->pm_pid == 0x37261095 &&
			    softc->pm_ports == 6)
				softc->pm_ports = 5;

			/*
			 * This PMP declares 7 ports, while only 5 of them
			 * are real. Port 5 is a fake "Config Disk" with
			 * 640 sectors size. Port 6 is a SEMB port.
			 */
			if (softc->pm_pid == 0x47261095 &&
			    softc->pm_ports == 7)
				softc->pm_ports = 5;

			/*
			 * These PMPs have extra configuration port.
			 */
			if (softc->pm_pid == 0x57231095 ||
			    softc->pm_pid == 0x57331095 ||
			    softc->pm_pid == 0x57341095 ||
			    softc->pm_pid == 0x57441095)
				softc->pm_ports--;
		}
		printf("%s%d: %d fan-out ports\n",
		    periph->periph_name, periph->unit_number,
		    softc->pm_ports);
		softc->state = PMP_STATE_PRECONFIG;
		xpt_release_ccb(done_ccb);
		xpt_schedule(periph, priority);
		return;
	case PMP_STATE_PRECONFIG:
		softc->pm_step = 0;
		softc->state = PMP_STATE_RESET;
		softc->reset |= ~softc->found;	/* reset ports not yet found */
		xpt_release_ccb(done_ccb);
		xpt_schedule(periph, priority);
		return;
	case PMP_STATE_RESET:
		softc->pm_step++;
		if (softc->pm_step >= softc->pm_ports) {
			/* All ports reset; pause briefly before connect. */
			softc->pm_step = 0;
			cam_freeze_devq(periph->path);
			cam_release_devq(periph->path,
			    RELSIM_RELEASE_AFTER_TIMEOUT,
			    /*reduction*/0,
			    /*timeout*/5,
			    /*getcount_only*/0);
			softc->state = PMP_STATE_CONNECT;
		}
		xpt_release_ccb(done_ccb);
		xpt_schedule(periph, priority);
		return;
	case PMP_STATE_CONNECT:
		softc->pm_step++;
		if (softc->pm_step >= softc->pm_ports) {
			/* All ports connected; pause before status checks. */
			softc->pm_step = 0;
			softc->pm_try = 0;
			cam_freeze_devq(periph->path);
			cam_release_devq(periph->path,
			    RELSIM_RELEASE_AFTER_TIMEOUT,
			    /*reduction*/0,
			    /*timeout*/10,
			    /*getcount_only*/0);
			softc->state = PMP_STATE_CHECK;
		}
		xpt_release_ccb(done_ccb);
		xpt_schedule(periph, priority);
		return;
	case PMP_STATE_CHECK:
		res = (ataio->res.lba_high << 24) +
		    (ataio->res.lba_mid << 16) +
		    (ataio->res.lba_low << 8) +
		    ataio->res.sector_count;
		if (((res & 0xf0f) == 0x103 && (res & 0x0f0) != 0) ||
		    (res & 0x600) != 0) {
			if (bootverbose) {
				printf("%s%d: port %d status: %08x\n",
				    periph->periph_name, periph->unit_number,
				    softc->pm_step, res);
			}
			/* Report device speed if it is online. */
			if ((res & 0xf0f) == 0x103 &&
			    xpt_create_path(&dpath, periph,
			    xpt_path_path_id(periph->path),
			    softc->pm_step, 0) == CAM_REQ_CMP) {
				bzero(&cts, sizeof(cts));
				xpt_setup_ccb(&cts.ccb_h, dpath,
				    CAM_PRIORITY_NONE);
				cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
				cts.type = CTS_TYPE_CURRENT_SETTINGS;
				cts.xport_specific.sata.revision =
				    (res & 0x0f0) >> 4;
				cts.xport_specific.sata.valid =
				    CTS_SATA_VALID_REVISION;
				cts.xport_specific.sata.caps = softc->caps &
				    (CTS_SATA_CAPS_H_PMREQ |
				    CTS_SATA_CAPS_H_DMAAA |
				    CTS_SATA_CAPS_H_AN);
				cts.xport_specific.sata.valid |=
				    CTS_SATA_VALID_CAPS;
				xpt_action((union ccb *)&cts);
				xpt_free_path(dpath);
			}
			softc->found |= (1 << softc->pm_step);
			softc->pm_step++;
		} else {
			if (softc->pm_try < 10) {
/*
 * pmpstart: issue the ATA command appropriate for the current enumeration
 * state of the SATA port multiplier (PMP).
 *
 * periph:    the pmp periph instance.
 * start_ccb: a ccb allocated by CAM for us to fill in and dispatch.
 *
 * Called by CAM when a previously scheduled (xpt_schedule) request can run.
 * Builds one ATA PM READ/WRITE register command per invocation; the state
 * machine is advanced by the completion handler, which re-schedules us.
 */
static void
pmpstart(struct cam_periph *periph, union ccb *start_ccb)
{
	struct ccb_trans_settings cts;
	struct ccb_ataio *ataio;
	struct pmp_softc *softc;
	struct cam_path *dpath;
	int revision = 0;

	softc = (struct pmp_softc *)periph->softc;
	ataio = &start_ccb->ataio;

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("pmpstart\n"));

	/* A restart request rewinds the state machine to re-probe ports. */
	if (softc->restart) {
		softc->restart = 0;
		softc->state = min(softc->state, PMP_STATE_PRECONFIG);
	}
	/* Fetch user wanted device speed. */
	if (softc->state == PMP_STATE_RESET ||
	    softc->state == PMP_STATE_CONNECT) {
		/* Build a path to the PMP port currently being processed. */
		if (xpt_create_path(&dpath, periph,
		    xpt_path_path_id(periph->path),
		    softc->pm_step, 0) == CAM_REQ_CMP) {
			bzero(&cts, sizeof(cts));
			xpt_setup_ccb(&cts.ccb_h, dpath, CAM_PRIORITY_NONE);
			cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
			cts.type = CTS_TYPE_USER_SETTINGS;
			xpt_action((union ccb *)&cts);
			if (cts.xport_specific.sata.valid & CTS_SATA_VALID_REVISION)
				revision = cts.xport_specific.sata.revision;
			xpt_free_path(dpath);
		}
	}
	switch (softc->state) {
	case PMP_STATE_PORTS:
		/* Read PM general status register 2 (port count/info). */
		cam_fill_ataio(ataio,
		      pmp_retry_count,
		      pmpdone,
		      /*flags*/CAM_DIR_NONE,
		      0,
		      /*data_ptr*/NULL,
		      /*dxfer_len*/0,
		      pmp_default_timeout * 1000);
		ata_pm_read_cmd(ataio, 2, 15);
		break;

	case PMP_STATE_PRECONFIG:
		/* Get/update host SATA capabilities. */
		bzero(&cts, sizeof(cts));
		xpt_setup_ccb(&cts.ccb_h, periph->path, CAM_PRIORITY_NONE);
		cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
		cts.type = CTS_TYPE_CURRENT_SETTINGS;
		xpt_action((union ccb *)&cts);
		if (cts.xport_specific.sata.valid & CTS_SATA_VALID_CAPS)
			softc->caps = cts.xport_specific.sata.caps;
		/* Write PM register 0x60 on control port 15. */
		cam_fill_ataio(ataio,
		      pmp_retry_count,
		      pmpdone,
		      /*flags*/CAM_DIR_NONE,
		      0,
		      /*data_ptr*/NULL,
		      /*dxfer_len*/0,
		      pmp_default_timeout * 1000);
		ata_pm_write_cmd(ataio, 0x60, 15, 0x0);
		break;

	case PMP_STATE_RESET:
		/*
		 * Write port SControl register (register 2): requested speed
		 * in bits 7:4; assert DET (bit 0) only on ports not already
		 * found, to reset them.
		 */
		cam_fill_ataio(ataio,
		      pmp_retry_count,
		      pmpdone,
		      /*flags*/CAM_DIR_NONE,
		      0,
		      /*data_ptr*/NULL,
		      /*dxfer_len*/0,
		      pmp_default_timeout * 1000);
		ata_pm_write_cmd(ataio, 2, softc->pm_step,
		    (revision << 4) |
		    ((softc->found & (1 << softc->pm_step)) ? 0 : 1));
		break;

	case PMP_STATE_CONNECT:
		/* Clear DET, leaving only the requested speed limit. */
		cam_fill_ataio(ataio,
		      pmp_retry_count,
		      pmpdone,
		      /*flags*/CAM_DIR_NONE,
		      0,
		      /*data_ptr*/NULL,
		      /*dxfer_len*/0,
		      pmp_default_timeout * 1000);
		ata_pm_write_cmd(ataio, 2, softc->pm_step,
		    (revision << 4));
		break;

	case PMP_STATE_CHECK:
		/* Read port SStatus register (register 0) to probe link. */
		cam_fill_ataio(ataio,
		      pmp_retry_count,
		      pmpdone,
		      /*flags*/CAM_DIR_NONE,
		      0,
		      /*data_ptr*/NULL,
		      /*dxfer_len*/0,
		      pmp_default_timeout * 1000);
		ata_pm_read_cmd(ataio, 0, softc->pm_step);
		break;

	case PMP_STATE_CLEAR:
		/* Clear port SError register (register 1) on this port. */
		softc->reset = 0;
		cam_fill_ataio(ataio,
		      pmp_retry_count,
		      pmpdone,
		      /*flags*/CAM_DIR_NONE,
		      0,
		      /*data_ptr*/NULL,
		      /*dxfer_len*/0,
		      pmp_default_timeout * 1000);
		ata_pm_write_cmd(ataio, 1, softc->pm_step, 0xFFFFFFFF);
		break;

	case PMP_STATE_CONFIG:
		/*
		 * Final configuration write to register 0x60 on port 15;
		 * bit 0x08 enables asynchronous notification if the host
		 * supports it.
		 */
		cam_fill_ataio(ataio,
		      pmp_retry_count,
		      pmpdone,
		      /*flags*/CAM_DIR_NONE,
		      0,
		      /*data_ptr*/NULL,
		      /*dxfer_len*/0,
		      pmp_default_timeout * 1000);
		ata_pm_write_cmd(ataio, 0x60, 15, 0x07 |
		    ((softc->caps & CTS_SATA_CAPS_H_AN) ? 0x08 : 0));
		break;

	default:
		break;
	}
	xpt_action(start_ccb);
}
/*
 * ahbprocesserror: translate the adapter's hardware completion status
 * (ecb->status.ha_status) for a finished ECB into the corresponding CAM
 * ccb status, copying back autosense data where the adapter stored it.
 *
 * ahb: controller soft state.
 * ecb: the completed adapter control block.
 * ccb: the CAM ccb being completed on its behalf.
 *
 * Any non-CAM_REQ_CMP result freezes the device queue and marks the ccb
 * with CAM_DEV_QFRZN so the upper layers release it after error recovery.
 */
static void
ahbprocesserror(struct ahb_softc *ahb, struct ecb *ecb, union ccb *ccb)
{
	struct hardware_ecb *hecb;
	struct ecb_status *status;

	hecb = &ecb->hecb;
	status = &ecb->status;
	switch (status->ha_status) {
	case HS_OK:
		/* Transport OK; propagate the SCSI status byte. */
		ccb->csio.scsi_status = status->scsi_status;
		if (status->scsi_status != 0) {
			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
			if (status->sense_stored) {
				/* Adapter performed autosense for us. */
				ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
				ccb->csio.sense_resid =
					ccb->csio.sense_len
					- status->sense_len;
				bcopy(&ecb->sense, &ccb->csio.sense_data,
				      status->sense_len);
			}
		}
		break;
	case HS_TARGET_NOT_ASSIGNED:
		ccb->ccb_h.status = CAM_PATH_INVALID;
		break;
	case HS_SEL_TIMEOUT:
		ccb->ccb_h.status = CAM_SEL_TIMEOUT;
		break;
	case HS_DATA_RUN_ERR:
		/* Over/underrun: compute the residual for the ccb. */
		ahbcalcresid(ahb, ecb, ccb);
		break;
	case HS_UNEXPECTED_BUSFREE:
		ccb->ccb_h.status = CAM_UNEXP_BUSFREE;
		break;
	case HS_INVALID_PHASE:
		ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
		break;
	case HS_REQUEST_SENSE_FAILED:
		ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
		break;
	case HS_TAG_MSG_REJECTED:
	{
		/*
		 * Target rejected the tag message: disable tagged queuing
		 * for it and broadcast the new transfer negotiation so the
		 * rest of the stack knows.
		 */
		struct ccb_trans_settings neg;
		struct ccb_trans_settings_scsi *scsi =
		    &neg.proto_specific.scsi;

		xpt_print_path(ccb->ccb_h.path);
		printf("refuses tagged commands. Performing "
		       "non-tagged I/O\n");
		memset(&neg, 0, sizeof (neg));
		neg.protocol = PROTO_SCSI;
		neg.protocol_version = SCSI_REV_2;
		neg.transport = XPORT_SPI;
		neg.transport_version = 2;
		scsi->flags = CTS_SCSI_VALID_TQ;
		xpt_setup_ccb(&neg.ccb_h, ccb->ccb_h.path, /*priority*/1);
		xpt_async(AC_TRANSFER_NEG, ccb->ccb_h.path, &neg);
		ahb->tags_permitted &= ~(0x01 << ccb->ccb_h.target_id);
		ccb->ccb_h.status = CAM_MSG_REJECT_REC;
		break;
	}
	case HS_FIRMWARE_LOAD_REQ:
	case HS_HARDWARE_ERR:
		/*
		 * Tell the system that the Adapter
		 * is no longer functional.
		 */
		ccb->ccb_h.status = CAM_NO_HBA;
		break;
	case HS_CMD_ABORTED_HOST:
	case HS_CMD_ABORTED_ADAPTER:
	case HS_ATN_TARGET_FAILED:
	case HS_SCSI_RESET_ADAPTER:
	case HS_SCSI_RESET_INCOMING:
		ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
		break;
	case HS_INVALID_ECB_PARAM:
		/* Log the offending ECB fields for postmortem analysis. */
		printf("ahb%ld: opcode 0x%02x, flag_word1 0x%02x, flag_word2 0x%02x\n",
			ahb->unit, hecb->opcode, hecb->flag_word1,
			hecb->flag_word2);
		ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
		break;
	case HS_DUP_TCB_RECEIVED:
	case HS_INVALID_OPCODE:
	case HS_INVALID_CMD_LINK:
	case HS_PROGRAM_CKSUM_ERROR:
		/* Driver-side invariants; if these fire the ECB queue
		 * handling is corrupt and we cannot continue safely. */
		panic("ahb%ld: Can't happen host status %x occurred",
		      ahb->unit, status->ha_status);
		break;
	}
	/*
	 * NOTE(review): the switch has no default case, so an unknown
	 * ha_status leaves ccb_h.status untouched — presumably it was set
	 * by the caller beforehand; verify against the completion path.
	 */
	if (ccb->ccb_h.status != CAM_REQ_CMP) {
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
	}
}
static cam_status ptctor(struct cam_periph *periph, void *arg) { struct pt_softc *softc; struct ccb_setasync csa; struct ccb_getdev *cgd; cgd = (struct ccb_getdev *)arg; if (periph == NULL) { printf("ptregister: periph was NULL!!\n"); return(CAM_REQ_CMP_ERR); } if (cgd == NULL) { printf("ptregister: no getdev CCB, can't register device\n"); return(CAM_REQ_CMP_ERR); } softc = (struct pt_softc *)malloc(sizeof(*softc),M_DEVBUF,M_NOWAIT); if (softc == NULL) { printf("daregister: Unable to probe new device. " "Unable to allocate softc\n"); return(CAM_REQ_CMP_ERR); } bzero(softc, sizeof(*softc)); LIST_INIT(&softc->pending_ccbs); softc->state = PT_STATE_NORMAL; bioq_init(&softc->bio_queue); softc->io_timeout = SCSI_PT_DEFAULT_TIMEOUT * 1000; periph->softc = softc; devstat_add_entry(&softc->device_stats, "pt", periph->unit_number, 0, DEVSTAT_NO_BLOCKSIZE, SID_TYPE(&cgd->inq_data) | DEVSTAT_TYPE_IF_SCSI, DEVSTAT_PRIORITY_OTHER); softc->dev = make_dev(&pt_cdevsw, periph->unit_number, UID_ROOT, GID_OPERATOR, 0600, "%s%d", periph->periph_name, periph->unit_number); softc->dev->si_drv1 = periph; /* * Add async callbacks for bus reset and * bus device reset calls. I don't bother * checking if this fails as, in most cases, * the system will function just fine without * them and the only alternative would be to * not attach the device on failure. */ xpt_setup_ccb(&csa.ccb_h, periph->path, /*priority*/5); csa.ccb_h.func_code = XPT_SASYNC_CB; csa.event_enable = AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE; csa.callback = ptasync; csa.callback_arg = periph; xpt_action((union ccb *)&csa); /* Tell the user we've attached to the device */ xpt_announce_periph(periph, NULL); return(CAM_REQ_CMP); }
/*
 * isp_attach: register the ISP host adapter with CAM.
 *
 * isp: adapter soft state.
 *
 * Allocates one SIM request queue shared by both channels, then for the
 * primary bus (and, on 12X0 dual-channel parts, the secondary bus):
 * allocates a SIM, registers it with the XPT, builds a wildcard path, and
 * installs an AC_LOST_DEVICE async callback.  On any failure the resources
 * acquired so far are released in reverse order and the function returns
 * silently; note cam_sim_free(sim, TRUE) also frees the shared devq.
 */
void
isp_attach(struct ispsoftc *isp)
{
	int primary, secondary;
	struct ccb_setasync csa;
	struct cam_devq *devq;
	struct cam_sim *sim;
	struct cam_path *path;

	/*
	 * Establish (in case of 12X0) which bus is the primary.
	 */
	primary = 0;
	secondary = 1;

	/*
	 * Create the device queue for our SIM(s).
	 */
	devq = cam_simq_alloc(MAXISPREQUEST);
	if (devq == NULL) {
		return;
	}

	/*
	 * Construct our SIM entry.
	 */
	sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp,
	    isp->isp_unit, 1, MAXISPREQUEST, devq);
	if (sim == NULL) {
		cam_simq_free(devq);
		return;
	}
	if (xpt_bus_register(sim, primary) != CAM_SUCCESS) {
		/* TRUE: freeing the SIM also frees the devq. */
		cam_sim_free(sim, TRUE);
		return;
	}
	if (xpt_create_path(&path, NULL, cam_sim_path(sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		xpt_bus_deregister(cam_sim_path(sim));
		cam_sim_free(sim, TRUE);
		return;
	}

	/* Ask the XPT to notify us when a device drops off the bus. */
	xpt_setup_ccb(&csa.ccb_h, path, 5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_LOST_DEVICE;
	csa.callback = isp_cam_async;
	csa.callback_arg = sim;
	xpt_action((union ccb *)&csa);
	isp->isp_sim = sim;
	isp->isp_path = path;

	/*
	 * If we have a second channel, construct SIM entry for that.
	 */
	if (IS_12X0(isp)) {
		sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp,
		    isp->isp_unit, 1, MAXISPREQUEST, devq);
		if (sim == NULL) {
			/* Unwind the primary channel registration too. */
			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
			xpt_free_path(isp->isp_path);
			cam_simq_free(devq);
			return;
		}
		if (xpt_bus_register(sim, secondary) != CAM_SUCCESS) {
			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
			xpt_free_path(isp->isp_path);
			cam_sim_free(sim, TRUE);
			return;
		}

		if (xpt_create_path(&path, NULL, cam_sim_path(sim),
		    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
			xpt_free_path(isp->isp_path);
			xpt_bus_deregister(cam_sim_path(sim));
			cam_sim_free(sim, TRUE);
			return;
		}

		/* Same AC_LOST_DEVICE callback for the second channel. */
		xpt_setup_ccb(&csa.ccb_h, path, 5);
		csa.ccb_h.func_code = XPT_SASYNC_CB;
		csa.event_enable = AC_LOST_DEVICE;
		csa.callback = isp_cam_async;
		csa.callback_arg = sim;
		xpt_action((union ccb *)&csa);
		isp->isp_sim2 = sim;
		isp->isp_path2 = path;
	}
	/* Attach succeeded: move the adapter into the running state. */
	if (isp->isp_state == ISP_INITSTATE)
		isp->isp_state = ISP_RUNSTATE;
}
/*
 * isci_io_request_complete: completion handler invoked by the SCI(F)
 * framework when an I/O request finishes.
 *
 * scif_controller:   framework controller handle for this HBA.
 * remote_device:     framework handle for the target device.
 * isci_request:      the driver request object carrying the CAM ccb.
 * completion_status: SCI framework completion code.
 *
 * Translates the SCI completion status into a CAM ccb status (copying back
 * SMP responses and SCSI autosense data where applicable), freezes the
 * device queue on any error, tears down the request's DMA mapping,
 * completes the ccb via xpt_done(), releases a frozen SIM queue if this
 * completion unblocks it, and returns the request to the free pool.
 */
void
isci_io_request_complete(SCI_CONTROLLER_HANDLE_T scif_controller,
    SCI_REMOTE_DEVICE_HANDLE_T remote_device,
    struct ISCI_IO_REQUEST *isci_request, SCI_IO_STATUS completion_status)
{
	struct ISCI_CONTROLLER *isci_controller;
	struct ISCI_REMOTE_DEVICE *isci_remote_device;
	union ccb *ccb;

	isci_controller = (struct ISCI_CONTROLLER *)
	    sci_object_get_association(scif_controller);
	isci_remote_device = (struct ISCI_REMOTE_DEVICE *)
	    sci_object_get_association(remote_device);

	ccb = isci_request->ccb;

	/* Clear the status bits; flag bits (e.g. CAM_DEV_QFRZN) survive. */
	ccb->ccb_h.status &= ~CAM_STATUS_MASK;

	switch (completion_status) {
	case SCI_IO_SUCCESS:
	case SCI_IO_SUCCESS_COMPLETE_BEFORE_START:
#if __FreeBSD_version >= 900026
		if (ccb->ccb_h.func_code == XPT_SMP_IO) {
			/* Copy the SMP response back into the ccb. */
			void *smp_response =
			    scif_io_request_get_response_iu_address(
				isci_request->sci_object);

			memcpy(ccb->smpio.smp_response, smp_response,
			    ccb->smpio.smp_response_len);
		}
#endif
		ccb->ccb_h.status |= CAM_REQ_CMP;
		break;

	case SCI_IO_SUCCESS_IO_DONE_EARLY:
		/* Success, but short transfer: report the residual. */
		ccb->ccb_h.status |= CAM_REQ_CMP;
		ccb->csio.resid = ccb->csio.dxfer_len -
		    scif_io_request_get_number_of_bytes_transferred(
			isci_request->sci_object);
		break;

	case SCI_IO_FAILURE_RESPONSE_VALID:
	{
		/* SCSI check condition: extract autosense data. */
		SCI_SSP_RESPONSE_IU_T * response_buffer;
		uint32_t sense_length;
		int error_code, sense_key, asc, ascq;
		struct ccb_scsiio *csio = &ccb->csio;

		response_buffer = (SCI_SSP_RESPONSE_IU_T *)
		    scif_io_request_get_response_iu_address(
			isci_request->sci_object);

		sense_length = sci_ssp_get_sense_data_length(
		    response_buffer->sense_data_length);

		/* Never copy more sense than the ccb has room for. */
		sense_length = MIN(csio->sense_len, sense_length);

		memcpy(&csio->sense_data, response_buffer->data,
		    sense_length);

		csio->sense_resid = csio->sense_len - sense_length;
		csio->scsi_status = response_buffer->status;
		ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
		ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
		scsi_extract_sense( &csio->sense_data, &error_code,
		    &sense_key, &asc, &ascq );
		isci_log_message(1, "ISCI",
		    "isci: bus=%x target=%x lun=%x cdb[0]=%x status=%x key=%x asc=%x ascq=%x\n",
		    ccb->ccb_h.path_id, ccb->ccb_h.target_id,
		    ccb->ccb_h.target_lun, csio->cdb_io.cdb_bytes[0],
		    csio->scsi_status, sense_key, asc, ascq);
		break;
	}

	case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED:
		isci_remote_device_reset(isci_remote_device, NULL);
		/* drop through */
	case SCI_IO_FAILURE_TERMINATED:
		ccb->ccb_h.status |= CAM_REQ_TERMIO;
		isci_log_message(1, "ISCI",
		    "isci: bus=%x target=%x lun=%x cdb[0]=%x terminated\n",
		    ccb->ccb_h.path_id, ccb->ccb_h.target_id,
		    ccb->ccb_h.target_lun, ccb->csio.cdb_io.cdb_bytes[0]);
		break;

	case SCI_IO_FAILURE_INVALID_STATE:
	case SCI_IO_FAILURE_INSUFFICIENT_RESOURCES:
		/* Transient: requeue and freeze the LUN until it drains. */
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
		isci_remote_device_freeze_lun_queue(isci_remote_device,
		    ccb->ccb_h.target_lun);
		break;

	case SCI_IO_FAILURE_INVALID_REMOTE_DEVICE:
		ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
		break;

	case SCI_IO_FAILURE_NO_NCQ_TAG_AVAILABLE:
	    {
		struct ccb_relsim ccb_relsim;
		struct cam_path *path;

		/*
		 * Bug fix: xpt_create_path()'s return value was previously
		 * not checked; on failure "path" was used uninitialized in
		 * xpt_setup_ccb()/xpt_free_path().  Only adjust the device
		 * openings when the path was actually created; the request
		 * is requeued either way.
		 */
		if (xpt_create_path(&path, NULL,
		    cam_sim_path(isci_controller->sim),
		    isci_remote_device->index, 0) == CAM_REQ_CMP) {
			/* Clamp openings to the device's NCQ queue depth. */
			xpt_setup_ccb(&ccb_relsim.ccb_h, path, 5);
			ccb_relsim.ccb_h.func_code = XPT_REL_SIMQ;
			ccb_relsim.ccb_h.flags = CAM_DEV_QFREEZE;
			ccb_relsim.release_flags = RELSIM_ADJUST_OPENINGS;
			ccb_relsim.openings =
			    scif_remote_device_get_max_queue_depth(
				remote_device);
			xpt_action((union ccb *)&ccb_relsim);
			xpt_free_path(path);
		}

		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
	    }
		break;

	case SCI_IO_FAILURE:
	case SCI_IO_FAILURE_REQUIRES_SCSI_ABORT:
	case SCI_IO_FAILURE_UNSUPPORTED_PROTOCOL:
	case SCI_IO_FAILURE_PROTOCOL_VIOLATION:
	case SCI_IO_FAILURE_INVALID_PARAMETER_VALUE:
	case SCI_IO_FAILURE_CONTROLLER_SPECIFIC_ERR:
	default:
		isci_log_message(1, "ISCI",
		    "isci: bus=%x target=%x lun=%x cdb[0]=%x completion status=%x\n",
		    ccb->ccb_h.path_id, ccb->ccb_h.target_id,
		    ccb->ccb_h.target_lun, ccb->csio.cdb_io.cdb_bytes[0],
		    completion_status);
		ccb->ccb_h.status |= CAM_REQ_CMP_ERR;
		break;
	}

	if (ccb->ccb_h.status != CAM_REQ_CMP) {
		/* ccb will be completed with some type of non-success
		 * status.  So temporarily freeze the queue until the
		 * upper layers can act on the status.  The CAM_DEV_QFRZN
		 * flag will then release the queue after the status is
		 * acted upon.
		 */
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
		xpt_freeze_devq(ccb->ccb_h.path, 1);
	}

	/* The request is done: cancel its timeout and unmap its buffers. */
	callout_stop(&isci_request->parent.timer);
	bus_dmamap_sync(isci_request->parent.dma_tag,
	    isci_request->parent.dma_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	bus_dmamap_unload(isci_request->parent.dma_tag,
	    isci_request->parent.dma_map);

	/* Thaw LUN queues frozen above once no requeue is pending. */
	if (isci_remote_device->frozen_lun_mask != 0 &&
	    !(ccb->ccb_h.status & CAM_REQUEUE_REQ))
		isci_remote_device_release_device_queue(isci_remote_device);

	xpt_done(ccb);
	isci_request->ccb = NULL;

	/* A completion frees a request slot; unfreeze the SIM queue. */
	if (isci_controller->is_frozen == TRUE) {
		isci_controller->is_frozen = FALSE;
		xpt_release_simq(isci_controller->sim, TRUE);
	}

	sci_pool_put(isci_controller->request_pool,
	    (struct ISCI_REQUEST *)isci_request);
}