static void
wds_action(struct cam_sim *sim, union ccb *ccb)
{
	int unit = cam_sim_unit(sim);
	int s;

	DBG(DBX "wds%d: action 0x%x\n", unit, ccb->ccb_h.func_code);
	switch (ccb->ccb_h.func_code) {
	case XPT_SCSI_IO:
		s = splcam();
		DBG(DBX "wds%d: SCSI IO entered\n", unit);
		wds_scsi_io(sim, &ccb->csio);
		DBG(DBX "wds%d: SCSI IO returned\n", unit);
		splx(s);
		break;
	case XPT_RESET_BUS:		/* how to do it right? */
		printf("wds%d: reset\n", unit);
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	case XPT_ABORT:
		ccb->ccb_h.status = CAM_UA_ABORT;
		xpt_done(ccb);
		break;
	case XPT_CALC_GEOMETRY:
	{
		struct ccb_calc_geometry *ccg;
		u_int32_t secs_per_cylinder;

		ccg = &ccb->ccg;
		ccg->heads = 64;
		ccg->secs_per_track = 16;
		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
		ccg->cylinders = ccg->volume_size / secs_per_cylinder;
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_PATH_INQ:		/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1;	/* XXX??? */
		cpi->hba_inquiry = 0;	/* nothing fancy */
		cpi->target_sprt = 0;
		cpi->hba_misc = 0;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = 7;
		cpi->max_lun = 7;
		cpi->initiator_id = WDS_HBA_ID;
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 3300;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "WD/FDC", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	}
}
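/*
 * Emulate a minimal SES-2 enclosure on top of the AHCI enclosure-management
 * LED interface: answer SEMB IDENTIFY and the supported RECEIVE DIAGNOSTIC
 * RESULT pages (0, 1, 2 and 7) from synthesized data, and translate SEND
 * DIAGNOSTIC page 2 element control bits into LED state via
 * ahci_em_setleds().
 */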
static void
ahci_em_emulate_ses_on_led(device_t dev, union ccb *ccb)
{
	struct ahci_enclosure *enc;
	struct ses_status_page *page;
	struct ses_status_array_dev_slot *ads, *ads0;
	struct ses_elm_desc_hdr *elmd;
	uint8_t *buf;
	int i;

	enc = device_get_softc(dev);
	buf = ccb->ataio.data_ptr;

	/* General request validation. */
	if (ccb->ataio.cmd.command != ATA_SEP_ATTN ||
	    ccb->ataio.dxfer_len < ccb->ataio.cmd.sector_count * 4) {
		ccb->ccb_h.status = CAM_REQ_INVALID;
		goto out;
	}

	/* SEMB IDENTIFY */
	if (ccb->ataio.cmd.features == 0xEC &&
	    ccb->ataio.cmd.sector_count >= 16) {
		bzero(buf, ccb->ataio.dxfer_len);
		buf[0] = 64;		/* Valid bytes. */
		buf[2] = 0x30;		/* NAA Locally Assigned. */
		strncpy(&buf[3], device_get_nameunit(dev), 7);
		strncpy(&buf[10], "AHCI    ", SID_VENDOR_SIZE);
		strncpy(&buf[18], "SGPIO Enclosure ", SID_PRODUCT_SIZE);
		strncpy(&buf[34], "1.00", SID_REVISION_SIZE);
		strncpy(&buf[39], "0001", 4);
		strncpy(&buf[43], "S-E-S ", 6);
		strncpy(&buf[49], "2.00", 4);
		ccb->ccb_h.status = CAM_REQ_CMP;
		goto out;
	}

	/* SEMB RECEIVE DIAGNOSTIC RESULT (0) */
	page = (struct ses_status_page *)buf;
	if (ccb->ataio.cmd.lba_low == 0x02 &&
	    ccb->ataio.cmd.features == 0x00 &&
	    ccb->ataio.cmd.sector_count >= 2) {
		bzero(buf, ccb->ataio.dxfer_len);
		page->hdr.page_code = 0;
		scsi_ulto2b(3, page->hdr.length);
		buf[4] = 0;
		buf[5] = 1;
		buf[6] = 2;
		ccb->ccb_h.status = CAM_REQ_CMP;
		goto out;
	}

	/* SEMB RECEIVE DIAGNOSTIC RESULT (1) */
	if (ccb->ataio.cmd.lba_low == 0x02 &&
	    ccb->ataio.cmd.features == 0x01 &&
	    ccb->ataio.cmd.sector_count >= 13) {
		struct ses_enc_desc *ed;
		struct ses_elm_type_desc *td;

		bzero(buf, ccb->ataio.dxfer_len);
		page->hdr.page_code = 0x01;
		scsi_ulto2b(4 + 4 + 36 + 4, page->hdr.length);
		ed = (struct ses_enc_desc *)&buf[8];
		ed->byte0 = 0x11;
		ed->subenc_id = 0;
		ed->num_types = 1;
		ed->length = 36;
		strncpy(ed->vendor_id, "AHCI    ", SID_VENDOR_SIZE);
		strncpy(ed->product_id, "SGPIO Enclosure ", SID_PRODUCT_SIZE);
		strncpy(ed->product_rev, "    ", SID_REVISION_SIZE);
		td = (struct ses_elm_type_desc *)ses_enc_desc_next(ed);
		td->etype_elm_type = 0x17;
		td->etype_maxelt = enc->channels;
		td->etype_subenc = 0;
		td->etype_txt_len = 0;
		ccb->ccb_h.status = CAM_REQ_CMP;
		goto out;
	}

	/* SEMB RECEIVE DIAGNOSTIC RESULT (2) */
	if (ccb->ataio.cmd.lba_low == 0x02 &&
	    ccb->ataio.cmd.features == 0x02 &&
	    ccb->ataio.cmd.sector_count >= (3 + enc->channels)) {
		bzero(buf, ccb->ataio.dxfer_len);
		page->hdr.page_code = 0x02;
		scsi_ulto2b(4 + 4 * (1 + enc->channels), page->hdr.length);
		for (i = 0; i < enc->channels; i++) {
			ads = &page->elements[i + 1].array_dev_slot;
			memcpy(ads, enc->status[i], 4);
			ads->common.bytes[0] |=
			    (enc->ichannels & (1 << i)) ?
			    SES_OBJSTAT_UNKNOWN : SES_OBJSTAT_NOTINSTALLED;
		}
		ccb->ccb_h.status = CAM_REQ_CMP;
		goto out;
	}

	/* SEMB SEND DIAGNOSTIC (2) */
	if (ccb->ataio.cmd.lba_low == 0x82 &&
	    ccb->ataio.cmd.features == 0x02 &&
	    ccb->ataio.cmd.sector_count >= (3 + enc->channels)) {
		ads0 = &page->elements[0].array_dev_slot;
		for (i = 0; i < enc->channels; i++) {
			ads = &page->elements[i + 1].array_dev_slot;
			if (ads->common.bytes[0] & SESCTL_CSEL) {
				enc->status[i][0] = 0;
				enc->status[i][1] = ads->bytes[0] & 0x02;
				enc->status[i][2] =
				    ads->bytes[1] & (0x80 | SESCTL_RQSID);
				enc->status[i][3] =
				    ads->bytes[2] & SESCTL_RQSFLT;
				ahci_em_setleds(dev, i);
			} else if (ads0->common.bytes[0] & SESCTL_CSEL) {
				enc->status[i][0] = 0;
				enc->status[i][1] = ads0->bytes[0] & 0x02;
				enc->status[i][2] =
				    ads0->bytes[1] & (0x80 | SESCTL_RQSID);
				enc->status[i][3] =
				    ads0->bytes[2] & SESCTL_RQSFLT;
				ahci_em_setleds(dev, i);
			}
		}
		ccb->ccb_h.status = CAM_REQ_CMP;
		goto out;
	}

	/* SEMB RECEIVE DIAGNOSTIC RESULT (7) */
	if (ccb->ataio.cmd.lba_low == 0x02 &&
	    ccb->ataio.cmd.features == 0x07 &&
	    ccb->ataio.cmd.sector_count >= (3 + 3 * enc->channels)) {
		bzero(buf, ccb->ataio.dxfer_len);
		page->hdr.page_code = 0x07;
		scsi_ulto2b(4 + 4 + 12 * enc->channels, page->hdr.length);
		for (i = 0; i < enc->channels; i++) {
			elmd = (struct ses_elm_desc_hdr *)&buf[8 + 4 + 12 * i];
			scsi_ulto2b(8, elmd->length);
			snprintf((char *)(elmd + 1), 9, "SLOT %03d", i);
		}
		ccb->ccb_h.status = CAM_REQ_CMP;
		goto out;
	}

	ccb->ccb_h.status = CAM_REQ_INVALID;
out:
	xpt_done(ccb);
}
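/*
 * CAM action entry point for the hpt27xx SIM.  SCSI I/O is handed off to
 * hpt_scsi_io() under the vbus lock; all other CCB types are handled inline
 * and completed by the xpt_done() at the bottom of the switch.
 */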
static void
hpt_action(struct cam_sim *sim, union ccb *ccb)
{
	PVBUS_EXT vbus_ext = (PVBUS_EXT)cam_sim_softc(sim);

	KdPrint(("hpt_action(fn=%d, id=%d)",
	    ccb->ccb_h.func_code, ccb->ccb_h.target_id));

	switch (ccb->ccb_h.func_code) {
	case XPT_SCSI_IO:
		hpt_lock_vbus(vbus_ext);
		hpt_scsi_io(vbus_ext, ccb);
		hpt_unlock_vbus(vbus_ext);
		return;

	case XPT_RESET_BUS:
		hpt_lock_vbus(vbus_ext);
		ldm_reset_vbus((PVBUS)vbus_ext->vbus);
		hpt_unlock_vbus(vbus_ext);
		break;

	case XPT_GET_TRAN_SETTINGS:
	case XPT_SET_TRAN_SETTINGS:
		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
		break;

	case XPT_CALC_GEOMETRY:
		ccb->ccg.heads = 255;
		ccb->ccg.secs_per_track = 63;
		ccb->ccg.cylinders = ccb->ccg.volume_size /
		    (ccb->ccg.heads * ccb->ccg.secs_per_track);
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;

	case XPT_PATH_INQ:
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE;
		cpi->target_sprt = 0;
		cpi->hba_misc = PIM_NOBUSRESET;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = osm_max_targets;
		cpi->max_lun = 0;
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		cpi->initiator_id = osm_max_targets;
		cpi->base_transfer_speed = 3300;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "HPT ", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->transport = XPORT_SPI;
		cpi->transport_version = 2;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_2;
		cpi->maxio = HPT27XX_DFLTPHYS;
		cpi->ccb_h.status = CAM_REQ_CMP;
		break;
	}

	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		break;
	}

	xpt_done(ccb);
	return;
}
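/*
 * CAM action entry point for the iSCSI initiator SIM.  SCSI I/O is
 * encapsulated into iSCSI PDUs by _scsi_encap(); XPT_CALC_GEOMETRY also
 * records which LUNs have been seen in the session's LUN bitmap before
 * deferring to cam_calc_geometry().
 */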
static void
ic_action(struct cam_sim *sim, union ccb *ccb)
{
	isc_session_t *sp = cam_sim_softc(sim);
	struct ccb_hdr *ccb_h = &ccb->ccb_h;

	debug_called(8);

	ccb_h->spriv_ptr0 = sp;
	sdebug(4, "func_code=0x%x flags=0x%x status=0x%x target=%d lun=%d"
	    " retry_count=%d timeout=%d",
	    ccb_h->func_code, ccb->ccb_h.flags, ccb->ccb_h.status,
	    ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
	    ccb->ccb_h.retry_count, ccb_h->timeout);
	if (sp == NULL) {
		xdebug("sp == NULL! cannot happen");
		return;
	}
	switch (ccb_h->func_code) {
	case XPT_PATH_INQ:
		_inq(sim, ccb);
		break;

	case XPT_RESET_BUS: // (can just be a stub that does nothing and completes)
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		debug(3, "XPT_RESET_BUS");
		cpi->ccb_h.status = CAM_REQ_CMP;
		break;
	}

	case XPT_SCSI_IO:
	{
		struct ccb_scsiio *csio = &ccb->csio;

		debug(4, "XPT_SCSI_IO cmd=0x%x", csio->cdb_io.cdb_bytes[0]);
		if (sp == NULL) {
			ccb_h->status = CAM_REQ_INVALID; //CAM_NO_NEXUS;
			debug(4, "xpt_done.status=%d", ccb_h->status);
			break;
		}
		if (ccb_h->target_lun == CAM_LUN_WILDCARD) {
			debug(3, "target=%d: bad lun (-1)", ccb_h->target_id);
			ccb_h->status = CAM_LUN_INVALID;
			break;
		}
		if (_scsi_encap(sim, ccb) != 0)
			return;
		break;
	}

	case XPT_CALC_GEOMETRY:
	{
		struct ccb_calc_geometry *ccg;

		ccg = &ccb->ccg;
		debug(4, "sid=%d target=%d lun=%d XPT_CALC_GEOMETRY vsize=%jd bsize=%d",
		    sp->sid, ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
		    ccg->volume_size, ccg->block_size);
		if (ccg->block_size == 0 ||
		    (ccg->volume_size < ccg->block_size)) {
			// print error message ...
			/* XXX: what error is appropriate? */
			break;
		} else {
			int lun, *off, boff;

			lun = ccb->ccb_h.target_lun;
			if (lun >= ISCSI_MAX_LUNS) {
				// XXX: keep the index within the LUN bitmap
				xdebug("lun %d >= ISCSI_MAX_LUNS!\n", lun);
				lun %= ISCSI_MAX_LUNS;
			}
			off = &sp->target_lun[lun / (sizeof(int)*8)];
			boff = BIT(lun % (sizeof(int)*8));
			debug(4, "sp->target_nluns=%d *off=%x boff=%x",
			    sp->target_nluns, *off, boff);
			if ((*off & boff) == 0) {
				sp->target_nluns++;
				*off |= boff;
			}
			cam_calc_geometry(ccg, /*extended*/1);
		}
		break;
	}

	case XPT_GET_TRAN_SETTINGS:
	default:
		ccb_h->status = CAM_REQ_INVALID;
		break;
	}
#if __FreeBSD_version < 700000
	XPT_DONE(sp, ccb);
#else
	xpt_done(ccb);
#endif
	return;
}
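/*
 * Translate a finished ata_request into CCB completion status: map timeouts
 * and device errors to CAM status codes, copy back the ATA result registers
 * when requested, compute the transfer residual, and either kick off
 * autosense (for an ATAPI CHECK CONDITION) or complete the CCB.
 */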
static void
ata_cam_end_transaction(device_t dev, struct ata_request *request)
{
	struct ata_channel *ch = device_get_softc(dev);
	union ccb *ccb = request->ccb;
	int fatalerr = 0;

	if (ch->requestsense) {
		ata_cam_process_sense(dev, request);
		return;
	}

	ccb->ccb_h.status &= ~CAM_STATUS_MASK;
	if (request->flags & ATA_R_TIMEOUT) {
		xpt_freeze_simq(ch->sim, 1);
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_CMD_TIMEOUT | CAM_RELEASE_SIMQ;
		fatalerr = 1;
	} else if (request->status & ATA_S_ERROR) {
		if (ccb->ccb_h.func_code == XPT_ATA_IO) {
			ccb->ccb_h.status |= CAM_ATA_STATUS_ERROR;
		} else {
			ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
			ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
		}
	} else if (request->result == ERESTART)
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
	else if (request->result != 0)
		ccb->ccb_h.status |= CAM_REQ_CMP_ERR;
	else
		ccb->ccb_h.status |= CAM_REQ_CMP;
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP &&
	    !(ccb->ccb_h.status & CAM_DEV_QFRZN)) {
		xpt_freeze_devq(ccb->ccb_h.path, 1);
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
	}
	if (ccb->ccb_h.func_code == XPT_ATA_IO &&
	    ((request->status & ATA_S_ERROR) ||
	    (ccb->ataio.cmd.flags & CAM_ATAIO_NEEDRESULT))) {
		struct ata_res *res = &ccb->ataio.res;

		res->status = request->status;
		res->error = request->error;
		res->lba_low = request->u.ata.lba;
		res->lba_mid = request->u.ata.lba >> 8;
		res->lba_high = request->u.ata.lba >> 16;
		res->device = request->u.ata.lba >> 24;
		res->lba_low_exp = request->u.ata.lba >> 24;
		res->lba_mid_exp = request->u.ata.lba >> 32;
		res->lba_high_exp = request->u.ata.lba >> 40;
		res->sector_count = request->u.ata.count;
		res->sector_count_exp = request->u.ata.count >> 8;
	}
	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		if (ccb->ccb_h.func_code == XPT_ATA_IO) {
			ccb->ataio.resid =
			    ccb->ataio.dxfer_len - request->donecount;
		} else {
			ccb->csio.resid =
			    ccb->csio.dxfer_len - request->donecount;
		}
	}
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_SCSI_STATUS_ERROR &&
	    (ccb->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0)
		ata_cam_request_sense(dev, request);
	else
		xpt_done(ccb);
	/* Do error recovery if needed. */
	if (fatalerr)
		ata_reinit(dev);
}
/**
 * mrsas_action:	SIM callback entry point
 * input:		pointer to SIM
 *			pointer to CAM Control Block
 *
 * This function processes CAM subsystem requests.  The type of request is
 * stored in ccb->ccb_h.func_code.  Note that ccb->cpi.maxio is not
 * supported for FreeBSD version 7.4 or earlier.
 */
static void
mrsas_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mrsas_softc *sc = (struct mrsas_softc *)cam_sim_softc(sim);
	struct ccb_hdr *ccb_h = &(ccb->ccb_h);
	u_int32_t device_id;

	switch (ccb->ccb_h.func_code) {
	case XPT_SCSI_IO:
	{
		device_id = ccb_h->target_id;

		/*
		 * bus 0 is LD, bus 1 is for system-PD
		 */
		if (cam_sim_bus(sim) == 1 &&
		    sc->pd_list[device_id].driveState != MR_PD_STATE_SYSTEM) {
			ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
			xpt_done(ccb);
		} else {
			if (mrsas_startio(sc, sim, ccb)) {
				ccb->ccb_h.status |= CAM_REQ_INVALID;
				xpt_done(ccb);
			}
		}
		break;
	}
	case XPT_ABORT:
	{
		ccb->ccb_h.status = CAM_UA_ABORT;
		xpt_done(ccb);
		break;
	}
	case XPT_RESET_BUS:
	{
		xpt_done(ccb);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
		ccb->cts.protocol = PROTO_SCSI;
		ccb->cts.protocol_version = SCSI_REV_2;
		ccb->cts.transport = XPORT_SPI;
		ccb->cts.transport_version = 2;
		ccb->cts.xport_specific.spi.valid = CTS_SPI_VALID_DISC;
		ccb->cts.xport_specific.spi.flags = CTS_SPI_FLAGS_DISC_ENB;
		ccb->cts.proto_specific.scsi.valid = CTS_SCSI_VALID_TQ;
		ccb->cts.proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB;
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_SET_TRAN_SETTINGS:
	{
		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
		xpt_done(ccb);
		break;
	}
	case XPT_CALC_GEOMETRY:
	{
		cam_calc_geometry(&ccb->ccg, 1);
		xpt_done(ccb);
		break;
	}
	case XPT_PATH_INQ:
	{
		ccb->cpi.version_num = 1;
		ccb->cpi.hba_inquiry = 0;
		ccb->cpi.target_sprt = 0;
		ccb->cpi.hba_misc = 0;
		ccb->cpi.hba_eng_cnt = 0;
		ccb->cpi.max_lun = MRSAS_SCSI_MAX_LUNS;
		ccb->cpi.unit_number = cam_sim_unit(sim);
		ccb->cpi.bus_id = cam_sim_bus(sim);
		ccb->cpi.initiator_id = MRSAS_SCSI_INITIATOR_ID;
		ccb->cpi.base_transfer_speed = 150000;
		strncpy(ccb->cpi.sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(ccb->cpi.hba_vid, "LSI", HBA_IDLEN);
		strncpy(ccb->cpi.dev_name, cam_sim_name(sim), DEV_IDLEN);
		ccb->cpi.transport = XPORT_SPI;
		ccb->cpi.transport_version = 2;
		ccb->cpi.protocol = PROTO_SCSI;
		ccb->cpi.protocol_version = SCSI_REV_2;
		if (ccb->cpi.bus_id == 0)
			ccb->cpi.max_target = MRSAS_MAX_LD - 1;
		else
			ccb->cpi.max_target = MRSAS_MAX_PD - 1;
		ccb->cpi.maxio = MRSAS_MAX_IO_SIZE;
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	default:
	{
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	}
	}
}
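/*
 * CAM action entry point for the tws(4) SIM.  SCSI I/O is queued to the
 * controller by tws_execute_scsi(); the remaining CCB types are answered
 * inline.
 */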
static void
tws_action(struct cam_sim *sim, union ccb *ccb)
{
	struct tws_softc *sc = (struct tws_softc *)cam_sim_softc(sim);

	switch (ccb->ccb_h.func_code) {
	case XPT_SCSI_IO:
	{
		if (tws_execute_scsi(sc, ccb))
			TWS_TRACE_DEBUG(sc, "execute scsi failed", 0, 0);
		break;
	}
	case XPT_ABORT:
	{
		TWS_TRACE_DEBUG(sc, "abort i/o", 0, 0);
		ccb->ccb_h.status = CAM_UA_ABORT;
		xpt_done(ccb);
		break;
	}
	case XPT_RESET_BUS:
	{
		TWS_TRACE_DEBUG(sc, "reset bus", sim, ccb);
		break;
	}
	case XPT_SET_TRAN_SETTINGS:
	{
		TWS_TRACE_DEBUG(sc, "set tran settings", sim, ccb);
		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
		xpt_done(ccb);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
		TWS_TRACE_DEBUG(sc, "get tran settings", sim, ccb);

#if (__FreeBSD_version >= 700000)
		ccb->cts.protocol = PROTO_SCSI;
		ccb->cts.protocol_version = SCSI_REV_2;
		ccb->cts.transport = XPORT_SPI;
		ccb->cts.transport_version = 2;
		ccb->cts.xport_specific.spi.valid = CTS_SPI_VALID_DISC;
		ccb->cts.xport_specific.spi.flags = CTS_SPI_FLAGS_DISC_ENB;
		ccb->cts.proto_specific.scsi.valid = CTS_SCSI_VALID_TQ;
		ccb->cts.proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB;
#else
		ccb->cts.valid = (CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID);
		ccb->cts.flags &= ~(CCB_TRANS_DISC_ENB | CCB_TRANS_TAG_ENB);
#endif
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_CALC_GEOMETRY:
	{
		TWS_TRACE_DEBUG(sc, "calc geometry(ccb,block-size)", ccb,
		    ccb->ccg.block_size);
		cam_calc_geometry(&ccb->ccg, 1/* extended */);
		xpt_done(ccb);
		break;
	}
	case XPT_PATH_INQ:
	{
		TWS_TRACE_DEBUG(sc, "path inquiry", sim, ccb);
		ccb->cpi.version_num = 1;
		ccb->cpi.hba_inquiry = 0;
		ccb->cpi.target_sprt = 0;
		ccb->cpi.hba_misc = 0;
		ccb->cpi.hba_eng_cnt = 0;
		ccb->cpi.max_target = TWS_MAX_NUM_UNITS;
		ccb->cpi.max_lun = TWS_MAX_NUM_LUNS - 1;
		ccb->cpi.unit_number = cam_sim_unit(sim);
		ccb->cpi.bus_id = cam_sim_bus(sim);
		ccb->cpi.initiator_id = TWS_SCSI_INITIATOR_ID;
		ccb->cpi.base_transfer_speed = 6000000;
		strncpy(ccb->cpi.sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(ccb->cpi.hba_vid, "3ware", HBA_IDLEN);
		strncpy(ccb->cpi.dev_name, cam_sim_name(sim), DEV_IDLEN);
#if (__FreeBSD_version >= 700000)
		ccb->cpi.transport = XPORT_SPI;
		ccb->cpi.transport_version = 2;
		ccb->cpi.protocol = PROTO_SCSI;
		ccb->cpi.protocol_version = SCSI_REV_2;
		ccb->cpi.maxio = TWS_MAX_IO_SIZE;
#endif
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	default:
		TWS_TRACE_DEBUG(sc, "default", sim, ccb);
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	}
}
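/*
 * CAM action entry point for the mfi(4) pass-through bus.  SCSI I/O CCBs
 * are validated and queued to the controller via mfi_startio(); the
 * remaining CCB types are completed inline by the xpt_done() at the end.
 */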
static void
mfip_cam_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mfip_softc *sc = cam_sim_softc(sim);
	struct mfi_softc *mfisc = sc->mfi_sc;

	mfi_lockassert(&mfisc->mfi_io_lock);

	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ:
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE | PI_TAG_ABLE | PI_WIDE_16;
		cpi->target_sprt = 0;
		cpi->hba_misc = PIM_NOBUSRESET | PIM_SEQSCAN;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = MFI_SCSI_MAX_TARGETS;
		cpi->max_lun = MFI_SCSI_MAX_LUNS;
		cpi->initiator_id = MFI_SCSI_INITIATOR_ID;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "LSI", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 150000;
		cpi->transport = XPORT_SAS;
		cpi->transport_version = 0;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_2;
		cpi->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	case XPT_RESET_BUS:
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	case XPT_RESET_DEV:
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	case XPT_GET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings_sas *sas =
		    &ccb->cts.xport_specific.sas;

		ccb->cts.protocol = PROTO_SCSI;
		ccb->cts.protocol_version = SCSI_REV_2;
		ccb->cts.transport = XPORT_SAS;
		ccb->cts.transport_version = 0;

		sas->valid &= ~CTS_SAS_VALID_SPEED;
		sas->bitrate = 150000;

		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	case XPT_SET_TRAN_SETTINGS:
		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
		break;
	case XPT_SCSI_IO:
	{
		struct ccb_hdr *ccbh = &ccb->ccb_h;
		struct ccb_scsiio *csio = &ccb->csio;

		ccbh->status = CAM_REQ_INPROG;
		if (csio->cdb_len > MFI_SCSI_MAX_CDB_LEN) {
			ccbh->status = CAM_REQ_INVALID;
			break;
		}
		if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
			if (ccbh->flags & CAM_DATA_PHYS) {
				ccbh->status = CAM_REQ_INVALID;
				break;
			}
			if (ccbh->flags & CAM_SCATTER_VALID) {
				ccbh->status = CAM_REQ_INVALID;
				break;
			}
		}

		ccbh->ccb_mfip_ptr = sc;
		TAILQ_INSERT_TAIL(&mfisc->mfi_cam_ccbq, ccbh, sim_links.tqe);
		mfi_startio(mfisc);
		return;
	}
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		break;
	}

	xpt_done(ccb);
	return;
}
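/*
 * Completion handler for mfi(4) pass-through commands: convert the firmware
 * command status into CAM/SCSI status, copy out sense data on a SCSI error,
 * and rewrite the INQUIRY device type of direct-access and processor
 * devices to T_NODEVICE so they are not attached through this path.
 */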
static void
mfip_done(struct mfi_command *cm)
{
	union ccb *ccb = cm->cm_private;
	struct ccb_hdr *ccbh = &ccb->ccb_h;
	struct ccb_scsiio *csio = &ccb->csio;
	struct mfi_pass_frame *pt;

	pt = &cm->cm_frame->pass;

	switch (pt->header.cmd_status) {
	case MFI_STAT_OK:
	{
		uint8_t command, device;

		ccbh->status = CAM_REQ_CMP;
		csio->scsi_status = pt->header.scsi_status;
		if (ccbh->flags & CAM_CDB_POINTER)
			command = csio->cdb_io.cdb_ptr[0];
		else
			command = csio->cdb_io.cdb_bytes[0];
		if (command == INQUIRY) {
			device = csio->data_ptr[0] & 0x1f;
			if ((device == T_DIRECT) || (device == T_PROCESSOR))
				csio->data_ptr[0] =
				    (csio->data_ptr[0] & 0xe0) | T_NODEVICE;
		}
		break;
	}
	case MFI_STAT_SCSI_DONE_WITH_ERROR:
	{
		int sense_len;

		ccbh->status = CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID;
		csio->scsi_status = pt->header.scsi_status;
		if (pt->header.sense_len < csio->sense_len)
			csio->sense_resid = csio->sense_len -
			    pt->header.sense_len;
		else
			csio->sense_resid = 0;
		sense_len = min(pt->header.sense_len,
		    sizeof(struct scsi_sense_data));
		bzero(&csio->sense_data, sizeof(struct scsi_sense_data));
		bcopy(&cm->cm_sense->data[0], &csio->sense_data, sense_len);
		break;
	}
	case MFI_STAT_DEVICE_NOT_FOUND:
		ccbh->status = CAM_SEL_TIMEOUT;
		break;
	case MFI_STAT_SCSI_IO_FAILED:
		ccbh->status = CAM_REQ_CMP_ERR;
		csio->scsi_status = pt->header.scsi_status;
		break;
	default:
		ccbh->status = CAM_REQ_CMP_ERR;
		csio->scsi_status = pt->header.scsi_status;
		break;
	}

	mfi_release_command(cm);
	xpt_done(ccb);
}
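/*
 * Callback from the SCI framework when a task management request (device
 * reset) finishes.  Depending on the completion status the reset is
 * retried, the next pending device reset is started, or the originating
 * CCB (if any) is completed and returned to CAM.
 */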
void
isci_task_request_complete(SCI_CONTROLLER_HANDLE_T scif_controller,
    SCI_REMOTE_DEVICE_HANDLE_T remote_device,
    SCI_TASK_REQUEST_HANDLE_T task_request,
    SCI_TASK_STATUS completion_status)
{
	struct ISCI_TASK_REQUEST *isci_task_request =
	    (struct ISCI_TASK_REQUEST *)sci_object_get_association(task_request);
	struct ISCI_CONTROLLER *isci_controller =
	    (struct ISCI_CONTROLLER *)sci_object_get_association(scif_controller);
	struct ISCI_REMOTE_DEVICE *isci_remote_device =
	    (struct ISCI_REMOTE_DEVICE *)sci_object_get_association(remote_device);
	struct ISCI_REMOTE_DEVICE *pending_remote_device;
	BOOL retry_task = FALSE;
	union ccb *ccb = isci_task_request->ccb;

	isci_remote_device->is_resetting = FALSE;

	switch ((int)completion_status) {
	case SCI_TASK_SUCCESS:
	case SCI_TASK_FAILURE_RESPONSE_VALID:
		break;

	case SCI_TASK_FAILURE_INVALID_STATE:
		retry_task = TRUE;
		isci_log_message(0, "ISCI",
		    "task failure (invalid state) - retrying\n");
		break;

	case SCI_TASK_FAILURE_INSUFFICIENT_RESOURCES:
		retry_task = TRUE;
		isci_log_message(0, "ISCI",
		    "task failure (insufficient resources) - retrying\n");
		break;

	case SCI_FAILURE_TIMEOUT:
		if (isci_controller->fail_on_task_timeout) {
			retry_task = FALSE;
			isci_log_message(0, "ISCI",
			    "task timeout - not retrying\n");
			scif_cb_domain_device_removed(isci_controller,
			    isci_remote_device->domain, isci_remote_device);
		} else {
			retry_task = TRUE;
			isci_log_message(0, "ISCI",
			    "task timeout - retrying\n");
		}
		break;

	case SCI_TASK_FAILURE:
	case SCI_TASK_FAILURE_UNSUPPORTED_PROTOCOL:
	case SCI_TASK_FAILURE_INVALID_TAG:
	case SCI_TASK_FAILURE_CONTROLLER_SPECIFIC_ERR:
	case SCI_TASK_FAILURE_TERMINATED:
	case SCI_TASK_FAILURE_INVALID_PARAMETER_VALUE:
		isci_log_message(0, "ISCI",
		    "unhandled task completion code 0x%x\n",
		    completion_status);
		break;

	default:
		isci_log_message(0, "ISCI",
		    "unhandled task completion code 0x%x\n",
		    completion_status);
		break;
	}

	if (isci_controller->is_frozen == TRUE) {
		isci_controller->is_frozen = FALSE;
		xpt_release_simq(isci_controller->sim, TRUE);
	}

	sci_pool_put(isci_controller->request_pool,
	    (struct ISCI_REQUEST *)isci_task_request);

	/* Make sure we release the device queue, since it may have been
	 * frozen if someone tried to start an I/O while the task was in
	 * progress.
	 */
	isci_remote_device_release_device_queue(isci_remote_device);

	if (retry_task == TRUE)
		isci_remote_device_reset(isci_remote_device, ccb);
	else {
		pending_remote_device = sci_fast_list_remove_head(
		    &isci_controller->pending_device_reset_list);

		if (pending_remote_device != NULL) {
			/* Any resets that were triggered from an XPT_RESET_DEV
			 * CCB are never put in the pending list if the request
			 * pool is empty - they are given back to CAM to be
			 * requeued.  So we will always pass NULL here,
			 * denoting that there is no CCB associated with the
			 * device reset.
			 */
			isci_remote_device_reset(pending_remote_device, NULL);
		} else if (ccb != NULL) {
			/* There was a CCB associated with this reset, so mark
			 * it complete and return it to CAM.
			 */
			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
			ccb->ccb_h.status |= CAM_REQ_CMP;
			xpt_done(ccb);
		}
	}
}
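/*
 * CAM action entry point for the isci(4) SIM.  SCSI and SMP I/O are
 * dispatched to the I/O request layer; path inquiry and transfer-setting
 * queries are answered from controller and remote-device state.
 */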
void
isci_action(struct cam_sim *sim, union ccb *ccb)
{
	struct ISCI_CONTROLLER *controller =
	    (struct ISCI_CONTROLLER *)cam_sim_softc(sim);

	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ:
	{
		struct ccb_pathinq *cpi = &ccb->cpi;
		int bus = cam_sim_bus(sim);

		ccb->ccb_h.ccb_sim_ptr = sim;
		cpi->version_num = 1;
		cpi->hba_inquiry = PI_TAG_ABLE;
		cpi->target_sprt = 0;
		cpi->hba_misc = PIM_NOBUSRESET | PIM_SEQSCAN;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = SCI_MAX_REMOTE_DEVICES - 1;
		cpi->max_lun = ISCI_MAX_LUN;
#if __FreeBSD_version >= 800102
		cpi->maxio = isci_io_request_get_max_io_size();
#endif
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = bus;
		cpi->initiator_id = SCI_MAX_REMOTE_DEVICES;
		cpi->base_transfer_speed = 300000;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "Intel Corp.", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->transport = XPORT_SAS;
		cpi->transport_version = 0;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_SPC2;
		cpi->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
	}
		break;
	case XPT_GET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings *general_settings = &ccb->cts;
		struct ccb_trans_settings_sas *sas_settings =
		    &general_settings->xport_specific.sas;
		struct ccb_trans_settings_scsi *scsi_settings =
		    &general_settings->proto_specific.scsi;
		struct ISCI_REMOTE_DEVICE *remote_device;

		remote_device = controller->remote_device[ccb->ccb_h.target_id];

		if (remote_device == NULL) {
			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
			ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
			xpt_done(ccb);
			break;
		}

		general_settings->protocol = PROTO_SCSI;
		general_settings->transport = XPORT_SAS;
		general_settings->protocol_version = SCSI_REV_SPC2;
		general_settings->transport_version = 0;
		scsi_settings->valid = CTS_SCSI_VALID_TQ;
		scsi_settings->flags = CTS_SCSI_FLAGS_TAG_ENB;
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_REQ_CMP;

		sas_settings->bitrate =
		    isci_remote_device_get_bitrate(remote_device);

		if (sas_settings->bitrate != 0)
			sas_settings->valid = CTS_SAS_VALID_SPEED;

		xpt_done(ccb);
	}
		break;
	case XPT_SCSI_IO:
		isci_io_request_execute_scsi_io(ccb, controller);
		break;
#if __FreeBSD_version >= 900026
	case XPT_SMP_IO:
		isci_io_request_execute_smp_io(ccb, controller);
		break;
#endif
	case XPT_SET_TRAN_SETTINGS:
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		xpt_done(ccb);
		break;
	case XPT_RESET_DEV:
	{
		struct ISCI_REMOTE_DEVICE *remote_device =
		    controller->remote_device[ccb->ccb_h.target_id];

		if (remote_device != NULL)
			isci_remote_device_reset(remote_device, ccb);
		else {
			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
			ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
			xpt_done(ccb);
		}
	}
		break;
	case XPT_RESET_BUS:
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	default:
		isci_log_message(0, "ISCI", "Unhandled func_code 0x%x\n",
		    ccb->ccb_h.func_code);
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	}
}
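/*
 * bus_dma callback for a mapped SCSI I/O CCB: build an adv_scsi_q request
 * (CDB pointer, target/LUN, tag code and scatter/gather list) and hand it
 * to the controller's execution queue, requeueing the CCB on a temporary
 * resource shortage.
 */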
static void
adv_execute_ccb(void *arg, bus_dma_segment_t *dm_segs,
    int nsegments, int error)
{
	struct ccb_scsiio *csio;
	struct ccb_hdr *ccb_h;
	struct cam_sim *sim;
	struct adv_softc *adv;
	struct adv_ccb_info *cinfo;
	struct adv_scsi_q scsiq;
	struct adv_sg_head sghead;

	csio = (struct ccb_scsiio *)arg;
	ccb_h = &csio->ccb_h;
	sim = xpt_path_sim(ccb_h->path);
	adv = (struct adv_softc *)cam_sim_softc(sim);
	cinfo = (struct adv_ccb_info *)csio->ccb_h.ccb_cinfo_ptr;
	if (!dumping)
		mtx_assert(&adv->lock, MA_OWNED);

	/*
	 * Setup our done routine to release the simq on
	 * the next ccb that completes.
	 */
	if ((adv->state & ADV_BUSDMA_BLOCK) != 0)
		adv->state |= ADV_BUSDMA_BLOCK_CLEARED;

	if ((ccb_h->flags & CAM_CDB_POINTER) != 0) {
		if ((ccb_h->flags & CAM_CDB_PHYS) == 0) {
			/* XXX Need phystovirt!!!! */
			/* How about pmap_kenter??? */
			scsiq.cdbptr = csio->cdb_io.cdb_ptr;
		} else {
			scsiq.cdbptr = csio->cdb_io.cdb_ptr;
		}
	} else {
		scsiq.cdbptr = csio->cdb_io.cdb_bytes;
	}

	/*
	 * Build up the request
	 */
	scsiq.q1.status = 0;
	scsiq.q1.q_no = 0;
	scsiq.q1.cntl = 0;
	scsiq.q1.sg_queue_cnt = 0;
	scsiq.q1.target_id = ADV_TID_TO_TARGET_MASK(ccb_h->target_id);
	scsiq.q1.target_lun = ccb_h->target_lun;
	scsiq.q1.sense_len = csio->sense_len;
	scsiq.q1.extra_bytes = 0;
	scsiq.q2.ccb_index = cinfo - adv->ccb_infos;
	scsiq.q2.target_ix = ADV_TIDLUN_TO_IX(ccb_h->target_id,
	    ccb_h->target_lun);
	scsiq.q2.flag = 0;
	scsiq.q2.cdb_len = csio->cdb_len;
	if ((ccb_h->flags & CAM_TAG_ACTION_VALID) != 0)
		scsiq.q2.tag_code = csio->tag_action;
	else
		scsiq.q2.tag_code = 0;
	scsiq.q2.vm_id = 0;

	if (nsegments != 0) {
		bus_dmasync_op_t op;

		scsiq.q1.data_addr = dm_segs->ds_addr;
		scsiq.q1.data_cnt = dm_segs->ds_len;
		if (nsegments > 1) {
			scsiq.q1.cntl |= QC_SG_HEAD;
			sghead.entry_cnt = sghead.entry_to_copy = nsegments;
			sghead.res = 0;
			sghead.sg_list = adv_fixup_dmasegs(adv, dm_segs);
			scsiq.sg_head = &sghead;
		} else {
			scsiq.sg_head = NULL;
		}
		if ((ccb_h->flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_PREREAD;
		else
			op = BUS_DMASYNC_PREWRITE;
		bus_dmamap_sync(adv->buffer_dmat, cinfo->dmamap, op);
	} else {
		scsiq.q1.data_addr = 0;
		scsiq.q1.data_cnt = 0;
		scsiq.sg_head = NULL;
	}

	/*
	 * Last time we need to check if this SCB needs to
	 * be aborted.
	 */
	if (ccb_h->status != CAM_REQ_INPROG) {
		if (nsegments != 0)
			bus_dmamap_unload(adv->buffer_dmat, cinfo->dmamap);
		adv_clear_state(adv, (union ccb *)csio);
		adv_free_ccb_info(adv, cinfo);
		xpt_done((union ccb *)csio);
		return;
	}

	if (adv_execute_scsi_queue(adv, &scsiq, csio->dxfer_len) != 0) {
		/* Temporary resource shortage */
		adv_set_state(adv, ADV_RESOURCE_SHORTAGE);
		if (nsegments != 0)
			bus_dmamap_unload(adv->buffer_dmat, cinfo->dmamap);
		csio->ccb_h.status = CAM_REQUEUE_REQ;
		adv_clear_state(adv, (union ccb *)csio);
		adv_free_ccb_info(adv, cinfo);
		xpt_done((union ccb *)csio);
		return;
	}
	cinfo->state |= ACCB_ACTIVE;
	ccb_h->status |= CAM_SIM_QUEUED;
	LIST_INSERT_HEAD(&adv->pending_ccbs, ccb_h, sim_links.le);
	/* Schedule our timeout */
	callout_reset(&cinfo->timer, ccb_h->timeout * hz / 1000,
	    adv_timeout, csio);
}
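/*
 * CAM action entry point for the advansys(4) SIM.
 */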
static void
adv_action(struct cam_sim *sim, union ccb *ccb)
{
	struct adv_softc *adv;

	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("adv_action\n"));

	adv = (struct adv_softc *)cam_sim_softc(sim);
	mtx_assert(&adv->lock, MA_OWNED);

	switch (ccb->ccb_h.func_code) {
	/* Common cases first */
	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
	{
		struct ccb_hdr *ccb_h;
		struct ccb_scsiio *csio;
		struct adv_ccb_info *cinfo;
		int error;

		ccb_h = &ccb->ccb_h;
		csio = &ccb->csio;
		cinfo = adv_get_ccb_info(adv);
		if (cinfo == NULL)
			panic("XXX Handle CCB info error!!!");

		ccb_h->ccb_cinfo_ptr = cinfo;
		cinfo->ccb = ccb;

		error = bus_dmamap_load_ccb(adv->buffer_dmat,
		    cinfo->dmamap, ccb, adv_execute_ccb, csio, /*flags*/0);
		if (error == EINPROGRESS) {
			/*
			 * So as to maintain ordering, freeze the controller
			 * queue until our mapping is returned.
			 */
			adv_set_state(adv, ADV_BUSDMA_BLOCK);
		}
		break;
	}
	case XPT_RESET_DEV:	/* Bus Device Reset the specified SCSI device */
	case XPT_TARGET_IO:	/* Execute target I/O request */
	case XPT_ACCEPT_TARGET_IO:	/* Accept Host Target Mode CDB */
	case XPT_CONT_TARGET_IO:	/* Continue Host Target I/O Connection*/
	case XPT_EN_LUN:		/* Enable LUN as a target */
	case XPT_ABORT:			/* Abort the specified CCB */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
#define	IS_CURRENT_SETTINGS(c)	(c->type == CTS_TYPE_CURRENT_SETTINGS)
#define	IS_USER_SETTINGS(c)	(c->type == CTS_TYPE_USER_SETTINGS)
	case XPT_SET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings_scsi *scsi;
		struct ccb_trans_settings_spi *spi;
		struct ccb_trans_settings *cts;
		target_bit_vector targ_mask;
		struct adv_transinfo *tconf;
		u_int update_type;

		cts = &ccb->cts;
		targ_mask = ADV_TID_TO_TARGET_MASK(cts->ccb_h.target_id);
		update_type = 0;

		/*
		 * The user must specify which type of settings he wishes
		 * to change.
		 */
		if (IS_CURRENT_SETTINGS(cts) && !IS_USER_SETTINGS(cts)) {
			tconf = &adv->tinfo[cts->ccb_h.target_id].current;
			update_type |= ADV_TRANS_GOAL;
		} else if (IS_USER_SETTINGS(cts) && !IS_CURRENT_SETTINGS(cts)) {
			tconf = &adv->tinfo[cts->ccb_h.target_id].user;
			update_type |= ADV_TRANS_USER;
		} else {
			ccb->ccb_h.status = CAM_REQ_INVALID;
			break;
		}

		scsi = &cts->proto_specific.scsi;
		spi = &cts->xport_specific.spi;
		if ((update_type & ADV_TRANS_GOAL) != 0) {
			if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
				if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0)
					adv->disc_enable |= targ_mask;
				else
					adv->disc_enable &= ~targ_mask;
				adv_write_lram_8(adv, ADVV_DISC_ENABLE_B,
				    adv->disc_enable);
			}

			if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
				if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0)
					adv->cmd_qng_enabled |= targ_mask;
				else
					adv->cmd_qng_enabled &= ~targ_mask;
			}
		}

		if ((update_type & ADV_TRANS_USER) != 0) {
			if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
				if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0)
					adv->user_disc_enable |= targ_mask;
				else
					adv->user_disc_enable &= ~targ_mask;
			}

			if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
				if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0)
					adv->user_cmd_qng_enabled |= targ_mask;
				else
					adv->user_cmd_qng_enabled &= ~targ_mask;
			}
		}

		/*
		 * If the user specifies either the sync rate, or offset,
		 * but not both, the unspecified parameter defaults to its
		 * current value in transfer negotiations.
		 */
		if (((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0) ||
		    ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0)) {
			/*
			 * If the user provided a sync rate but no offset,
			 * use the current offset.
			 */
			if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0)
				spi->sync_offset = tconf->offset;

			/*
			 * If the user provided an offset but no sync rate,
			 * use the current sync rate.
			 */
			if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0)
				spi->sync_period = tconf->period;

			adv_period_offset_to_sdtr(adv, &spi->sync_period,
			    &spi->sync_offset, cts->ccb_h.target_id);

			adv_set_syncrate(adv, /*struct cam_path */NULL,
			    cts->ccb_h.target_id, spi->sync_period,
			    spi->sync_offset, update_type);
		}

		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	/* Get default/user set transfer settings for the target */
	{
		struct ccb_trans_settings_scsi *scsi;
		struct ccb_trans_settings_spi *spi;
		struct ccb_trans_settings *cts;
		struct adv_transinfo *tconf;
		target_bit_vector target_mask;

		cts = &ccb->cts;
		target_mask = ADV_TID_TO_TARGET_MASK(cts->ccb_h.target_id);

		scsi = &cts->proto_specific.scsi;
		spi = &cts->xport_specific.spi;

		cts->protocol = PROTO_SCSI;
		cts->protocol_version = SCSI_REV_2;
		cts->transport = XPORT_SPI;
		cts->transport_version = 2;

		scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
		spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;

		if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
			tconf = &adv->tinfo[cts->ccb_h.target_id].current;
			if ((adv->disc_enable & target_mask) != 0)
				spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
			if ((adv->cmd_qng_enabled & target_mask) != 0)
				scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
		} else {
			tconf = &adv->tinfo[cts->ccb_h.target_id].user;
			if ((adv->user_disc_enable & target_mask) != 0)
				spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
			if ((adv->user_cmd_qng_enabled & target_mask) != 0)
				scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
		}

		spi->sync_period = tconf->period;
		spi->sync_offset = tconf->offset;
		spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
		spi->valid = CTS_SPI_VALID_SYNC_RATE |
		    CTS_SPI_VALID_SYNC_OFFSET |
		    CTS_SPI_VALID_BUS_WIDTH |
		    CTS_SPI_VALID_DISC;
		scsi->valid = CTS_SCSI_VALID_TQ;

		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_CALC_GEOMETRY:
	{
		int extended;

		extended = (adv->control & ADV_CNTL_BIOS_GT_1GB) != 0;
		cam_calc_geometry(&ccb->ccg, extended);
		xpt_done(ccb);
		break;
	}
	case XPT_RESET_BUS:	/* Reset the specified SCSI bus */
	{
		adv_stop_execution(adv);
		adv_reset_bus(adv, /*initiate_reset*/TRUE);
		adv_start_execution(adv);

		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_TERM_IO:	/* Terminate the I/O process */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	case XPT_PATH_INQ:	/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1;	/* XXX??? */
		cpi->hba_inquiry = PI_SDTR_ABLE | PI_TAG_ABLE;
		cpi->target_sprt = 0;
		cpi->hba_misc = 0;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = 7;
		cpi->max_lun = 7;
		cpi->initiator_id = adv->scsi_id;
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 3300;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "Advansys", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->ccb_h.status = CAM_REQ_CMP;
		cpi->transport = XPORT_SPI;
		cpi->transport_version = 2;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_2;
		xpt_done(ccb);
		break;
	}
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	}
}
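/*
 * Completion handler for advansys(4) requests: sync and unload the DMA map,
 * translate the controller's done/host status pair into CAM status codes
 * (including autosense data and bus-reset recovery), and complete the CCB.
 */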
void
adv_done(struct adv_softc *adv, union ccb *ccb, u_int done_stat,
    u_int host_stat, u_int scsi_status, u_int q_no)
{
	struct adv_ccb_info *cinfo;

	if (!dumping)
		mtx_assert(&adv->lock, MA_OWNED);
	cinfo = (struct adv_ccb_info *)ccb->ccb_h.ccb_cinfo_ptr;
	LIST_REMOVE(&ccb->ccb_h, sim_links.le);
	callout_stop(&cinfo->timer);
	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		bus_dmasync_op_t op;

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_POSTREAD;
		else
			op = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(adv->buffer_dmat, cinfo->dmamap, op);
		bus_dmamap_unload(adv->buffer_dmat, cinfo->dmamap);
	}

	switch (done_stat) {
	case QD_NO_ERROR:
		if (host_stat == QHSTA_NO_ERROR) {
			ccb->ccb_h.status = CAM_REQ_CMP;
			break;
		}
		xpt_print_path(ccb->ccb_h.path);
		printf("adv_done - queue done without error, "
		    "but host status non-zero(%x)\n", host_stat);
		/*FALLTHROUGH*/
	case QD_WITH_ERROR:
		switch (host_stat) {
		case QHSTA_M_TARGET_STATUS_BUSY:
		case QHSTA_M_BAD_QUEUE_FULL_OR_BUSY:
			/*
			 * Assume that if we were a tagged transaction
			 * the target reported queue full.  Otherwise,
			 * report busy.  The firmware really should just
			 * pass the original status back up to us even
			 * if it thinks the target was in error for
			 * returning this status as no other transactions
			 * from this initiator are in effect, but this
			 * ignores multi-initiator setups and there is
			 * evidence that the firmware gets its per-device
			 * transaction counts screwed up occasionally.
			 */
			ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
			if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0 &&
			    host_stat != QHSTA_M_TARGET_STATUS_BUSY)
				scsi_status = SCSI_STATUS_QUEUE_FULL;
			else
				scsi_status = SCSI_STATUS_BUSY;
			adv_abort_ccb(adv, ccb->ccb_h.target_id,
			    ccb->ccb_h.target_lun, /*ccb*/NULL,
			    CAM_REQUEUE_REQ, /*queued_only*/TRUE);
			/*FALLTHROUGH*/
		case QHSTA_M_NO_AUTO_REQ_SENSE:
		case QHSTA_NO_ERROR:
			ccb->csio.scsi_status = scsi_status;
			switch (scsi_status) {
			case SCSI_STATUS_CHECK_COND:
			case SCSI_STATUS_CMD_TERMINATED:
				ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
				/* Structure copy */
				ccb->csio.sense_data =
				    adv->sense_buffers[q_no - 1];
				/* FALLTHROUGH */
			case SCSI_STATUS_BUSY:
			case SCSI_STATUS_RESERV_CONFLICT:
			case SCSI_STATUS_QUEUE_FULL:
			case SCSI_STATUS_COND_MET:
			case SCSI_STATUS_INTERMED:
			case SCSI_STATUS_INTERMED_COND_MET:
				ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
				break;
			case SCSI_STATUS_OK:
				ccb->ccb_h.status |= CAM_REQ_CMP;
				break;
			}
			break;
		case QHSTA_M_SEL_TIMEOUT:
			ccb->ccb_h.status = CAM_SEL_TIMEOUT;
			break;
		case QHSTA_M_DATA_OVER_RUN:
			ccb->ccb_h.status = CAM_DATA_RUN_ERR;
			break;
		case QHSTA_M_UNEXPECTED_BUS_FREE:
			ccb->ccb_h.status = CAM_UNEXP_BUSFREE;
			break;
		case QHSTA_M_BAD_BUS_PHASE_SEQ:
			ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
			break;
		case QHSTA_M_BAD_CMPL_STATUS_IN:
			/* No command complete after a status message */
			ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
			break;
		case QHSTA_D_EXE_SCSI_Q_BUSY_TIMEOUT:
		case QHSTA_M_WTM_TIMEOUT:
		case QHSTA_M_HUNG_REQ_SCSI_BUS_RESET:
			/* The SCSI bus hung in a phase */
			ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
			adv_reset_bus(adv, /*initiate_reset*/TRUE);
			break;
		case QHSTA_M_AUTO_REQ_SENSE_FAIL:
			ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
			break;
		case QHSTA_D_QDONE_SG_LIST_CORRUPTED:
		case QHSTA_D_ASC_DVC_ERROR_CODE_SET:
		case QHSTA_D_HOST_ABORT_FAILED:
		case QHSTA_D_EXE_SCSI_Q_FAILED:
		case QHSTA_D_ASPI_NO_BUF_POOL:
		case QHSTA_M_BAD_TAG_CODE:
		case QHSTA_D_LRAM_CMP_ERROR:
		case QHSTA_M_MICRO_CODE_ERROR_HALT:
		default:
			panic("%s: Unhandled Host status error %x",
			    device_get_nameunit(adv->dev), host_stat);
			/* NOTREACHED */
		}
		break;
	case QD_ABORTED_BY_HOST:
		/* Don't clobber any, more explicit, error codes we've set */
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG)
			ccb->ccb_h.status = CAM_REQ_ABORTED;
		break;
	default:
		xpt_print_path(ccb->ccb_h.path);
		printf("adv_done - queue done with unknown status %x:%x\n",
		    done_stat, host_stat);
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		break;
	}

	adv_clear_state(adv, ccb);
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP &&
	    (ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
	}
	adv_free_ccb_info(adv, cinfo);
	/*
	 * Null this out so that we catch driver bugs that cause a
	 * ccb to be completed twice.
	 */
	ccb->ccb_h.ccb_cinfo_ptr = NULL;
	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
	xpt_done(ccb);
}
/*
 * Function name:	tw_osl_complete_io
 * Description:		Called to complete CAM scsi requests.
 *
 * Input:		req_handle -- ptr to request handle
 * Output:		None
 * Return value:	None
 */
TW_VOID
tw_osl_complete_io(struct tw_cl_req_handle *req_handle)
{
	struct tw_osli_req_context *req = req_handle->osl_req_ctxt;
	struct tw_cl_req_packet *req_pkt =
	    (struct tw_cl_req_packet *)(&req->req_pkt);
	struct tw_cl_scsi_req_packet *scsi_req;
	struct twa_softc *sc = req->ctlr;
	union ccb *ccb = (union ccb *)(req->orig_req);

	tw_osli_dbg_dprintf(10, sc, "entering");

	if (req->state != TW_OSLI_REQ_STATE_BUSY)
		tw_osli_printf(sc, "request = %p, status = %d",
		    TW_CL_SEVERITY_ERROR_STRING,
		    TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
		    0x210A,
		    "Unposted command completed!!",
		    req, req->state);

	/*
	 * Remove request from the busy queue.  Just mark it complete.
	 * There's no need to move it into the complete queue as we are
	 * going to be done with it right now.
	 */
	req->state = TW_OSLI_REQ_STATE_COMPLETE;
	tw_osli_req_q_remove_item(req, TW_OSLI_BUSY_Q);

	tw_osli_unmap_request(req);

	req->deadline = 0;
	if (req->error_code) {
		/* This request never got submitted to the firmware. */
		if (req->error_code == EBUSY) {
			/*
			 * Cmd queue is full, or the Common Layer is out of
			 * resources.  The simq will already have been
			 * frozen; it will be unfrozen when this ccb gets
			 * completed.
			 */
			ccb->ccb_h.status |= CAM_REQUEUE_REQ;
		}
		else if (req->error_code == EFBIG)
			ccb->ccb_h.status = CAM_REQ_TOO_BIG;
		else
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
	} else {
		scsi_req = &(req_pkt->gen_req_pkt.scsi_req);
		if (req_pkt->status == TW_CL_ERR_REQ_SUCCESS)
			ccb->ccb_h.status = CAM_REQ_CMP;
		else {
			if (req_pkt->status & TW_CL_ERR_REQ_INVALID_TARGET)
				ccb->ccb_h.status |= CAM_SEL_TIMEOUT;
			else if (req_pkt->status & TW_CL_ERR_REQ_INVALID_LUN)
				ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
			else if (req_pkt->status & TW_CL_ERR_REQ_SCSI_ERROR)
				ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
			else if (req_pkt->status & TW_CL_ERR_REQ_BUS_RESET)
				ccb->ccb_h.status |=
				    (CAM_REQUEUE_REQ | CAM_SCSI_BUS_RESET);
			/*
			 * If none of the above errors occurred, simply
			 * mark completion error.
			 */
			if (ccb->ccb_h.status == 0)
				ccb->ccb_h.status = CAM_REQ_CMP_ERR;

			if (req_pkt->status & TW_CL_ERR_REQ_AUTO_SENSE_VALID) {
				ccb->csio.sense_len = scsi_req->sense_len;
				ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
			}
		}

		ccb->csio.scsi_status = scsi_req->scsi_status;
	}

	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
	lockmgr(sc->sim_lock, LK_EXCLUSIVE);
	xpt_done(ccb);
	lockmgr(sc->sim_lock, LK_RELEASE);
	if (!req->error_code)
		/* twa_action will free the request otherwise */
		tw_osli_req_q_insert_tail(req, TW_OSLI_FREE_Q);
}
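/*
 * CAM action entry point for the vpo(4) parallel-port SCSI adapter.
 */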
static void
vpo_action(struct cam_sim *sim, union ccb *ccb)
{
	struct vpo_data *vpo = (struct vpo_data *)sim->softc;

	switch (ccb->ccb_h.func_code) {
	case XPT_SCSI_IO:
	{
		struct ccb_scsiio *csio;

		csio = &ccb->csio;

#ifdef VP0_DEBUG
		printf("vpo%d: XPT_SCSI_IO (0x%x) request\n",
		    vpo->vpo_unit, csio->cdb_io.cdb_bytes[0]);
#endif
		vpo_intr(vpo, csio);
		xpt_done(ccb);
		break;
	}
	case XPT_CALC_GEOMETRY:
	{
		struct ccb_calc_geometry *ccg;

		ccg = &ccb->ccg;

#ifdef VP0_DEBUG
		printf("vpo%d: XPT_CALC_GEOMETRY (bs=%d,vs=%d,c=%d,h=%d,spt=%d) request\n",
		    vpo->vpo_unit, ccg->block_size, ccg->volume_size,
		    ccg->cylinders, ccg->heads, ccg->secs_per_track);
#endif
		ccg->heads = 64;
		ccg->secs_per_track = 32;
		ccg->cylinders = ccg->volume_size /
		    (ccg->heads * ccg->secs_per_track);

		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_RESET_BUS:	/* Reset the specified SCSI bus */
	{
#ifdef VP0_DEBUG
		printf("vpo%d: XPT_RESET_BUS request\n", vpo->vpo_unit);
#endif
		if (vpo->vpo_isplus) {
			if (imm_reset_bus(&vpo->vpo_io)) {
				ccb->ccb_h.status = CAM_REQ_CMP_ERR;
				xpt_done(ccb);
				return;
			}
		} else {
			if (vpoio_reset_bus(&vpo->vpo_io)) {
				ccb->ccb_h.status = CAM_REQ_CMP_ERR;
				xpt_done(ccb);
				return;
			}
		}
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_PATH_INQ:	/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

#ifdef VP0_DEBUG
		printf("vpo%d: XPT_PATH_INQ request\n", vpo->vpo_unit);
#endif
		cpi->version_num = 1;	/* XXX??? */
		cpi->hba_inquiry = 0;
		cpi->target_sprt = 0;
		cpi->hba_misc = 0;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = 7;
		cpi->max_lun = 0;
		cpi->initiator_id = VP0_INITIATOR;
		cpi->bus_id = sim->bus_id;
		cpi->base_transfer_speed = 93;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "Iomega", HBA_IDLEN);
		strncpy(cpi->dev_name, sim->sim_name, DEV_IDLEN);
		cpi->unit_number = sim->unit_number;

		cpi->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	}
	return;
}
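/*
 * CAM action entry point for the CTL CAM SIM frontend: SCSI I/O, aborts and
 * resets are converted into ctl_io requests and queued to CTL; the SIM
 * pretends to be a Fibre Channel controller for path inquiry purposes.
 */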
void
cfcs_action(struct cam_sim *sim, union ccb *ccb)
{
	struct cfcs_softc *softc;
	int err;

	softc = (struct cfcs_softc *)cam_sim_softc(sim);
	mtx_assert(&softc->lock, MA_OWNED);

	switch (ccb->ccb_h.func_code) {
	case XPT_SCSI_IO: {
		union ctl_io *io;
		struct ccb_scsiio *csio;

		csio = &ccb->csio;

		/*
		 * Catch CCB flags, like physical address flags, that
		 * indicate situations we currently can't handle.
		 */
		if (ccb->ccb_h.flags & CFCS_BAD_CCB_FLAGS) {
			ccb->ccb_h.status = CAM_REQ_INVALID;
			printf("%s: bad CCB flags %#x (all flags %#x)\n",
			    __func__, ccb->ccb_h.flags & CFCS_BAD_CCB_FLAGS,
			    ccb->ccb_h.flags);
			xpt_done(ccb);
			return;
		}

		/*
		 * If we aren't online, there are no devices to see.
		 */
		if (softc->online == 0) {
			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
			xpt_done(ccb);
			return;
		}

		io = ctl_alloc_io_nowait(softc->port.ctl_pool_ref);
		if (io == NULL) {
			printf("%s: can't allocate ctl_io\n", __func__);
			ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN;
			xpt_freeze_devq(ccb->ccb_h.path, 1);
			xpt_done(ccb);
			return;
		}
		ctl_zero_io(io);
		/* Save pointers on both sides */
		io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = ccb;
		ccb->ccb_h.io_ptr = io;

		/*
		 * Only SCSI I/O comes down this path, resets, etc. come
		 * down via the XPT_RESET_BUS/LUN CCBs below.
		 */
		io->io_hdr.io_type = CTL_IO_SCSI;
		io->io_hdr.nexus.initid.id = 1;
		io->io_hdr.nexus.targ_port = softc->port.targ_port;
		/*
		 * XXX KDM how do we handle target IDs?
		 */
		io->io_hdr.nexus.targ_target.id = ccb->ccb_h.target_id;
		io->io_hdr.nexus.targ_lun = ccb->ccb_h.target_lun;
		/*
		 * This tag scheme isn't the best, since we could in theory
		 * have a very long-lived I/O and tag collision, especially
		 * in a high I/O environment.  But it should work well
		 * enough for now.  Since we're using unsigned ints,
		 * they'll just wrap around.
		 */
		io->scsiio.tag_num = softc->cur_tag_num++;
		csio->tag_id = io->scsiio.tag_num;
		switch (csio->tag_action) {
		case CAM_TAG_ACTION_NONE:
			io->scsiio.tag_type = CTL_TAG_UNTAGGED;
			break;
		case MSG_SIMPLE_TASK:
			io->scsiio.tag_type = CTL_TAG_SIMPLE;
			break;
		case MSG_HEAD_OF_QUEUE_TASK:
			io->scsiio.tag_type = CTL_TAG_HEAD_OF_QUEUE;
			break;
		case MSG_ORDERED_TASK:
			io->scsiio.tag_type = CTL_TAG_ORDERED;
			break;
		case MSG_ACA_TASK:
			io->scsiio.tag_type = CTL_TAG_ACA;
			break;
		default:
			io->scsiio.tag_type = CTL_TAG_UNTAGGED;
			printf("%s: unhandled tag type %#x!!\n", __func__,
			    csio->tag_action);
			break;
		}
		if (csio->cdb_len > sizeof(io->scsiio.cdb)) {
			printf("%s: WARNING: CDB len %d > ctl_io space %zd\n",
			    __func__, csio->cdb_len, sizeof(io->scsiio.cdb));
		}
		io->scsiio.cdb_len = min(csio->cdb_len, sizeof(io->scsiio.cdb));
		bcopy(csio->cdb_io.cdb_bytes, io->scsiio.cdb,
		    io->scsiio.cdb_len);

		ccb->ccb_h.status |= CAM_SIM_QUEUED;
		err = ctl_queue(io);
		if (err != CTL_RETVAL_COMPLETE) {
			printf("%s: func %d: error %d returned by "
			    "ctl_queue()!\n", __func__,
			    ccb->ccb_h.func_code, err);
			ctl_free_io(io);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(ccb);
			return;
		}
		break;
	}
	case XPT_ABORT: {
		union ctl_io *io;
		union ccb *abort_ccb;

		abort_ccb = ccb->cab.abort_ccb;

		if (abort_ccb->ccb_h.func_code != XPT_SCSI_IO) {
			ccb->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(ccb);
			return;
		}

		/*
		 * If we aren't online, there are no devices to talk to.
		 */
		if (softc->online == 0) {
			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
			xpt_done(ccb);
			return;
		}

		io = ctl_alloc_io_nowait(softc->port.ctl_pool_ref);
		if (io == NULL) {
			ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN;
			xpt_freeze_devq(ccb->ccb_h.path, 1);
			xpt_done(ccb);
			return;
		}

		ctl_zero_io(io);
		/* Save pointers on both sides */
		io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = ccb;
		ccb->ccb_h.io_ptr = io;

		io->io_hdr.io_type = CTL_IO_TASK;
		io->io_hdr.nexus.initid.id = 1;
		io->io_hdr.nexus.targ_port = softc->port.targ_port;
		io->io_hdr.nexus.targ_target.id = ccb->ccb_h.target_id;
		io->io_hdr.nexus.targ_lun = ccb->ccb_h.target_lun;
		io->taskio.task_action = CTL_TASK_ABORT_TASK;
		io->taskio.tag_num = abort_ccb->csio.tag_id;
		switch (abort_ccb->csio.tag_action) {
		case CAM_TAG_ACTION_NONE:
			io->taskio.tag_type = CTL_TAG_UNTAGGED;
			break;
		case MSG_SIMPLE_TASK:
			io->taskio.tag_type = CTL_TAG_SIMPLE;
			break;
		case MSG_HEAD_OF_QUEUE_TASK:
			io->taskio.tag_type = CTL_TAG_HEAD_OF_QUEUE;
			break;
		case MSG_ORDERED_TASK:
			io->taskio.tag_type = CTL_TAG_ORDERED;
			break;
		case MSG_ACA_TASK:
			io->taskio.tag_type = CTL_TAG_ACA;
			break;
		default:
			io->taskio.tag_type = CTL_TAG_UNTAGGED;
			printf("%s: unhandled tag type %#x!!\n", __func__,
			    abort_ccb->csio.tag_action);
			break;
		}
		err = ctl_queue(io);
		if (err != CTL_RETVAL_COMPLETE) {
			printf("%s func %d: error %d returned by "
			    "ctl_queue()!\n", __func__,
			    ccb->ccb_h.func_code, err);
			ctl_free_io(io);
		}
		break;
	}
	case XPT_GET_TRAN_SETTINGS: {
		struct ccb_trans_settings *cts;
		struct ccb_trans_settings_scsi *scsi;
		struct ccb_trans_settings_fc *fc;

		cts = &ccb->cts;
		scsi = &cts->proto_specific.scsi;
		fc = &cts->xport_specific.fc;

		cts->protocol = PROTO_SCSI;
		cts->protocol_version = SCSI_REV_SPC2;
		cts->transport = XPORT_FC;
		cts->transport_version = 0;

		scsi->valid = CTS_SCSI_VALID_TQ;
		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
		fc->valid = CTS_FC_VALID_SPEED;
		fc->bitrate = 800000;
		fc->wwnn = softc->wwnn;
		fc->wwpn = softc->wwpn;
		fc->port = softc->port.targ_port;
		fc->valid |= CTS_FC_VALID_WWNN | CTS_FC_VALID_WWPN |
		    CTS_FC_VALID_PORT;
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_SET_TRAN_SETTINGS:
		/* XXX KDM should we actually do something here? */
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	case XPT_RESET_BUS:
	case XPT_RESET_DEV: {
		union ctl_io *io;

		/*
		 * If we aren't online, there are no devices to talk to.
		 */
		if (softc->online == 0) {
			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
			xpt_done(ccb);
			return;
		}

		io = ctl_alloc_io_nowait(softc->port.ctl_pool_ref);
		if (io == NULL) {
			ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN;
			xpt_freeze_devq(ccb->ccb_h.path, 1);
			xpt_done(ccb);
			return;
		}

		ctl_zero_io(io);
		/* Save pointers on both sides */
		if (ccb->ccb_h.func_code == XPT_RESET_DEV)
			io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = ccb;
		ccb->ccb_h.io_ptr = io;

		io->io_hdr.io_type = CTL_IO_TASK;
		io->io_hdr.nexus.initid.id = 0;
		io->io_hdr.nexus.targ_port = softc->port.targ_port;
		io->io_hdr.nexus.targ_target.id = ccb->ccb_h.target_id;
		io->io_hdr.nexus.targ_lun = ccb->ccb_h.target_lun;
		if (ccb->ccb_h.func_code == XPT_RESET_BUS)
			io->taskio.task_action = CTL_TASK_BUS_RESET;
		else
			io->taskio.task_action = CTL_TASK_LUN_RESET;

		err = ctl_queue(io);
		if (err != CTL_RETVAL_COMPLETE) {
			printf("%s func %d: error %d returned by "
			    "ctl_queue()!\n", __func__,
			    ccb->ccb_h.func_code, err);
			ctl_free_io(io);
		}
		break;
	}
	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, 1);
		xpt_done(ccb);
		break;
	case XPT_PATH_INQ: {
		struct ccb_pathinq *cpi;

		cpi = &ccb->cpi;

		cpi->version_num = 0;
		cpi->hba_inquiry = PI_TAG_ABLE;
		cpi->target_sprt = 0;
		cpi->hba_misc = 0;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = 1;
		cpi->max_lun = 1024;
		/* Do we really have a limit? */
		cpi->maxio = 1024 * 1024;
		cpi->async_flags = 0;
		cpi->hpath_id = 0;
		cpi->initiator_id = 0;

		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "FreeBSD", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = 0;
		cpi->bus_id = 0;
		cpi->base_transfer_speed = 800000;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_SPC2;
		/*
		 * Pretend to be Fibre Channel.
		 */
		cpi->transport = XPORT_FC;
		cpi->transport_version = 0;
		cpi->xport_specific.fc.wwnn = softc->wwnn;
		cpi->xport_specific.fc.wwpn = softc->wwpn;
		cpi->xport_specific.fc.port = softc->port.targ_port;
		cpi->xport_specific.fc.bitrate = 8 * 1000 * 1000;
		cpi->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	default:
		ccb->ccb_h.status = CAM_PROVIDE_FAIL;
		printf("%s: unsupported CCB type %#x\n", __func__,
		    ccb->ccb_h.func_code);
		xpt_done(ccb);
		break;
	}
}
/*
 * Action function - dispatch command
 */
static void
sili_xpt_action(struct cam_sim *sim, union ccb *ccb)
{
	struct sili_port *ap;
	struct ata_port *at, *atx;
	struct ccb_hdr *ccbh;
	int unit;

	/* XXX lock */
	ap = cam_sim_softc(sim);
	at = ap->ap_ata;
	atx = NULL;
	KKASSERT(ap != NULL);
	ccbh = &ccb->ccb_h;
	unit = cam_sim_unit(sim);

	/*
	 * Early failure checks.  These checks do not apply to XPT_PATH_INQ,
	 * otherwise the bus rescan will not remove the dead devices when
	 * unplugging a PM.
	 *
	 * For non-wildcards we have one target (0) and one lun (0),
	 * unless we have a port multiplier.
	 *
	 * A wildcard target indicates only the general bus is being
	 * probed.
	 *
	 * Calculate at and atx.  at is always non-NULL.  atx is only
	 * NULL for direct-attached devices.  It will be non-NULL for
	 * devices behind a port multiplier.
	 *
	 * XXX What do we do with a LUN wildcard?
	 */
	if (ccbh->target_id != CAM_TARGET_WILDCARD &&
	    ccbh->func_code != XPT_PATH_INQ) {
		if (ap->ap_type == ATA_PORT_T_NONE) {
			ccbh->status = CAM_DEV_NOT_THERE;
			xpt_done(ccb);
			return;
		}
		if (ccbh->target_id < 0 ||
		    ccbh->target_id >= ap->ap_pmcount) {
			ccbh->status = CAM_DEV_NOT_THERE;
			xpt_done(ccb);
			return;
		}
		at += ccbh->target_id;
		if (ap->ap_type == ATA_PORT_T_PM)
			atx = at;
		if (ccbh->target_lun != CAM_LUN_WILDCARD &&
		    ccbh->target_lun) {
			ccbh->status = CAM_DEV_NOT_THERE;
			xpt_done(ccb);
			return;
		}
	}

	/*
	 * Switch on the meta XPT command
	 */
	switch (ccbh->func_code) {
	case XPT_ENG_EXEC:
		/*
		 * This routine is called after a port multiplier has been
		 * probed.
		 */
		ccbh->status = CAM_REQ_CMP;
		sili_os_lock_port(ap);
		sili_port_state_machine(ap, 0);
		sili_os_unlock_port(ap);
		xpt_done(ccb);
		sili_xpt_rescan(ap);
		break;
	case XPT_PATH_INQ:
		/*
		 * This command always succeeds, otherwise the bus scan
		 * will not detach dead devices.
		 */
		ccb->cpi.version_num = 1;
		ccb->cpi.hba_inquiry = 0;
		ccb->cpi.target_sprt = 0;
		ccb->cpi.hba_misc = PIM_SEQSCAN;
		ccb->cpi.hba_eng_cnt = 0;
		bzero(ccb->cpi.vuhba_flags, sizeof(ccb->cpi.vuhba_flags));
		ccb->cpi.max_target = SILI_MAX_PMPORTS - 1;
		ccb->cpi.max_lun = 0;
		ccb->cpi.async_flags = 0;
		ccb->cpi.hpath_id = 0;
		ccb->cpi.initiator_id = SILI_MAX_PMPORTS - 1;
		ccb->cpi.unit_number = cam_sim_unit(sim);
		ccb->cpi.bus_id = cam_sim_bus(sim);
		ccb->cpi.base_transfer_speed = 150000;
		ccb->cpi.transport = XPORT_SATA;
		ccb->cpi.transport_version = 1;
		ccb->cpi.protocol = PROTO_SCSI;
		ccb->cpi.protocol_version = SCSI_REV_2;

		ccbh->status = CAM_REQ_CMP;
		if (ccbh->target_id == CAM_TARGET_WILDCARD) {
			sili_os_lock_port(ap);
			sili_port_state_machine(ap, 0);
			sili_os_unlock_port(ap);
		} else {
			switch (sili_pread(ap, SILI_PREG_SSTS) &
			    SILI_PREG_SSTS_SPD) {
			case SILI_PREG_SSTS_SPD_GEN1:
				ccb->cpi.base_transfer_speed = 150000;
				break;
			case SILI_PREG_SSTS_SPD_GEN2:
				ccb->cpi.base_transfer_speed = 300000;
				break;
			default:
				/* unknown */
				ccb->cpi.base_transfer_speed = 1000;
				break;
			}
#if 0
			if (ap->ap_type == ATA_PORT_T_NONE)
				ccbh->status = CAM_DEV_NOT_THERE;
#endif
		}
		xpt_done(ccb);
		break;
	case XPT_RESET_DEV:
		sili_os_lock_port(ap);
		if (ap->ap_type == ATA_PORT_T_NONE) {
			ccbh->status = CAM_DEV_NOT_THERE;
		} else {
			sili_port_reset(ap, atx, 0);
			ccbh->status = CAM_REQ_CMP;
		}
		sili_os_unlock_port(ap);
		xpt_done(ccb);
		break;
	case XPT_RESET_BUS:
		sili_os_lock_port(ap);
		sili_port_reset(ap, NULL, 1);
		sili_os_unlock_port(ap);
		ccbh->status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	case XPT_SET_TRAN_SETTINGS:
		ccbh->status = CAM_FUNC_NOTAVAIL;
		xpt_done(ccb);
		break;
	case XPT_GET_TRAN_SETTINGS:
		ccb->cts.protocol = PROTO_SCSI;
		ccb->cts.protocol_version = SCSI_REV_2;
		ccb->cts.transport = XPORT_SATA;
		ccb->cts.transport_version = XPORT_VERSION_UNSPECIFIED;
		ccb->cts.proto_specific.valid = 0;
		ccb->cts.xport_specific.valid = 0;
		ccbh->status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, 1);
		xpt_done(ccb);
		break;
	case XPT_SCSI_IO:
		/*
		 * Our parallel startup code might have only probed through
		 * to the IDENT, so do the last step if necessary.
		 */
		if (at->at_probe == ATA_PROBE_NEED_IDENT)
			sili_cam_probe(ap, atx);
		if (at->at_probe != ATA_PROBE_GOOD) {
			ccbh->status = CAM_DEV_NOT_THERE;
			xpt_done(ccb);
			break;
		}
		switch (at->at_type) {
		case ATA_PORT_T_DISK:
			sili_xpt_scsi_disk_io(ap, atx, ccb);
			break;
		case ATA_PORT_T_ATAPI:
			sili_xpt_scsi_atapi_io(ap, atx, ccb);
			break;
		default:
			ccbh->status = CAM_REQ_INVALID;
			xpt_done(ccb);
			break;
		}
		break;
	default:
		ccbh->status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	}
}
/** * mrsas_startio: SCSI IO entry point * input: Adapter instance soft state * pointer to CAM Control Block * * This function is the SCSI IO entry point; it initiates IO processing. * It copies the IO and, depending on whether the IO is a read/write or an * inquiry, calls mrsas_build_ldio() or mrsas_build_dcdb(), respectively. * It returns 0 if the command is sent to firmware successfully, otherwise 1. */ static int32_t mrsas_startio(struct mrsas_softc *sc, struct cam_sim *sim, union ccb *ccb) { struct mrsas_mpt_cmd *cmd; struct ccb_hdr *ccb_h = &(ccb->ccb_h); struct ccb_scsiio *csio = &(ccb->csio); MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc; if ((csio->cdb_io.cdb_bytes[0]) == SYNCHRONIZE_CACHE){ ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); return(0); } ccb_h->status |= CAM_SIM_QUEUED; cmd = mrsas_get_mpt_cmd(sc); if (!cmd) { ccb_h->status |= CAM_REQUEUE_REQ; xpt_done(ccb); return(0); } if ((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) { if(ccb_h->flags & CAM_DIR_IN) cmd->flags |= MRSAS_DIR_IN; if(ccb_h->flags & CAM_DIR_OUT) cmd->flags |= MRSAS_DIR_OUT; } else cmd->flags = MRSAS_DIR_NONE; /* no data */ /* For FreeBSD 10.0 and higher */ #if 0 /* XXX (__FreeBSD_version >= 1000000) */ /* * * XXX We don't yet support physical addresses here. */ switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) { case CAM_DATA_PADDR: case CAM_DATA_SG_PADDR: kprintf("%s: physical addresses not supported\n", __func__); mrsas_release_mpt_cmd(cmd); ccb_h->status = CAM_REQ_INVALID; ccb_h->status &= ~CAM_SIM_QUEUED; goto done; case CAM_DATA_SG: kprintf("%s: scatter gather is not supported\n", __func__); mrsas_release_mpt_cmd(cmd); ccb_h->status = CAM_REQ_INVALID; goto done; case CAM_DATA_VADDR: if (csio->dxfer_len > MRSAS_MAX_IO_SIZE) { mrsas_release_mpt_cmd(cmd); ccb_h->status = CAM_REQ_TOO_BIG; goto done; } cmd->length = csio->dxfer_len; if (cmd->length) cmd->data = csio->data_ptr; break; default: ccb->ccb_h.status = CAM_REQ_INVALID; goto done; } #else if (!(ccb_h->flags & CAM_DATA_PHYS)) { //Virtual data address if (!(ccb_h->flags & CAM_SCATTER_VALID)) { if (csio->dxfer_len > MRSAS_MAX_IO_SIZE) { mrsas_release_mpt_cmd(cmd); ccb_h->status = CAM_REQ_TOO_BIG; goto done; } cmd->length = csio->dxfer_len; if (cmd->length) cmd->data = csio->data_ptr; } else { mrsas_release_mpt_cmd(cmd); ccb_h->status = CAM_REQ_INVALID; goto done; } } else { //Data addresses are physical.
mrsas_release_mpt_cmd(cmd); ccb_h->status = CAM_REQ_INVALID; ccb_h->status &= ~CAM_SIM_QUEUED; goto done; } #endif /* save ccb ptr */ cmd->ccb_ptr = ccb; req_desc = mrsas_get_request_desc(sc, (cmd->index)-1); if (!req_desc) { device_printf(sc->mrsas_dev, "Cannot get request_descriptor.\n"); return (FAIL); } memset(req_desc, 0, sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION)); cmd->request_desc = req_desc; if (ccb_h->flags & CAM_CDB_POINTER) bcopy(csio->cdb_io.cdb_ptr, cmd->io_request->CDB.CDB32, csio->cdb_len); else bcopy(csio->cdb_io.cdb_bytes, cmd->io_request->CDB.CDB32, csio->cdb_len); lockmgr(&sc->raidmap_lock, LK_EXCLUSIVE); if (mrsas_ldio_inq(sim, ccb)) { if (mrsas_build_ldio(sc, cmd, ccb)){ device_printf(sc->mrsas_dev, "Build LDIO failed.\n"); lockmgr(&sc->raidmap_lock, LK_RELEASE); return(1); } } else { if (mrsas_build_dcdb(sc, cmd, ccb, sim)) { device_printf(sc->mrsas_dev, "Build DCDB failed.\n"); lockmgr(&sc->raidmap_lock, LK_RELEASE); return(1); } } lockmgr(&sc->raidmap_lock, LK_RELEASE); if (cmd->flags == MRSAS_DIR_IN) //from device cmd->io_request->Control |= MPI2_SCSIIO_CONTROL_READ; else if (cmd->flags == MRSAS_DIR_OUT) //to device cmd->io_request->Control |= MPI2_SCSIIO_CONTROL_WRITE; cmd->io_request->SGLFlags = MPI2_SGE_FLAGS_64_BIT_ADDRESSING; cmd->io_request->SGLOffset0 = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL)/4; cmd->io_request->SenseBufferLowAddress = cmd->sense_phys_addr; cmd->io_request->SenseBufferLength = MRSAS_SCSI_SENSE_BUFFERSIZE; req_desc = cmd->request_desc; req_desc->SCSIIO.SMID = cmd->index; /* * Start timer for IO timeout. The default timeout value is 90 seconds. */ callout_reset(&cmd->cm_callout, (sc->mrsas_io_timeout * hz) / 1000, mrsas_scsiio_timeout, cmd); atomic_inc(&sc->fw_outstanding); if(atomic_read(&sc->fw_outstanding) > sc->io_cmds_highwater) sc->io_cmds_highwater++; mrsas_fire_cmd(sc, req_desc->addr.u.low, req_desc->addr.u.high); return(0); done: xpt_done(ccb); return(0); }
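/*
 * The direction setup in mrsas_startio() follows a common CAM idiom: check
 * CAM_DIR_MASK for a data phase, then translate CAM_DIR_IN/OUT into driver
 * flags.  A minimal sketch, assuming the driver's MRSAS_DIR_* values; the
 * helper name is illustrative.
 */
static void
mrsas_set_dir(struct mrsas_mpt_cmd *cmd, struct ccb_hdr *ccb_h)
{
	if ((ccb_h->flags & CAM_DIR_MASK) == CAM_DIR_NONE) {
		cmd->flags = MRSAS_DIR_NONE;	/* no data phase */
		return;
	}
	if (ccb_h->flags & CAM_DIR_IN)
		cmd->flags |= MRSAS_DIR_IN;	/* device to host */
	if (ccb_h->flags & CAM_DIR_OUT)
		cmd->flags |= MRSAS_DIR_OUT;	/* host to device */
}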
void isci_io_request_complete(SCI_CONTROLLER_HANDLE_T scif_controller, SCI_REMOTE_DEVICE_HANDLE_T remote_device, struct ISCI_IO_REQUEST *isci_request, SCI_IO_STATUS completion_status) { struct ISCI_CONTROLLER *isci_controller; struct ISCI_REMOTE_DEVICE *isci_remote_device; union ccb *ccb; BOOL complete_ccb; complete_ccb = TRUE; isci_controller = (struct ISCI_CONTROLLER *) sci_object_get_association(scif_controller); isci_remote_device = (struct ISCI_REMOTE_DEVICE *) sci_object_get_association(remote_device); ccb = isci_request->ccb; ccb->ccb_h.status &= ~CAM_STATUS_MASK; switch (completion_status) { case SCI_IO_SUCCESS: case SCI_IO_SUCCESS_COMPLETE_BEFORE_START: #if __FreeBSD_version >= 900026 if (ccb->ccb_h.func_code == XPT_SMP_IO) { void *smp_response = scif_io_request_get_response_iu_address( isci_request->sci_object); memcpy(ccb->smpio.smp_response, smp_response, ccb->smpio.smp_response_len); } #endif ccb->ccb_h.status |= CAM_REQ_CMP; break; case SCI_IO_SUCCESS_IO_DONE_EARLY: ccb->ccb_h.status |= CAM_REQ_CMP; ccb->csio.resid = ccb->csio.dxfer_len - scif_io_request_get_number_of_bytes_transferred( isci_request->sci_object); break; case SCI_IO_FAILURE_RESPONSE_VALID: { SCI_SSP_RESPONSE_IU_T * response_buffer; uint32_t sense_length; int error_code, sense_key, asc, ascq; struct ccb_scsiio *csio = &ccb->csio; response_buffer = (SCI_SSP_RESPONSE_IU_T *) scif_io_request_get_response_iu_address( isci_request->sci_object); sense_length = sci_ssp_get_sense_data_length( response_buffer->sense_data_length); sense_length = MIN(csio->sense_len, sense_length); memcpy(&csio->sense_data, response_buffer->data, sense_length); csio->sense_resid = csio->sense_len - sense_length; csio->scsi_status = response_buffer->status; ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR; ccb->ccb_h.status |= CAM_AUTOSNS_VALID; scsi_extract_sense( &csio->sense_data, &error_code, &sense_key, &asc, &ascq ); isci_log_message(1, "ISCI", "isci: bus=%x target=%x lun=%x cdb[0]=%x status=%x key=%x asc=%x ascq=%x\n", ccb->ccb_h.path_id, ccb->ccb_h.target_id, ccb->ccb_h.target_lun, csio->cdb_io.cdb_bytes[0], csio->scsi_status, sense_key, asc, ascq); break; } case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED: isci_remote_device_reset(isci_remote_device, NULL); ccb->ccb_h.status |= CAM_REQ_TERMIO; isci_log_message(0, "ISCI", "isci: bus=%x target=%x lun=%x cdb[0]=%x remote device reset required\n", ccb->ccb_h.path_id, ccb->ccb_h.target_id, ccb->ccb_h.target_lun, ccb->csio.cdb_io.cdb_bytes[0]); break; case SCI_IO_FAILURE_TERMINATED: ccb->ccb_h.status |= CAM_REQ_TERMIO; isci_log_message(0, "ISCI", "isci: bus=%x target=%x lun=%x cdb[0]=%x terminated\n", ccb->ccb_h.path_id, ccb->ccb_h.target_id, ccb->ccb_h.target_lun, ccb->csio.cdb_io.cdb_bytes[0]); break; case SCI_IO_FAILURE_INVALID_STATE: case SCI_IO_FAILURE_INSUFFICIENT_RESOURCES: complete_ccb = FALSE; break; case SCI_IO_FAILURE_INVALID_REMOTE_DEVICE: ccb->ccb_h.status |= CAM_DEV_NOT_THERE; break; case SCI_IO_FAILURE_NO_NCQ_TAG_AVAILABLE: { struct ccb_relsim ccb_relsim; struct cam_path *path; xpt_create_path(&path, NULL, cam_sim_path(isci_controller->sim), isci_remote_device->index, 0); xpt_setup_ccb(&ccb_relsim.ccb_h, path, 5); ccb_relsim.ccb_h.func_code = XPT_REL_SIMQ; ccb_relsim.ccb_h.flags = CAM_DEV_QFREEZE; ccb_relsim.release_flags = RELSIM_ADJUST_OPENINGS; ccb_relsim.openings = scif_remote_device_get_max_queue_depth(remote_device); xpt_action((union ccb *)&ccb_relsim); xpt_free_path(path); complete_ccb = FALSE; } break; case SCI_IO_FAILURE: case SCI_IO_FAILURE_REQUIRES_SCSI_ABORT: case 
SCI_IO_FAILURE_UNSUPPORTED_PROTOCOL: case SCI_IO_FAILURE_PROTOCOL_VIOLATION: case SCI_IO_FAILURE_INVALID_PARAMETER_VALUE: case SCI_IO_FAILURE_CONTROLLER_SPECIFIC_ERR: default: isci_log_message(1, "ISCI", "isci: bus=%x target=%x lun=%x cdb[0]=%x completion status=%x\n", ccb->ccb_h.path_id, ccb->ccb_h.target_id, ccb->ccb_h.target_lun, ccb->csio.cdb_io.cdb_bytes[0], completion_status); ccb->ccb_h.status |= CAM_REQ_CMP_ERR; break; } callout_stop(&isci_request->parent.timer); bus_dmamap_sync(isci_request->parent.dma_tag, isci_request->parent.dma_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(isci_request->parent.dma_tag, isci_request->parent.dma_map); isci_request->ccb = NULL; sci_pool_put(isci_controller->request_pool, (struct ISCI_REQUEST *)isci_request); if (complete_ccb) { if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { /* ccb will be completed with some type of non-success * status. So temporarily freeze the queue until the * upper layers can act on the status. The * CAM_DEV_QFRZN flag will then release the queue * after the status is acted upon. */ ccb->ccb_h.status |= CAM_DEV_QFRZN; xpt_freeze_devq(ccb->ccb_h.path, 1); } if (ccb->ccb_h.status & CAM_SIM_QUEUED) { KASSERT(ccb == isci_remote_device->queued_ccb_in_progress, ("multiple internally queued ccbs in flight")); TAILQ_REMOVE(&isci_remote_device->queued_ccbs, &ccb->ccb_h, sim_links.tqe); ccb->ccb_h.status &= ~CAM_SIM_QUEUED; /* * This CCB that was in the queue was completed, so * set the in_progress pointer to NULL denoting that * we can retry another CCB from the queue. We only * allow one CCB at a time from the queue to be * in progress so that we can effectively maintain * ordering. */ isci_remote_device->queued_ccb_in_progress = NULL; } if (isci_remote_device->frozen_lun_mask != 0) { isci_remote_device_release_device_queue(isci_remote_device); } xpt_done(ccb); if (isci_controller->is_frozen == TRUE) { isci_controller->is_frozen = FALSE; xpt_release_simq(isci_controller->sim, TRUE); } } else { isci_remote_device_freeze_lun_queue(isci_remote_device, ccb->ccb_h.target_lun); if (ccb->ccb_h.status & CAM_SIM_QUEUED) { KASSERT(ccb == isci_remote_device->queued_ccb_in_progress, ("multiple internally queued ccbs in flight")); /* * Do nothing, CCB is already on the device's queue. * We leave it on the queue, to be retried again * next time a CCB on this device completes, or we * get a ready notification for this device. */ isci_log_message(1, "ISCI", "already queued %p %x\n", ccb, ccb->csio.cdb_io.cdb_bytes[0]); isci_remote_device->queued_ccb_in_progress = NULL; } else { isci_log_message(1, "ISCI", "queue %p %x\n", ccb, ccb->csio.cdb_io.cdb_bytes[0]); ccb->ccb_h.status |= CAM_SIM_QUEUED; TAILQ_INSERT_TAIL(&isci_remote_device->queued_ccbs, &ccb->ccb_h, sim_links.tqe); } } }
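/*
 * The completion path above uses the standard CAM error-freeze idiom: any
 * CCB finished with a non-success status freezes the device queue and
 * carries CAM_DEV_QFRZN, so the upper layer releases the queue once it has
 * acted on the status.  A minimal sketch; the helper name is illustrative.
 */
static void
isci_done_with_freeze(union ccb *ccb)
{
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
	}
	xpt_done(ccb);
}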
static int32_t tws_execute_scsi(struct tws_softc *sc, union ccb *ccb) { struct tws_command_packet *cmd_pkt; struct tws_request *req; struct ccb_hdr *ccb_h = &(ccb->ccb_h); struct ccb_scsiio *csio = &(ccb->csio); int error; u_int16_t lun; mtx_assert(&sc->sim_lock, MA_OWNED); if (ccb_h->target_id >= TWS_MAX_NUM_UNITS) { TWS_TRACE_DEBUG(sc, "target id too big", ccb_h->target_id, ccb_h->target_lun); ccb_h->status |= CAM_TID_INVALID; xpt_done(ccb); return(0); } if (ccb_h->target_lun >= TWS_MAX_NUM_LUNS) { TWS_TRACE_DEBUG(sc, "target lun too big", ccb_h->target_id, ccb_h->target_lun); ccb_h->status |= CAM_LUN_INVALID; xpt_done(ccb); return(0); } if(ccb_h->flags & CAM_CDB_PHYS) { TWS_TRACE_DEBUG(sc, "cdb phys", ccb_h->target_id, ccb_h->target_lun); ccb_h->status = CAM_REQ_INVALID; xpt_done(ccb); return(0); } /* * We are going to work on this request. Mark it as enqueued (though * we don't actually queue it...) */ ccb_h->status |= CAM_SIM_QUEUED; req = tws_get_request(sc, TWS_REQ_TYPE_SCSI_IO); if ( !req ) { TWS_TRACE_DEBUG(sc, "no reqs", ccb_h->target_id, ccb_h->target_lun); ccb_h->status |= CAM_REQUEUE_REQ; xpt_done(ccb); return(0); } if((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) { if(ccb_h->flags & CAM_DIR_IN) req->flags |= TWS_DIR_IN; if(ccb_h->flags & CAM_DIR_OUT) req->flags |= TWS_DIR_OUT; } else { req->flags = TWS_DIR_NONE; /* no data */ } req->type = TWS_REQ_TYPE_SCSI_IO; req->cb = tws_scsi_complete; cmd_pkt = req->cmd_pkt; /* cmd_pkt->hdr.header_desc.size_header = 128; */ cmd_pkt->cmd.pkt_a.res__opcode = TWS_FW_CMD_EXECUTE_SCSI; cmd_pkt->cmd.pkt_a.unit = ccb_h->target_id; cmd_pkt->cmd.pkt_a.status = 0; cmd_pkt->cmd.pkt_a.sgl_offset = 16; /* lower nibble */ lun = ccb_h->target_lun & 0XF; lun = lun << 12; cmd_pkt->cmd.pkt_a.lun_l4__req_id = lun | req->request_id; /* upper nibble */ lun = ccb_h->target_lun & 0XF0; lun = lun << 8; cmd_pkt->cmd.pkt_a.lun_h4__sgl_entries = lun; #ifdef TWS_DEBUG if ( csio->cdb_len > 16 ) TWS_TRACE(sc, "cdb len too big", ccb_h->target_id, csio->cdb_len); #endif if(ccb_h->flags & CAM_CDB_POINTER) bcopy(csio->cdb_io.cdb_ptr, cmd_pkt->cmd.pkt_a.cdb, csio->cdb_len); else bcopy(csio->cdb_io.cdb_bytes, cmd_pkt->cmd.pkt_a.cdb, csio->cdb_len); req->data = ccb; req->flags |= TWS_DATA_CCB; /* save ccb ptr */ req->ccb_ptr = ccb; /* * tws_map_load_data_callback will fill in the SGL, * and submit the I/O. */ sc->stats.scsi_ios++; ccb_h->timeout_ch = timeout(tws_timeout, req, (ccb_h->timeout * hz)/1000); error = tws_map_request(sc, req); return(error); }
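/*
 * Worked example of the LUN packing above.  The firmware packet splits the
 * 8-bit LUN across two 16-bit fields: bits 3:0 sit above the request id,
 * bits 7:4 above the SGL entry count.  A sketch with an illustrative helper
 * name:
 */
static void
tws_pack_lun(struct tws_command_packet *cmd_pkt, u_int16_t lun,
    u_int16_t request_id)
{
	/* e.g. lun 0x35, request_id 0x0042 -> lun_l4__req_id 0x5042 */
	cmd_pkt->cmd.pkt_a.lun_l4__req_id = ((lun & 0xF) << 12) | request_id;
	/* e.g. lun 0x35 -> lun_h4__sgl_entries 0x3000 */
	cmd_pkt->cmd.pkt_a.lun_h4__sgl_entries = (lun & 0xF0) << 8;
}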
void isci_io_request_execute_smp_io(union ccb *ccb, struct ISCI_CONTROLLER *controller) { SCI_STATUS status; target_id_t target_id = ccb->ccb_h.target_id; struct ISCI_REQUEST *request; struct ISCI_IO_REQUEST *io_request; SCI_REMOTE_DEVICE_HANDLE_T smp_device_handle; struct ISCI_REMOTE_DEVICE *end_device = controller->remote_device[target_id]; /* SMP commands are sent to an end device, because SMP devices are not * exposed to the kernel. It is our responsibility to use this method * to get the SMP device that contains the specified end device. If * the device is direct-attached, the handle will come back NULL, and * we'll just fail the SMP_IO with DEV_NOT_THERE. */ scif_remote_device_get_containing_device(end_device->sci_object, &smp_device_handle); if (smp_device_handle == NULL) { ccb->ccb_h.status &= ~CAM_SIM_QUEUED; ccb->ccb_h.status &= ~CAM_STATUS_MASK; ccb->ccb_h.status |= CAM_DEV_NOT_THERE; xpt_done(ccb); return; } if (sci_pool_empty(controller->request_pool)) { ccb->ccb_h.status &= ~CAM_SIM_QUEUED; ccb->ccb_h.status &= ~CAM_STATUS_MASK; ccb->ccb_h.status |= CAM_REQUEUE_REQ; xpt_freeze_simq(controller->sim, 1); controller->is_frozen = TRUE; xpt_done(ccb); return; } ASSERT(end_device->is_resetting == FALSE); sci_pool_get(controller->request_pool, request); io_request = (struct ISCI_IO_REQUEST *)request; io_request->ccb = ccb; io_request->parent.remote_device_handle = smp_device_handle; status = isci_smp_request_construct(io_request); if (status != SCI_SUCCESS) { isci_io_request_complete(controller->scif_controller_handle, smp_device_handle, io_request, (SCI_IO_STATUS)status); return; } sci_object_set_association(io_request->sci_object, io_request); status = (SCI_STATUS) scif_controller_start_io( controller->scif_controller_handle, smp_device_handle, io_request->sci_object, SCI_CONTROLLER_INVALID_IO_TAG); if (status != SCI_SUCCESS) { isci_io_request_complete(controller->scif_controller_handle, smp_device_handle, io_request, (SCI_IO_STATUS)status); return; } if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) callout_reset(&io_request->parent.timer, ccb->ccb_h.timeout, isci_io_request_timeout, request); }
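/*
 * Sketch of the simq-freeze idiom used above when the request pool is
 * empty: requeue the CCB and freeze the SIM queue so CAM stops issuing new
 * I/O until a later completion unfreezes it.  The helper name is
 * illustrative.
 */
static void
isci_requeue_frozen(struct ISCI_CONTROLLER *controller, union ccb *ccb)
{
	ccb->ccb_h.status &= ~(CAM_SIM_QUEUED | CAM_STATUS_MASK);
	ccb->ccb_h.status |= CAM_REQUEUE_REQ;
	xpt_freeze_simq(controller->sim, /*count*/1);
	controller->is_frozen = TRUE;
	xpt_done(ccb);
}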
/* * Callback routine from "bus_dmamap_load", or called directly in the simple case. * * Takes a list of physical segments and builds the SGL for the SCSI IO command, * then forwards the command to the IOC after one last check that CAM has not * aborted the transaction. */ static void mpt_execute_req(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) { request_t *req; union ccb *ccb; mpt_softc_t *mpt; MSG_SCSI_IO_REQUEST *mpt_req; SGE_SIMPLE32 *se; req = (request_t *)arg; ccb = req->ccb; mpt = ccb->ccb_h.ccb_mpt_ptr; req = ccb->ccb_h.ccb_req_ptr; mpt_req = req->req_vbuf; if (error == 0 && nseg > MPT_SGL_MAX) { error = EFBIG; } if (error != 0) { if (error != EFBIG) mpt_prt(mpt, "bus_dmamap_load returned %d", error); if (ccb->ccb_h.status == CAM_REQ_INPROG) { xpt_freeze_devq(ccb->ccb_h.path, 1); ccb->ccb_h.status = CAM_DEV_QFRZN; if (error == EFBIG) ccb->ccb_h.status |= CAM_REQ_TOO_BIG; else ccb->ccb_h.status |= CAM_REQ_CMP_ERR; } ccb->ccb_h.status &= ~CAM_SIM_QUEUED; xpt_done(ccb); CAMLOCK_2_MPTLOCK(mpt); mpt_free_request(mpt, req); MPTLOCK_2_CAMLOCK(mpt); return; } if (nseg > MPT_NSGL_FIRST(mpt)) { int i, nleft = nseg; u_int32_t flags; bus_dmasync_op_t op; SGE_CHAIN32 *ce; mpt_req->DataLength = ccb->csio.dxfer_len; flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT; if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) flags |= MPI_SGE_FLAGS_HOST_TO_IOC; se = (SGE_SIMPLE32 *) &mpt_req->SGL; for (i = 0; i < MPT_NSGL_FIRST(mpt) - 1; i++, se++, dm_segs++) { u_int32_t tf; bzero(se, sizeof (*se)); se->Address = dm_segs->ds_addr; MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len); tf = flags; if (i == MPT_NSGL_FIRST(mpt) - 2) { tf |= MPI_SGE_FLAGS_LAST_ELEMENT; } MPI_pSGE_SET_FLAGS(se, tf); nleft -= 1; } /* * Tell the IOC where to find the first chain element */ mpt_req->ChainOffset = ((char *)se - (char *)mpt_req) >> 2; /* * Until we're finished with all segments... */ while (nleft) { int ntodo; /* * Construct the chain element that points to the * next segment. */ ce = (SGE_CHAIN32 *) se++; if (nleft > MPT_NSGL(mpt)) { ntodo = MPT_NSGL(mpt) - 1; ce->NextChainOffset = (MPT_RQSL(mpt) - sizeof (SGE_SIMPLE32)) >> 2; } else {
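/*
 * Note on the ChainOffset arithmetic above: MPI expresses offsets in 32-bit
 * words, so the byte offset of the chain element within the request frame
 * is shifted right by two.  For example, a chain element starting 96 bytes
 * into the request gives ChainOffset = 96 >> 2 = 24 dwords.
 */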
static void ata_cam_begin_transaction(device_t dev, union ccb *ccb) { struct ata_channel *ch = device_get_softc(dev); struct ata_request *request; if (!(request = ata_alloc_request())) { device_printf(dev, "FAILURE - out of memory in start\n"); ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); return; } bzero(request, sizeof(*request)); /* setup request */ request->dev = NULL; request->parent = dev; request->unit = ccb->ccb_h.target_id; if (ccb->ccb_h.func_code == XPT_ATA_IO) { request->data = ccb->ataio.data_ptr; request->bytecount = ccb->ataio.dxfer_len; request->u.ata.command = ccb->ataio.cmd.command; request->u.ata.feature = ((uint16_t)ccb->ataio.cmd.features_exp << 8) | (uint16_t)ccb->ataio.cmd.features; request->u.ata.count = ((uint16_t)ccb->ataio.cmd.sector_count_exp << 8) | (uint16_t)ccb->ataio.cmd.sector_count; if (ccb->ataio.cmd.flags & CAM_ATAIO_48BIT) { request->flags |= ATA_R_48BIT; request->u.ata.lba = ((uint64_t)ccb->ataio.cmd.lba_high_exp << 40) | ((uint64_t)ccb->ataio.cmd.lba_mid_exp << 32) | ((uint64_t)ccb->ataio.cmd.lba_low_exp << 24); } else { request->u.ata.lba = ((uint64_t)(ccb->ataio.cmd.device & 0x0f) << 24); } request->u.ata.lba |= ((uint64_t)ccb->ataio.cmd.lba_high << 16) | ((uint64_t)ccb->ataio.cmd.lba_mid << 8) | (uint64_t)ccb->ataio.cmd.lba_low; if (ccb->ataio.cmd.flags & CAM_ATAIO_NEEDRESULT) request->flags |= ATA_R_NEEDRESULT; if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE && ccb->ataio.cmd.flags & CAM_ATAIO_DMA) request->flags |= ATA_R_DMA; if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) request->flags |= ATA_R_READ; if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) request->flags |= ATA_R_WRITE; if (ccb->ataio.cmd.command == ATA_READ_MUL || ccb->ataio.cmd.command == ATA_READ_MUL48 || ccb->ataio.cmd.command == ATA_WRITE_MUL || ccb->ataio.cmd.command == ATA_WRITE_MUL48) { request->transfersize = min(request->bytecount, ch->curr[ccb->ccb_h.target_id].bytecount); } else request->transfersize = min(request->bytecount, 512); } else { request->data = ccb->csio.data_ptr; request->bytecount = ccb->csio.dxfer_len; bcopy((ccb->ccb_h.flags & CAM_CDB_POINTER) ? ccb->csio.cdb_io.cdb_ptr : ccb->csio.cdb_io.cdb_bytes, request->u.atapi.ccb, ccb->csio.cdb_len); request->flags |= ATA_R_ATAPI; if (ch->curr[ccb->ccb_h.target_id].atapi == 16) request->flags |= ATA_R_ATAPI16; if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE && ch->curr[ccb->ccb_h.target_id].mode >= ATA_DMA) request->flags |= ATA_R_DMA; if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) request->flags |= ATA_R_READ; if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) request->flags |= ATA_R_WRITE; request->transfersize = min(request->bytecount, ch->curr[ccb->ccb_h.target_id].bytecount); } request->retries = 0; request->timeout = (ccb->ccb_h.timeout + 999) / 1000; callout_init_mtx(&request->callout, &ch->state_mtx, CALLOUT_RETURNUNLOCKED); request->ccb = ccb; request->flags |= ATA_R_DATA_IN_CCB; ch->running = request; ch->state = ATA_ACTIVE; if (ch->hw.begin_transaction(request) == ATA_OP_FINISHED) { ch->running = NULL; ch->state = ATA_IDLE; ata_cam_end_transaction(dev, request); return; } }
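/*
 * Worked example of the LBA assembly above.  In LBA48 mode the six taskfile
 * bytes combine as
 *
 *	lba = (lba_high_exp << 40) | (lba_mid_exp << 32) |
 *	      (lba_low_exp  << 24) | (lba_high    << 16) |
 *	      (lba_mid      <<  8) |  lba_low
 *
 * while in 28-bit mode the top four bits come from the low nibble of the
 * device register, hence the (device & 0x0f) << 24 term.
 */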
static void ataaction(struct cam_sim *sim, union ccb *ccb) { device_t dev, parent; struct ata_channel *ch; CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("ataaction func_code=%x\n", ccb->ccb_h.func_code)); ch = (struct ata_channel *)cam_sim_softc(sim); dev = ch->dev; switch (ccb->ccb_h.func_code) { /* Common cases first */ case XPT_ATA_IO: /* Execute the requested I/O operation */ case XPT_SCSI_IO: if (ata_check_ids(dev, ccb)) return; if ((ch->devices & ((ATA_ATA_MASTER | ATA_ATAPI_MASTER) << ccb->ccb_h.target_id)) == 0) { ccb->ccb_h.status = CAM_SEL_TIMEOUT; break; } if (ch->running) device_printf(dev, "already running!\n"); if (ccb->ccb_h.func_code == XPT_ATA_IO && (ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL) && (ccb->ataio.cmd.control & ATA_A_RESET)) { struct ata_res *res = &ccb->ataio.res; bzero(res, sizeof(*res)); if (ch->devices & (ATA_ATA_MASTER << ccb->ccb_h.target_id)) { res->lba_high = 0; res->lba_mid = 0; } else { res->lba_high = 0xeb; res->lba_mid = 0x14; } ccb->ccb_h.status = CAM_REQ_CMP; break; } ata_cam_begin_transaction(dev, ccb); return; case XPT_EN_LUN: /* Enable LUN as a target */ case XPT_TARGET_IO: /* Execute target I/O request */ case XPT_ACCEPT_TARGET_IO: /* Accept Host Target Mode CDB */ case XPT_CONT_TARGET_IO: /* Continue Host Target I/O Connection*/ case XPT_ABORT: /* Abort the specified CCB */ /* XXX Implement */ ccb->ccb_h.status = CAM_REQ_INVALID; break; case XPT_SET_TRAN_SETTINGS: { struct ccb_trans_settings *cts = &ccb->cts; struct ata_cam_device *d; if (ata_check_ids(dev, ccb)) return; if (cts->type == CTS_TYPE_CURRENT_SETTINGS) d = &ch->curr[ccb->ccb_h.target_id]; else d = &ch->user[ccb->ccb_h.target_id]; if (ch->flags & ATA_SATA) { if (cts->xport_specific.sata.valid & CTS_SATA_VALID_REVISION) d->revision = cts->xport_specific.sata.revision; if (cts->xport_specific.sata.valid & CTS_SATA_VALID_MODE) { if (cts->type == CTS_TYPE_CURRENT_SETTINGS) { d->mode = ATA_SETMODE(ch->dev, ccb->ccb_h.target_id, cts->xport_specific.sata.mode); } else d->mode = cts->xport_specific.sata.mode; } if (cts->xport_specific.sata.valid & CTS_SATA_VALID_BYTECOUNT) d->bytecount = min(8192, cts->xport_specific.sata.bytecount); if (cts->xport_specific.sata.valid & CTS_SATA_VALID_ATAPI) d->atapi = cts->xport_specific.sata.atapi; if (cts->xport_specific.sata.valid & CTS_SATA_VALID_CAPS) d->caps = cts->xport_specific.sata.caps; } else { if (cts->xport_specific.ata.valid & CTS_ATA_VALID_MODE) { if (cts->type == CTS_TYPE_CURRENT_SETTINGS) { d->mode = ATA_SETMODE(ch->dev, ccb->ccb_h.target_id, cts->xport_specific.ata.mode); } else d->mode = cts->xport_specific.ata.mode; } if (cts->xport_specific.ata.valid & CTS_ATA_VALID_BYTECOUNT) d->bytecount = cts->xport_specific.ata.bytecount; if (cts->xport_specific.ata.valid & CTS_ATA_VALID_ATAPI) d->atapi = cts->xport_specific.ata.atapi; if (cts->xport_specific.ata.valid & CTS_ATA_VALID_CAPS) d->caps = cts->xport_specific.ata.caps; } ccb->ccb_h.status = CAM_REQ_CMP; break; } case XPT_GET_TRAN_SETTINGS: { struct ccb_trans_settings *cts = &ccb->cts; struct ata_cam_device *d; if (ata_check_ids(dev, ccb)) return; if (cts->type == CTS_TYPE_CURRENT_SETTINGS) d = &ch->curr[ccb->ccb_h.target_id]; else d = &ch->user[ccb->ccb_h.target_id]; cts->protocol = PROTO_UNSPECIFIED; cts->protocol_version = PROTO_VERSION_UNSPECIFIED; if (ch->flags & ATA_SATA) { cts->transport = XPORT_SATA; cts->transport_version = XPORT_VERSION_UNSPECIFIED; cts->xport_specific.sata.valid = 0; cts->xport_specific.sata.mode = d->mode; cts->xport_specific.sata.valid |= CTS_SATA_VALID_MODE; 
cts->xport_specific.sata.bytecount = d->bytecount; cts->xport_specific.sata.valid |= CTS_SATA_VALID_BYTECOUNT; if (cts->type == CTS_TYPE_CURRENT_SETTINGS) { cts->xport_specific.sata.revision = ATA_GETREV(dev, ccb->ccb_h.target_id); if (cts->xport_specific.sata.revision != 0xff) { cts->xport_specific.sata.valid |= CTS_SATA_VALID_REVISION; } cts->xport_specific.sata.caps = d->caps & CTS_SATA_CAPS_D; if (ch->pm_level) { cts->xport_specific.sata.caps |= CTS_SATA_CAPS_H_PMREQ; } cts->xport_specific.sata.caps &= ch->user[ccb->ccb_h.target_id].caps; } else { cts->xport_specific.sata.revision = d->revision; cts->xport_specific.sata.valid |= CTS_SATA_VALID_REVISION; cts->xport_specific.sata.caps = d->caps; } cts->xport_specific.sata.valid |= CTS_SATA_VALID_CAPS; cts->xport_specific.sata.atapi = d->atapi; cts->xport_specific.sata.valid |= CTS_SATA_VALID_ATAPI; } else { cts->transport = XPORT_ATA; cts->transport_version = XPORT_VERSION_UNSPECIFIED; cts->xport_specific.ata.valid = 0; cts->xport_specific.ata.mode = d->mode; cts->xport_specific.ata.valid |= CTS_ATA_VALID_MODE; cts->xport_specific.ata.bytecount = d->bytecount; cts->xport_specific.ata.valid |= CTS_ATA_VALID_BYTECOUNT; if (cts->type == CTS_TYPE_CURRENT_SETTINGS) { cts->xport_specific.ata.caps = d->caps & CTS_ATA_CAPS_D; if (!(ch->flags & ATA_NO_48BIT_DMA)) cts->xport_specific.ata.caps |= CTS_ATA_CAPS_H_DMA48; cts->xport_specific.ata.caps &= ch->user[ccb->ccb_h.target_id].caps; } else cts->xport_specific.ata.caps = d->caps; cts->xport_specific.ata.valid |= CTS_ATA_VALID_CAPS; cts->xport_specific.ata.atapi = d->atapi; cts->xport_specific.ata.valid |= CTS_ATA_VALID_ATAPI; } ccb->ccb_h.status = CAM_REQ_CMP; break; } case XPT_RESET_BUS: /* Reset the specified SCSI bus */ case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */ ata_reinit(dev); ccb->ccb_h.status = CAM_REQ_CMP; break; case XPT_TERM_IO: /* Terminate the I/O process */ /* XXX Implement */ ccb->ccb_h.status = CAM_REQ_INVALID; break; case XPT_PATH_INQ: /* Path routing inquiry */ { struct ccb_pathinq *cpi = &ccb->cpi; parent = device_get_parent(dev); cpi->version_num = 1; /* XXX??? */ cpi->hba_inquiry = PI_SDTR_ABLE; cpi->target_sprt = 0; cpi->hba_misc = PIM_SEQSCAN | PIM_UNMAPPED; cpi->hba_eng_cnt = 0; if (ch->flags & ATA_NO_SLAVE) cpi->max_target = 0; else cpi->max_target = 1; cpi->max_lun = 0; cpi->initiator_id = 0; cpi->bus_id = cam_sim_bus(sim); if (ch->flags & ATA_SATA) cpi->base_transfer_speed = 150000; else cpi->base_transfer_speed = 3300; strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "ATA", HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); if (ch->flags & ATA_SATA) cpi->transport = XPORT_SATA; else cpi->transport = XPORT_ATA; cpi->transport_version = XPORT_VERSION_UNSPECIFIED; cpi->protocol = PROTO_ATA; cpi->protocol_version = PROTO_VERSION_UNSPECIFIED; cpi->maxio = ch->dma.max_iosize ? ch->dma.max_iosize : DFLTPHYS; if (device_get_devclass(device_get_parent(parent)) == devclass_find("pci")) { cpi->hba_vendor = pci_get_vendor(parent); cpi->hba_device = pci_get_device(parent); cpi->hba_subvendor = pci_get_subvendor(parent); cpi->hba_subdevice = pci_get_subdevice(parent); } cpi->ccb_h.status = CAM_REQ_CMP; break; } default: ccb->ccb_h.status = CAM_REQ_INVALID; break; } xpt_done(ccb); }
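/*
 * The XPT_GET_TRAN_SETTINGS handler above reports each field together with
 * a CTS_*_VALID_* bit; consumers must test the bit before using the field.
 * A minimal consumer-side sketch with an illustrative helper name:
 */
static int
ata_example_sata_mode(struct ccb_trans_settings *cts)
{
	if (cts->transport == XPORT_SATA &&
	    (cts->xport_specific.sata.valid & CTS_SATA_VALID_MODE))
		return (cts->xport_specific.sata.mode);
	return (-1);	/* mode not reported */
}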
/* * Function name: tw_osli_execute_scsi * Description: Build a fw cmd, based on a CAM style ccb, and * send it down. * * Input: req -- ptr to OSL internal request context * ccb -- ptr to CAM style ccb * Output: None * Return value: 0 -- success * non-zero-- failure */ TW_INT32 tw_osli_execute_scsi(struct tw_osli_req_context *req, union ccb *ccb) { struct twa_softc *sc = req->ctlr; struct tw_cl_req_packet *req_pkt; struct tw_cl_scsi_req_packet *scsi_req; struct ccb_hdr *ccb_h = &(ccb->ccb_h); struct ccb_scsiio *csio = &(ccb->csio); TW_INT32 error; tw_osli_dbg_dprintf(10, sc, "SCSI I/O request 0x%x", csio->cdb_io.cdb_bytes[0]); if (ccb_h->target_id >= TW_CL_MAX_NUM_UNITS) { tw_osli_dbg_dprintf(3, sc, "Invalid target. PTL = %x %x %x", ccb_h->path_id, ccb_h->target_id, ccb_h->target_lun); ccb_h->status |= CAM_TID_INVALID; xpt_done(ccb); return(1); } if (ccb_h->target_lun >= TW_CL_MAX_NUM_LUNS) { tw_osli_dbg_dprintf(3, sc, "Invalid lun. PTL = %x %x %x", ccb_h->path_id, ccb_h->target_id, ccb_h->target_lun); ccb_h->status |= CAM_LUN_INVALID; xpt_done(ccb); return(1); } if(ccb_h->flags & CAM_CDB_PHYS) { tw_osli_printf(sc, "", TW_CL_SEVERITY_ERROR_STRING, TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 0x2105, "Physical CDB address!"); ccb_h->status = CAM_REQ_INVALID; xpt_done(ccb); return(1); } /* * We are going to work on this request. Mark it as enqueued (though * we don't actually queue it...) */ ccb_h->status |= CAM_SIM_QUEUED; if((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) { if(ccb_h->flags & CAM_DIR_IN) req->flags |= TW_OSLI_REQ_FLAGS_DATA_IN; else req->flags |= TW_OSLI_REQ_FLAGS_DATA_OUT; } /* Build the CL understood request packet for SCSI cmds. */ req_pkt = &req->req_pkt; req_pkt->status = 0; req_pkt->tw_osl_callback = tw_osl_complete_io; scsi_req = &(req_pkt->gen_req_pkt.scsi_req); scsi_req->unit = ccb_h->target_id; scsi_req->lun = ccb_h->target_lun; scsi_req->sense_len = 0; scsi_req->sense_data = (TW_UINT8 *)(&csio->sense_data); scsi_req->scsi_status = 0; if(ccb_h->flags & CAM_CDB_POINTER) scsi_req->cdb = csio->cdb_io.cdb_ptr; else scsi_req->cdb = csio->cdb_io.cdb_bytes; scsi_req->cdb_len = csio->cdb_len; if (!(ccb_h->flags & CAM_DATA_PHYS)) { /* Virtual data addresses. Need to convert them... */ tw_osli_dbg_dprintf(3, sc, "XPT_SCSI_IO: Single virtual address!"); if (!(ccb_h->flags & CAM_SCATTER_VALID)) { if (csio->dxfer_len > TW_CL_MAX_IO_SIZE) { tw_osli_printf(sc, "size = %d", TW_CL_SEVERITY_ERROR_STRING, TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 0x2106, "I/O size too big", csio->dxfer_len); ccb_h->status = CAM_REQ_TOO_BIG; ccb_h->status &= ~CAM_SIM_QUEUED; xpt_done(ccb); return(1); } if ((req->length = csio->dxfer_len)) { req->data = csio->data_ptr; scsi_req->sgl_entries = 1; } } else { tw_osli_printf(sc, "", TW_CL_SEVERITY_ERROR_STRING, TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 0x2107, "XPT_SCSI_IO: Got SGList"); ccb_h->status = CAM_REQ_INVALID; ccb_h->status &= ~CAM_SIM_QUEUED; xpt_done(ccb); return(1); } } else { /* Data addresses are physical. */ tw_osli_printf(sc, "", TW_CL_SEVERITY_ERROR_STRING, TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 0x2108, "XPT_SCSI_IO: Physical data addresses"); ccb_h->status = CAM_REQ_INVALID; ccb_h->status &= ~CAM_SIM_QUEUED; xpt_done(ccb); return(1); } req->deadline = tw_osl_get_local_time() + (ccb_h->timeout / 1000); /* * twa_map_load_data_callback will fill in the SGL, * and submit the I/O. 
*/ error = tw_osli_map_request(req); if ((error) && (req->flags & TW_OSLI_REQ_FLAGS_FAILED)) { req->deadline = 0; ccb_h->status = CAM_REQ_CMP_ERR; ccb_h->status &= ~CAM_SIM_QUEUED; xpt_done(ccb); } return(error); }
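/*
 * Several of these drivers repeat the same CDB-access idiom: the CDB is
 * either embedded in the CCB or referenced through a pointer, selected by
 * CAM_CDB_POINTER.  A minimal sketch; the helper name is illustrative.
 */
static __inline u_int8_t *
ccb_cdb_bytes(struct ccb_scsiio *csio)
{
	return ((csio->ccb_h.flags & CAM_CDB_POINTER) ?
	    csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes);
}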
static void hpt_scsi_io(PVBUS_EXT vbus_ext, union ccb *ccb) { PVBUS vbus = (PVBUS)vbus_ext->vbus; PVDEV vd; PCOMMAND pCmd; POS_CMDEXT ext; HPT_U8 *cdb; if (ccb->ccb_h.flags & CAM_CDB_POINTER) cdb = ccb->csio.cdb_io.cdb_ptr; else cdb = ccb->csio.cdb_io.cdb_bytes; KdPrint(("hpt_scsi_io: ccb %x id %d lun %d cdb %x-%x-%x", ccb, ccb->ccb_h.target_id, ccb->ccb_h.target_lun, *(HPT_U32 *)&cdb[0], *(HPT_U32 *)&cdb[4], *(HPT_U32 *)&cdb[8] )); /* ccb->ccb_h.path_id is not our bus id - don't check it */ if (ccb->ccb_h.target_lun != 0 || ccb->ccb_h.target_id >= osm_max_targets || (ccb->ccb_h.flags & CAM_CDB_PHYS)) { ccb->ccb_h.status = CAM_TID_INVALID; xpt_done(ccb); return; } vd = ldm_find_target(vbus, ccb->ccb_h.target_id); if (!vd) { ccb->ccb_h.status = CAM_TID_INVALID; xpt_done(ccb); return; } switch (cdb[0]) { case TEST_UNIT_READY: case START_STOP_UNIT: case SYNCHRONIZE_CACHE: ccb->ccb_h.status = CAM_REQ_CMP; break; case INQUIRY: { PINQUIRYDATA inquiryData; memset(ccb->csio.data_ptr, 0, ccb->csio.dxfer_len); inquiryData = (PINQUIRYDATA)ccb->csio.data_ptr; inquiryData->AdditionalLength = 31; inquiryData->CommandQueue = 1; memcpy(&inquiryData->VendorId, "HPT ", 8); memcpy(&inquiryData->ProductId, "DISK 0_0 ", 16); if (vd->target_id / 10) { inquiryData->ProductId[7] = (vd->target_id % 100) / 10 + '0'; inquiryData->ProductId[8] = (vd->target_id % 100) % 10 + '0'; } else inquiryData->ProductId[7] = (vd->target_id % 100) % 10 + '0'; memcpy(&inquiryData->ProductRevisionLevel, "4.00", 4); ccb->ccb_h.status = CAM_REQ_CMP; } break; case READ_CAPACITY: { HPT_U8 *rbuf = ccb->csio.data_ptr; HPT_U32 cap; if (vd->capacity>0xfffffffful) cap = 0xfffffffful; else cap = vd->capacity - 1; rbuf[0] = (HPT_U8)(cap>>24); rbuf[1] = (HPT_U8)(cap>>16); rbuf[2] = (HPT_U8)(cap>>8); rbuf[3] = (HPT_U8)cap; rbuf[4] = 0; rbuf[5] = 0; rbuf[6] = 2; rbuf[7] = 0; ccb->ccb_h.status = CAM_REQ_CMP; break; } case SERVICE_ACTION_IN: { HPT_U8 *rbuf = ccb->csio.data_ptr; HPT_U64 cap = vd->capacity - 1; rbuf[0] = (HPT_U8)(cap>>56); rbuf[1] = (HPT_U8)(cap>>48); rbuf[2] = (HPT_U8)(cap>>40); rbuf[3] = (HPT_U8)(cap>>32); rbuf[4] = (HPT_U8)(cap>>24); rbuf[5] = (HPT_U8)(cap>>16); rbuf[6] = (HPT_U8)(cap>>8); rbuf[7] = (HPT_U8)cap; rbuf[8] = 0; rbuf[9] = 0; rbuf[10] = 2; rbuf[11] = 0; ccb->ccb_h.status = CAM_REQ_CMP; break; } case READ_6: case READ_10: case READ_16: case WRITE_6: case WRITE_10: case WRITE_16: case 0x13: case 0x2f: case 0x8f: /* VERIFY_16 */ { pCmd = ldm_alloc_cmds(vbus, vd->cmds_per_request); if(!pCmd){ KdPrint(("Failed to allocate command!")); ccb->ccb_h.status = CAM_BUSY; break; } switch (cdb[0]) { case READ_6: case WRITE_6: case 0x13: pCmd->uCmd.Ide.Lba = ((HPT_U32)cdb[1] << 16) | ((HPT_U32)cdb[2] << 8) | (HPT_U32)cdb[3]; pCmd->uCmd.Ide.nSectors = (HPT_U16) cdb[4]; break; case READ_16: case WRITE_16: case 0x8f: /* VERIFY_16 */ { HPT_U64 block = ((HPT_U64)cdb[2]<<56) | ((HPT_U64)cdb[3]<<48) | ((HPT_U64)cdb[4]<<40) | ((HPT_U64)cdb[5]<<32) | ((HPT_U64)cdb[6]<<24) | ((HPT_U64)cdb[7]<<16) | ((HPT_U64)cdb[8]<<8) | ((HPT_U64)cdb[9]); pCmd->uCmd.Ide.Lba = block; pCmd->uCmd.Ide.nSectors = (HPT_U16)cdb[13] | ((HPT_U16)cdb[12]<<8); break; } default: pCmd->uCmd.Ide.Lba = (HPT_U32)cdb[5] | ((HPT_U32)cdb[4] << 8) | ((HPT_U32)cdb[3] << 16) | ((HPT_U32)cdb[2] << 24); pCmd->uCmd.Ide.nSectors = (HPT_U16) cdb[8] | ((HPT_U16)cdb[7]<<8); break; } switch (cdb[0]) { case READ_6: case READ_10: case READ_16: pCmd->flags.data_in = 1; break; case WRITE_6: case WRITE_10: case WRITE_16: pCmd->flags.data_out = 1; break; } pCmd->priv = ext = 
cmdext_get(vbus_ext); HPT_ASSERT(ext); ext->ccb = ccb; pCmd->target = vd; pCmd->done = os_cmddone; pCmd->buildsgl = os_buildsgl; pCmd->psg = ext->psg; if (ccb->ccb_h.flags & CAM_SCATTER_VALID) { int idx; bus_dma_segment_t *sgList = (bus_dma_segment_t *)ccb->csio.data_ptr; if (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) pCmd->flags.physical_sg = 1; for (idx = 0; idx < ccb->csio.sglist_cnt; idx++) { pCmd->psg[idx].addr.bus = sgList[idx].ds_addr; pCmd->psg[idx].size = sgList[idx].ds_len; pCmd->psg[idx].eot = (idx==ccb->csio.sglist_cnt-1)? 1 : 0; } callout_reset(&ccb->ccb_h.timeout_ch, HPT_OSM_TIMEOUT, hpt_timeout, pCmd); ldm_queue_cmd(pCmd); } else { int error; pCmd->flags.physical_sg = 1; error = bus_dmamap_load(vbus_ext->io_dmat, ext->dma_map, ccb->csio.data_ptr, ccb->csio.dxfer_len, hpt_io_dmamap_callback, pCmd, BUS_DMA_WAITOK ); KdPrint(("bus_dmamap_load return %d", error)); if (error && error!=EINPROGRESS) { os_printk("bus_dmamap_load error %d", error); cmdext_put(ext); ldm_free_cmds(pCmd); ccb->ccb_h.status = CAM_REQ_CMP_ERR; xpt_done(ccb); } } return; } default: ccb->ccb_h.status = CAM_REQ_INVALID; break; } xpt_done(ccb); return; }
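/*
 * Sketch of the CDB decode performed above for the READ/WRITE family.  SCSI
 * CDBs carry the LBA big-endian, with a width that depends on the opcode:
 * 21 bits in the 6-byte forms, 32 bits in the 10-byte forms, 64 bits in the
 * 16-byte forms.  The helper name is illustrative.
 */
static HPT_U64
hpt_cdb_lba(const HPT_U8 *cdb)
{
	switch (cdb[0]) {
	case READ_6:
	case WRITE_6:
		return (((HPT_U32)(cdb[1] & 0x1f) << 16) |
		    ((HPT_U32)cdb[2] << 8) | cdb[3]);
	case READ_16:
	case WRITE_16:
		return (((HPT_U64)cdb[2] << 56) | ((HPT_U64)cdb[3] << 48) |
		    ((HPT_U64)cdb[4] << 40) | ((HPT_U64)cdb[5] << 32) |
		    ((HPT_U64)cdb[6] << 24) | ((HPT_U64)cdb[7] << 16) |
		    ((HPT_U64)cdb[8] << 8) | (HPT_U64)cdb[9]);
	default:	/* 10-byte forms */
		return (((HPT_U32)cdb[2] << 24) | ((HPT_U32)cdb[3] << 16) |
		    ((HPT_U32)cdb[4] << 8) | cdb[5]);
	}
}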
/* * Function name: twa_action * Description: Driver entry point for CAM's use. * * Input: sim -- sim corresponding to the ctlr * ccb -- ptr to CAM request * Output: None * Return value: None */ TW_VOID twa_action(struct cam_sim *sim, union ccb *ccb) { struct twa_softc *sc = (struct twa_softc *)cam_sim_softc(sim); struct ccb_hdr *ccb_h = &(ccb->ccb_h); switch (ccb_h->func_code) { case XPT_SCSI_IO: /* SCSI I/O */ { struct tw_osli_req_context *req; req = tw_osli_get_request(sc); if (req == NULL) { tw_osli_dbg_dprintf(2, sc, "Cannot get request pkt."); /* * Freeze the simq to maintain ccb ordering. The next * ccb that gets completed will unfreeze the simq. */ ccb_h->status &= ~CAM_SIM_QUEUED; ccb_h->status |= CAM_REQUEUE_REQ; xpt_done(ccb); break; } if ((tw_cl_is_reset_needed(&(req->ctlr->ctlr_handle)))) { ccb_h->status &= ~CAM_SIM_QUEUED; ccb_h->status |= CAM_REQUEUE_REQ; xpt_done(ccb); tw_osli_req_q_insert_tail(req, TW_OSLI_FREE_Q); break; } req->req_handle.osl_req_ctxt = req; req->req_handle.is_io = TW_CL_TRUE; req->orig_req = ccb; if (tw_osli_execute_scsi(req, ccb)) tw_osli_req_q_insert_tail(req, TW_OSLI_FREE_Q); break; } case XPT_ABORT: tw_osli_dbg_dprintf(2, sc, "Abort request."); ccb_h->status = CAM_UA_ABORT; xpt_done(ccb); break; case XPT_RESET_BUS: tw_cl_create_event(&(sc->ctlr_handle), TW_CL_FALSE, TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 0x2108, 0x3, TW_CL_SEVERITY_INFO_STRING, "Received Reset Bus request from CAM", " "); tw_cl_set_reset_needed(&(sc->ctlr_handle)); ccb_h->status = CAM_REQ_CMP; xpt_done(ccb); break; case XPT_SET_TRAN_SETTINGS: tw_osli_dbg_dprintf(3, sc, "XPT_SET_TRAN_SETTINGS"); /* * This command is not supported, since it's very specific * to SCSI, and we are doing ATA. */ ccb_h->status = CAM_FUNC_NOTAVAIL; xpt_done(ccb); break; case XPT_GET_TRAN_SETTINGS: { struct ccb_trans_settings *cts = &ccb->cts; struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; cts->protocol = PROTO_SCSI; cts->protocol_version = SCSI_REV_2; cts->transport = XPORT_SPI; cts->transport_version = 2; spi->valid = CTS_SPI_VALID_DISC; spi->flags = CTS_SPI_FLAGS_DISC_ENB; scsi->valid = CTS_SCSI_VALID_TQ; scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; tw_osli_dbg_dprintf(3, sc, "XPT_GET_TRAN_SETTINGS"); ccb_h->status = CAM_REQ_CMP; xpt_done(ccb); break; } case XPT_CALC_GEOMETRY: tw_osli_dbg_dprintf(3, sc, "XPT_CALC_GEOMETRY"); cam_calc_geometry(&ccb->ccg, 1/* extended */); xpt_done(ccb); break; case XPT_PATH_INQ: /* Path inquiry -- get twa properties */ { struct ccb_pathinq *path_inq = &ccb->cpi; tw_osli_dbg_dprintf(3, sc, "XPT_PATH_INQ request"); path_inq->version_num = 1; path_inq->hba_inquiry = 0; path_inq->target_sprt = 0; path_inq->hba_misc = 0; path_inq->hba_eng_cnt = 0; path_inq->max_target = TW_CL_MAX_NUM_UNITS; path_inq->max_lun = TW_CL_MAX_NUM_LUNS - 1; path_inq->unit_number = cam_sim_unit(sim); path_inq->bus_id = cam_sim_bus(sim); path_inq->initiator_id = TW_CL_MAX_NUM_UNITS; path_inq->base_transfer_speed = 100000; strncpy(path_inq->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(path_inq->hba_vid, "3ware", HBA_IDLEN); strncpy(path_inq->dev_name, cam_sim_name(sim), DEV_IDLEN); path_inq->transport = XPORT_SPI; path_inq->transport_version = 2; path_inq->protocol = PROTO_SCSI; path_inq->protocol_version = SCSI_REV_2; #if 0 /* XXX swildner */ path_inq->maxio = TW_CL_MAX_IO_SIZE; #endif ccb_h->status = CAM_REQ_CMP; xpt_done(ccb); break; } default: tw_osli_dbg_dprintf(3, sc, "func_code = %x", ccb_h->func_code); ccb_h->status = 
CAM_REQ_INVALID; xpt_done(ccb); break; } }
static void nvme_sim_action(struct cam_sim *sim, union ccb *ccb) { struct nvme_controller *ctrlr; struct nvme_namespace *ns; CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("nvme_sim_action: func= %#x\n", ccb->ccb_h.func_code)); /* * XXX when we support multiple namespaces in the base driver we'll need * to revisit how all this gets stored and saved in the periph driver's * reserved areas. Right now we store all three in the softc of the sim. */ ns = sim2ns(sim); ctrlr = sim2ctrlr(sim); mtx_assert(&ctrlr->lock, MA_OWNED); switch (ccb->ccb_h.func_code) { case XPT_CALC_GEOMETRY: /* Calculate Geometry Totally nuts ? XXX */ /* * Only meaningful for old-school SCSI disks since only the SCSI * da driver generates them. Reject any that slip through. */ /*FALLTHROUGH*/ case XPT_ABORT: /* Abort the specified CCB */ case XPT_EN_LUN: /* Enable LUN as a target */ case XPT_TARGET_IO: /* Execute target I/O request */ case XPT_ACCEPT_TARGET_IO: /* Accept Host Target Mode CDB */ case XPT_CONT_TARGET_IO: /* Continue Host Target I/O Connection*/ /* * Only target mode generates these, and only for SCSI. They are * all invalid/unsupported for NVMe. */ ccb->ccb_h.status = CAM_REQ_INVALID; break; case XPT_SET_TRAN_SETTINGS: /* * NVMe doesn't really have different transfer settings, but * other parts of CAM think failure here is a big deal. */ ccb->ccb_h.status = CAM_REQ_CMP; break; case XPT_PATH_INQ: /* Path routing inquiry */ { struct ccb_pathinq *cpi = &ccb->cpi; /* * NVMe may have multiple LUNs on the same path. Current generation * of NVMe devices support only a single name space. Multiple name * space drives are coming, but it's unclear how we should report * them up the stack. */ cpi->version_num = 1; cpi->hba_inquiry = 0; cpi->target_sprt = 0; cpi->hba_misc = PIM_UNMAPPED /* | PIM_NOSCAN */; cpi->hba_eng_cnt = 0; cpi->max_target = 0; cpi->max_lun = ctrlr->cdata.nn; cpi->maxio = nvme_ns_get_max_io_xfer_size(ns); cpi->initiator_id = 0; cpi->bus_id = cam_sim_bus(sim); cpi->base_transfer_speed = 4000000; /* 4 GB/s 4 lanes pcie 3 */ strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "NVMe", HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); cpi->transport = XPORT_NVME; /* XXX XPORT_PCIE ? */ cpi->transport_version = 1; /* XXX Get PCIe spec ? */ cpi->protocol = PROTO_NVME; cpi->protocol_version = NVME_REV_1; /* Groks all 1.x NVMe cards */ cpi->xport_specific.nvme.nsid = ns->id; cpi->ccb_h.status = CAM_REQ_CMP; break; } case XPT_GET_TRAN_SETTINGS: /* Get transport settings */ { struct ccb_trans_settings *cts; struct ccb_trans_settings_nvme *nvmep; struct ccb_trans_settings_nvme *nvmex; cts = &ccb->cts; nvmex = &cts->xport_specific.nvme; nvmep = &cts->proto_specific.nvme; nvmex->valid = CTS_NVME_VALID_SPEC; nvmex->spec_major = 1; /* XXX read from card */ nvmex->spec_minor = 2; nvmex->spec_tiny = 0; nvmep->valid = CTS_NVME_VALID_SPEC; nvmep->spec_major = 1; /* XXX read from card */ nvmep->spec_minor = 2; nvmep->spec_tiny = 0; cts->transport = XPORT_NVME; cts->protocol = PROTO_NVME; cts->ccb_h.status = CAM_REQ_CMP; break; } case XPT_TERM_IO: /* Terminate the I/O process */ /* * Every driver handles this, but nothing generates it. Assume * it's OK to just say 'that worked'. */ /*FALLTHROUGH*/ case XPT_RESET_DEV: /* Bus Device Reset the specified device */ case XPT_RESET_BUS: /* Reset the specified bus */ /* * NVMe doesn't really support physically resetting the bus.
It's part * of the bus scanning dance, so return success to tell the process to * proceed. */ ccb->ccb_h.status = CAM_REQ_CMP; break; case XPT_NVME_IO: /* Execute the requested I/O operation */ nvme_sim_nvmeio(sim, ccb); return; /* no done */ default: ccb->ccb_h.status = CAM_REQ_INVALID; break; } xpt_done(ccb); }
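/*
 * The 4000000 KB/s figure reported above follows from PCIe 3.0 x4
 * arithmetic: 8 GT/s per lane with 128b/130b encoding is ~985 MB/s per
 * lane, so four lanes give roughly 3.9 GB/s, rounded up to 4 GB/s for CAM.
 */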
static void wds_scsi_io(struct cam_sim * sim, struct ccb_scsiio * csio) { int unit = cam_sim_unit(sim); struct wds *wp; struct ccb_hdr *ccb_h; struct wds_req *r; int base; u_int8_t c; int error; int n; wp = (struct wds *)cam_sim_softc(sim); ccb_h = &csio->ccb_h; DBG(DBX "wds%d: cmd TARG=%d LUN=%d\n", unit, ccb_h->target_id, ccb_h->target_lun); if (ccb_h->target_id > 7 || ccb_h->target_id == WDS_HBA_ID) { ccb_h->status = CAM_TID_INVALID; xpt_done((union ccb *) csio); return; } if (ccb_h->target_lun > 7) { ccb_h->status = CAM_LUN_INVALID; xpt_done((union ccb *) csio); return; } if (csio->dxfer_len > BUFSIZ) { ccb_h->status = CAM_REQ_TOO_BIG; xpt_done((union ccb *) csio); return; } if (ccb_h->flags & (CAM_CDB_PHYS | CAM_SCATTER_VALID | CAM_DATA_PHYS)) { /* don't support these */ ccb_h->status = CAM_REQ_INVALID; xpt_done((union ccb *) csio); return; } base = wp->addr; /* * this check is mostly for debugging purposes, * "can't happen" normally. */ if(wp->want_wdsr) { DBG(DBX "wds%d: someone already waits for buffer\n", unit); smallog('b'); n = xpt_freeze_simq(sim, /* count */ 1); smallog('0'+n); ccb_h->status = CAM_REQUEUE_REQ; xpt_done((union ccb *) csio); return; } r = wdsr_alloc(wp); if (r == NULL) { device_printf(wp->dev, "no request slot available!\n"); wp->want_wdsr = 1; n = xpt_freeze_simq(sim, /* count */ 1); smallog2('f', '0'+n); ccb_h->status = CAM_REQUEUE_REQ; xpt_done((union ccb *) csio); return; } ccb_h->ccb_wdsr = (void *) r; r->ccb = (union ccb *) csio; switch (error = frag_alloc(wp, csio->dxfer_len, &r->buf, &r->mask)) { case CAM_REQ_CMP: break; case CAM_REQUEUE_REQ: DBG(DBX "wds%d: no data buffer available\n", unit); wp->want_wdsr = 1; n = xpt_freeze_simq(sim, /* count */ 1); smallog2('f', '0'+n); wdsr_ccb_done(wp, r, r->ccb, CAM_REQUEUE_REQ); return; default: DBG(DBX "wds%d: request is too big\n", unit); wdsr_ccb_done(wp, r, r->ccb, error); break; } ccb_h->status |= CAM_SIM_QUEUED; r->flags &= ~WR_DONE; scsi_ulto3b(WDSTOPHYS(wp, &r->cmd), wp->dx->ombs[r->ombn].addr); bzero(&r->cmd, sizeof r->cmd); r->cmd.cmd = WDSX_SCSICMD; r->cmd.targ = (ccb_h->target_id << 5) | ccb_h->target_lun; if (ccb_h->flags & CAM_CDB_POINTER) bcopy(csio->cdb_io.cdb_ptr, &r->cmd.scb, csio->cdb_len < 12 ? csio->cdb_len : 12); else bcopy(csio->cdb_io.cdb_bytes, &r->cmd.scb, csio->cdb_len < 12 ? csio->cdb_len : 12); scsi_ulto3b(csio->dxfer_len, r->cmd.len); if (csio->dxfer_len > 0 && (ccb_h->flags & CAM_DIR_MASK) == CAM_DIR_OUT) { /* we already rejected physical or scattered addresses */ bcopy(csio->data_ptr, r->buf, csio->dxfer_len); } scsi_ulto3b(csio->dxfer_len ? WDSTOPHYS(wp, r->buf) : 0, r->cmd.data); if ((ccb_h->flags & CAM_DIR_MASK) == CAM_DIR_IN) r->cmd.write = 0x80; else r->cmd.write = 0x00; scsi_ulto3b(0, r->cmd.next); outb(base + WDS_HCR, WDSH_IRQEN | WDSH_DRQEN); c = WDSC_MSTART(r->ombn); if (wds_cmd(base, &c, sizeof c) != 0) { device_printf(wp->dev, "unable to start outgoing mbox\n"); wp->dx->ombs[r->ombn].stat = 0; wdsr_ccb_done(wp, r, r->ccb, CAM_RESRC_UNAVAIL); return; } DBG(DBX "wds%d: enqueued cmd 0x%x, r=%p\n", unit, r->cmd.scb[0] & 0xFF, r); smallog3('+', ccb_h->target_id + '0', ccb_h->target_lun + '0'); }
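/*
 * For reference, scsi_ulto3b() stores a 24-bit value most-significant byte
 * first, matching both the SCSI wire format and the WD7000's 3-byte mailbox
 * address fields used above:
 *
 *	bytes[0] = (val >> 16) & 0xff;
 *	bytes[1] = (val >>  8) & 0xff;
 *	bytes[2] =  val        & 0xff;
 */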