/*
 * Issue a MODE SENSE(6) to the tape device and use the result to fill
 * out the softc's media parameters (block count, block size, density,
 * write-protect state) and, when requested, to snapshot the vendor
 * page-0 data as a template for a later mode select.
 *
 * called from:
 *	attach
 *	open
 *	ioctl (to reset original blksize)
 */
static int
st_scsibus_mode_sense(struct st_softc *st, int flags)
{
	u_int scsipi_sense_len;
	int error;
	/* On-stack reply buffer: 6-byte header + block descriptor + page 0. */
	struct scsipi_sense {
		struct scsi_mode_parameter_header_6 header;
		struct scsi_general_block_descriptor blk_desc;
		u_char sense_data[MAX_PAGE_0_SIZE];
	} scsipi_sense;
	struct scsipi_periph *periph = st->sc_periph;

	/*
	 * Only ask for as much page-0 data as this drive actually has
	 * (st->page_0_size may be zero).
	 */
	scsipi_sense_len = sizeof(scsipi_sense.header) +
	    sizeof(scsipi_sense.blk_desc) +
	    st->page_0_size;

	/*
	 * Set up a mode sense
	 * We don't need the results. Just print them for our interest's sake,
	 * if asked, or if we need it as a template for the mode select store
	 * it away.
	 */
	error = scsipi_mode_sense(st->sc_periph, 0, SMS_PCTRL_CURRENT,
	    &scsipi_sense.header, scsipi_sense_len, flags,
	    ST_RETRIES, ST_CTL_TIME);
	if (error)
		return error;

	/* Decode the block descriptor into the softc. */
	st->numblks = _3btol(scsipi_sense.blk_desc.nblocks);
	st->media_blksize = _3btol(scsipi_sense.blk_desc.blklen);
	st->media_density = scsipi_sense.blk_desc.density;
	/* Device-specific byte carries the write-protect bit. */
	if (scsipi_sense.header.dev_spec & SMH_DSP_WRITE_PROT)
		st->flags |= ST_READONLY;
	else
		st->flags &= ~ST_READONLY;
	SC_DEBUG(periph, SCSIPI_DB3,
	    ("density code %d, %d-byte blocks, write-%s, ",
	    st->media_density, st->media_blksize,
	    st->flags & ST_READONLY ? "protected" : "enabled"));
	SC_DEBUG(periph, SCSIPI_DB3, ("%sbuffered\n",
	    scsipi_sense.header.dev_spec & SMH_DSP_BUFF_MODE ? "" : "un"));

	/* Save page 0 verbatim as the template for a later mode select. */
	if (st->page_0_size)
		memcpy(st->sense_data, scsipi_sense.sense_data,
		    st->page_0_size);

	periph->periph_flags |= PERIPH_MEDIA_LOADED;
	return 0;
}
/* * Ask the drive what it's min and max blk sizes are. */ static int st_scsibus_read_block_limits(struct st_softc *st, int flags) { struct scsi_block_limits cmd; struct scsi_block_limits_data block_limits; struct scsipi_periph *periph = st->sc_periph; int error; /* * do a 'Read Block Limits' */ memset(&cmd, 0, sizeof(cmd)); cmd.opcode = READ_BLOCK_LIMITS; /* * do the command, update the global values */ error = scsipi_command(periph, (void *)&cmd, sizeof(cmd), (void *)&block_limits, sizeof(block_limits), ST_RETRIES, ST_CTL_TIME, NULL, flags | XS_CTL_DATA_IN | XS_CTL_DATA_ONSTACK); if (error) return (error); st->blkmin = _2btol(block_limits.min_length); st->blkmax = _3btol(block_limits.max_length); SC_DEBUG(periph, SCSIPI_DB3, ("(%d <= blksize <= %d)\n", st->blkmin, st->blkmax)); return (0); }
/*
 * Translate an incoming SCSI command into a virtual-disk (vdsk) operation,
 * map the data buffer into LDC cookies, queue a descriptor on the transmit
 * ring and notify the service domain.  Non-I/O commands are either emulated
 * (INQUIRY, READ CAPACITY) or completed immediately.
 */
void
vdsk_scsi_cmd(struct scsi_xfer *xs)
{
	struct scsi_rw *rw;
	struct scsi_rw_big *rwb;
	struct scsi_rw_12 *rw12;
	struct scsi_rw_16 *rw16;
	u_int64_t lba;
	u_int32_t sector_count;
	uint8_t operation;

	switch (xs->cmd->opcode) {
	case READ_BIG:
	case READ_COMMAND:
	case READ_12:
	case READ_16:
		operation = VD_OP_BREAD;
		break;
	case WRITE_BIG:
	case WRITE_COMMAND:
	case WRITE_12:
	case WRITE_16:
		operation = VD_OP_BWRITE;
		break;

	case SYNCHRONIZE_CACHE:
		operation = VD_OP_FLUSH;
		break;

	/* Emulated locally, no round trip to the service domain. */
	case INQUIRY:
		vdsk_scsi_inq(xs);
		return;
	case READ_CAPACITY:
		vdsk_scsi_capacity(xs);
		return;
	case READ_CAPACITY_16:
		vdsk_scsi_capacity16(xs);
		return;

	case TEST_UNIT_READY:
	case START_STOP:
	case PREVENT_ALLOW:
		vdsk_scsi_done(xs, XS_NOERROR);
		return;

	default:
		printf("%s cmd 0x%02x\n", __func__, xs->cmd->opcode);
		/* FALLTHROUGH: unsupported commands fail like the ones below */
	case MODE_SENSE:
	case MODE_SENSE_BIG:
	case REPORT_LUNS:
	case READ_TOC:
		vdsk_scsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	/*
	 * READ/WRITE/SYNCHRONIZE commands. SYNCHRONIZE CACHE has same
	 * layout as 10-byte READ/WRITE commands.
	 *
	 * NOTE(review): there is no else clause, so an unexpected cmdlen
	 * would leave lba/sector_count uninitialized; every opcode that
	 * reaches here currently has one of these four lengths — confirm
	 * before adding new opcodes above.
	 */
	if (xs->cmdlen == 6) {
		rw = (struct scsi_rw *)xs->cmd;
		/* 6-byte CDB: 21-bit LBA, length 0 means 256 blocks. */
		lba = _3btol(rw->addr) & (SRW_TOPADDR << 16 | 0xffff);
		sector_count = rw->length ? rw->length : 0x100;
	} else if (xs->cmdlen == 10) {
		rwb = (struct scsi_rw_big *)xs->cmd;
		lba = _4btol(rwb->addr);
		sector_count = _2btol(rwb->length);
	} else if (xs->cmdlen == 12) {
		rw12 = (struct scsi_rw_12 *)xs->cmd;
		lba = _4btol(rw12->addr);
		sector_count = _4btol(rw12->length);
	} else if (xs->cmdlen == 16) {
		rw16 = (struct scsi_rw_16 *)xs->cmd;
		lba = _8btol(rw16->addr);
		sector_count = _4btol(rw16->length);
	}

	{
		struct vdsk_softc *sc = xs->sc_link->adapter_softc;
		struct ldc_map *map = sc->sc_lm;
		struct vio_dring_msg dm;
		vaddr_t va;
		paddr_t pa;
		psize_t nbytes;
		int len, ncookies;
		int desc, s;
		int timeout;

		s = splbio();
		desc = sc->sc_tx_prod;

		/*
		 * Walk the data buffer page by page, entering each physical
		 * page into the LDC map table and recording one cookie per
		 * page fragment in the transmit descriptor.
		 */
		ncookies = 0;
		len = xs->datalen;
		va = (vaddr_t)xs->data;
		while (len > 0) {
			KASSERT(ncookies < MAXPHYS / PAGE_SIZE);
			pmap_extract(pmap_kernel(), va, &pa);

			/* Find a free slot in the (power-of-two) map table. */
			while (map->lm_slot[map->lm_next].entry != 0) {
				map->lm_next++;
				map->lm_next &= (map->lm_nentries - 1);
			}

			/* Grant the peer read+write access to this page. */
			map->lm_slot[map->lm_next].entry = (pa & LDC_MTE_RA_MASK);
			map->lm_slot[map->lm_next].entry |= LDC_MTE_CPR | LDC_MTE_CPW;
			map->lm_slot[map->lm_next].entry |= LDC_MTE_IOR | LDC_MTE_IOW;
			map->lm_slot[map->lm_next].entry |= LDC_MTE_R | LDC_MTE_W;
			map->lm_count++;

			/* Fragment length: to end of buffer or end of page. */
			nbytes = MIN(len, PAGE_SIZE - (pa & PAGE_MASK));

			sc->sc_vd->vd_desc[desc].cookie[ncookies].addr =
			    map->lm_next << PAGE_SHIFT | (pa & PAGE_MASK);
			sc->sc_vd->vd_desc[desc].cookie[ncookies].size = nbytes;

			/* Remember the map slot so it can be freed on completion. */
			sc->sc_vsd[desc].vsd_map_idx[ncookies] = map->lm_next;
			va += nbytes;
			len -= nbytes;
			ncookies++;
		}

		/* Fill in the descriptor and make it visible to the peer. */
		sc->sc_vd->vd_desc[desc].hdr.ack = 1;
		sc->sc_vd->vd_desc[desc].operation = operation;
		sc->sc_vd->vd_desc[desc].slice = VD_SLICE_NONE;
		sc->sc_vd->vd_desc[desc].status = 0xffffffff;
		sc->sc_vd->vd_desc[desc].offset = lba;
		sc->sc_vd->vd_desc[desc].size = xs->datalen;
		sc->sc_vd->vd_desc[desc].ncookies = ncookies;
		/* Descriptor contents must be globally visible before READY. */
		membar(Sync);
		sc->sc_vd->vd_desc[desc].hdr.dstate = VIO_DESC_READY;

		sc->sc_vsd[desc].vsd_xs = xs;
		sc->sc_vsd[desc].vsd_ncookies = ncookies;

		sc->sc_tx_prod++;
		sc->sc_tx_prod &= (sc->sc_vd->vd_nentries - 1);

		/* Tell the service domain the descriptor is ready. */
		bzero(&dm, sizeof(dm));
		dm.tag.type = VIO_TYPE_DATA;
		dm.tag.stype = VIO_SUBTYPE_INFO;
		dm.tag.stype_env = VIO_DRING_DATA;
		dm.tag.sid = sc->sc_local_sid;
		dm.seq_no = sc->sc_seq_no++;
		dm.dring_ident = sc->sc_dring_ident;
		dm.start_idx = dm.end_idx = desc;
		vdsk_sendmsg(sc, &dm, sizeof(dm));

		/* Async path: completion arrives via the rx interrupt. */
		if (!ISSET(xs->flags, SCSI_POLL)) {
			splx(s);
			return;
		}

		/* Polled path: spin (up to ~1s) for the descriptor to free. */
		timeout = 1000;
		do {
			if (vdsk_rx_intr(sc) &&
			    sc->sc_vd->vd_desc[desc].status == VIO_DESC_FREE)
				break;

			delay(1000);
		} while(--timeout > 0);
		splx(s);
	}
}
/*
 * Fill out the disk parameter structure. Return SDGP_RESULT_OK if the
 * structure is correctly filled in, SDGP_RESULT_OFFLINE otherwise. The caller
 * is responsible for clearing the SDEV_MEDIA_LOADED flag if the structure
 * cannot be completed.
 */
int
sd_get_parms(struct sd_softc *sc, struct disk_parms *dp, int flags)
{
	union scsi_mode_sense_buf *buf = NULL;
	struct page_rigid_geometry *rigid = NULL;
	struct page_flex_geometry *flex = NULL;
	struct page_reduced_geometry *reduced = NULL;
	u_char *page0 = NULL;
	u_int32_t heads = 0, sectors = 0, cyls = 0, secsize = 0;
	int err = 0, big;

	/* Capacity first; without it nothing else matters. */
	if (sd_size(sc, flags) != 0)
		return (SDGP_RESULT_OFFLINE);

	if (ISSET(sc->flags, SDF_THIN) && sd_thin_params(sc, flags) != 0) {
		/* we dont know the unmap limits, so we cant use thin shizz */
		CLR(sc->flags, SDF_THIN);
	}

	/* If no DMA buffer is available fall back to default geometry. */
	buf = dma_alloc(sizeof(*buf), PR_NOWAIT);
	if (buf == NULL)
		goto validate;

	/*
	 * Ask for page 0 (vendor specific) mode sense data to find
	 * READONLY info. The only thing USB devices will ask for.
	 */
	err = scsi_do_mode_sense(sc->sc_link, 0, buf, (void **)&page0,
	    NULL, NULL, NULL, 1, flags | SCSI_SILENT, &big);
	if (err == 0) {
		/* Write-protect bit lives in the mode parameter header. */
		if (big && buf->hdr_big.dev_spec & SMH_DSP_WRITE_PROT)
			SET(sc->sc_link->flags, SDEV_READONLY);
		else if (!big && buf->hdr.dev_spec & SMH_DSP_WRITE_PROT)
			SET(sc->sc_link->flags, SDEV_READONLY);
		else
			CLR(sc->sc_link->flags, SDEV_READONLY);
	}

	/*
	 * Many UMASS devices choke when asked about their geometry. Most
	 * don't have a meaningful geometry anyway, so just fake it if
	 * scsi_size() worked.
	 */
	if ((sc->sc_link->flags & SDEV_UMASS) && (dp->disksize > 0))
		goto validate;

	switch (sc->sc_link->inqdata.device & SID_TYPE) {
	case T_OPTICAL:
		/* No more information needed or available. */
		break;

	case T_RDIRECT:
		/* T_RDIRECT supports only PAGE_REDUCED_GEOMETRY (6). */
		err = scsi_do_mode_sense(sc->sc_link, PAGE_REDUCED_GEOMETRY,
		    buf, (void **)&reduced, NULL, NULL, &secsize,
		    sizeof(*reduced), flags | SCSI_SILENT, NULL);
		if (!err && reduced &&
		    DISK_PGCODE(reduced, PAGE_REDUCED_GEOMETRY)) {
			if (dp->disksize == 0)
				dp->disksize = _5btol(reduced->sectors);
			if (secsize == 0)
				secsize = _2btol(reduced->bytes_s);
		}
		break;

	default:
		/*
		 * NOTE: Some devices leave off the last four bytes of
		 * PAGE_RIGID_GEOMETRY and PAGE_FLEX_GEOMETRY mode sense pages.
		 * The only information in those four bytes is RPM information
		 * so accept the page. The extra bytes will be zero and RPM will
		 * end up with the default value of 3600.
		 */
		if (((sc->sc_link->flags & SDEV_ATAPI) == 0) ||
		    ((sc->sc_link->flags & SDEV_REMOVABLE) == 0))
			err = scsi_do_mode_sense(sc->sc_link,
			    PAGE_RIGID_GEOMETRY, buf, (void **)&rigid, NULL,
			    NULL, &secsize, sizeof(*rigid) - 4,
			    flags | SCSI_SILENT, NULL);
		if (!err && rigid && DISK_PGCODE(rigid, PAGE_RIGID_GEOMETRY)) {
			heads = rigid->nheads;
			cyls = _3btol(rigid->ncyl);
			if (heads * cyls > 0)
				sectors = dp->disksize / (heads * cyls);
		} else {
			/* Rigid page missing/invalid — try the flex page. */
			err = scsi_do_mode_sense(sc->sc_link,
			    PAGE_FLEX_GEOMETRY, buf, (void **)&flex, NULL, NULL,
			    &secsize, sizeof(*flex) - 4,
			    flags | SCSI_SILENT, NULL);
			if (!err && flex &&
			    DISK_PGCODE(flex, PAGE_FLEX_GEOMETRY)) {
				sectors = flex->ph_sec_tr;
				heads = flex->nheads;
				cyls = _2btol(flex->ncyl);
				if (secsize == 0)
					secsize = _2btol(flex->bytes_s);
				if (dp->disksize == 0)
					dp->disksize = heads * cyls * sectors;
			}
		}
		break;
	}

validate:
	if (buf)
		dma_free(buf, sizeof(*buf));

	if (dp->disksize == 0)
		return (SDGP_RESULT_OFFLINE);

	if (dp->secsize == 0)
		dp->secsize = (secsize == 0) ? 512 : secsize;

	/*
	 * Restrict secsize values to powers of two between 512 and 64k.
	 */
	switch (dp->secsize) {
	case 0x200:	/* == 512, == DEV_BSIZE on all architectures. */
	case 0x400:
	case 0x800:
	case 0x1000:
	case 0x2000:
	case 0x4000:
	case 0x8000:
	case 0x10000:
		break;
	default:
		SC_DEBUG(sc->sc_link, SDEV_DB1,
		    ("sd_get_parms: bad secsize: %#x\n", dp->secsize));
		return (SDGP_RESULT_OFFLINE);
	}

	/*
	 * XXX THINK ABOUT THIS!!  Using values such that sectors * heads *
	 * cyls is <= disk_size can lead to wasted space. We need a more
	 * careful calculation/validation to make everything work out
	 * optimally.
	 */
	if (dp->disksize > 0xffffffff && (dp->heads * dp->sectors) < 0xffff) {
		/* Big disk: pick the largest representable fake geometry. */
		dp->heads = 511;
		dp->sectors = 255;
		cyls = 0;
	} else {
		/*
		 * Use standard geometry values for anything we still don't
		 * know.
		 */
		dp->heads = (heads == 0) ? 255 : heads;
		dp->sectors = (sectors == 0) ? 63 : sectors;
	}

	dp->cyls = (cyls == 0) ? dp->disksize / (dp->heads * dp->sectors) :
	    cyls;

	if (dp->cyls == 0) {
		/* Degenerate case: treat the disk as one flat track. */
		dp->heads = dp->cyls = 1;
		dp->sectors = dp->disksize;
	}

	return (SDGP_RESULT_OK);
}
/*
 * This is pretty much a CD target for now: emulate just enough of a
 * SCSI CD-ROM for the rump test harness.  Only ADAPTER_REQ_RUN_XFER
 * requests are handled; every xfer is completed synchronously via
 * scsipi_done().
 *
 * Fix: the INQUIRY emulation used strcpy() into the fixed-width
 * vendor[8]/product[16]/revision[4] fields; "RUMPHOBO" and "0.00"
 * exactly fill their fields, so strcpy's NUL terminator was written
 * one byte past the field boundary.  Use exact-length memcpy instead;
 * the buffer is pre-zeroed so the resulting bytes are identical.
 */
static void
scsitest_request(struct scsipi_channel *chan,
	scsipi_adapter_req_t req, void *arg)
{
	struct scsipi_xfer *xs = arg;
	struct scsipi_generic *cmd = xs->cmd;
#ifdef USE_TOSI_ISO
	int error;
#endif

	if (req != ADAPTER_REQ_RUN_XFER)
		return;

	//show_scsipi_xs(xs);

	switch (cmd->opcode) {
	case SCSI_TEST_UNIT_READY:
		/* Not ready when no backing iso file is open. */
		if (isofd == -1)
			sense_notready(xs);
		break;
	case INQUIRY: {
		struct scsipi_inquiry_data *inqbuf = (void *)xs->data;

		memset(inqbuf, 0, sizeof(*inqbuf));
		inqbuf->device = T_CDROM;
		inqbuf->dev_qual2 = SID_REMOVABLE;
		/*
		 * Exact-length copies: no NUL terminator is written, so
		 * nothing spills into the adjacent struct fields.
		 */
		memcpy(inqbuf->vendor, "RUMPHOBO", sizeof("RUMPHOBO") - 1);
		memcpy(inqbuf->product, "It's a LIE", sizeof("It's a LIE") - 1);
		memcpy(inqbuf->revision, "0.00", sizeof("0.00") - 1);
		break;
	}
	case READ_CD_CAPACITY: {
		struct scsipi_read_cd_cap_data *ret = (void *)xs->data;

		_lto4b(CDBLOCKSIZE, ret->length);
		_lto4b(mycdsize, ret->addr);
		break;
	}
	case READ_DISCINFO: {
		struct scsipi_read_discinfo_data *ret = (void *)xs->data;

		memset(ret, 0, sizeof(*ret));
		break;
	}
	case READ_TRACKINFO: {
		struct scsipi_read_trackinfo_data *ret = (void *)xs->data;

		/* Only the track size matters to the test consumer. */
		_lto4b(mycdsize, ret->track_size);
		break;
	}
	case READ_TOC: {
		struct scsipi_toc_header *ret = (void *)xs->data;

		memset(ret, 0, sizeof(*ret));
		break;
	}
	case START_STOP: {
		struct scsipi_start_stop *param = (void *)cmd;

		/* Eject: close the backing file and mark media absent. */
		if (param->how & SSS_LOEJ) {
#ifdef USE_TOSI_ISO
			rumpuser_close(isofd, &error);
#endif
			isofd = -1;
		}
		break;
	}
	case SCSI_SYNCHRONIZE_CACHE_10: {
		if (isofd == -1) {
			/* Count noisy syncs against absent media for tests. */
			if ((xs->xs_control & XS_CTL_SILENT) == 0)
				atomic_inc_uint(&rump_scsitest_err
				    [RUMP_SCSITEST_NOISYSYNC]);
			sense_notready(xs);
		}
		break;
	}
	case GET_CONFIGURATION: {
		memset(xs->data, 0, sizeof(struct scsipi_get_conf_data));
		break;
	}
	case SCSI_READ_6_COMMAND: {
#ifdef USE_TOSI_ISO
		struct scsi_rw_6 *param = (void *)cmd;

		printf("reading %d bytes from %d\n",
		    param->length * CDBLOCKSIZE,
		    _3btol(param->addr) * CDBLOCKSIZE);
		rumpuser_pread(isofd, xs->data,
		     param->length * CDBLOCKSIZE,
		     _3btol(param->addr) * CDBLOCKSIZE,
		     &error);
#endif
		break;
	}
	case SCSI_PREVENT_ALLOW_MEDIUM_REMOVAL:
		/* hardcoded for now */
		break;
	default:
		printf("unhandled opcode 0x%x\n", cmd->opcode);
		break;
	}

	scsipi_done(xs);
}
/*
 * Fill out the disk parameter structure. Return SDGP_RESULT_OK if the
 * structure is correctly filled in, SDGP_RESULT_OFFLINE otherwise. The caller
 * is responsible for clearing the SDEV_MEDIA_LOADED flag if the structure
 * cannot be completed.
 */
int
sd_get_parms(struct sd_softc *sc, struct disk_parms *dp, int flags)
{
	union scsi_mode_sense_buf *buf = NULL;
	struct page_rigid_geometry *rigid;
	struct page_flex_geometry *flex;
	struct page_reduced_geometry *reduced;
	u_int32_t heads = 0, sectors = 0, cyls = 0, blksize = 0, ssblksize;
	u_int16_t rpm = 0;

	/* READ CAPACITY gives disksize and (possibly) the block size. */
	dp->disksize = scsi_size(sc->sc_link, flags, &ssblksize);

	/*
	 * Many UMASS devices choke when asked about their geometry. Most
	 * don't have a meaningful geometry anyway, so just fake it if
	 * scsi_size() worked.
	 */
	if ((sc->sc_link->flags & SDEV_UMASS) && (dp->disksize > 0))
		goto validate;	/* N.B. buf will be NULL at validate. */

	buf = malloc(sizeof(*buf), M_TEMP, M_NOWAIT);
	if (buf == NULL)
		goto validate;

	switch (sc->sc_link->inqdata.device & SID_TYPE) {
	case T_OPTICAL:
		/* No more information needed or available. */
		break;

	case T_RDIRECT:
		/* T_RDIRECT supports only PAGE_REDUCED_GEOMETRY (6). */
		scsi_do_mode_sense(sc->sc_link, PAGE_REDUCED_GEOMETRY, buf,
		    (void **)&reduced, NULL, NULL, &blksize, sizeof(*reduced),
		    flags | SCSI_SILENT, NULL);
		if (DISK_PGCODE(reduced, PAGE_REDUCED_GEOMETRY)) {
			if (dp->disksize == 0)
				dp->disksize = _5btol(reduced->sectors);
			if (blksize == 0)
				blksize = _2btol(reduced->bytes_s);
		}
		break;

	default:
		/*
		 * NOTE: Some devices leave off the last four bytes of
		 * PAGE_RIGID_GEOMETRY and PAGE_FLEX_GEOMETRY mode sense pages.
		 * The only information in those four bytes is RPM information
		 * so accept the page. The extra bytes will be zero and RPM will
		 * end up with the default value of 3600.
		 */
		rigid = NULL;
		if (((sc->sc_link->flags & SDEV_ATAPI) == 0) ||
		    ((sc->sc_link->flags & SDEV_REMOVABLE) == 0))
			scsi_do_mode_sense(sc->sc_link, PAGE_RIGID_GEOMETRY,
			    buf, (void **)&rigid, NULL, NULL, &blksize,
			    sizeof(*rigid) - 4, flags | SCSI_SILENT, NULL);
		if (DISK_PGCODE(rigid, PAGE_RIGID_GEOMETRY)) {
			heads = rigid->nheads;
			cyls = _3btol(rigid->ncyl);
			rpm = _2btol(rigid->rpm);
			if (heads * cyls > 0)
				sectors = dp->disksize / (heads * cyls);
		} else {
			/* Rigid page missing/invalid — try the flex page. */
			scsi_do_mode_sense(sc->sc_link, PAGE_FLEX_GEOMETRY,
			    buf, (void **)&flex, NULL, NULL, &blksize,
			    sizeof(*flex) - 4, flags | SCSI_SILENT, NULL);
			if (DISK_PGCODE(flex, PAGE_FLEX_GEOMETRY)) {
				sectors = flex->ph_sec_tr;
				heads = flex->nheads;
				cyls = _2btol(flex->ncyl);
				rpm = _2btol(flex->rpm);
				if (blksize == 0)
					blksize = _2btol(flex->bytes_s);
				if (dp->disksize == 0)
					dp->disksize = heads * cyls * sectors;
			}
		}
		break;
	}

validate:
	if (buf)
		free(buf, M_TEMP);

	if (dp->disksize == 0)
		return (SDGP_RESULT_OFFLINE);

	/* READ CAPACITY's block size wins over the mode page's. */
	if (ssblksize > 0)
		dp->blksize = ssblksize;
	else
		dp->blksize = (blksize == 0) ? 512 : blksize;

	/*
	 * Restrict blksize values to powers of two between 512 and 64k.
	 */
	switch (dp->blksize) {
	case 0x200:	/* == 512, == DEV_BSIZE on all architectures. */
	case 0x400:
	case 0x800:
	case 0x1000:
	case 0x2000:
	case 0x4000:
	case 0x8000:
	case 0x10000:
		break;
	default:
		SC_DEBUG(sc->sc_link, SDEV_DB1,
		    ("sd_get_parms: bad blksize: %#x\n", dp->blksize));
		return (SDGP_RESULT_OFFLINE);
	}

	/*
	 * XXX THINK ABOUT THIS!!  Using values such that sectors * heads *
	 * cyls is <= disk_size can lead to wasted space. We need a more
	 * careful calculation/validation to make everything work out
	 * optimally.
	 */
	if (dp->disksize > 0xffffffff && (dp->heads * dp->sectors) < 0xffff) {
		/* Big disk: pick the largest representable fake geometry. */
		dp->heads = 511;
		dp->sectors = 255;
		cyls = 0;
	} else {
		/*
		 * Use standard geometry values for anything we still don't
		 * know.
		 */
		dp->heads = (heads == 0) ? 255 : heads;
		dp->sectors = (sectors == 0) ? 63 : sectors;
		dp->rot_rate = (rpm == 0) ? 3600 : rpm;
	}

	dp->cyls = (cyls == 0) ? dp->disksize / (dp->heads * dp->sectors) :
	    cyls;

	if (dp->cyls == 0) {
		/* Degenerate case: treat the disk as one flat track. */
		dp->heads = dp->cyls = 1;
		dp->sectors = dp->disksize;
	}

	return (SDGP_RESULT_OK);
}
/* * Perform a READ ELEMENT STATUS on behalf of the user. This returns * the new (more complete) data format. */ static int ch_usergetelemstatus(struct ch_softc *sc, struct changer_element_status_request *cesr) { struct scsipi_channel *chan = sc->sc_periph->periph_channel; struct scsipi_periph *dtperiph; struct read_element_status_header *st_hdrp, st_hdr; struct read_element_status_page_header *pg_hdrp; struct read_element_status_descriptor *desc; struct changer_volume_tag *avol, *pvol; size_t size, desclen, stddesclen, offset; int first, avail, i, error = 0; void *data; void *uvendptr; struct changer_element_status ces; /* * Check arguments. */ if (cesr->cesr_type > CHET_DT) return (EINVAL); if (sc->sc_counts[cesr->cesr_type] == 0) return (ENODEV); if (cesr->cesr_unit > (sc->sc_counts[cesr->cesr_type] - 1)) return (ENODEV); if (cesr->cesr_count > (sc->sc_counts[cesr->cesr_type] + cesr->cesr_unit)) return (EINVAL); /* * Do the request the user wants, but only read the status header. * This will tell us the amount of storage we must allocate * in order to read all the data. */ error = ch_getelemstatus(sc, sc->sc_firsts[cesr->cesr_type] + cesr->cesr_unit, cesr->cesr_count, &st_hdr, sizeof(st_hdr), 0, cesr->cesr_flags); if (error) return (error); size = sizeof(struct read_element_status_header) + _3btol(st_hdr.nbytes); /* * We must have at least room for the status header and * one page header (since we only ask for oen element type * at a time). */ if (size < (sizeof(struct read_element_status_header) + sizeof(struct read_element_status_page_header))) return (EIO); /* * Allocate the storage and do the request again. 
*/ data = malloc(size, M_DEVBUF, M_WAITOK); error = ch_getelemstatus(sc, sc->sc_firsts[cesr->cesr_type] + cesr->cesr_unit, cesr->cesr_count, data, size, 0, cesr->cesr_flags); if (error) goto done; st_hdrp = (struct read_element_status_header *)data; pg_hdrp = (struct read_element_status_page_header *)((u_long)st_hdrp + sizeof(struct read_element_status_header)); desclen = _2btol(pg_hdrp->edl); /* * Fill in the user status array. */ first = _2btol(st_hdrp->fear); if (first < (sc->sc_firsts[cesr->cesr_type] + cesr->cesr_unit) || first >= (sc->sc_firsts[cesr->cesr_type] + cesr->cesr_unit + cesr->cesr_count)) { error = EIO; goto done; } first -= sc->sc_firsts[cesr->cesr_type] + cesr->cesr_unit; avail = _2btol(st_hdrp->count); if (avail <= 0 || avail > cesr->cesr_count) { error = EIO; goto done; } offset = sizeof(struct read_element_status_header) + sizeof(struct read_element_status_page_header); for (i = 0; i < cesr->cesr_count; i++) { memset(&ces, 0, sizeof(ces)); if (i < first || i >= (first + avail)) { error = copyout(&ces, &cesr->cesr_data[i], sizeof(ces)); if (error) goto done; } desc = (struct read_element_status_descriptor *) ((char *)data + offset); stddesclen = sizeof(struct read_element_status_descriptor); offset += desclen; ces.ces_flags = CESTATUS_STATUS_VALID; /* * The SCSI flags conveniently map directly to the * chio API flags. */ ces.ces_flags |= (desc->flags1 & 0x3f); ces.ces_asc = desc->sense_code; ces.ces_ascq = desc->sense_qual; /* * For Data Transport elemenets, get the SCSI ID and LUN, * and attempt to map them to a device name if they're * on the same SCSI bus. 
*/ if (desc->dt_scsi_flags & READ_ELEMENT_STATUS_DT_IDVALID) { ces.ces_target = desc->dt_scsi_addr; ces.ces_flags |= CESTATUS_TARGET_VALID; } if (desc->dt_scsi_flags & READ_ELEMENT_STATUS_DT_LUVALID) { ces.ces_lun = desc->dt_scsi_flags & READ_ELEMENT_STATUS_DT_LUNMASK; ces.ces_flags |= CESTATUS_LUN_VALID; } if (desc->dt_scsi_flags & READ_ELEMENT_STATUS_DT_NOTBUS) ces.ces_flags |= CESTATUS_NOTBUS; else if ((ces.ces_flags & (CESTATUS_TARGET_VALID|CESTATUS_LUN_VALID)) == (CESTATUS_TARGET_VALID|CESTATUS_LUN_VALID)) { if (ces.ces_target < chan->chan_ntargets && ces.ces_lun < chan->chan_nluns && (dtperiph = scsipi_lookup_periph(chan, ces.ces_target, ces.ces_lun)) != NULL && dtperiph->periph_dev != NULL) { strlcpy(ces.ces_xname, device_xname(dtperiph->periph_dev), sizeof(ces.ces_xname)); ces.ces_flags |= CESTATUS_XNAME_VALID; } } if (desc->flags2 & READ_ELEMENT_STATUS_INVERT) ces.ces_flags |= CESTATUS_INVERTED; if (desc->flags2 & READ_ELEMENT_STATUS_SVALID) { if (ch_map_element(sc, _2btol(desc->ssea), &ces.ces_from_type, &ces.ces_from_unit)) ces.ces_flags |= CESTATUS_FROM_VALID; } /* * Extract volume tag information. */ switch (pg_hdrp->flags & (READ_ELEMENT_STATUS_PVOLTAG|READ_ELEMENT_STATUS_AVOLTAG)) { case (READ_ELEMENT_STATUS_PVOLTAG|READ_ELEMENT_STATUS_AVOLTAG): pvol = (struct changer_volume_tag *)(desc + 1); avol = pvol + 1; break; case READ_ELEMENT_STATUS_PVOLTAG: pvol = (struct changer_volume_tag *)(desc + 1); avol = NULL; break; case READ_ELEMENT_STATUS_AVOLTAG: pvol = NULL; avol = (struct changer_volume_tag *)(desc + 1); break; default: avol = pvol = NULL; break; } if (pvol != NULL) { ch_voltag_convert_in(pvol, &ces.ces_pvoltag); ces.ces_flags |= CESTATUS_PVOL_VALID; stddesclen += sizeof(struct changer_volume_tag); } if (avol != NULL) { ch_voltag_convert_in(avol, &ces.ces_avoltag); ces.ces_flags |= CESTATUS_AVOL_VALID; stddesclen += sizeof(struct changer_volume_tag); } /* * Compute vendor-specific length. 
Note the 4 reserved * bytes between the volume tags and the vendor-specific * data. Copy it out of the user wants it. */ stddesclen += 4; if (desclen > stddesclen) ces.ces_vendor_len = desclen - stddesclen; if (ces.ces_vendor_len != 0 && cesr->cesr_vendor_data != NULL) { error = copyin(&cesr->cesr_vendor_data[i], &uvendptr, sizeof(uvendptr)); if (error) goto done; error = copyout((void *)((u_long)desc + stddesclen), uvendptr, ces.ces_vendor_len); if (error) goto done; } /* * Now copy out the status descriptor we've constructed. */ error = copyout(&ces, &cesr->cesr_data[i], sizeof(ces)); if (error) goto done; } done: if (data != NULL) free(data, M_DEVBUF); return (error); }
/*
 * Perform a READ ELEMENT STATUS on behalf of the user, and return to
 * the user only the data the user is interested in.  This returns the
 * old data format.
 *
 * Fix: the per-element copyout used `avail` as the length while the
 * source is a single u_int8_t — each iteration copied `avail` bytes of
 * kernel stack to userland (stack disclosure) and overran the tail of
 * the user's buffer.  Copy exactly sizeof(user_data) == 1 byte.
 */
static int
ch_ousergetelemstatus(struct ch_softc *sc, int chet, u_int8_t *uptr)
{
	struct read_element_status_header *st_hdrp, st_hdr;
	struct read_element_status_page_header *pg_hdrp;
	struct read_element_status_descriptor *desc;
	size_t size, desclen;
	void *data;
	int avail, i, error = 0;
	u_int8_t user_data;

	/*
	 * If there are no elements of the requested type in the changer,
	 * the request is invalid.
	 */
	if (sc->sc_counts[chet] == 0)
		return (EINVAL);

	/*
	 * Do the request the user wants, but only read the status header.
	 * This will tell us the amount of storage we must allocate in
	 * order to read all data.
	 */
	error = ch_getelemstatus(sc, sc->sc_firsts[chet],
	    sc->sc_counts[chet], &st_hdr, sizeof(st_hdr), 0, 0);
	if (error)
		return (error);

	size = sizeof(struct read_element_status_header) +
	    _3btol(st_hdr.nbytes);

	/*
	 * We must have at least room for the status header and
	 * one page header (since we only ask for one element type
	 * at a time).
	 */
	if (size < (sizeof(struct read_element_status_header) +
	    sizeof(struct read_element_status_page_header)))
		return (EIO);

	/*
	 * Allocate the storage and do the request again.
	 */
	data = malloc(size, M_DEVBUF, M_WAITOK);
	error = ch_getelemstatus(sc, sc->sc_firsts[chet],
	    sc->sc_counts[chet], data, size, 0, 0);
	if (error)
		goto done;

	st_hdrp = (struct read_element_status_header *)data;
	pg_hdrp = (struct read_element_status_page_header *)((u_long)st_hdrp +
	    sizeof(struct read_element_status_header));
	desclen = _2btol(pg_hdrp->edl);

	/*
	 * Fill in the user status array: one flags byte per element.
	 */
	avail = _2btol(st_hdrp->count);
	if (avail != sc->sc_counts[chet])
		printf("%s: warning, READ ELEMENT STATUS avail != count\n",
		    device_xname(sc->sc_dev));

	desc = (struct read_element_status_descriptor *)((u_long)data +
	    sizeof(struct read_element_status_header) +
	    sizeof(struct read_element_status_page_header));
	for (i = 0; i < avail; ++i) {
		user_data = desc->flags1;
		/* One byte per element — NOT `avail` bytes. */
		error = copyout(&user_data, &uptr[i], sizeof(user_data));
		if (error)
			break;
		/* Advance by the device-reported descriptor length. */
		desc = (struct read_element_status_descriptor *)((u_long)desc
		    + desclen);
	}

 done:
	if (data != NULL)
		free(data, M_DEVBUF);
	return (error);
}
/*
 * Fetch mode page 4 (rigid disk geometry) and fill in the heads,
 * cylinders, sectors-per-track and rotation rate of *dp.  Returns 0 on
 * success, ERESTART if the page is absent or implausible (caller may
 * fall back to another method), or the mode-sense error.
 */
static int
sd_get_parms_page4(struct sd_softc *sd, struct disk_parms *dp)
{
	struct sd_mode_sense_data scsipi_sense;
	union scsi_disk_pages *pages;
	size_t poffset;
	int byte2, error;

	/* First try with DBD (disable block descriptors) set. */
	byte2 = SMS_DBD;
again:
	memset(&scsipi_sense, 0, sizeof(scsipi_sense));
	error = scsi_mode_sense(sd, byte2, 4, &scsipi_sense.header,
	    (byte2 ? 0 : sizeof(scsipi_sense.blk_desc)) +
	    sizeof(scsipi_sense.pages.rigid_geometry));
	if (error) {
		if (byte2 == SMS_DBD) {
			/* No result; try once more with DBD off */
			byte2 = 0;
			goto again;
		}
		return error;
	}

	/* Locate the page after the header and any block descriptors. */
	poffset = sizeof(scsipi_sense.header);
	poffset += scsipi_sense.header.blk_desc_len;

	/* Reject replies whose page would not fit in our buffer. */
	if (poffset > sizeof(scsipi_sense) - sizeof(pages->rigid_geometry))
		return ERESTART;

	pages = (void *)((u_long)&scsipi_sense + poffset);
#if 0
	{
		size_t i;
		u_int8_t *p;

		printf("page 4 sense:");
		for (i = sizeof(scsipi_sense), p = (void *)&scsipi_sense; i;
		    i--, p++)
			printf(" %02x", *p);
		printf("\n");
		printf("page 4 pg_code=%d sense=%p/%p\n",
		    pages->rigid_geometry.pg_code, &scsipi_sense, pages);
	}
#endif

	if ((pages->rigid_geometry.pg_code & PGCODE_MASK) != 4)
		return ERESTART;

	/*
	 * KLUDGE!! (for zone recorded disks)
	 * give a number of sectors so that sec * trks * cyls
	 * is <= disk_size
	 * can lead to wasted space! THINK ABOUT THIS !
	 */
	dp->heads = pages->rigid_geometry.nheads;
	dp->cyls = _3btol(pages->rigid_geometry.ncyl);
	if (dp->heads == 0 || dp->cyls == 0)
		return ERESTART;
	dp->sectors = dp->disksize / (dp->heads * dp->cyls);	/* XXX */

	dp->rot_rate = _2btol(pages->rigid_geometry.rpm);
	if (dp->rot_rate == 0)
		dp->rot_rate = 3600;

#if 0
	printf("page 4 ok\n");
#endif
	return 0;
}
/*
 * Determine the disk's capacity and block size and record them in
 * sd->sc_params.  Tries READ CAPACITY first; falls back to READ FORMAT
 * CAPACITIES (for media that report zero blocks) or a MODE SENSE block
 * descriptor (when READ CAPACITY returned an implausible block size).
 * Returns 0 on success or an SDGP_RESULT_* code.
 */
static int
sd_get_capacity(struct sd_softc *sd)
{
	struct disk_parms *dp = &sd->sc_params;
	uint64_t blocks;
	int error, blksize;

	dp->disksize = blocks = sd_read_capacity(sd, &blksize);
	if (blocks == 0) {
		/*
		 * Zero capacity: ask READ FORMAT CAPACITIES (e.g. for
		 * unformatted or absent removable media).
		 */
		struct scsipi_read_format_capacities cmd;
		struct {
			struct scsipi_capacity_list_header header;
			struct scsipi_capacity_descriptor desc;
		} __packed data;

		memset(&cmd, 0, sizeof(cmd));
		memset(&data, 0, sizeof(data));

		cmd.opcode = READ_FORMAT_CAPACITIES;
		_lto2b(sizeof(data), cmd.length);

		error = scsi_command(sd, (void *)&cmd, sizeof(cmd),
		    (void *)&data, sizeof(data));
		if (error == EFTYPE)
			/* Medium Format Corrupted, handle as not formatted */
			return SDGP_RESULT_UNFORMATTED;
		if (error || data.header.length == 0)
			return SDGP_RESULT_OFFLINE;

		switch (data.desc.byte5 & SCSIPI_CAP_DESC_CODE_MASK) {
		case SCSIPI_CAP_DESC_CODE_RESERVED:
		case SCSIPI_CAP_DESC_CODE_FORMATTED:
			break;

		case SCSIPI_CAP_DESC_CODE_UNFORMATTED:
			return SDGP_RESULT_UNFORMATTED;

		case SCSIPI_CAP_DESC_CODE_NONE:
			return SDGP_RESULT_OFFLINE;
		}

		dp->disksize = blocks = _4btol(data.desc.nblks);
		if (blocks == 0)
			return SDGP_RESULT_OFFLINE;		/* XXX? */

		blksize = _3btol(data.desc.blklen);
	} else if (!sd_validate_blksize(blksize)) {
		/*
		 * Capacity known but block size implausible: try the
		 * MODE SENSE block descriptor instead.
		 */
		struct sd_mode_sense_data scsipi_sense;
		int bsize;

		memset(&scsipi_sense, 0, sizeof(scsipi_sense));
		error = scsi_mode_sense(sd, 0, 0, &scsipi_sense.header,
		    sizeof(struct scsi_mode_parameter_header_6) +
		    sizeof(scsipi_sense.blk_desc));
		if (!error) {
			bsize = scsipi_sense.header.blk_desc_len;
			/* A full block descriptor is at least 8 bytes. */
			if (bsize >= 8)
				blksize = _3btol(scsipi_sense.blk_desc.blklen);
		}
	}

	/* Last resort: assume the standard 512-byte block size. */
	if (!sd_validate_blksize(blksize))
		blksize = SD_DEFAULT_BLKSIZE;

	dp->blksize = blksize;
	dp->disksize512 = (blocks * dp->blksize) / DEV_BSIZE;
	return 0;
}
/* * scsi_interpret_sense: * * Look at the returned sense and act on the error, determining * the unix error number to pass back. (0 = report no error) * * NOTE: If we return ERESTART, we are expected to haved * thawed the device! * * THIS IS THE DEFAULT ERROR HANDLER FOR SCSI DEVICES. */ static int scsi_interpret_sense(struct siop_adapter *adp, struct scsi_xfer *xs) { struct scsi_sense_data *sense; u_int8_t key; int error; uint32_t info; static const char *error_mes[] = { "soft error (corrected)", "not ready", "medium error", "non-media hardware failure", "illegal request", "unit attention", "readonly device", "no data found", "vendor unique", "copy aborted", "command aborted", "search returned equal", "volume overflow", "verify miscompare", "unknown error key" }; sense = (struct scsi_sense_data *)xs->data; DPRINTF((" sense debug information:\n")); DPRINTF(("\tcode 0x%x valid %d\n", SSD_RCODE(sense->response_code), sense->response_code & SSD_RCODE_VALID ? 1 : 0)); DPRINTF(("\tseg 0x%x key 0x%x ili 0x%x eom 0x%x fmark 0x%x\n", sense->segment, SSD_SENSE_KEY(sense->flags), sense->flags & SSD_ILI ? 1 : 0, sense->flags & SSD_EOM ? 1 : 0, sense->flags & SSD_FILEMARK ? 1 : 0)); DPRINTF(("\ninfo: 0x%x 0x%x 0x%x 0x%x followed by %d " "extra bytes\n", sense->info[0], sense->info[1], sense->info[2], sense->info[3], sense->extra_len)); switch (SSD_RCODE(sense->response_code)) { /* * Old SCSI-1 and SASI devices respond with * codes other than 70. */ case 0x00: /* no error (command completed OK) */ return 0; case 0x04: /* drive not ready after it was selected */ if (adp->sd->sc_flags & FLAGS_REMOVABLE) adp->sd->sc_flags &= ~FLAGS_MEDIA_LOADED; /* XXX - display some sort of error here? 
*/ return EIO; case 0x20: /* invalid command */ return EINVAL; case 0x25: /* invalid LUN (Adaptec ACB-4000) */ return EACCES; /* * If it's code 70, use the extended stuff and * interpret the key */ case 0x71: /* delayed error */ key = SSD_SENSE_KEY(sense->flags); printf(" DEFERRED ERROR, key = 0x%x\n", key); /* FALLTHROUGH */ case 0x70: if ((sense->response_code & SSD_RCODE_VALID) != 0) info = _4btol(sense->info); else info = 0; key = SSD_SENSE_KEY(sense->flags); switch (key) { case SKEY_NO_SENSE: case SKEY_RECOVERED_ERROR: if (xs->resid == xs->datalen && xs->datalen) { /* * Why is this here? */ xs->resid = 0; /* not short read */ } case SKEY_EQUAL: error = 0; break; case SKEY_NOT_READY: if (adp->sd->sc_flags & FLAGS_REMOVABLE) adp->sd->sc_flags &= ~FLAGS_MEDIA_LOADED; if (sense->asc == 0x3A) { error = ENODEV; /* Medium not present */ } else error = EIO; break; case SKEY_ILLEGAL_REQUEST: error = EINVAL; break; case SKEY_UNIT_ATTENTION: if (sense->asc == 0x29 && sense->ascq == 0x00) { /* device or bus reset */ return ERESTART; } if (adp->sd->sc_flags & FLAGS_REMOVABLE) adp->sd->sc_flags &= ~FLAGS_MEDIA_LOADED; if (!(adp->sd->sc_flags & FLAGS_REMOVABLE)) return ERESTART; error = EIO; break; case SKEY_DATA_PROTECT: error = EROFS; break; case SKEY_BLANK_CHECK: error = 0; break; case SKEY_ABORTED_COMMAND: break; case SKEY_VOLUME_OVERFLOW: error = ENOSPC; break; default: error = EIO; break; } /* Print brief(er) sense information */ printf("%s", error_mes[key - 1]); if ((sense->response_code & SSD_RCODE_VALID) != 0) { switch (key) { case SKEY_NOT_READY: case SKEY_ILLEGAL_REQUEST: case SKEY_UNIT_ATTENTION: case SKEY_DATA_PROTECT: break; case SKEY_BLANK_CHECK: printf(", requested size: %d (decimal)", info); break; case SKEY_ABORTED_COMMAND: printf(", cmd 0x%x, info 0x%x", xs->cmd->opcode, info); break; default: printf(", info = %d (decimal)", info); } } if (sense->extra_len != 0) { int n; printf(", data ="); for (n = 0; n < sense->extra_len; n++) printf(" %x", 
sense->csi[n]); } printf("\n"); return error; /* * Some other code, just report it */ default: printf("Sense Error Code 0x%x", SSD_RCODE(sense->response_code)); if ((sense->response_code & SSD_RCODE_VALID) != 0) { struct scsi_sense_data_unextended *usense = (struct scsi_sense_data_unextended *)sense; printf(" at block no. %d (decimal)", _3btol(usense->block)); } printf("\n"); return EIO; } }
/*
 * Entry point for ATAPI commands coming from the SCSI layer: wrap the
 * scsi_xfer in a wdc_xfer, apply DSC (tape) special-casing, and hand
 * the transfer to the WDC controller queue.  Only target 0 exists on
 * an ATAPI "bus"; anything else fails immediately.
 */
void
wdc_atapi_send_cmd(struct scsi_xfer *sc_xfer)
{
	struct atapiscsi_softc *as = sc_xfer->sc_link->adapter_softc;
	struct channel_softc *chp = as->chp;
	struct ata_drive_datas *drvp = &chp->ch_drive[as->drive];
	struct wdc_xfer *xfer;
	int s;
	int idx;

	WDCDEBUG_PRINT(("wdc_atapi_send_cmd %s:%d:%d start\n",
	    chp->wdc->sc_dev.dv_xname, chp->channel, as->drive), DEBUG_XFERS);

	/* ATAPI has a single device per drive; only target 0 is valid. */
	if (sc_xfer->sc_link->target != 0) {
		sc_xfer->error = XS_DRIVER_STUFFUP;
		scsi_done(sc_xfer);
		return;
	}

	/* Reuse the xfer embedded in the scsi_xfer after scrubbing it. */
	xfer = sc_xfer->io;
	wdc_scrub_xfer(xfer);
	if (sc_xfer->flags & SCSI_POLL)
		xfer->c_flags |= C_POLL;
	xfer->drive = as->drive;
	xfer->c_flags |= C_ATAPI;
	xfer->cmd = sc_xfer;
	xfer->databuf = sc_xfer->data;
	xfer->c_bcount = sc_xfer->datalen;
	xfer->c_start = wdc_atapi_start;
	xfer->c_intr = wdc_atapi_intr;
	timeout_set(&xfer->atapi_poll_to, wdc_atapi_timer_handler, chp);

	/* Dump the raw CDB bytes when transfer debugging is enabled. */
	WDCDEBUG_PRINT(("wdc_atapi_send_cmd %s:%d:%d ",
	    chp->wdc->sc_dev.dv_xname, chp->channel, as->drive),
	    DEBUG_XFERS | DEBUG_ERRORS);
	for (idx = 0; idx < sc_xfer->cmdlen; idx++) {
		WDCDEBUG_PRINT((" %02x",
		    ((unsigned char *)sc_xfer->cmd)[idx]),
		    DEBUG_XFERS | DEBUG_ERRORS);
	}
	WDCDEBUG_PRINT(("\n"), DEBUG_XFERS | DEBUG_ERRORS);

	s = splbio();

	/* Drives with DSC (tape) support need media-access bookkeeping. */
	if (drvp->atapi_cap & ACAP_DSC) {
		WDCDEBUG_PRINT(("about to send cmd 0x%x ",
		    sc_xfer->cmd->opcode), DEBUG_DSC);
		switch (sc_xfer->cmd->opcode) {
		case READ:
		case WRITE:
			xfer->c_flags |= C_MEDIA_ACCESS;
			/* If we are not in buffer availability mode,
			   we limit the first request to 0 bytes, which
			   gets us into buffer availability mode without
			   holding the bus. */
			if (!(drvp->drive_flags & DRIVE_DSCBA)) {
				xfer->c_bcount = 0;
				/* Remember the real length, send 0 blocks. */
				xfer->transfer_len =
				    _3btol(((struct scsi_rw_tape *)
				    sc_xfer->cmd)->len);
				_lto3b(0, ((struct scsi_rw_tape *)
				    sc_xfer->cmd)->len);
				xfer->c_done = wdc_atapi_tape_done;
				WDCDEBUG_PRINT(
				    ("R/W in completion mode, do 0 blocks\n"),
				    DEBUG_DSC);
			} else
				WDCDEBUG_PRINT(("R/W %d blocks %d bytes\n",
				    _3btol(((struct scsi_rw_tape *)
				    sc_xfer->cmd)->len),
				    sc_xfer->datalen), DEBUG_DSC);

			/* DSC will change to buffer availability mode.
			   We reflect this in wdc_atapi_intr. */
			break;

		case ERASE:		/* Media access commands */
		case LOAD:
		case REWIND:
		case SPACE:
		case WRITE_FILEMARKS:
#if 0
		case LOCATE:
		case READ_POSITION:
#endif
			xfer->c_flags |= C_MEDIA_ACCESS;
			break;

		default:
			WDCDEBUG_PRINT(("no media access\n"), DEBUG_DSC);
		}
	}

	wdc_exec_xfer(chp, xfer);
	splx(s);
}