/* * Ask the drive what it's min and max blk sizes are. */ static int st_scsibus_read_block_limits(struct st_softc *st, int flags) { struct scsi_block_limits cmd; struct scsi_block_limits_data block_limits; struct scsipi_periph *periph = st->sc_periph; int error; /* * do a 'Read Block Limits' */ memset(&cmd, 0, sizeof(cmd)); cmd.opcode = READ_BLOCK_LIMITS; /* * do the command, update the global values */ error = scsipi_command(periph, (void *)&cmd, sizeof(cmd), (void *)&block_limits, sizeof(block_limits), ST_RETRIES, ST_CTL_TIME, NULL, flags | XS_CTL_DATA_IN | XS_CTL_DATA_ONSTACK); if (error) return (error); st->blkmin = _2btol(block_limits.min_length); st->blkmax = _3btol(block_limits.max_length); SC_DEBUG(periph, SCSIPI_DB3, ("(%d <= blksize <= %d)\n", st->blkmin, st->blkmax)); return (0); }
int sd_vpd_block_limits(struct sd_softc *sc, int flags) { struct scsi_vpd_disk_limits *pg; int rv; pg = dma_alloc(sizeof(*pg), (ISSET(flags, SCSI_NOSLEEP) ? PR_NOWAIT : PR_WAITOK) | PR_ZERO); if (pg == NULL) return (ENOMEM); rv = scsi_inquire_vpd(sc->sc_link, pg, sizeof(*pg), SI_PG_DISK_LIMITS, flags); if (rv != 0) goto done; if (_2btol(pg->hdr.page_length) == SI_PG_DISK_LIMITS_LEN_THIN) { sc->params.unmap_sectors = _4btol(pg->max_unmap_lba_count); sc->params.unmap_descs = _4btol(pg->max_unmap_desc_count); } else rv = EOPNOTSUPP; done: dma_free(pg, sizeof(*pg)); return (rv); }
/*
 * Fill in the disk parameters for an RBC (simplified direct-access)
 * device using READ CAPACITY plus mode page 6, cross-checking the two.
 * Returns SDGP_RESULT_OK on success, SDGP_RESULT_OFFLINE on failure.
 */
static int
sd_get_simplifiedparms(struct sd_softc *sd)
{
	/* Overlay matching the mode page 6 reply (no block descriptor). */
	struct {
		struct scsi_mode_parameter_header_6 header;
		/* no block descriptor */
		uint8_t pg_code;	/* page code (should be 6) */
		uint8_t pg_length;	/* page length (should be 11) */
		uint8_t wcd;		/* bit0: cache disable */
		uint8_t lbs[2];		/* logical block size */
		uint8_t size[5];	/* number of log. blocks */
		uint8_t pp;		/* power/performance */
		uint8_t flags;
		uint8_t resvd;
	} scsipi_sense;
	struct disk_parms *dp = &sd->sc_params;
	uint64_t blocks;
	int error, blksize;

	/*
	 * sd_read_capacity (ie "read capacity") and mode sense page 6
	 * give the same information. Do both for now, and check
	 * for consistency.
	 * XXX probably differs for removable media
	 */
	dp->blksize = SD_DEFAULT_BLKSIZE;
	if ((blocks = sd_read_capacity(sd, &blksize)) == 0)
		return SDGP_RESULT_OFFLINE;		/* XXX? */

	error = scsi_mode_sense(sd, SMS_DBD, 6, &scsipi_sense.header,
	    sizeof(scsipi_sense));
	if (error != 0)
		return SDGP_RESULT_OFFLINE;		/* XXX? */

	/* Prefer the capacity-reported block size, then the page's. */
	dp->blksize = blksize;
	if (!sd_validate_blksize(dp->blksize))
		dp->blksize = _2btol(scsipi_sense.lbs);
	if (!sd_validate_blksize(dp->blksize))
		dp->blksize = SD_DEFAULT_BLKSIZE;

	/*
	 * Create a pseudo-geometry.
	 */
	dp->heads = 64;
	dp->sectors = 32;
	dp->cyls = blocks / (dp->heads * dp->sectors);
	dp->disksize = _5btol(scsipi_sense.size);
	/* If the two size reports disagree, trust READ CAPACITY. */
	if (dp->disksize <= UINT32_MAX && dp->disksize != blocks) {
		printf("RBC size: mode sense=%llu, get cap=%llu\n",
		    (unsigned long long)dp->disksize,
		    (unsigned long long)blocks);
		dp->disksize = blocks;
	}
	dp->disksize512 = (dp->disksize * dp->blksize) / DEV_BSIZE;

	return SDGP_RESULT_OK;
}
/*
 * Read the SUPPORTED VPD PAGES list and check that both pages needed
 * for thin provisioning (SI_PG_DISK_LIMITS and SI_PG_DISK_THIN) are
 * advertised.  Returns 0 if both are present, EOPNOTSUPP if not,
 * ENOMEM/EIO or a transfer error otherwise.
 */
int
sd_thin_pages(struct sd_softc *sc, int flags)
{
	struct scsi_vpd_hdr *pg;
	size_t len = 0;
	u_int8_t *pages;
	int i, score = 0;
	int rv;

	/* First fetch just the header to learn the list length. */
	pg = dma_alloc(sizeof(*pg),
	    (ISSET(flags, SCSI_NOSLEEP) ? PR_NOWAIT : PR_WAITOK) | PR_ZERO);
	if (pg == NULL)
		return (ENOMEM);

	rv = scsi_inquire_vpd(sc->sc_link, pg, sizeof(*pg),
	    SI_PG_SUPPORTED, flags);
	if (rv != 0)
		goto done;

	len = _2btol(pg->page_length);
	/* Reallocate with room for the page list and fetch everything. */
	dma_free(pg, sizeof(*pg));
	pg = dma_alloc(sizeof(*pg) + len,
	    (ISSET(flags, SCSI_NOSLEEP) ? PR_NOWAIT : PR_WAITOK) | PR_ZERO);
	if (pg == NULL)
		return (ENOMEM);

	rv = scsi_inquire_vpd(sc->sc_link, pg, sizeof(*pg) + len,
	    SI_PG_SUPPORTED, flags);
	if (rv != 0)
		goto done;

	/* The page list follows the header; byte 0 names the page itself. */
	pages = (u_int8_t *)(pg + 1);
	if (pages[0] != SI_PG_SUPPORTED) {
		rv = EIO;
		goto done;
	}

	/* Count the two thin-provisioning pages in the supported list. */
	for (i = 1; i < len; i++) {
		switch (pages[i]) {
		case SI_PG_DISK_LIMITS:
		case SI_PG_DISK_THIN:
			score++;
			break;
		}
	}

	/* Both pages must be advertised for thin provisioning to work. */
	if (score < 2)
		rv = EOPNOTSUPP;

done:
	/* len is 0 here unless the second, larger buffer was allocated. */
	dma_free(pg, sizeof(*pg) + len);
	return (rv);
}
/*
 * Copy a volume tag to a volume_tag struct, converting SCSI byte order
 * to host native byte order in the volume serial number.  The volume
 * label as returned by the changer is transferred to user mode as a
 * nul-terminated string.  Volume labels are truncated at the first
 * space, as suggested by SCSI-2.
 */
static void
copy_voltag(struct changer_voltag *uvoltag, struct volume_tag *voltag)
{
	int n = 0;

	/* Copy label bytes until NUL, blank, or the maximum length. */
	while (n < CH_VOLTAG_MAXLEN) {
		char ch = voltag->vif[n];

		if (ch == '\0' || ch == ' ')
			break;
		uvoltag->cv_volid[n] = ch;
		n++;
	}
	uvoltag->cv_volid[n] = '\0';

	/* Serial number arrives big-endian on the wire. */
	uvoltag->cv_serial = _2btol(voltag->vsn);
}
/*
 * Issue READ CAPACITY(16) and record the disk size, sector size and
 * thin-provisioning (TPE) flag in the softc.  Returns 0 on success,
 * ENOMEM if resources could not be allocated, or the transfer error.
 */
int
sd_read_cap_16(struct sd_softc *sc, int flags)
{
	struct scsi_read_capacity_16 cdb;
	struct scsi_read_cap_data_16 *rdcap;
	struct scsi_xfer *xs;
	int rv = ENOMEM;

	CLR(flags, SCSI_IGNORE_ILLEGAL_REQUEST);

	/* Reply buffer must be DMA-able. */
	rdcap = dma_alloc(sizeof(*rdcap),
	    (ISSET(flags, SCSI_NOSLEEP) ? PR_NOWAIT : PR_WAITOK) | PR_ZERO);
	if (rdcap == NULL)
		return (ENOMEM);

	xs = scsi_xs_get(sc->sc_link, flags | SCSI_DATA_IN | SCSI_SILENT);
	if (xs == NULL)
		goto done;

	/* Build the READ CAPACITY(16) CDB (service action form). */
	bzero(&cdb, sizeof(cdb));
	cdb.opcode = READ_CAPACITY_16;
	cdb.byte2 = SRC16_SERVICE_ACTION;
	_lto4b(sizeof(*rdcap), cdb.length);

	memcpy(xs->cmd, &cdb, sizeof(cdb));
	xs->cmdlen = sizeof(cdb);
	xs->data = (void *)rdcap;
	xs->datalen = sizeof(*rdcap);
	xs->timeout = 20000;

	rv = scsi_xs_sync(xs);
	scsi_xs_put(xs);

	if (rv == 0) {
		/* addr is the last LBA, hence disksize = addr + 1. */
		sc->params.disksize = _8btol(rdcap->addr) + 1;
		sc->params.secsize = _4btol(rdcap->length);
		if (ISSET(_2btol(rdcap->lowest_aligned), READ_CAP_16_TPE))
			SET(sc->flags, SDF_THIN);
		else
			CLR(sc->flags, SDF_THIN);
	}

done:
	dma_free(rdcap, sizeof(*rdcap));
	return (rv);
}
/*
 * Convert a volume tag from its SCSI wire representation into the
 * changer_voltag structure handed to userland.  Per the SCSI-2 spec
 * the tag string stops at the first blank; the result is always
 * NUL-terminated and the volume sequence number is converted from
 * SCSI byte order to host order.
 */
static void
ch_voltag_convert_in(const struct changer_volume_tag *sv,
    struct changer_voltag *cv)
{
	int n;

	memset(cv, 0, sizeof(struct changer_voltag));

	for (n = 0; n < sizeof(sv->volid); n++) {
		char ch = sv->volid[n];

		if (ch == ' ')
			break;
		cv->cv_tag[n] = ch;
	}
	cv->cv_tag[n] = '\0';

	cv->cv_serial = _2btol(sv->volseq);
}
/*
 * Perform a READ ELEMENT STATUS on behalf of the user, and return to
 * the user only the data the user is interested in (i.e. an array of
 * changer_element_status structures)
 */
int
ch_usergetelemstatus(struct ch_softc *sc,
    struct changer_element_status_request *cesr)
{
	struct changer_element_status *user_data = NULL;
	struct read_element_status_header *st_hdr;
	struct read_element_status_page_header *pg_hdr;
	caddr_t desc;
	caddr_t data = NULL;
	size_t size, desclen, udsize;
	int chet = cesr->cesr_type;
	int avail, i, error = 0;
	int want_voltags = (cesr->cesr_flags & CESR_VOLTAGS) ? 1 : 0;

	/*
	 * If there are no elements of the requested type in the changer,
	 * the request is invalid.
	 */
	if (sc->sc_counts[chet] == 0)
		return (EINVAL);

	/*
	 * Request one descriptor for the given element type. This
	 * is used to determine the size of the descriptor so that
	 * we can allocate enough storage for all of them. We assume
	 * that the first one can fit into 1k.
	 */
	size = 1024;
	data = dma_alloc(size, PR_WAITOK);
	error = ch_getelemstatus(sc, sc->sc_firsts[chet], 1, data, size,
	    want_voltags);
	if (error)
		goto done;

	st_hdr = (struct read_element_status_header *)data;
	pg_hdr = (struct read_element_status_page_header *) (st_hdr + 1);
	/* Element descriptor length as reported by the device. */
	desclen = _2btol(pg_hdr->edl);

	dma_free(data, size);

	/*
	 * Reallocate storage for descriptors and get them from the
	 * device.
	 */
	size = sizeof(struct read_element_status_header) +
	    sizeof(struct read_element_status_page_header) +
	    (desclen * sc->sc_counts[chet]);
	data = dma_alloc(size, PR_WAITOK);
	error = ch_getelemstatus(sc, sc->sc_firsts[chet],
	    sc->sc_counts[chet], data, size, want_voltags);
	if (error)
		goto done;

	/*
	 * Fill in the user status array.
	 */
	st_hdr = (struct read_element_status_header *)data;
	pg_hdr = (struct read_element_status_page_header *) (st_hdr + 1);

	/* The device must report exactly the element count we asked for. */
	avail = _2btol(st_hdr->count);
	if (avail != sc->sc_counts[chet]) {
		error = EINVAL;
		goto done;
	}

	user_data = mallocarray(avail, sizeof(struct changer_element_status),
	    M_DEVBUF, M_WAITOK | M_ZERO);
	udsize = avail * sizeof(struct changer_element_status);

	/* Walk the descriptors, converting each into the user format. */
	desc = (caddr_t)(pg_hdr + 1);
	for (i = 0; i < avail; ++i) {
		struct changer_element_status *ces = &(user_data[i]);
		copy_element_status(pg_hdr->flags,
		    (struct read_element_status_descriptor *)desc, ces);
		desc += desclen;
	}

	/* Copy array out to userspace. */
	error = copyout(user_data, cesr->cesr_data, udsize);

done:
	if (data != NULL)
		dma_free(data, size);
	if (user_data != NULL)
		free(user_data, M_DEVBUF, udsize);
	return (error);
}
/*
 * Probe and register the scanner at `devnam'.  If it is already on the
 * global device list the existing entry is returned; otherwise the
 * device is opened, verified via INQUIRY to be a supported IBM/Ricoh
 * scanner, queried for its window defaults, and a new Ibm_Device is
 * prepended to the list.  On success *devp (if non-NULL) receives the
 * device and SANE_STATUS_GOOD is returned.
 *
 * Fixes vs. the previous version: the model string buffer is now sized
 * for both INQUIRY fields plus NUL (it was a fixed 17 bytes while up to
 * sizeof(product)+sizeof(revision)+1 bytes are written), it is fully
 * zeroed (memset used sizeof(char *) before), the revision is appended
 * at offset sizeof(product) instead of sizeof(revision) so it no longer
 * overwrites the product text, and malloc failure is handled.
 */
static SANE_Status
attach (const char *devnam, Ibm_Device ** devp)
{
  SANE_Status status;
  Ibm_Device *dev;
  int fd;
  struct inquiry_data ibuf;
  struct measurements_units_page mup;
  struct ibm_window_data wbuf;
  size_t buf_size;
  char *str;

  DBG (11, ">> attach\n");

  /* Already attached?  Reuse the existing entry. */
  for (dev = first_dev; dev; dev = dev->next)
    {
      if (strcmp (dev->sane.name, devnam) == 0)
	{
	  if (devp)
	    *devp = dev;
	  return (SANE_STATUS_GOOD);
	}
    }

  DBG (3, "attach: opening %s\n", devnam);
  status = sanei_scsi_open (devnam, &fd, NULL, NULL);
  if (status != SANE_STATUS_GOOD)
    {
      DBG (1, "attach: open failed: %s\n", sane_strstatus (status));
      return (status);
    }

  DBG (3, "attach: sending INQUIRY\n");
  memset (&ibuf, 0, sizeof (ibuf));
  buf_size = sizeof (ibuf);
  /* next line by mf */
  ibuf.byte2 = 2;
  status = inquiry (fd, &ibuf, &buf_size);
  if (status != SANE_STATUS_GOOD)
    {
      DBG (1, "attach: inquiry failed: %s\n", sane_strstatus (status));
      sanei_scsi_close (fd);
      return (status);
    }

  if (ibuf.devtype != 6)
    {
      DBG (1, "attach: device \"%s\" is not a scanner\n", devnam);
      sanei_scsi_close (fd);
      return (SANE_STATUS_INVAL);
    }

  /* Only the IBM 2456 and Ricoh IS410/IS420/IS430 are supported. */
  if (!((strncmp ((char *) ibuf.vendor, "IBM", 3) == 0
	 && strncmp ((char *) ibuf.product, "2456", 4) == 0)
	|| (strncmp ((char *) ibuf.vendor, "RICOH", 5) == 0
	    && strncmp ((char *) ibuf.product, "IS420", 5) == 0)
	|| (strncmp ((char *) ibuf.vendor, "RICOH", 5) == 0
	    && strncmp ((char *) ibuf.product, "IS410", 5) == 0)
	|| (strncmp ((char *) ibuf.vendor, "RICOH", 5) == 0
	    && strncmp ((char *) ibuf.product, "IS430", 5) == 0)))
    {
      DBG (1, "attach: device \"%s\" doesn't look like a scanner I know\n",
	   devnam);
      sanei_scsi_close (fd);
      return (SANE_STATUS_INVAL);
    }

  DBG (3, "attach: sending TEST_UNIT_READY\n");
  status = test_unit_ready (fd);
  if (status != SANE_STATUS_GOOD)
    {
      DBG (1, "attach: test unit ready failed (%s)\n",
	   sane_strstatus (status));
      sanei_scsi_close (fd);
      return (status);
    }

  /*
   * Causes a problem with RICOH IS420
   * Ignore this function ... seems to work ok
   * Suggested to George Murphy [email protected] by henning
   *
   * NOTE(review): with &&, OBJECT POSITION is skipped for *every*
   * RICOH device, not only the IS420 the comment mentions — confirm
   * whether || was intended before changing.
   */
  if (strncmp ((char *) ibuf.vendor, "RICOH", 5) != 0
      && strncmp ((char *) ibuf.product, "IS420", 5) != 0)
    {
      DBG (3, "attach: sending OBJECT POSITION\n");
      status = object_position (fd, OBJECT_POSITION_UNLOAD);
      if (status != SANE_STATUS_GOOD)
	{
	  DBG (1, "attach: OBJECT POSTITION failed\n");
	  sanei_scsi_close (fd);
	  return (SANE_STATUS_INVAL);
	}
    }

  /* Default measurement setup: inches with the standard divisor. */
  memset (&mup, 0, sizeof (mup));
  mup.page_code = MEASUREMENTS_PAGE;
  mup.parameter_length = 0x06;
  mup.bmu = INCHES;
  mup.mud[0] = (DEFAULT_MUD >> 8) & 0xff;
  mup.mud[1] = (DEFAULT_MUD & 0xff);

#if 0
  DBG (3, "attach: sending MODE SELECT\n");
  status = mode_select (fd, (struct mode_pages *) &mup);
  if (status != SANE_STATUS_GOOD)
    {
      DBG (1, "attach: MODE_SELECT failed\n");
      sanei_scsi_close (fd);
      return (SANE_STATUS_INVAL);
    }
#endif

#if 0
  DBG (3, "attach: sending MODE SENSE\n");
  memset (&mup, 0, sizeof (mup));
  status = mode_sense (fd, (struct mode_pages *) &mup,
		       PC_CURRENT | MEASUREMENTS_PAGE);
  if (status != SANE_STATUS_GOOD)
    {
      DBG (1, "attach: MODE_SENSE failed\n");
      sanei_scsi_close (fd);
      return (SANE_STATUS_INVAL);
    }
#endif

  DBG (3, "attach: sending GET WINDOW\n");
  memset (&wbuf, 0, sizeof (wbuf));
  status = get_window (fd, &wbuf);
  if (status != SANE_STATUS_GOOD)
    {
      DBG (1, "attach: GET_WINDOW failed %d\n", status);
      sanei_scsi_close (fd);
      DBG (11, "<< attach\n");
      return (SANE_STATUS_INVAL);
    }

  sanei_scsi_close (fd);

  dev = malloc (sizeof (*dev));
  if (!dev)
    return (SANE_STATUS_NO_MEM);
  memset (dev, 0, sizeof (*dev));

  dev->sane.name = strdup (devnam);
  dev->sane.vendor = "IBM";

  /*
   * Build the model string as "<product><revision>".  The buffer must
   * hold both INQUIRY fields plus a terminating NUL.
   */
  str = malloc (sizeof (ibuf.product) + sizeof (ibuf.revision) + 1);
  if (!str)
    {
      free (dev);
      return (SANE_STATUS_NO_MEM);
    }
  memset (str, 0, sizeof (ibuf.product) + sizeof (ibuf.revision) + 1);
  strncpy (str, (char *) ibuf.product, sizeof (ibuf.product));
  strncpy (str + sizeof (ibuf.product), (char *) ibuf.revision,
	   sizeof (ibuf.revision));
  str[sizeof (ibuf.product) + sizeof (ibuf.revision)] = '\0';
  dev->sane.model = str;
  dev->sane.type = "flatbed scanner";

  DBG (5, "dev->sane.name = %s\n", dev->sane.name);
  DBG (5, "dev->sane.vendor = %s\n", dev->sane.vendor);
  DBG (5, "dev->sane.model = %s\n", dev->sane.model);
  DBG (5, "dev->sane.type = %s\n", dev->sane.type);

  dev->info.xres_default = _2btol (wbuf.x_res);
  dev->info.yres_default = _2btol (wbuf.y_res);
  dev->info.image_mode_default = wbuf.image_comp;

  /* if you throw the MRIF bit the brighness control reverses too */
  /* so I reverse the reversal in software for symmetry's sake */
  /* I should make this into an option */
  if (wbuf.image_comp == IBM_GRAYSCALE
      || wbuf.image_comp == IBM_DITHERED_MONOCHROME)
    {
      dev->info.brightness_default = 256 - wbuf.brightness;
      /* if (is50)
	 dev->info.contrast_default = wbuf.contrast;
	 else */
      dev->info.contrast_default = 256 - wbuf.contrast;
    }
  else /* wbuf.image_comp == IBM_BINARY_MONOCHROME */
    {
      dev->info.brightness_default = wbuf.brightness;
      dev->info.contrast_default = wbuf.contrast;
    }

  /* da rivedere
     dev->info.adf_default = wbuf.adf_state; */
  dev->info.adf_default = ADF_UNUSED;
  /*
   * NOTE(review): this second store overwrites ADF_UNUSED immediately;
   * it looks like it was meant for a paper-size field — confirm
   * against struct Ibm_Info before changing.
   */
  dev->info.adf_default = IBM_PAPER_USER_DEFINED;

#if 1
  dev->info.bmu = mup.bmu;
  dev->info.mud = _2btol (mup.mud);
  if (dev->info.mud == 0)
    {
      /* The Ricoh says it uses points as default Basic Measurement Unit */
      /* but gives a Measurement Unit Divisor of zero */
      /* So, we set it to the default (SCSI-standard) of 1200 */
      /* with BMU in inches, i.e. 1200 points equal 1 inch */
      dev->info.bmu = INCHES;
      dev->info.mud = DEFAULT_MUD;
    }
#else
  dev->info.bmu = INCHES;
  dev->info.mud = DEFAULT_MUD;
#endif

  DBG (5, "xres_default=%d\n", dev->info.xres_default);
  DBG (5, "xres_range.max=%d\n", dev->info.xres_range.max);
  DBG (5, "xres_range.min=%d\n", dev->info.xres_range.min);
  DBG (5, "yres_default=%d\n", dev->info.yres_default);
  DBG (5, "yres_range.max=%d\n", dev->info.yres_range.max);
  DBG (5, "yres_range.min=%d\n", dev->info.yres_range.min);
  DBG (5, "x_range.max=%d\n", dev->info.x_range.max);
  DBG (5, "y_range.max=%d\n", dev->info.y_range.max);
  DBG (5, "image_mode=%d\n", dev->info.image_mode_default);
  DBG (5, "brightness=%d\n", dev->info.brightness_default);
  DBG (5, "contrast=%d\n", dev->info.contrast_default);
  DBG (5, "adf_state=%d\n", dev->info.adf_default);
  DBG (5, "bmu=%d\n", dev->info.bmu);
  DBG (5, "mud=%d\n", dev->info.mud);

  ++num_devices;
  dev->next = first_dev;
  first_dev = dev;

  if (devp)
    *devp = dev;
  DBG (11, "<< attach\n");
  return (SANE_STATUS_GOOD);
}
/*
 * Perform special action on behalf of the user.
 * Knows about the internals of this device
 *
 * Fix vs. previous version: the dead, empty `if (error == 0) { }`
 * left over after setdisklabel() (the label is never written back to
 * a CD) has been removed; behavior is unchanged.
 */
int
cdioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct proc *p)
{
	struct cd_softc *cd;
	struct disklabel *lp;
	int part = DISKPART(dev);
	int error = 0;

	/* Look up the unit; takes a device reference released at exit. */
	cd = cdlookup(DISKUNIT(dev));
	if (cd == NULL)
		return ENXIO;

	SC_DEBUG(cd->sc_link, SDEV_DB2, ("cdioctl 0x%lx\n", cmd));

	/*
	 * If the device is not valid.. abandon ship
	 */
	if ((cd->sc_link->flags & SDEV_MEDIA_LOADED) == 0) {
		switch (cmd) {
		/*
		 * These commands are allowed without loaded media, but
		 * only on the raw partition.
		 */
		case DIOCWLABEL:
		case DIOCLOCK:
		case DIOCEJECT:
		case SCIOCIDENTIFY:
		case SCIOCCOMMAND:
		case SCIOCDEBUG:
		case CDIOCLOADUNLOAD:
		case SCIOCRESET:
		case CDIOCGETVOL:
		case CDIOCSETVOL:
		case CDIOCSETMONO:
		case CDIOCSETSTEREO:
		case CDIOCSETMUTE:
		case CDIOCSETLEFT:
		case CDIOCSETRIGHT:
		case CDIOCCLOSE:
		case CDIOCEJECT:
		case CDIOCALLOW:
		case CDIOCPREVENT:
		case CDIOCSETDEBUG:
		case CDIOCCLRDEBUG:
		case CDIOCRESET:
		case DVD_AUTH:
		case DVD_READ_STRUCT:
		case MTIOCTOP:
			if (part == RAW_PART)
				break;
			/* FALLTHROUGH */
		default:
			if ((cd->sc_link->flags & SDEV_OPEN) == 0)
				error = ENODEV;
			else
				error = EIO;
			goto exit;
		}
	}

	switch (cmd) {
	case DIOCRLDINFO:
		lp = malloc(sizeof(*lp), M_TEMP, M_WAITOK);
		cdgetdisklabel(dev, cd, lp, 0);
		bcopy(lp, cd->sc_dk.dk_label, sizeof(*lp));
		free(lp, M_TEMP);
		break;

	case DIOCGDINFO:
	case DIOCGPDINFO:
		*(struct disklabel *)addr = *(cd->sc_dk.dk_label);
		break;

	case DIOCGPART:
		((struct partinfo *)addr)->disklab = cd->sc_dk.dk_label;
		((struct partinfo *)addr)->part =
		    &cd->sc_dk.dk_label->d_partitions[DISKPART(dev)];
		break;

	case DIOCWDINFO:
	case DIOCSDINFO:
		if ((flag & FWRITE) == 0) {
			error = EBADF;
			break;
		}
		if ((error = cdlock(cd)) != 0)
			break;
		cd->flags |= CDF_LABELLING;
		error = setdisklabel(cd->sc_dk.dk_label,
		    (struct disklabel *)addr, /*cd->sc_dk.dk_openmask : */0);
		cd->flags &= ~CDF_LABELLING;
		cdunlock(cd);
		break;

	case DIOCWLABEL:
		/* The label on a CD is never writable. */
		error = EBADF;
		break;

	case CDIOCPLAYTRACKS: {
		struct ioc_play_track *args = (struct ioc_play_track *)addr;

		if ((error = cd_set_pa_immed(cd, 0)) != 0)
			break;
		error = cd_play_tracks(cd, args->start_track,
		    args->start_index, args->end_track, args->end_index);
		break;
	}
	case CDIOCPLAYMSF: {
		struct ioc_play_msf *args = (struct ioc_play_msf *)addr;

		if ((error = cd_set_pa_immed(cd, 0)) != 0)
			break;
		error = cd_play_msf(cd, args->start_m, args->start_s,
		    args->start_f, args->end_m, args->end_s, args->end_f);
		break;
	}
	case CDIOCPLAYBLOCKS: {
		struct ioc_play_blocks *args = (struct ioc_play_blocks *)addr;

		if ((error = cd_set_pa_immed(cd, 0)) != 0)
			break;
		error = cd_play(cd, args->blk, args->len);
		break;
	}
	case CDIOCREADSUBCHANNEL: {
		struct ioc_read_subchannel *args =
		    (struct ioc_read_subchannel *)addr;
		struct cd_sub_channel_info data;
		int len = args->data_len;

		if (len > sizeof(data) ||
		    len < sizeof(struct cd_sub_channel_header)) {
			error = EINVAL;
			break;
		}
		error = cd_read_subchannel(cd, args->address_format,
		    args->data_format, args->track, &data, len);
		if (error)
			break;
		/* Copy out no more than the device actually returned. */
		len = min(len, _2btol(data.header.data_len) +
		    sizeof(struct cd_sub_channel_header));
		error = copyout(&data, args->data, len);
		break;
	}
	case CDIOREADTOCHEADER: {
		struct ioc_toc_header th;

		if ((error = cd_read_toc(cd, 0, 0, &th, sizeof(th), 0)) != 0)
			break;
		/* Some ATAPI drives return the TOC in little endian. */
		if (cd->sc_link->quirks & ADEV_LITTLETOC)
			th.len = letoh16(th.len);
		else
			th.len = betoh16(th.len);
		bcopy(&th, addr, sizeof(th));
		break;
	}
	case CDIOREADTOCENTRYS: {
		struct cd_toc *toc;
		struct ioc_read_toc_entry *te =
		    (struct ioc_read_toc_entry *)addr;
		struct ioc_toc_header *th;
		struct cd_toc_entry *cte;
		int len = te->data_len;
		int ntracks;

		toc = malloc(sizeof(*toc), M_TEMP, M_WAITOK | M_ZERO);

		th = &toc->header;

		if (len > sizeof(toc->entries) ||
		    len < sizeof(struct cd_toc_entry)) {
			free(toc, M_TEMP);
			error = EINVAL;
			break;
		}
		error = cd_read_toc(cd, te->address_format, te->starting_track,
		    toc, len + sizeof(struct ioc_toc_header), 0);
		if (error) {
			free(toc, M_TEMP);
			break;
		}
		/* Convert each entry's address to host byte order. */
		if (te->address_format == CD_LBA_FORMAT)
			for (ntracks = th->ending_track -
			    th->starting_track + 1; ntracks >= 0; ntracks--) {
				cte = &toc->entries[ntracks];
				cte->addr_type = CD_LBA_FORMAT;
				if (cd->sc_link->quirks & ADEV_LITTLETOC) {
#if BYTE_ORDER == BIG_ENDIAN
					swap16_multi((u_int16_t *)&cte->addr,
					    sizeof(cte->addr) / 2);
#endif
				} else
					cte->addr.lba = betoh32(cte->addr.lba);
			}
		if (cd->sc_link->quirks & ADEV_LITTLETOC) {
			th->len = letoh16(th->len);
		} else
			th->len = betoh16(th->len);
		len = min(len, th->len - (sizeof(th->starting_track) +
		    sizeof(th->ending_track)));

		error = copyout(toc->entries, te->data, len);
		free(toc, M_TEMP);
		break;
	}
	case CDIOREADMSADDR: {
		struct cd_toc *toc;
		int sessno = *(int *)addr;
		struct cd_toc_entry *cte;

		/* Only the last session is supported. */
		if (sessno != 0) {
			error = EINVAL;
			break;
		}

		toc = malloc(sizeof(*toc), M_TEMP, M_WAITOK | M_ZERO);

		error = cd_read_toc(cd, 0, 0, toc,
		    sizeof(struct ioc_toc_header) + sizeof(struct cd_toc_entry),
		    0x40 /* control word for "get MS info" */);
		if (error) {
			free(toc, M_TEMP);
			break;
		}

		cte = &toc->entries[0];
		if (cd->sc_link->quirks & ADEV_LITTLETOC) {
#if BYTE_ORDER == BIG_ENDIAN
			swap16_multi((u_int16_t *)&cte->addr,
			    sizeof(cte->addr) / 2);
#endif
		} else
			cte->addr.lba = betoh32(cte->addr.lba);
		if (cd->sc_link->quirks & ADEV_LITTLETOC)
			toc->header.len = letoh16(toc->header.len);
		else
			toc->header.len = betoh16(toc->header.len);

		*(int *)addr = (toc->header.len >= 10 && cte->track > 1) ?
		    cte->addr.lba : 0;
		free(toc, M_TEMP);
		break;
	}
	case CDIOCSETPATCH: {
		struct ioc_patch *arg = (struct ioc_patch *)addr;

		error = cd_setchan(cd, arg->patch[0], arg->patch[1],
		    arg->patch[2], arg->patch[3], 0);
		break;
	}
	case CDIOCGETVOL: {
		struct ioc_vol *arg = (struct ioc_vol *)addr;

		error = cd_getvol(cd, arg, 0);
		break;
	}
	case CDIOCSETVOL: {
		struct ioc_vol *arg = (struct ioc_vol *)addr;

		error = cd_setvol(cd, arg, 0);
		break;
	}
	case CDIOCSETMONO:
		error = cd_setchan(cd, BOTH_CHANNEL, BOTH_CHANNEL,
		    MUTE_CHANNEL, MUTE_CHANNEL, 0);
		break;

	case CDIOCSETSTEREO:
		error = cd_setchan(cd, LEFT_CHANNEL, RIGHT_CHANNEL,
		    MUTE_CHANNEL, MUTE_CHANNEL, 0);
		break;

	case CDIOCSETMUTE:
		error = cd_setchan(cd, MUTE_CHANNEL, MUTE_CHANNEL,
		    MUTE_CHANNEL, MUTE_CHANNEL, 0);
		break;

	case CDIOCSETLEFT:
		error = cd_setchan(cd, LEFT_CHANNEL, LEFT_CHANNEL,
		    MUTE_CHANNEL, MUTE_CHANNEL, 0);
		break;

	case CDIOCSETRIGHT:
		error = cd_setchan(cd, RIGHT_CHANNEL, RIGHT_CHANNEL,
		    MUTE_CHANNEL, MUTE_CHANNEL, 0);
		break;

	case CDIOCRESUME:
		error = cd_pause(cd, 1);
		break;

	case CDIOCPAUSE:
		error = cd_pause(cd, 0);
		break;

	case CDIOCSTART:
		error = scsi_start(cd->sc_link, SSS_START, 0);
		break;

	case CDIOCSTOP:
		error = scsi_start(cd->sc_link, SSS_STOP, 0);
		break;

close_tray:
	case CDIOCCLOSE:
		error = scsi_start(cd->sc_link, SSS_START|SSS_LOEJ,
		    SCSI_IGNORE_NOT_READY | SCSI_IGNORE_MEDIA_CHANGE);
		break;

	case MTIOCTOP:
		/* MTRETEN is abused to mean "close the tray". */
		if (((struct mtop *)addr)->mt_op == MTRETEN)
			goto close_tray;
		if (((struct mtop *)addr)->mt_op != MTOFFL) {
			error = EIO;
			break;
		}
		/* FALLTHROUGH */
	case CDIOCEJECT: /* FALLTHROUGH */
	case DIOCEJECT:
		/* Actual ejection happens on last close. */
		cd->sc_link->flags |= SDEV_EJECTING;
		break;
	case CDIOCALLOW:
		error = scsi_prevent(cd->sc_link, PR_ALLOW, 0);
		break;
	case CDIOCPREVENT:
		error = scsi_prevent(cd->sc_link, PR_PREVENT, 0);
		break;
	case DIOCLOCK:
		error = scsi_prevent(cd->sc_link,
		    (*(int *)addr) ? PR_PREVENT : PR_ALLOW, 0);
		break;
	case CDIOCSETDEBUG:
		cd->sc_link->flags |= (SDEV_DB1 | SDEV_DB2);
		break;
	case CDIOCCLRDEBUG:
		cd->sc_link->flags &= ~(SDEV_DB1 | SDEV_DB2);
		break;
	case CDIOCRESET:
	case SCIOCRESET:
		error = cd_reset(cd);
		break;
	case CDIOCLOADUNLOAD: {
		struct ioc_load_unload *args = (struct ioc_load_unload *)addr;

		error = cd_load_unload(cd, args->options, args->slot);
		break;
	}

	case DVD_AUTH:
		error = dvd_auth(cd, (union dvd_authinfo *)addr);
		break;
	case DVD_READ_STRUCT:
		error = dvd_read_struct(cd, (union dvd_struct *)addr);
		break;
	default:
		/* Pass anything else to the generic SCSI ioctl handler. */
		if (DISKPART(dev) != RAW_PART) {
			error = ENOTTY;
			break;
		}
		error = scsi_do_ioctl(cd->sc_link, dev, cmd, addr, flag, p);
		break;
	}

exit:
	device_unref(&cd->sc_dev);
	return (error);
}
/*
 * Ask the device about itself and fill in the parameters in our
 * softc.
 */
static int
ch_get_params(struct ch_softc *sc, int scsiflags)
{
	/* Buffer overlay shared by both mode sense requests below. */
	struct scsi_mode_sense_data {
		struct scsi_mode_parameter_header_6 header;
		union {
			struct page_element_address_assignment ea;
			struct page_transport_geometry_parameters tg;
			struct page_device_capabilities cap;
		} pages;
	} sense_data;
	int error, from;
	u_int8_t *moves, *exchanges;

	/*
	 * Grab info from the element address assignment page.
	 */
	memset(&sense_data, 0, sizeof(sense_data));
	error = scsipi_mode_sense(sc->sc_periph, SMS_DBD, 0x1d,
	    &sense_data.header, sizeof(sense_data), scsiflags, CHRETRIES,
	    6000);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "could not sense element address page\n");
		return (error);
	}
	/* First element address and element count per element type. */
	sc->sc_firsts[CHET_MT] = _2btol(sense_data.pages.ea.mtea);
	sc->sc_counts[CHET_MT] = _2btol(sense_data.pages.ea.nmte);
	sc->sc_firsts[CHET_ST] = _2btol(sense_data.pages.ea.fsea);
	sc->sc_counts[CHET_ST] = _2btol(sense_data.pages.ea.nse);
	sc->sc_firsts[CHET_IE] = _2btol(sense_data.pages.ea.fieea);
	sc->sc_counts[CHET_IE] = _2btol(sense_data.pages.ea.niee);
	sc->sc_firsts[CHET_DT] = _2btol(sense_data.pages.ea.fdtea);
	sc->sc_counts[CHET_DT] = _2btol(sense_data.pages.ea.ndte);

	/* XXX ask for transport geometry page XXX */

	/*
	 * Grab info from the capabilities page.
	 */
	memset(&sense_data, 0, sizeof(sense_data));
	/*
	 * XXX: Note: not all changers can deal with disabled block descriptors
	 */
	error = scsipi_mode_sense(sc->sc_periph, SMS_DBD, 0x1f,
	    &sense_data.header, sizeof(sense_data), scsiflags, CHRETRIES,
	    6000);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "could not sense capabilities page\n");
		return (error);
	}
	memset(sc->sc_movemask, 0, sizeof(sc->sc_movemask));
	memset(sc->sc_exchangemask, 0, sizeof(sc->sc_exchangemask));
	/*
	 * The move/exchange capability bytes are consecutive fields, one
	 * per source element type, so index them as arrays.
	 */
	moves = &sense_data.pages.cap.move_from_mt;
	exchanges = &sense_data.pages.cap.exchange_with_mt;
	for (from = CHET_MT; from <= CHET_DT; ++from) {
		sc->sc_movemask[from] = moves[from];
		sc->sc_exchangemask[from] = exchanges[from];
	}

#ifdef CH_AUTOMATIC_IELEM_POLICY
	/*
	 * If we need to do an Init-Element-Status,
	 * do that now that we know what's in the changer.
	 */
	if ((scsiflags & XS_CTL_IGNORE_MEDIA_CHANGE) == 0) {
		if ((sc->sc_periph->periph_flags & PERIPH_MEDIA_LOADED) == 0)
			error = ch_ielem(sc);
		if (error == 0)
			sc->sc_periph->periph_flags |= PERIPH_MEDIA_LOADED;
		else
			sc->sc_periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
	}
#endif
	return (error);
}
static int sd_get_parms_page5(struct sd_softc *sd, struct disk_parms *dp) { struct sd_mode_sense_data scsipi_sense; union scsi_disk_pages *pages; size_t poffset; int byte2, error; byte2 = SMS_DBD; again: memset(&scsipi_sense, 0, sizeof(scsipi_sense)); error = scsi_mode_sense(sd, 0, 5, &scsipi_sense.header, (byte2 ? 0 : sizeof(scsipi_sense.blk_desc)) + sizeof(scsipi_sense.pages.flex_geometry)); if (error) { if (byte2 == SMS_DBD) { /* No result; try once more with DBD off */ byte2 = 0; goto again; } return error; } poffset = sizeof(scsipi_sense.header); poffset += scsipi_sense.header.blk_desc_len; if (poffset > sizeof(scsipi_sense) - sizeof(pages->flex_geometry)) return ERESTART; pages = (void *)((u_long)&scsipi_sense + poffset); #if 0 { size_t i; u_int8_t *p; printf("page 5 sense:"); for (i = sizeof(scsipi_sense), p = (void *)&scsipi_sense; i; i--, p++) printf(" %02x", *p); printf("\n"); printf("page 5 pg_code=%d sense=%p/%p\n", pages->flex_geometry.pg_code, &scsipi_sense, pages); } #endif if ((pages->flex_geometry.pg_code & PGCODE_MASK) != 5) return ERESTART; dp->heads = pages->flex_geometry.nheads; dp->cyls = _2btol(pages->flex_geometry.ncyl); dp->sectors = pages->flex_geometry.ph_sec_tr; if (dp->heads == 0 || dp->cyls == 0 || dp->sectors == 0) return ERESTART; dp->rot_rate = _2btol(pages->rigid_geometry.rpm); if (dp->rot_rate == 0) dp->rot_rate = 3600; #if 0 printf("page 5 ok\n"); #endif return 0; }
/*
 * Fetch disk geometry from mode page 4 (rigid disk geometry), first
 * with block descriptors disabled and retrying with them enabled.
 * Returns 0 on success, ERESTART if the page is unusable, or the
 * transfer error.
 */
static int
sd_get_parms_page4(struct sd_softc *sd, struct disk_parms *dp)
{
	struct sd_mode_sense_data scsipi_sense;
	union scsi_disk_pages *pages;
	size_t poffset;
	int byte2, error;

	byte2 = SMS_DBD;
again:
	memset(&scsipi_sense, 0, sizeof(scsipi_sense));
	/* With DBD off the reply carries a block descriptor as well. */
	error = scsi_mode_sense(sd, byte2, 4, &scsipi_sense.header,
	    (byte2 ? 0 : sizeof(scsipi_sense.blk_desc)) +
	    sizeof(scsipi_sense.pages.rigid_geometry));
	if (error) {
		if (byte2 == SMS_DBD) {
			/* No result; try once more with DBD off */
			byte2 = 0;
			goto again;
		}
		return error;
	}

	/* Locate the page behind the header and any block descriptor. */
	poffset = sizeof(scsipi_sense.header);
	poffset += scsipi_sense.header.blk_desc_len;
	if (poffset > sizeof(scsipi_sense) - sizeof(pages->rigid_geometry))
		return ERESTART;

	pages = (void *)((u_long)&scsipi_sense + poffset);
#if 0
	{
		size_t i;
		u_int8_t *p;

		printf("page 4 sense:");
		for (i = sizeof(scsipi_sense), p = (void *)&scsipi_sense;
		    i; i--, p++)
			printf(" %02x", *p);
		printf("\n");
		printf("page 4 pg_code=%d sense=%p/%p\n",
		    pages->rigid_geometry.pg_code, &scsipi_sense, pages);
	}
#endif

	if ((pages->rigid_geometry.pg_code & PGCODE_MASK) != 4)
		return ERESTART;

	/*
	 * KLUDGE!! (for zone recorded disks)
	 * give a number of sectors so that sec * trks * cyls
	 * is <= disk_size
	 * can lead to wasted space! THINK ABOUT THIS !
	 */
	dp->heads = pages->rigid_geometry.nheads;
	dp->cyls = _3btol(pages->rigid_geometry.ncyl);
	if (dp->heads == 0 || dp->cyls == 0)
		return ERESTART;
	/* Derive sectors-per-track from the total size. */
	dp->sectors = dp->disksize / (dp->heads * dp->cyls);	/* XXX */

	/* Default to 3600 rpm when the device reports none. */
	dp->rot_rate = _2btol(pages->rigid_geometry.rpm);
	if (dp->rot_rate == 0)
		dp->rot_rate = 3600;
#if 0
	printf("page 4 ok\n");
#endif
	return 0;
}
/* * Ask the device about itself and fill in the parameters in our * softc. */ int ch_get_params(struct ch_softc *sc, int flags) { union scsi_mode_sense_buf *data; struct page_element_address_assignment *ea; struct page_device_capabilities *cap; int error, from; u_int8_t *moves, *exchanges; data = dma_alloc(sizeof(*data), PR_NOWAIT); if (data == NULL) return (ENOMEM); /* * Grab info from the element address assignment page (0x1d). */ error = scsi_do_mode_sense(sc->sc_link, 0x1d, data, (void **)&ea, NULL, NULL, NULL, sizeof(*ea), flags, NULL); if (error == 0 && ea == NULL) error = EIO; if (error != 0) { #ifdef CHANGER_DEBUG printf("%s: could not sense element address page\n", sc->sc_dev.dv_xname); #endif dma_free(data, sizeof(*data)); return (error); } sc->sc_firsts[CHET_MT] = _2btol(ea->mtea); sc->sc_counts[CHET_MT] = _2btol(ea->nmte); sc->sc_firsts[CHET_ST] = _2btol(ea->fsea); sc->sc_counts[CHET_ST] = _2btol(ea->nse); sc->sc_firsts[CHET_IE] = _2btol(ea->fieea); sc->sc_counts[CHET_IE] = _2btol(ea->niee); sc->sc_firsts[CHET_DT] = _2btol(ea->fdtea); sc->sc_counts[CHET_DT] = _2btol(ea->ndte); /* XXX Ask for transport geometry page. */ /* * Grab info from the capabilities page (0x1f). */ error = scsi_do_mode_sense(sc->sc_link, 0x1f, data, (void **)&cap, NULL, NULL, NULL, sizeof(*cap), flags, NULL); if (cap == NULL) error = EIO; if (error != 0) { #ifdef CHANGER_DEBUG printf("%s: could not sense capabilities page\n", sc->sc_dev.dv_xname); #endif dma_free(data, sizeof(*data)); return (error); } bzero(sc->sc_movemask, sizeof(sc->sc_movemask)); bzero(sc->sc_exchangemask, sizeof(sc->sc_exchangemask)); moves = &cap->move_from_mt; exchanges = &cap->exchange_with_mt; for (from = CHET_MT; from <= CHET_DT; ++from) { sc->sc_movemask[from] = moves[from]; sc->sc_exchangemask[from] = exchanges[from]; } sc->sc_link->flags |= SDEV_MEDIA_LOADED; dma_free(data, sizeof(*data)); return (0); }
/*
 * Fill out the disk parameter structure. Return SDGP_RESULT_OK if the
 * structure is correctly filled in, SDGP_RESULT_OFFLINE otherwise. The caller
 * is responsible for clearing the SDEV_MEDIA_LOADED flag if the structure
 * cannot be completed.
 */
int
sd_get_parms(struct sd_softc *sc, struct disk_parms *dp, int flags)
{
	union scsi_mode_sense_buf *buf = NULL;
	struct page_rigid_geometry *rigid = NULL;
	struct page_flex_geometry *flex = NULL;
	struct page_reduced_geometry *reduced = NULL;
	u_char *page0 = NULL;
	u_int32_t heads = 0, sectors = 0, cyls = 0, secsize = 0;
	int err = 0, big;

	if (sd_size(sc, flags) != 0)
		return (SDGP_RESULT_OFFLINE);

	if (ISSET(sc->flags, SDF_THIN) && sd_thin_params(sc, flags) != 0) {
		/* we dont know the unmap limits, so we cant use thin shizz */
		CLR(sc->flags, SDF_THIN);
	}

	/* Allocation failure is non-fatal: fall through to validation. */
	buf = dma_alloc(sizeof(*buf), PR_NOWAIT);
	if (buf == NULL)
		goto validate;

	/*
	 * Ask for page 0 (vendor specific) mode sense data to find
	 * READONLY info. The only thing USB devices will ask for.
	 */
	err = scsi_do_mode_sense(sc->sc_link, 0, buf, (void **)&page0,
	    NULL, NULL, NULL, 1, flags | SCSI_SILENT, &big);
	if (err == 0) {
		if (big && buf->hdr_big.dev_spec & SMH_DSP_WRITE_PROT)
			SET(sc->sc_link->flags, SDEV_READONLY);
		else if (!big && buf->hdr.dev_spec & SMH_DSP_WRITE_PROT)
			SET(sc->sc_link->flags, SDEV_READONLY);
		else
			CLR(sc->sc_link->flags, SDEV_READONLY);
	}

	/*
	 * Many UMASS devices choke when asked about their geometry. Most
	 * don't have a meaningful geometry anyway, so just fake it if
	 * scsi_size() worked.
	 */
	if ((sc->sc_link->flags & SDEV_UMASS) && (dp->disksize > 0))
		goto validate;

	switch (sc->sc_link->inqdata.device & SID_TYPE) {
	case T_OPTICAL:
		/* No more information needed or available. */
		break;

	case T_RDIRECT:
		/* T_RDIRECT supports only PAGE_REDUCED_GEOMETRY (6). */
		err = scsi_do_mode_sense(sc->sc_link, PAGE_REDUCED_GEOMETRY,
		    buf, (void **)&reduced, NULL, NULL, &secsize,
		    sizeof(*reduced), flags | SCSI_SILENT, NULL);
		if (!err && reduced &&
		    DISK_PGCODE(reduced, PAGE_REDUCED_GEOMETRY)) {
			if (dp->disksize == 0)
				dp->disksize = _5btol(reduced->sectors);
			if (secsize == 0)
				secsize = _2btol(reduced->bytes_s);
		}
		break;

	default:
		/*
		 * NOTE: Some devices leave off the last four bytes of
		 * PAGE_RIGID_GEOMETRY and PAGE_FLEX_GEOMETRY mode sense pages.
		 * The only information in those four bytes is RPM information
		 * so accept the page. The extra bytes will be zero and RPM will
		 * end up with the default value of 3600.
		 */
		if (((sc->sc_link->flags & SDEV_ATAPI) == 0) ||
		    ((sc->sc_link->flags & SDEV_REMOVABLE) == 0))
			err = scsi_do_mode_sense(sc->sc_link,
			    PAGE_RIGID_GEOMETRY, buf, (void **)&rigid, NULL,
			    NULL, &secsize, sizeof(*rigid) - 4,
			    flags | SCSI_SILENT, NULL);
		if (!err && rigid && DISK_PGCODE(rigid, PAGE_RIGID_GEOMETRY)) {
			heads = rigid->nheads;
			cyls = _3btol(rigid->ncyl);
			if (heads * cyls > 0)
				sectors = dp->disksize / (heads * cyls);
		} else {
			/* Fall back to the flexible geometry page. */
			err = scsi_do_mode_sense(sc->sc_link,
			    PAGE_FLEX_GEOMETRY, buf, (void **)&flex, NULL, NULL,
			    &secsize, sizeof(*flex) - 4,
			    flags | SCSI_SILENT, NULL);
			if (!err && flex &&
			    DISK_PGCODE(flex, PAGE_FLEX_GEOMETRY)) {
				sectors = flex->ph_sec_tr;
				heads = flex->nheads;
				cyls = _2btol(flex->ncyl);
				if (secsize == 0)
					secsize = _2btol(flex->bytes_s);
				if (dp->disksize == 0)
					dp->disksize = heads * cyls * sectors;
			}
		}
		break;
	}

validate:
	if (buf)
		dma_free(buf, sizeof(*buf));

	if (dp->disksize == 0)
		return (SDGP_RESULT_OFFLINE);

	if (dp->secsize == 0)
		dp->secsize = (secsize == 0) ? 512 : secsize;

	/*
	 * Restrict secsize values to powers of two between 512 and 64k.
	 */
	switch (dp->secsize) {
	case 0x200:	/* == 512, == DEV_BSIZE on all architectures. */
	case 0x400:
	case 0x800:
	case 0x1000:
	case 0x2000:
	case 0x4000:
	case 0x8000:
	case 0x10000:
		break;
	default:
		SC_DEBUG(sc->sc_link, SDEV_DB1,
		    ("sd_get_parms: bad secsize: %#x\n", dp->secsize));
		return (SDGP_RESULT_OFFLINE);
	}

	/*
	 * XXX THINK ABOUT THIS!!  Using values such that sectors * heads *
	 * cyls is <= disk_size can lead to wasted space. We need a more
	 * careful calculation/validation to make everything work out
	 * optimally.
	 */
	/*
	 * NOTE(review): the condition below reads dp->heads/dp->sectors
	 * before they are assigned in this function, i.e. whatever the
	 * caller left in *dp — confirm that is intended.
	 */
	if (dp->disksize > 0xffffffff && (dp->heads * dp->sectors) < 0xffff) {
		dp->heads = 511;
		dp->sectors = 255;
		cyls = 0;
	} else {
		/*
		 * Use standard geometry values for anything we still don't
		 * know.
		 */
		dp->heads = (heads == 0) ? 255 : heads;
		dp->sectors = (sectors == 0) ? 63 : sectors;
	}

	dp->cyls = (cyls == 0) ? dp->disksize / (dp->heads * dp->sectors) :
	    cyls;

	if (dp->cyls == 0) {
		dp->heads = dp->cyls = 1;
		dp->sectors = dp->disksize;
	}

	return (SDGP_RESULT_OK);
}
/*
 * Poll a SAF-TE enclosure for its current status page and refresh the
 * cached sensor states.  Takes sc_lock for the duration of the SCSI
 * transfer and the sensor update; returns silently on any I/O error.
 */
void
safte_read_encstat(void *arg)
{
	struct safte_readbuf_cmd *cmd;
	struct safte_sensor *s;
	struct safte_softc *sc = (struct safte_softc *)arg;
	struct scsi_xfer *xs;
	int error, i, flags = 0;
	u_int16_t oot;	/* out-of-temperature alarm bitmask */

	rw_enter_write(&sc->sc_lock);

	/* During autoconfiguration we must not sleep waiting for an xfer. */
	if (cold)
		flags |= SCSI_AUTOCONF;

	xs = scsi_xs_get(sc->sc_link, flags | SCSI_DATA_IN | SCSI_SILENT);
	if (xs == NULL) {
		rw_exit_write(&sc->sc_lock);
		return;
	}
	xs->cmdlen = sizeof(*cmd);
	xs->data = sc->sc_encbuf;
	xs->datalen = sc->sc_encbuflen;
	xs->retries = 2;
	xs->timeout = 30000;

	/* READ BUFFER of the SAF-TE "enclosure status" buffer. */
	cmd = (struct safte_readbuf_cmd *)xs->cmd;
	cmd->opcode = READ_BUFFER;
	cmd->flags |= SAFTE_RD_MODE;
	cmd->bufferid = SAFTE_RD_ENCSTAT;
	cmd->length = htobe16(sc->sc_encbuflen);

	error = scsi_xs_sync(xs);
	scsi_xs_put(xs);

	if (error != 0) {
		rw_exit_write(&sc->sc_lock);
		return;
	}

	/*
	 * sc_encbuf now holds the fresh status page; each sensor's
	 * se_field points into it.  Translate the raw SAF-TE status
	 * codes into sensor-framework value/status pairs.
	 */
	for (i = 0; i < sc->sc_nsensors; i++) {
		s = &sc->sc_sensors[i];
		s->se_sensor.flags &= ~SENSOR_FUNKNOWN;

		DPRINTF(("%s: %d type: %d field: 0x%02x\n", DEVNAME(sc), i,
		    s->se_type, *s->se_field));

		switch (s->se_type) {
		case SAFTE_T_FAN:
			switch (*s->se_field) {
			case SAFTE_FAN_OP:
				s->se_sensor.value = 1;
				s->se_sensor.status = SENSOR_S_OK;
				break;
			case SAFTE_FAN_MF:
				/* fan present but malfunctioning */
				s->se_sensor.value = 0;
				s->se_sensor.status = SENSOR_S_CRIT;
				break;
			case SAFTE_FAN_NOTINST:
			case SAFTE_FAN_UNKNOWN:
			default:
				s->se_sensor.value = 0;
				s->se_sensor.status = SENSOR_S_UNKNOWN;
				s->se_sensor.flags |= SENSOR_FUNKNOWN;
				break;
			}
			break;

		case SAFTE_T_PWRSUP:
			switch (*s->se_field) {
			case SAFTE_PWR_OP_ON:
				s->se_sensor.value = 1;
				s->se_sensor.status = SENSOR_S_OK;
				break;
			case SAFTE_PWR_OP_OFF:
				/* operational but switched off */
				s->se_sensor.value = 0;
				s->se_sensor.status = SENSOR_S_OK;
				break;
			case SAFTE_PWR_MF_ON:
				s->se_sensor.value = 1;
				s->se_sensor.status = SENSOR_S_CRIT;
				break;
			case SAFTE_PWR_MF_OFF:
				s->se_sensor.value = 0;
				s->se_sensor.status = SENSOR_S_CRIT;
				break;
			case SAFTE_PWR_NOTINST:
			case SAFTE_PWR_PRESENT:
			case SAFTE_PWR_UNKNOWN:
				s->se_sensor.value = 0;
				s->se_sensor.status = SENSOR_S_UNKNOWN;
				s->se_sensor.flags |= SENSOR_FUNKNOWN;
				break;
			}
			break;

		case SAFTE_T_DOORLOCK:
			switch (*s->se_field) {
			case SAFTE_DOOR_LOCKED:
				s->se_sensor.value = 1;
				s->se_sensor.status = SENSOR_S_OK;
				break;
			case SAFTE_DOOR_UNLOCKED:
				s->se_sensor.value = 0;
				s->se_sensor.status = SENSOR_S_CRIT;
				break;
			case SAFTE_DOOR_UNKNOWN:
				s->se_sensor.value = 0;
				s->se_sensor.status = SENSOR_S_CRIT;
				s->se_sensor.flags |= SENSOR_FUNKNOWN;
				break;
			}
			break;

		case SAFTE_T_ALARM:
			switch (*s->se_field) {
			case SAFTE_SPKR_OFF:
				s->se_sensor.value = 0;
				s->se_sensor.status = SENSOR_S_OK;
				break;
			case SAFTE_SPKR_ON:
				s->se_sensor.value = 1;
				s->se_sensor.status = SENSOR_S_CRIT;
				break;
			}
			break;

		case SAFTE_T_TEMP:
			/* Raw byte is converted to microkelvin. */
			s->se_sensor.value = safte_temp2uK(*s->se_field,
			    sc->sc_celsius);
			break;
		}
	}

	/*
	 * The enclosure also reports a per-slot out-of-temperature
	 * bitmask; mark the corresponding temperature sensors critical.
	 */
	oot = _2btol(sc->sc_temperrs);
	for (i = 0; i < sc->sc_ntemps; i++)
		sc->sc_temps[i].se_sensor.status =
		    (oot & (1 << i)) ? SENSOR_S_CRIT : SENSOR_S_OK;

	rw_exit_write(&sc->sc_lock);
}
/*
 * Perform a READ ELEMENT STATUS on behalf of the user, and return to
 * the user only the data the user is interested in.  This returns the
 * old data format: one status byte (flags1) per element.
 *
 * uptr must point to a user buffer of at least sc->sc_counts[chet]
 * bytes.  Returns 0 on success or an errno.
 */
static int
ch_ousergetelemstatus(struct ch_softc *sc, int chet, u_int8_t *uptr)
{
	struct read_element_status_header *st_hdrp, st_hdr;
	struct read_element_status_page_header *pg_hdrp;
	struct read_element_status_descriptor *desc;
	size_t size, desclen;
	void *data;
	int avail, i, error = 0;
	u_int8_t user_data;

	/*
	 * If there are no elements of the requested type in the changer,
	 * the request is invalid.
	 */
	if (sc->sc_counts[chet] == 0)
		return (EINVAL);

	/*
	 * Do the request the user wants, but only read the status header.
	 * This will tell us the amount of storage we must allocate in
	 * order to read all data.
	 */
	error = ch_getelemstatus(sc, sc->sc_firsts[chet],
	    sc->sc_counts[chet], &st_hdr, sizeof(st_hdr), 0, 0);
	if (error)
		return (error);

	size = sizeof(struct read_element_status_header) +
	    _3btol(st_hdr.nbytes);

	/*
	 * We must have at least room for the status header and
	 * one page header (since we only ask for one element type
	 * at a time).
	 */
	if (size < (sizeof(struct read_element_status_header) +
	    sizeof(struct read_element_status_page_header)))
		return (EIO);

	/*
	 * Allocate the storage and do the request again.
	 */
	data = malloc(size, M_DEVBUF, M_WAITOK);
	error = ch_getelemstatus(sc, sc->sc_firsts[chet],
	    sc->sc_counts[chet], data, size, 0, 0);
	if (error)
		goto done;

	st_hdrp = (struct read_element_status_header *)data;
	pg_hdrp = (struct read_element_status_page_header *)((u_long)st_hdrp +
	    sizeof(struct read_element_status_header));
	desclen = _2btol(pg_hdrp->edl);

	/*
	 * Sanity-check the device's answer before walking it: the
	 * reported element count must fit the caller's buffer, and all
	 * `avail' descriptors of `desclen' bytes must lie within the
	 * data we actually allocated.  A changer reporting bogus values
	 * must not be able to make us read past the buffer.
	 */
	avail = _2btol(st_hdrp->count);
	if (avail != sc->sc_counts[chet])
		printf("%s: warning, READ ELEMENT STATUS avail != count\n",
		    device_xname(sc->sc_dev));
	if (avail <= 0 || avail > sc->sc_counts[chet] || desclen == 0 ||
	    sizeof(struct read_element_status_header) +
	    sizeof(struct read_element_status_page_header) +
	    (size_t)avail * desclen > size) {
		error = EIO;
		goto done;
	}

	/*
	 * Fill in the user status array: one flags byte per element.
	 */
	desc = (struct read_element_status_descriptor *)((u_long)data +
	    sizeof(struct read_element_status_header) +
	    sizeof(struct read_element_status_page_header));
	for (i = 0; i < avail; ++i) {
		user_data = desc->flags1;
		/*
		 * Copy exactly one byte per element.  Copying `avail'
		 * bytes here (the old code) disclosed kernel stack
		 * memory beyond user_data and scribbled past the
		 * per-element slot in the user's array.
		 */
		error = copyout(&user_data, &uptr[i], sizeof(user_data));
		if (error)
			break;
		desc = (struct read_element_status_descriptor *)((u_long)desc
		    + desclen);
	}

 done:
	/* data is always valid here; free(NULL)-style guard not needed. */
	free(data, M_DEVBUF);
	return (error);
}
/*
 * Translate a SCSI command into a sun4v virtual-disk (vdsk) request,
 * map the data buffer through the LDC map table, post a descriptor on
 * the dring and notify the service domain.  Non-I/O commands are
 * either emulated locally or failed.
 */
void
vdsk_scsi_cmd(struct scsi_xfer *xs)
{
	struct scsi_rw *rw;
	struct scsi_rw_big *rwb;
	struct scsi_rw_12 *rw12;
	struct scsi_rw_16 *rw16;
	u_int64_t lba;
	u_int32_t sector_count;
	uint8_t operation;

	switch (xs->cmd->opcode) {
	case READ_BIG:
	case READ_COMMAND:
	case READ_12:
	case READ_16:
		operation = VD_OP_BREAD;
		break;
	case WRITE_BIG:
	case WRITE_COMMAND:
	case WRITE_12:
	case WRITE_16:
		operation = VD_OP_BWRITE;
		break;

	case SYNCHRONIZE_CACHE:
		operation = VD_OP_FLUSH;
		break;

	/* Commands emulated entirely in this driver. */
	case INQUIRY:
		vdsk_scsi_inq(xs);
		return;
	case READ_CAPACITY:
		vdsk_scsi_capacity(xs);
		return;
	case READ_CAPACITY_16:
		vdsk_scsi_capacity16(xs);
		return;

	case TEST_UNIT_READY:
	case START_STOP:
	case PREVENT_ALLOW:
		vdsk_scsi_done(xs, XS_NOERROR);
		return;

	default:
		printf("%s cmd 0x%02x\n", __func__, xs->cmd->opcode);
		/* FALLTHROUGH: unsupported commands fail quietly below. */
	case MODE_SENSE:
	case MODE_SENSE_BIG:
	case REPORT_LUNS:
	case READ_TOC:
		vdsk_scsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	/*
	 * READ/WRITE/SYNCHRONIZE commands.  SYNCHRONIZE CACHE has same
	 * layout as 10-byte READ/WRITE commands.
	 * NOTE(review): lba/sector_count stay uninitialized if cmdlen is
	 * none of 6/10/12/16; the opcode filter above presumably rules
	 * that out — confirm.  sector_count is computed but the request
	 * size below comes from xs->datalen instead.
	 */
	if (xs->cmdlen == 6) {
		rw = (struct scsi_rw *)xs->cmd;
		lba = _3btol(rw->addr) & (SRW_TOPADDR << 16 | 0xffff);
		sector_count = rw->length ? rw->length : 0x100;
	} else if (xs->cmdlen == 10) {
		rwb = (struct scsi_rw_big *)xs->cmd;
		lba = _4btol(rwb->addr);
		sector_count = _2btol(rwb->length);
	} else if (xs->cmdlen == 12) {
		rw12 = (struct scsi_rw_12 *)xs->cmd;
		lba = _4btol(rw12->addr);
		sector_count = _4btol(rw12->length);
	} else if (xs->cmdlen == 16) {
		rw16 = (struct scsi_rw_16 *)xs->cmd;
		lba = _8btol(rw16->addr);
		sector_count = _4btol(rw16->length);
	}

	{
		struct vdsk_softc *sc = xs->sc_link->adapter_softc;
		struct ldc_map *map = sc->sc_lm;
		struct vio_dring_msg dm;
		vaddr_t va;
		paddr_t pa;
		psize_t nbytes;
		int len, ncookies;
		int desc, s;
		int timeout;

		s = splbio();
		desc = sc->sc_tx_prod;

		/*
		 * Map the (physically discontiguous) data buffer one
		 * page at a time into free LDC map-table slots; each
		 * slot becomes one cookie of the dring descriptor.
		 */
		ncookies = 0;
		len = xs->datalen;
		va = (vaddr_t)xs->data;
		while (len > 0) {
			KASSERT(ncookies < MAXPHYS / PAGE_SIZE);
			/* NOTE(review): pmap_extract() result unchecked;
			 * kernel buffer is assumed to be mapped. */
			pmap_extract(pmap_kernel(), va, &pa);

			/* Find the next free map-table slot (ring scan). */
			while (map->lm_slot[map->lm_next].entry != 0) {
				map->lm_next++;
				map->lm_next &= (map->lm_nentries - 1);
			}
			map->lm_slot[map->lm_next].entry =
			    (pa & LDC_MTE_RA_MASK);
			map->lm_slot[map->lm_next].entry |=
			    LDC_MTE_CPR | LDC_MTE_CPW;
			map->lm_slot[map->lm_next].entry |=
			    LDC_MTE_IOR | LDC_MTE_IOW;
			map->lm_slot[map->lm_next].entry |=
			    LDC_MTE_R | LDC_MTE_W;
			map->lm_count++;

			/* Clip the cookie at the end of this page. */
			nbytes = MIN(len, PAGE_SIZE - (pa & PAGE_MASK));

			sc->sc_vd->vd_desc[desc].cookie[ncookies].addr =
			    map->lm_next << PAGE_SHIFT | (pa & PAGE_MASK);
			sc->sc_vd->vd_desc[desc].cookie[ncookies].size =
			    nbytes;
			/* Remember the slot so it can be freed on completion. */
			sc->sc_vsd[desc].vsd_map_idx[ncookies] = map->lm_next;

			va += nbytes;
			len -= nbytes;
			ncookies++;
		}

		/* Fill the descriptor; dstate is flipped last, after a
		 * memory barrier, so the peer never sees a partial one. */
		sc->sc_vd->vd_desc[desc].hdr.ack = 1;
		sc->sc_vd->vd_desc[desc].operation = operation;
		sc->sc_vd->vd_desc[desc].slice = VD_SLICE_NONE;
		sc->sc_vd->vd_desc[desc].status = 0xffffffff;
		sc->sc_vd->vd_desc[desc].offset = lba;
		sc->sc_vd->vd_desc[desc].size = xs->datalen;
		sc->sc_vd->vd_desc[desc].ncookies = ncookies;
		membar(Sync);
		sc->sc_vd->vd_desc[desc].hdr.dstate = VIO_DESC_READY;

		sc->sc_vsd[desc].vsd_xs = xs;
		sc->sc_vsd[desc].vsd_ncookies = ncookies;

		sc->sc_tx_prod++;
		sc->sc_tx_prod &= (sc->sc_vd->vd_nentries - 1);

		/* Tell the service domain about the new descriptor. */
		bzero(&dm, sizeof(dm));
		dm.tag.type = VIO_TYPE_DATA;
		dm.tag.stype = VIO_SUBTYPE_INFO;
		dm.tag.stype_env = VIO_DRING_DATA;
		dm.tag.sid = sc->sc_local_sid;
		dm.seq_no = sc->sc_seq_no++;
		dm.dring_ident = sc->sc_dring_ident;
		dm.start_idx = dm.end_idx = desc;
		vdsk_sendmsg(sc, &dm, sizeof(dm));

		/* Async completion arrives via vdsk_rx_intr(). */
		if (!ISSET(xs->flags, SCSI_POLL)) {
			splx(s);
			return;
		}

		/* SCSI_POLL: spin (up to ~1s) for the descriptor to free. */
		timeout = 1000;
		do {
			if (vdsk_rx_intr(sc) &&
			    sc->sc_vd->vd_desc[desc].status == VIO_DESC_FREE)
				break;

			delay(1000);
		} while(--timeout > 0);
		splx(s);
	}
}
/* * Perform a READ ELEMENT STATUS on behalf of the user. This returns * the new (more complete) data format. */ static int ch_usergetelemstatus(struct ch_softc *sc, struct changer_element_status_request *cesr) { struct scsipi_channel *chan = sc->sc_periph->periph_channel; struct scsipi_periph *dtperiph; struct read_element_status_header *st_hdrp, st_hdr; struct read_element_status_page_header *pg_hdrp; struct read_element_status_descriptor *desc; struct changer_volume_tag *avol, *pvol; size_t size, desclen, stddesclen, offset; int first, avail, i, error = 0; void *data; void *uvendptr; struct changer_element_status ces; /* * Check arguments. */ if (cesr->cesr_type > CHET_DT) return (EINVAL); if (sc->sc_counts[cesr->cesr_type] == 0) return (ENODEV); if (cesr->cesr_unit > (sc->sc_counts[cesr->cesr_type] - 1)) return (ENODEV); if (cesr->cesr_count > (sc->sc_counts[cesr->cesr_type] + cesr->cesr_unit)) return (EINVAL); /* * Do the request the user wants, but only read the status header. * This will tell us the amount of storage we must allocate * in order to read all the data. */ error = ch_getelemstatus(sc, sc->sc_firsts[cesr->cesr_type] + cesr->cesr_unit, cesr->cesr_count, &st_hdr, sizeof(st_hdr), 0, cesr->cesr_flags); if (error) return (error); size = sizeof(struct read_element_status_header) + _3btol(st_hdr.nbytes); /* * We must have at least room for the status header and * one page header (since we only ask for oen element type * at a time). */ if (size < (sizeof(struct read_element_status_header) + sizeof(struct read_element_status_page_header))) return (EIO); /* * Allocate the storage and do the request again. 
*/ data = malloc(size, M_DEVBUF, M_WAITOK); error = ch_getelemstatus(sc, sc->sc_firsts[cesr->cesr_type] + cesr->cesr_unit, cesr->cesr_count, data, size, 0, cesr->cesr_flags); if (error) goto done; st_hdrp = (struct read_element_status_header *)data; pg_hdrp = (struct read_element_status_page_header *)((u_long)st_hdrp + sizeof(struct read_element_status_header)); desclen = _2btol(pg_hdrp->edl); /* * Fill in the user status array. */ first = _2btol(st_hdrp->fear); if (first < (sc->sc_firsts[cesr->cesr_type] + cesr->cesr_unit) || first >= (sc->sc_firsts[cesr->cesr_type] + cesr->cesr_unit + cesr->cesr_count)) { error = EIO; goto done; } first -= sc->sc_firsts[cesr->cesr_type] + cesr->cesr_unit; avail = _2btol(st_hdrp->count); if (avail <= 0 || avail > cesr->cesr_count) { error = EIO; goto done; } offset = sizeof(struct read_element_status_header) + sizeof(struct read_element_status_page_header); for (i = 0; i < cesr->cesr_count; i++) { memset(&ces, 0, sizeof(ces)); if (i < first || i >= (first + avail)) { error = copyout(&ces, &cesr->cesr_data[i], sizeof(ces)); if (error) goto done; } desc = (struct read_element_status_descriptor *) ((char *)data + offset); stddesclen = sizeof(struct read_element_status_descriptor); offset += desclen; ces.ces_flags = CESTATUS_STATUS_VALID; /* * The SCSI flags conveniently map directly to the * chio API flags. */ ces.ces_flags |= (desc->flags1 & 0x3f); ces.ces_asc = desc->sense_code; ces.ces_ascq = desc->sense_qual; /* * For Data Transport elemenets, get the SCSI ID and LUN, * and attempt to map them to a device name if they're * on the same SCSI bus. 
*/ if (desc->dt_scsi_flags & READ_ELEMENT_STATUS_DT_IDVALID) { ces.ces_target = desc->dt_scsi_addr; ces.ces_flags |= CESTATUS_TARGET_VALID; } if (desc->dt_scsi_flags & READ_ELEMENT_STATUS_DT_LUVALID) { ces.ces_lun = desc->dt_scsi_flags & READ_ELEMENT_STATUS_DT_LUNMASK; ces.ces_flags |= CESTATUS_LUN_VALID; } if (desc->dt_scsi_flags & READ_ELEMENT_STATUS_DT_NOTBUS) ces.ces_flags |= CESTATUS_NOTBUS; else if ((ces.ces_flags & (CESTATUS_TARGET_VALID|CESTATUS_LUN_VALID)) == (CESTATUS_TARGET_VALID|CESTATUS_LUN_VALID)) { if (ces.ces_target < chan->chan_ntargets && ces.ces_lun < chan->chan_nluns && (dtperiph = scsipi_lookup_periph(chan, ces.ces_target, ces.ces_lun)) != NULL && dtperiph->periph_dev != NULL) { strlcpy(ces.ces_xname, device_xname(dtperiph->periph_dev), sizeof(ces.ces_xname)); ces.ces_flags |= CESTATUS_XNAME_VALID; } } if (desc->flags2 & READ_ELEMENT_STATUS_INVERT) ces.ces_flags |= CESTATUS_INVERTED; if (desc->flags2 & READ_ELEMENT_STATUS_SVALID) { if (ch_map_element(sc, _2btol(desc->ssea), &ces.ces_from_type, &ces.ces_from_unit)) ces.ces_flags |= CESTATUS_FROM_VALID; } /* * Extract volume tag information. */ switch (pg_hdrp->flags & (READ_ELEMENT_STATUS_PVOLTAG|READ_ELEMENT_STATUS_AVOLTAG)) { case (READ_ELEMENT_STATUS_PVOLTAG|READ_ELEMENT_STATUS_AVOLTAG): pvol = (struct changer_volume_tag *)(desc + 1); avol = pvol + 1; break; case READ_ELEMENT_STATUS_PVOLTAG: pvol = (struct changer_volume_tag *)(desc + 1); avol = NULL; break; case READ_ELEMENT_STATUS_AVOLTAG: pvol = NULL; avol = (struct changer_volume_tag *)(desc + 1); break; default: avol = pvol = NULL; break; } if (pvol != NULL) { ch_voltag_convert_in(pvol, &ces.ces_pvoltag); ces.ces_flags |= CESTATUS_PVOL_VALID; stddesclen += sizeof(struct changer_volume_tag); } if (avol != NULL) { ch_voltag_convert_in(avol, &ces.ces_avoltag); ces.ces_flags |= CESTATUS_AVOL_VALID; stddesclen += sizeof(struct changer_volume_tag); } /* * Compute vendor-specific length. 
Note the 4 reserved * bytes between the volume tags and the vendor-specific * data. Copy it out of the user wants it. */ stddesclen += 4; if (desclen > stddesclen) ces.ces_vendor_len = desclen - stddesclen; if (ces.ces_vendor_len != 0 && cesr->cesr_vendor_data != NULL) { error = copyin(&cesr->cesr_vendor_data[i], &uvendptr, sizeof(uvendptr)); if (error) goto done; error = copyout((void *)((u_long)desc + stddesclen), uvendptr, ces.ces_vendor_len); if (error) goto done; } /* * Now copy out the status descriptor we've constructed. */ error = copyout(&ces, &cesr->cesr_data[i], sizeof(ces)); if (error) goto done; } done: if (data != NULL) free(data, M_DEVBUF); return (error); }
/*
 * Begin a scan: open the SCSI device, build and send the window
 * descriptor (SET WINDOW), read it back (GET WINDOW), select
 * measurement units and ADF mode (MODE SELECT), trigger the scan and
 * wait until the scanner is ready to transmit data.
 */
SANE_Status
sane_start (SANE_Handle handle)
{
  char *mode_str;
  Ibm_Scanner *s = handle;
  SANE_Status status;
  struct ibm_window_data wbuf;
  struct measurements_units_page mup;

  DBG (11, ">> sane_start\n");

  /* First make sure we have a current parameter set.  Some of
     the parameters will be overwritten below, but that's OK.  */
  status = sane_get_parameters (s, 0);
  if (status != SANE_STATUS_GOOD)
    return status;

  status = sanei_scsi_open (s->hw->sane.name, &s->fd, 0, 0);
  if (status != SANE_STATUS_GOOD)
    {
      DBG (1, "open of %s failed: %s\n",
           s->hw->sane.name, sane_strstatus (status));
      return (status);
    }

  /* Snapshot the current option values for this scan. */
  mode_str = s->val[OPT_MODE].s;
  s->xres = s->val[OPT_X_RESOLUTION].w;
  s->yres = s->val[OPT_Y_RESOLUTION].w;
  s->ulx = s->val[OPT_TL_X].w;
  s->uly = s->val[OPT_TL_Y].w;
  s->width = s->val[OPT_BR_X].w - s->val[OPT_TL_X].w;
  s->length = s->val[OPT_BR_Y].w - s->val[OPT_TL_Y].w;
  s->brightness = s->val[OPT_BRIGHTNESS].w;
  s->contrast = s->val[OPT_CONTRAST].w;
  s->bpp = s->params.depth;

  /* NOTE(review): an unrecognized mode string leaves
     s->image_composition at its previous value — confirm callers
     can only pass one of these three modes. */
  if (strcmp (mode_str, SANE_VALUE_SCAN_MODE_LINEART) == 0)
    {
      s->image_composition = IBM_BINARY_MONOCHROME;
    }
  else if (strcmp (mode_str, SANE_VALUE_SCAN_MODE_HALFTONE) == 0)
    {
      s->image_composition = IBM_DITHERED_MONOCHROME;
    }
  else if (strcmp (mode_str, SANE_VALUE_SCAN_MODE_GRAY) == 0)
    {
      s->image_composition = IBM_GRAYSCALE;
    }

  /* Build the SET WINDOW payload. */
  memset (&wbuf, 0, sizeof (wbuf));
  /* next line commented out by mf */
  /* _lto2b(sizeof(wbuf) - 8, wbuf.len); */
  /* next line by mf */
  _lto2b(IBM_WINDOW_DATA_SIZE, wbuf.len); /* size=320 */
  _lto2b(s->xres, wbuf.x_res);
  _lto2b(s->yres, wbuf.y_res);
  _lto4b(s->ulx, wbuf.x_org);
  _lto4b(s->uly, wbuf.y_org);
  _lto4b(s->width, wbuf.width);
  _lto4b(s->length, wbuf.length);

  wbuf.image_comp = s->image_composition;
  /* if you throw the MRIF bit the brightness control reverses too */
  /* so I reverse the reversal in software for symmetry's sake */
  if (wbuf.image_comp == IBM_GRAYSCALE ||
      wbuf.image_comp == IBM_DITHERED_MONOCHROME)
    {
      if (wbuf.image_comp == IBM_GRAYSCALE)
        wbuf.mrif_filtering_gamma_id = (SANE_Byte) 0x80; /* it was 0x90 */
      if (wbuf.image_comp == IBM_DITHERED_MONOCHROME)
        wbuf.mrif_filtering_gamma_id = (SANE_Byte) 0x10;
      /* NOTE(review): 256 - value truncates to 0 when the option is 0
         (SANE_Byte wraps) — presumably intentional inversion; verify
         against the scanner's brightness range. */
      wbuf.brightness = 256 - (SANE_Byte) s->brightness;
/*
      if (is50)
	wbuf.contrast = (SANE_Byte) s->contrast;
      else
*/
      wbuf.contrast = 256 - (SANE_Byte) s->contrast;
    }
  else /* wbuf.image_comp == IBM_BINARY_MONOCHROME */
    {
      wbuf.mrif_filtering_gamma_id = (SANE_Byte) 0x00;
      wbuf.brightness = (SANE_Byte) s->brightness;
      wbuf.contrast = (SANE_Byte) s->contrast;
    }

  wbuf.threshold = 0;
  wbuf.bits_per_pixel = s->bpp;

  wbuf.halftone_code = 2;       /* dithering */
  wbuf.halftone_id = 0x0A;      /* 8x8 Bayer pattern */
  wbuf.pad_type = 3;
  wbuf.bit_ordering[0] = 0;
  wbuf.bit_ordering[1] = 7;     /* modified by mf (it was 3) */

  /* Log the window we are about to send. */
  DBG (5, "xres=%d\n", _2btol(wbuf.x_res));
  DBG (5, "yres=%d\n", _2btol(wbuf.y_res));
  DBG (5, "ulx=%d\n", _4btol(wbuf.x_org));
  DBG (5, "uly=%d\n", _4btol(wbuf.y_org));
  DBG (5, "width=%d\n", _4btol(wbuf.width));
  DBG (5, "length=%d\n", _4btol(wbuf.length));
  DBG (5, "image_comp=%d\n", wbuf.image_comp);

  DBG (11, "sane_start: sending SET WINDOW\n");
  status = set_window (s->fd, &wbuf);
  if (status != SANE_STATUS_GOOD)
    {
      DBG (1, "SET WINDOW failed: %s\n", sane_strstatus (status));
      return (status);
    }

  /* Read the window back so the log shows what the device accepted. */
  DBG (11, "sane_start: sending GET WINDOW\n");
  memset (&wbuf, 0, sizeof (wbuf));
  status = get_window (s->fd, &wbuf);
  if (status != SANE_STATUS_GOOD)
    {
      DBG (1, "GET WINDOW failed: %s\n", sane_strstatus (status));
      return (status);
    }
  DBG (5, "xres=%d\n", _2btol(wbuf.x_res));
  DBG (5, "yres=%d\n", _2btol(wbuf.y_res));
  DBG (5, "ulx=%d\n", _4btol(wbuf.x_org));
  DBG (5, "uly=%d\n", _4btol(wbuf.y_org));
  DBG (5, "width=%d\n", _4btol(wbuf.width));
  DBG (5, "length=%d\n", _4btol(wbuf.length));
  DBG (5, "image_comp=%d\n", wbuf.image_comp);

  /* MODE SELECT: measurement units plus (mf) the ADF control page. */
  DBG (11, "sane_start: sending MODE SELECT\n");
  memset (&mup, 0, sizeof (mup));
  mup.page_code = MEASUREMENTS_PAGE;
  mup.parameter_length = 0x06;
  mup.bmu = INCHES;
  mup.mud[0] = (DEFAULT_MUD >> 8) & 0xff;
  mup.mud[1] = (DEFAULT_MUD & 0xff);
  /* next lines by mf */
  mup.adf_page_code = 0x26;
  mup.adf_parameter_length = 6;
  if (s->adf_state == ADF_ARMED)
    mup.adf_control = 1;
  else
    mup.adf_control = 0;
  /* end lines by mf */

  status = mode_select (s->fd, (struct mode_pages *) &mup);
  if (status != SANE_STATUS_GOOD)
    {
      DBG (1, "attach: MODE_SELECT failed\n");
      return (SANE_STATUS_INVAL);
    }

  status = trigger_scan (s->fd);
  if (status != SANE_STATUS_GOOD)
    {
      DBG (1, "start of scan failed: %s\n", sane_strstatus (status));
      /* next line introduced not to freeze xscanimage */
      do_cancel(s);
      return status;
    }

  /* Wait for scanner to become ready to transmit data */
  status = ibm_wait_ready (s);
  if (status != SANE_STATUS_GOOD)
    {
      DBG (1, "GET DATA STATUS failed: %s\n", sane_strstatus (status));
      return (status);
    }

  s->bytes_to_read = s->params.bytes_per_line * s->params.lines;

  DBG (1, "%d pixels per line, %d bytes, %d lines high, total %lu bytes, "
       "dpi=%d\n", s->params.pixels_per_line, s->params.bytes_per_line,
       s->params.lines, (u_long) s->bytes_to_read, s->val[OPT_Y_RESOLUTION].w);

  s->scanning = SANE_TRUE;

  DBG (11, "<< sane_start\n");
  return (SANE_STATUS_GOOD);
}
/*
 * Fill out the disk parameter structure. Return SDGP_RESULT_OK if the
 * structure is correctly filled in, SDGP_RESULT_OFFLINE otherwise. The caller
 * is responsible for clearing the SDEV_MEDIA_LOADED flag if the structure
 * cannot be completed.
 */
int
sd_get_parms(struct sd_softc *sc, struct disk_parms *dp, int flags)
{
	union scsi_mode_sense_buf *buf = NULL;
	struct page_rigid_geometry *rigid;
	struct page_flex_geometry *flex;
	struct page_reduced_geometry *reduced;
	u_int32_t heads = 0, sectors = 0, cyls = 0, blksize = 0, ssblksize;
	u_int16_t rpm = 0;

	/* READ CAPACITY gives the authoritative size and sector size. */
	dp->disksize = scsi_size(sc->sc_link, flags, &ssblksize);

	/*
	 * Many UMASS devices choke when asked about their geometry. Most
	 * don't have a meaningful geometry anyway, so just fake it if
	 * scsi_size() worked.
	 */
	if ((sc->sc_link->flags & SDEV_UMASS) && (dp->disksize > 0))
		goto validate;	/* N.B. buf will be NULL at validate. */

	buf = malloc(sizeof(*buf), M_TEMP, M_NOWAIT);
	if (buf == NULL)
		goto validate;

	/* Pick a geometry mode page appropriate to the device type. */
	switch (sc->sc_link->inqdata.device & SID_TYPE) {
	case T_OPTICAL:
		/* No more information needed or available. */
		break;

	case T_RDIRECT:
		/* T_RDIRECT supports only PAGE_REDUCED_GEOMETRY (6). */
		scsi_do_mode_sense(sc->sc_link, PAGE_REDUCED_GEOMETRY, buf,
		    (void **)&reduced, NULL, NULL, &blksize,
		    sizeof(*reduced), flags | SCSI_SILENT, NULL);
		/* DISK_PGCODE also rejects a NULL page pointer. */
		if (DISK_PGCODE(reduced, PAGE_REDUCED_GEOMETRY)) {
			if (dp->disksize == 0)
				dp->disksize = _5btol(reduced->sectors);
			if (blksize == 0)
				blksize = _2btol(reduced->bytes_s);
		}
		break;

	default:
		/*
		 * NOTE: Some devices leave off the last four bytes of
		 * PAGE_RIGID_GEOMETRY and PAGE_FLEX_GEOMETRY mode sense pages.
		 * The only information in those four bytes is RPM information
		 * so accept the page. The extra bytes will be zero and RPM will
		 * end up with the default value of 3600.
		 */
		rigid = NULL;
		if (((sc->sc_link->flags & SDEV_ATAPI) == 0) ||
		    ((sc->sc_link->flags & SDEV_REMOVABLE) == 0))
			scsi_do_mode_sense(sc->sc_link, PAGE_RIGID_GEOMETRY,
			    buf, (void **)&rigid, NULL, NULL, &blksize,
			    sizeof(*rigid) - 4, flags | SCSI_SILENT, NULL);
		if (DISK_PGCODE(rigid, PAGE_RIGID_GEOMETRY)) {
			heads = rigid->nheads;
			cyls = _3btol(rigid->ncyl);
			rpm = _2btol(rigid->rpm);
			/* Derive sectors/track from the known capacity. */
			if (heads * cyls > 0)
				sectors = dp->disksize / (heads * cyls);
		} else {
			/* Fall back to the flexible-disk geometry page. */
			scsi_do_mode_sense(sc->sc_link, PAGE_FLEX_GEOMETRY,
			    buf, (void **)&flex, NULL, NULL, &blksize,
			    sizeof(*flex) - 4, flags | SCSI_SILENT, NULL);
			if (DISK_PGCODE(flex, PAGE_FLEX_GEOMETRY)) {
				sectors = flex->ph_sec_tr;
				heads = flex->nheads;
				cyls = _2btol(flex->ncyl);
				rpm = _2btol(flex->rpm);
				if (blksize == 0)
					blksize = _2btol(flex->bytes_s);
				if (dp->disksize == 0)
					dp->disksize = heads * cyls * sectors;
			}
		}
		break;
	}

validate:
	if (buf)
		free(buf, M_TEMP);

	if (dp->disksize == 0)
		return (SDGP_RESULT_OFFLINE);

	/* READ CAPACITY's sector size wins over mode-page values. */
	if (ssblksize > 0)
		dp->blksize = ssblksize;
	else
		dp->blksize = (blksize == 0) ? 512 : blksize;

	/*
	 * Restrict blksize values to powers of two between 512 and 64k.
	 */
	switch (dp->blksize) {
	case 0x200:	/* == 512, == DEV_BSIZE on all architectures. */
	case 0x400:
	case 0x800:
	case 0x1000:
	case 0x2000:
	case 0x4000:
	case 0x8000:
	case 0x10000:
		break;
	default:
		SC_DEBUG(sc->sc_link, SDEV_DB1,
		    ("sd_get_parms: bad blksize: %#x\n", dp->blksize));
		return (SDGP_RESULT_OFFLINE);
	}

	/*
	 * XXX THINK ABOUT THIS!!  Using values such that sectors * heads *
	 * cyls is <= disk_size can lead to wasted space. We need a more
	 * careful calculation/validation to make everything work out
	 * optimally.
	 *
	 * NOTE(review): this condition reads dp->heads/dp->sectors before
	 * this function has assigned them (values left over in the
	 * caller's struct), and the >2TB branch never sets dp->rot_rate —
	 * presumably accepted upstream behavior; confirm.
	 */
	if (dp->disksize > 0xffffffff && (dp->heads * dp->sectors) < 0xffff) {
		dp->heads = 511;
		dp->sectors = 255;
		cyls = 0;
	} else {
		/*
		 * Use standard geometry values for anything we still don't
		 * know.
		 */
		dp->heads = (heads == 0) ? 255 : heads;
		dp->sectors = (sectors == 0) ? 63 : sectors;
		dp->rot_rate = (rpm == 0) ? 3600 : rpm;
	}

	dp->cyls = (cyls == 0) ? dp->disksize / (dp->heads * dp->sectors) :
	    cyls;

	/* Degenerate geometry: expose everything on one track. */
	if (dp->cyls == 0) {
		dp->heads = dp->cyls = 1;
		dp->sectors = dp->disksize;
	}

	return (SDGP_RESULT_OK);
}