/*
 * Build a SYNCHRONIZE CACHE command in the supplied ctl_io.
 *
 * The smallest CDB that can encode the request is chosen: the 10-byte
 * form carries a 32-bit LBA and a 16-bit block count, so the 16-byte
 * form is used when either field overflows or when the caller forces a
 * larger CDB via minimum_cdb_size.  The command carries no data phase.
 */
void
ctl_scsi_sync_cache(union ctl_io *io, int immed, int reladr,
		    int minimum_cdb_size, uint64_t starting_lba,
		    uint32_t block_count, ctl_tag_type tag_type,
		    uint8_t control)
{
	int need_16;

	ctl_scsi_zero_io(io);

	/* Fall back to SYNCHRONIZE CACHE(16) when the 10-byte CDB can't
	 * represent the LBA or block count, or the caller demands it. */
	need_16 = (minimum_cdb_size >= 16) ||
	    ((block_count & 0xffff) != block_count) ||
	    ((starting_lba & 0xffffffff) != starting_lba);

	if (need_16 == 0) {
		struct scsi_sync_cache *cdb;

		cdb = (struct scsi_sync_cache *)io->scsiio.cdb;
		cdb->opcode = SYNCHRONIZE_CACHE;
		if (reladr != 0)
			cdb->byte2 |= SSC_RELADR;
		if (immed != 0)
			cdb->byte2 |= SSC_IMMED;
		scsi_ulto4b(starting_lba, cdb->begin_lba);
		scsi_ulto2b(block_count, cdb->lb_count);
		cdb->control = control;
	} else {
		struct scsi_sync_cache_16 *cdb;

		cdb = (struct scsi_sync_cache_16 *)io->scsiio.cdb;
		cdb->opcode = SYNCHRONIZE_CACHE_16;
		if (reladr != 0)
			cdb->byte2 |= SSC_RELADR;
		if (immed != 0)
			cdb->byte2 |= SSC_IMMED;
		scsi_u64to8b(starting_lba, cdb->begin_lba);
		scsi_ulto4b(block_count, cdb->lb_count);
		cdb->control = control;
	}

	/* Common header/SCSI I/O setup: no data transfer for this command. */
	io->io_hdr.io_type = CTL_IO_SCSI;
	io->io_hdr.flags = CTL_FLAG_DATA_NONE;
	io->scsiio.tag_type = tag_type;
	io->scsiio.ext_data_ptr = NULL;
	io->scsiio.ext_data_len = 0;
	io->scsiio.ext_sg_entries = 0;
	io->scsiio.ext_data_filled = 0;
	io->scsiio.sense_len = SSD_FULL_SIZE;
}
/*
 * Build a READ CAPACITY(16) command (SERVICE ACTION IN with the
 * SRC16_SERVICE_ACTION service action) in the supplied ctl_io.
 *
 * data_ptr/data_len describe the caller's buffer for the returned
 * capacity data (DATA_IN).  reladr and pmi set the corresponding CDB
 * bits.  Fix: io_type was previously assigned twice (once right after
 * ctl_scsi_zero_io() and again below); the redundant first assignment
 * has been removed.
 */
void
ctl_scsi_read_capacity_16(union ctl_io *io, uint8_t *data_ptr,
			  uint32_t data_len, uint64_t addr, int reladr,
			  int pmi, ctl_tag_type tag_type, uint8_t control)
{
	struct scsi_read_capacity_16 *cdb;

	ctl_scsi_zero_io(io);

	cdb = (struct scsi_read_capacity_16 *)io->scsiio.cdb;
	cdb->opcode = SERVICE_ACTION_IN;
	cdb->service_action = SRC16_SERVICE_ACTION;
	if (reladr)
		cdb->reladr |= SRC16_RELADR;
	if (pmi)
		cdb->reladr |= SRC16_PMI;
	scsi_u64to8b(addr, cdb->addr);
	scsi_ulto4b(data_len, cdb->alloc_len);
	cdb->control = control;

	io->io_hdr.io_type = CTL_IO_SCSI;
	io->io_hdr.flags = CTL_FLAG_DATA_IN;
	io->scsiio.tag_type = tag_type;
	io->scsiio.ext_data_ptr = data_ptr;
	io->scsiio.ext_data_len = data_len;
	io->scsiio.ext_sg_entries = 0;
	io->scsiio.ext_data_filled = 0;
	io->scsiio.sense_len = SSD_FULL_SIZE;
}
/*
 * Build a READ CAPACITY(10) command in the supplied ctl_io.
 *
 * data_ptr/data_len describe the caller's buffer for the returned
 * capacity data (DATA_IN).  Fixes: io_type was previously assigned
 * twice — the redundant first assignment has been removed; and the
 * RELADR/PMI bits are now OR-ed in (|=) like every sibling helper,
 * rather than overwriting the whole byte (behavior is unchanged since
 * ctl_scsi_zero_io() clears the CDB, but the style is now consistent).
 */
void
ctl_scsi_read_capacity(union ctl_io *io, uint8_t *data_ptr,
		       uint32_t data_len, uint32_t addr, int reladr,
		       int pmi, ctl_tag_type tag_type, uint8_t control)
{
	struct scsi_read_capacity *cdb;

	ctl_scsi_zero_io(io);

	cdb = (struct scsi_read_capacity *)io->scsiio.cdb;
	cdb->opcode = READ_CAPACITY;
	if (reladr)
		cdb->byte2 |= SRC_RELADR;
	if (pmi)
		cdb->pmi |= SRC_PMI;
	scsi_ulto4b(addr, cdb->addr);
	cdb->control = control;

	io->io_hdr.io_type = CTL_IO_SCSI;
	io->io_hdr.flags = CTL_FLAG_DATA_IN;
	io->scsiio.tag_type = tag_type;
	io->scsiio.ext_data_ptr = data_ptr;
	io->scsiio.ext_data_len = data_len;
	io->scsiio.ext_sg_entries = 0;
	io->scsiio.ext_data_filled = 0;
	io->scsiio.sense_len = SSD_FULL_SIZE;
}
/*
 * Build a WRITE SAME(16) command in the supplied ctl_io.
 *
 * byte2 is passed straight into the CDB (callers use it for bits such
 * as UNMAP — not verified here).  data_ptr/data_len describe the single
 * block of data to replicate (DATA_OUT).  Fix: io_type was previously
 * assigned twice; the redundant first assignment has been removed.
 */
void
ctl_scsi_write_same(union ctl_io *io, uint8_t *data_ptr, uint32_t data_len,
		    uint8_t byte2, uint64_t lba, uint32_t num_blocks,
		    ctl_tag_type tag_type, uint8_t control)
{
	struct ctl_scsiio *ctsio;
	struct scsi_write_same_16 *cdb;

	ctl_scsi_zero_io(io);

	ctsio = &io->scsiio;
	ctsio->cdb_len = sizeof(*cdb);
	cdb = (struct scsi_write_same_16 *)ctsio->cdb;
	cdb->opcode = WRITE_SAME_16;
	cdb->byte2 = byte2;
	scsi_u64to8b(lba, cdb->addr);
	scsi_ulto4b(num_blocks, cdb->length);
	cdb->group = 0;
	cdb->control = control;

	io->io_hdr.io_type = CTL_IO_SCSI;
	io->io_hdr.flags = CTL_FLAG_DATA_OUT;
	ctsio->tag_type = tag_type;
	ctsio->ext_data_ptr = data_ptr;
	ctsio->ext_data_len = data_len;
	ctsio->ext_sg_entries = 0;
	ctsio->ext_data_filled = 0;
	ctsio->sense_len = SSD_FULL_SIZE;
}
/*
 * Build a REPORT LUNS command in the supplied ctl_io.
 *
 * select_report chooses which LUNs the target reports; data_ptr/data_len
 * describe the caller's buffer for the returned LUN list (DATA_IN) and
 * data_len is also encoded as the CDB allocation length.  Fix: io_type
 * was previously assigned twice; the redundant first assignment has
 * been removed.
 */
void
ctl_scsi_report_luns(union ctl_io *io, uint8_t *data_ptr, uint32_t data_len,
		     uint8_t select_report, ctl_tag_type tag_type,
		     uint8_t control)
{
	struct ctl_scsiio *ctsio;
	struct scsi_report_luns *cdb;

	ctl_scsi_zero_io(io);

	ctsio = &io->scsiio;
	cdb = (struct scsi_report_luns *)ctsio->cdb;
	cdb->opcode = REPORT_LUNS;
	cdb->select_report = select_report;
	scsi_ulto4b(data_len, cdb->length);
	cdb->control = control;

	io->io_hdr.io_type = CTL_IO_SCSI;
	io->io_hdr.flags = CTL_FLAG_DATA_IN;
	ctsio->tag_type = tag_type;
	ctsio->cdb_len = sizeof(*cdb);
	ctsio->ext_data_ptr = data_ptr;
	ctsio->ext_data_len = data_len;
	ctsio->ext_sg_entries = 0;
	ctsio->ext_data_filled = 0;
	ctsio->sense_len = SSD_FULL_SIZE;
}
/*
 * Build a PERSISTENT RESERVE OUT command in the supplied ctl_io.
 *
 * The caller's reservation key and service-action reservation key are
 * written into the parameter list at data_ptr (which must point to a
 * scsi_per_res_out_parms large enough for both keys); the same buffer
 * is sent to the target as the DATA_OUT payload of length data_len.
 *
 * NOTE(review): 'action' and 'type' appear to be caller-side indices
 * that are remapped below onto the SPC service-action and reservation
 * type codes (e.g. action 5 -> code 6, presumably REGISTER AND IGNORE
 * EXISTING KEY; type indices 0..5 -> type codes 1,3,5,6,7,8).  The
 * exact correspondence is not verifiable from this file — confirm
 * against the callers and the SPC standard before changing anything.
 */
void
ctl_scsi_persistent_res_out(union ctl_io *io, uint8_t *data_ptr,
			    uint32_t data_len, int action, int type,
			    uint64_t key, uint64_t sa_key,
			    ctl_tag_type tag_type, uint8_t control)
{
	struct scsi_per_res_out *cdb;
	struct scsi_per_res_out_parms *params;

	ctl_scsi_zero_io(io);

	cdb = (struct scsi_per_res_out *)io->scsiio.cdb;
	params = (struct scsi_per_res_out_parms *)data_ptr;

	cdb->opcode = PERSISTENT_RES_OUT;
	/* Remap the caller's action index onto the wire code (see NOTE). */
	if (action == 5)
		cdb->action = 6;
	else
		cdb->action = action;
	/*
	 * Remap the caller's type index onto the reservation type code
	 * placed in the scope/type byte.  An out-of-range index leaves
	 * scope_type at 0 (the CDB was zeroed above).
	 */
	switch(type) {
	case 0:
		cdb->scope_type = 1;
		break;
	case 1:
		cdb->scope_type = 3;
		break;
	case 2:
		cdb->scope_type = 5;
		break;
	case 3:
		cdb->scope_type = 6;
		break;
	case 4:
		cdb->scope_type = 7;
		break;
	case 5:
		cdb->scope_type = 8;
		break;
	}
	scsi_ulto4b(data_len, cdb->length);
	cdb->control = control;

	/* Fill in the parameter list that travels as the data-out phase. */
	scsi_u64to8b(key, params->res_key.key);
	scsi_u64to8b(sa_key, params->serv_act_res_key);

	io->io_hdr.io_type = CTL_IO_SCSI;
	io->io_hdr.flags = CTL_FLAG_DATA_OUT;
	io->scsiio.tag_type = tag_type;
	io->scsiio.ext_data_ptr = data_ptr;
	io->scsiio.ext_data_len = data_len;
	io->scsiio.ext_sg_entries = 0;
	io->scsiio.ext_data_filled = 0;
	io->scsiio.sense_len = SSD_FULL_SIZE;
}
/*
 * Build a MAINTENANCE IN command in the supplied ctl_io.
 *
 * 'action' is placed in the byte following the opcode (the service
 * action byte of the CDB); data_ptr/data_len describe the caller's
 * buffer for the returned data (DATA_IN), and data_len is also encoded
 * as the CDB allocation length.
 */
void
ctl_scsi_maintenance_in(union ctl_io *io, uint8_t *data_ptr,
			uint32_t data_len, uint8_t action,
			ctl_tag_type tag_type, uint8_t control)
{
	struct scsi_maintenance_in *mi_cdb;

	ctl_scsi_zero_io(io);

	mi_cdb = (struct scsi_maintenance_in *)io->scsiio.cdb;
	mi_cdb->opcode = MAINTENANCE_IN;
	mi_cdb->byte2 = action;
	scsi_ulto4b(data_len, mi_cdb->length);
	mi_cdb->control = control;

	/* Common I/O header and buffer bookkeeping. */
	io->io_hdr.io_type = CTL_IO_SCSI;
	io->io_hdr.flags = CTL_FLAG_DATA_IN;
	io->scsiio.tag_type = tag_type;
	io->scsiio.ext_data_ptr = data_ptr;
	io->scsiio.ext_data_len = data_len;
	io->scsiio.ext_sg_entries = 0;
	io->scsiio.ext_data_filled = 0;
	io->scsiio.sense_len = SSD_FULL_SIZE;
}
void ctl_scsi_read_write(union ctl_io *io, uint8_t *data_ptr, uint32_t data_len, int read_op, uint8_t byte2, int minimum_cdb_size, uint64_t lba, uint32_t num_blocks, ctl_tag_type tag_type, uint8_t control) { struct ctl_scsiio *ctsio; ctl_scsi_zero_io(io); io->io_hdr.io_type = CTL_IO_SCSI; ctsio = &io->scsiio; /* * Pick out the smallest CDB that will hold the user's request. * minimum_cdb_size allows cranking the CDB size up, even for * requests that would not normally need a large CDB. This can be * useful for testing (e.g. to make sure READ_16 support works without * having an array larger than 2TB) and for compatibility -- e.g. * if your device doesn't support READ_6. (ATAPI drives don't.) */ if ((minimum_cdb_size < 10) && ((lba & 0x1fffff) == lba) && ((num_blocks & 0xff) == num_blocks) && (byte2 == 0)) { struct scsi_rw_6 *cdb; /* * Note that according to SBC-2, the target should return 256 * blocks if the transfer length in a READ(6) or WRITE(6) CDB * is set to 0. Since it's possible that some targets * won't do the right thing, we only send a READ(6) or * WRITE(6) for transfer sizes up to and including 255 blocks. */ cdb = (struct scsi_rw_6 *)ctsio->cdb; cdb->opcode = (read_op) ? READ_6 : WRITE_6; scsi_ulto3b(lba, cdb->addr); cdb->length = num_blocks & 0xff; cdb->control = control; ctsio->cdb_len = sizeof(*cdb); } else if ((minimum_cdb_size < 12) && ((num_blocks & 0xffff) == num_blocks) && ((lba & 0xffffffff) == lba)) { struct scsi_rw_10 *cdb; cdb = (struct scsi_rw_10 *)ctsio->cdb; cdb->opcode = (read_op) ? READ_10 : WRITE_10; cdb->byte2 = byte2; scsi_ulto4b(lba, cdb->addr); cdb->reserved = 0; scsi_ulto2b(num_blocks, cdb->length); cdb->control = control; ctsio->cdb_len = sizeof(*cdb); } else if ((minimum_cdb_size < 16) && ((num_blocks & 0xffffffff) == num_blocks) && ((lba & 0xffffffff) == lba)) { struct scsi_rw_12 *cdb; cdb = (struct scsi_rw_12 *)ctsio->cdb; cdb->opcode = (read_op) ? 
READ_12 : WRITE_12; cdb->byte2 = byte2; scsi_ulto4b(lba, cdb->addr); scsi_ulto4b(num_blocks, cdb->length); cdb->reserved = 0; cdb->control = control; ctsio->cdb_len = sizeof(*cdb); } else { struct scsi_rw_16 *cdb; cdb = (struct scsi_rw_16 *)ctsio->cdb; cdb->opcode = (read_op) ? READ_16 : WRITE_16; cdb->byte2 = byte2; scsi_u64to8b(lba, cdb->addr); scsi_ulto4b(num_blocks, cdb->length); cdb->reserved = 0; cdb->control = control; ctsio->cdb_len = sizeof(*cdb); } io->io_hdr.io_type = CTL_IO_SCSI; if (read_op != 0) io->io_hdr.flags = CTL_FLAG_DATA_IN; else io->io_hdr.flags = CTL_FLAG_DATA_OUT; ctsio->tag_type = tag_type; ctsio->ext_data_ptr = data_ptr; ctsio->ext_data_len = data_len; ctsio->ext_sg_entries = 0; ctsio->ext_data_filled = 0; ctsio->sense_len = SSD_FULL_SIZE; }
/* * Convert the SCSI command in ccb to an ata_xfer command in xa * for ATA_PORT_T_DISK operations. Set the completion function * to convert the response back, then dispatch to the OpenBSD AHCI * layer. * * AHCI DISK commands only support a limited command set, and we * fake additional commands to make it play nice with the CAM subsystem. */ static void ahci_xpt_scsi_disk_io(struct ahci_port *ap, struct ata_port *atx, union ccb *ccb) { struct ccb_hdr *ccbh; struct ccb_scsiio *csio; struct ata_xfer *xa; struct ata_port *at; struct ata_fis_h2d *fis; struct ata_pass_12 *atp12; struct ata_pass_16 *atp16; scsi_cdb_t cdb; union scsi_data *rdata; int rdata_len; u_int64_t capacity; u_int64_t lba; u_int32_t count; ccbh = &ccb->csio.ccb_h; csio = &ccb->csio; at = atx ? atx : ap->ap_ata[0]; /* * XXX not passing NULL at for direct attach! */ xa = ahci_ata_get_xfer(ap, atx); rdata = (void *)csio->data_ptr; rdata_len = csio->dxfer_len; /* * Build the FIS or process the csio to completion. */ cdb = (void *)((ccbh->flags & CAM_CDB_POINTER) ? csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes); switch(cdb->generic.opcode) { case REQUEST_SENSE: /* * Auto-sense everything, so explicit sense requests * return no-sense. 
*/ ccbh->status = CAM_SCSI_STATUS_ERROR; break; case INQUIRY: /* * Inquiry supported features * * [opcode, byte2, page_code, length, control] */ if (cdb->inquiry.byte2 & SI_EVPD) { ahci_xpt_page_inquiry(ap, at, ccb); } else { bzero(rdata, rdata_len); if (rdata_len < SHORT_INQUIRY_LENGTH) { ccbh->status = CAM_CCB_LEN_ERR; break; } if (rdata_len > sizeof(rdata->inquiry_data)) rdata_len = sizeof(rdata->inquiry_data); rdata->inquiry_data.device = T_DIRECT; rdata->inquiry_data.version = SCSI_REV_SPC2; rdata->inquiry_data.response_format = 2; rdata->inquiry_data.additional_length = 32; bcopy("SATA ", rdata->inquiry_data.vendor, 8); bcopy(at->at_identify.model, rdata->inquiry_data.product, sizeof(rdata->inquiry_data.product)); bcopy(at->at_identify.firmware, rdata->inquiry_data.revision, sizeof(rdata->inquiry_data.revision)); ccbh->status = CAM_REQ_CMP; } /* * Use the vendor specific area to set the TRIM status * for scsi_da */ if (at->at_identify.support_dsm) { rdata->inquiry_data.vendor_specific1[0] = at->at_identify.support_dsm &ATA_SUPPORT_DSM_TRIM; rdata->inquiry_data.vendor_specific1[1] = at->at_identify.max_dsm_blocks; } break; case READ_CAPACITY_16: if (cdb->read_capacity_16.service_action != SRC16_SERVICE_ACTION) { ccbh->status = CAM_REQ_INVALID; break; } if (rdata_len < sizeof(rdata->read_capacity_data_16)) { ccbh->status = CAM_CCB_LEN_ERR; break; } /* fall through */ case READ_CAPACITY: if (rdata_len < sizeof(rdata->read_capacity_data)) { ccbh->status = CAM_CCB_LEN_ERR; break; } capacity = at->at_capacity; bzero(rdata, rdata_len); if (cdb->generic.opcode == READ_CAPACITY) { rdata_len = sizeof(rdata->read_capacity_data); if (capacity > 0xFFFFFFFFU) capacity = 0xFFFFFFFFU; bzero(&rdata->read_capacity_data, rdata_len); scsi_ulto4b((u_int32_t)capacity - 1, rdata->read_capacity_data.addr); scsi_ulto4b(512, rdata->read_capacity_data.length); } else { rdata_len = sizeof(rdata->read_capacity_data_16); bzero(&rdata->read_capacity_data_16, rdata_len); 
scsi_u64to8b(capacity - 1, rdata->read_capacity_data_16.addr); scsi_ulto4b(512, rdata->read_capacity_data_16.length); } ccbh->status = CAM_REQ_CMP; break; case SYNCHRONIZE_CACHE: /* * Synchronize cache. Specification says this can take * greater then 30 seconds so give it at least 45. */ fis = xa->fis; fis->flags = ATA_H2D_FLAGS_CMD; fis->command = ATA_C_FLUSH_CACHE; fis->device = 0; if (xa->timeout < 45000) xa->timeout = 45000; xa->datalen = 0; xa->flags = 0; xa->complete = ahci_ata_complete_disk_synchronize_cache; break; case TRIM: fis = xa->fis; fis->command = ATA_C_DATA_SET_MANAGEMENT; fis->features = (u_int8_t)ATA_SF_DSM_TRIM; fis->features_exp = (u_int8_t)(ATA_SF_DSM_TRIM>> 8); xa->flags = ATA_F_WRITE; fis->flags = ATA_H2D_FLAGS_CMD; xa->data = csio->data_ptr; xa->datalen = csio->dxfer_len; xa->timeout = ccbh->timeout*50; /* milliseconds */ fis->sector_count =(u_int8_t)(xa->datalen/512); fis->sector_count_exp =(u_int8_t)((xa->datalen/512)>>8); lba = 0; fis->lba_low = (u_int8_t)lba; fis->lba_mid = (u_int8_t)(lba >> 8); fis->lba_high = (u_int8_t)(lba >> 16); fis->lba_low_exp = (u_int8_t)(lba >> 24); fis->lba_mid_exp = (u_int8_t)(lba >> 32); fis->lba_high_exp = (u_int8_t)(lba >> 40); fis->device = ATA_H2D_DEVICE_LBA; xa->data = csio->data_ptr; xa->complete = ahci_ata_complete_disk_rw; ccbh->status = CAM_REQ_INPROG; break; case TEST_UNIT_READY: case START_STOP_UNIT: case PREVENT_ALLOW: /* * Just silently return success */ ccbh->status = CAM_REQ_CMP; rdata_len = 0; break; case ATA_PASS_12: atp12 = &cdb->ata_pass_12; fis = xa->fis; /* * Figure out the flags to be used, depending on the direction of the * CAM request. 
*/ switch (ccbh->flags & CAM_DIR_MASK) { case CAM_DIR_IN: xa->flags = ATA_F_READ; break; case CAM_DIR_OUT: xa->flags = ATA_F_WRITE; break; default: xa->flags = 0; } xa->flags |= ATA_F_POLL | ATA_F_EXCLUSIVE; xa->data = csio->data_ptr; xa->datalen = csio->dxfer_len; xa->complete = ahci_ata_complete_disk_rw; xa->timeout = ccbh->timeout; /* * Populate the fis from the information we received through CAM * ATA passthrough. */ fis->flags = ATA_H2D_FLAGS_CMD; /* maybe also atp12->flags ? */ fis->features = atp12->features; fis->sector_count = atp12->sector_count; fis->lba_low = atp12->lba_low; fis->lba_mid = atp12->lba_mid; fis->lba_high = atp12->lba_high; fis->device = atp12->device; /* maybe always 0? */ fis->command = atp12->command; fis->control = atp12->control; /* * Mark as in progress so it is sent to the device. */ ccbh->status = CAM_REQ_INPROG; break; case ATA_PASS_16: atp16 = &cdb->ata_pass_16; fis = xa->fis; /* * Figure out the flags to be used, depending on the direction of the * CAM request. */ switch (ccbh->flags & CAM_DIR_MASK) { case CAM_DIR_IN: xa->flags = ATA_F_READ; break; case CAM_DIR_OUT: xa->flags = ATA_F_WRITE; break; default: xa->flags = 0; } xa->flags |= ATA_F_POLL | ATA_F_EXCLUSIVE; xa->data = csio->data_ptr; xa->datalen = csio->dxfer_len; xa->complete = ahci_ata_complete_disk_rw; xa->timeout = ccbh->timeout; /* * Populate the fis from the information we received through CAM * ATA passthrough. */ fis->flags = ATA_H2D_FLAGS_CMD; /* maybe also atp16->flags ? */ fis->features = atp16->features; fis->features_exp = atp16->features_ext; fis->sector_count = atp16->sector_count; fis->sector_count_exp = atp16->sector_count_ext; fis->lba_low = atp16->lba_low; fis->lba_low_exp = atp16->lba_low_ext; fis->lba_mid = atp16->lba_mid; fis->lba_mid_exp = atp16->lba_mid_ext; fis->lba_high = atp16->lba_high; fis->lba_mid_exp = atp16->lba_mid_ext; fis->device = atp16->device; /* maybe always 0? 
*/ fis->command = atp16->command; /* * Mark as in progress so it is sent to the device. */ ccbh->status = CAM_REQ_INPROG; break; default: switch(cdb->generic.opcode) { case READ_6: lba = scsi_3btoul(cdb->rw_6.addr) & 0x1FFFFF; count = cdb->rw_6.length ? cdb->rw_6.length : 0x100; xa->flags = ATA_F_READ; break; case READ_10: lba = scsi_4btoul(cdb->rw_10.addr); count = scsi_2btoul(cdb->rw_10.length); xa->flags = ATA_F_READ; break; case READ_12: lba = scsi_4btoul(cdb->rw_12.addr); count = scsi_4btoul(cdb->rw_12.length); xa->flags = ATA_F_READ; break; case READ_16: lba = scsi_8btou64(cdb->rw_16.addr); count = scsi_4btoul(cdb->rw_16.length); xa->flags = ATA_F_READ; break; case WRITE_6: lba = scsi_3btoul(cdb->rw_6.addr) & 0x1FFFFF; count = cdb->rw_6.length ? cdb->rw_6.length : 0x100; xa->flags = ATA_F_WRITE; break; case WRITE_10: lba = scsi_4btoul(cdb->rw_10.addr); count = scsi_2btoul(cdb->rw_10.length); xa->flags = ATA_F_WRITE; break; case WRITE_12: lba = scsi_4btoul(cdb->rw_12.addr); count = scsi_4btoul(cdb->rw_12.length); xa->flags = ATA_F_WRITE; break; case WRITE_16: lba = scsi_8btou64(cdb->rw_16.addr); count = scsi_4btoul(cdb->rw_16.length); xa->flags = ATA_F_WRITE; break; default: ccbh->status = CAM_REQ_INVALID; break; } if (ccbh->status != CAM_REQ_INPROG) break; fis = xa->fis; fis->flags = ATA_H2D_FLAGS_CMD; fis->lba_low = (u_int8_t)lba; fis->lba_mid = (u_int8_t)(lba >> 8); fis->lba_high = (u_int8_t)(lba >> 16); fis->device = ATA_H2D_DEVICE_LBA; /* * NCQ only for direct-attached disks, do not currently * try to use NCQ with port multipliers. */ if (at->at_ncqdepth > 1 && ap->ap_type == ATA_PORT_T_DISK && (ap->ap_sc->sc_cap & AHCI_REG_CAP_SNCQ) && (ccbh->flags & CAM_POLLED) == 0) { /* * Use NCQ - always uses 48 bit addressing */ xa->flags |= ATA_F_NCQ; fis->command = (xa->flags & ATA_F_WRITE) ? 
ATA_C_WRITE_FPDMA : ATA_C_READ_FPDMA; fis->lba_low_exp = (u_int8_t)(lba >> 24); fis->lba_mid_exp = (u_int8_t)(lba >> 32); fis->lba_high_exp = (u_int8_t)(lba >> 40); fis->sector_count = xa->tag << 3; fis->features = (u_int8_t)count; fis->features_exp = (u_int8_t)(count >> 8); } else if (count > 0x100 || lba > 0x0FFFFFFFU) {