/*
 * []----
 * | raw_inquiry -- pass an INQUIRY command through to the real device
 * |
 * | Issues the CDB against the backing device via do_datain() and
 * | forwards the returned data to the initiator.  For a standard
 * | inquiry (EVPD bit clear) the peripheral device type is cached in
 * | the params area so later commands can special-case tape devices.
 * []----
 */
static void
raw_inquiry(t10_cmd_t *cmd, uint8_t *cdb, size_t cdb_len)
{
	raw_io_t	*io;
	uint32_t	len;
	struct scsi_inquiry	inq;
	raw_params_t	*r;

	if ((r = (raw_params_t *)T10_PARAMS_AREA(cmd)) == NULL)
		return;

	/* ALLOCATION LENGTH lives in bytes 3 and 4 of the GROUP0 CDB */
	len = (cdb[3] << 8) | cdb[4];

	/*
	 * Run the command against the real device.  Failure to issue
	 * the command, or a bad completion status, becomes a CHECK
	 * CONDITION for the initiator.
	 */
	if (((io = do_datain(cmd, cdb, CDB_GROUP0, len)) == NULL) ||
	    (io->r_status != STATUS_GOOD)) {
		if (io != NULL)
			raw_free_io(io);
		trans_send_complete(cmd, STATUS_CHECK);
		return;
	}

	/*
	 * Standard inquiry data (EVPD == 0): remember the device type;
	 * it selects the tape-specific paths in the read/write routines.
	 */
	if ((cdb[1] & 1) == 0) {
		bcopy(io->r_data, &inq, sizeof (inq));
		r->r_dtype = inq.inq_dtype;
	}

	/* raw_free_io() releases 'io' once the data has been sent */
	if (trans_send_datain(cmd, io->r_data, io->r_data_len, 0,
	    raw_free_io, True, io) == False) {
		spc_sense_create(cmd, KEY_HARDWARE_ERROR, 0);
		trans_send_complete(cmd, STATUS_CHECK);
	}
}
/*
 * []----
 * | raw_service_actiong4 -- SERVICE ACTION IN(16), i.e. READ CAPACITY(16)
 * |
 * | Passes the command through to the real device and caches the
 * | reported capacity so later READ/WRITE range checks can use it.
 * []----
 */
static void
raw_service_actiong4(t10_cmd_t *cmd, uint8_t *cdb, size_t cdb_len)
{
	raw_io_t	*io;
	uint32_t	len;
	struct scsi_capacity_16	cap16;
	raw_params_t	*r;

	if ((r = (raw_params_t *)T10_PARAMS_AREA(cmd)) == NULL)
		return;

	/* ALLOCATION LENGTH lives in bytes 10-13 of the GROUP4 CDB */
	len = (cdb[10] << 24) | (cdb[11] << 16) | (cdb[12] << 8) | cdb[13];

	if (((io = do_datain(cmd, cdb, CDB_GROUP4, len)) == NULL) ||
	    (io->r_status != STATUS_GOOD)) {
		if (io != NULL)
			raw_free_io(io);
		trans_send_complete(cmd, STATUS_CHECK);
		return;
	}

	bcopy(io->r_data, &cap16, sizeof (cap16));

	/*
	 * Currently there's a bug in ZFS which doesn't report a capacity
	 * for any of the volumes. This means that when using ZFS the
	 * administrator must supply the device size.
	 */
	if (cap16.sc_capacity != 0)
		r->r_size = cap16.sc_capacity;

	if (trans_send_datain(cmd, io->r_data, io->r_data_len, 0,
	    raw_free_io, True, io) == False) {
		spc_sense_create(cmd, KEY_HARDWARE_ERROR, 0);
		trans_send_complete(cmd, STATUS_CHECK);
	}
}
/*
 * []----
 * | raw_write_data -- data-out completion hook for raw WRITEs
 * |
 * | Dispatches tape devices to their own handler; for disks the
 * | received buffer is queued for asynchronous write at the byte
 * | offset derived from the command's LBA and running offset.
 * []----
 */
/*ARGSUSED*/
void
raw_write_data(t10_cmd_t *cmd, emul_handle_t id, size_t offset, char *data,
    size_t data_len)
{
	raw_params_t	*params;
	raw_io_t	*rio;

	params = T10_PARAMS_AREA(cmd);
	if (params == NULL)
		return;

	if (params->r_dtype == DTYPE_SEQUENTIAL) {
		/* Sequential (tape) devices use a separate path. */
		raw_write_tape_data(cmd, id, offset, data, data_len);
	} else {
		rio = (raw_io_t *)id;
		trans_aiowrite(cmd, data, data_len,
		    (rio->r_lba * 512) + (off_t)rio->r_offset, &rio->r_aio);
	}
}
/*
 * []----
 * | sbc_reserve -- RESERVE(6): grant exclusive use of the LU
 * |
 * | Grants (or re-grants) the reservation to the requesting initiator
 * | and flips every other ITL nexus for this LU over to the "reserved"
 * | command table so their commands conflict until release.
 * []----
 */
/*ARGSUSED*/
static void
sbc_reserve(t10_cmd_t *cmd, uint8_t *cdb, size_t cdb_len)
{
	disk_params_t	*p = (disk_params_t *)T10_PARAMS_AREA(cmd);
	t10_lu_impl_t	*lu;

	if (p == NULL)
		return;

	/* Reserved bits in byte 1 and the CONTROL byte must be zero. */
	if (cdb[1] & 0xe0 || SAM_CONTROL_BYTE_RESERVED(cdb[5])) {
		spc_sense_create(cmd, KEY_ILLEGAL_REQUEST, 0);
		spc_sense_ascq(cmd, SPC_ASC_INVALID_CDB, 0x00);
		trans_send_complete(cmd, STATUS_CHECK);
		return;
	}

	if ((p->d_reserve_owner != NULL) &&
	    (p->d_reserve_owner != cmd->c_lu)) {

		/* Another initiator already holds the reservation. */
		trans_send_complete(cmd, STATUS_RESERVATION_CONFLICT);
		return;

	} else if (p->d_reserve_owner == cmd->c_lu) {

		/*
		 * According SPC-2 revision 20, section 7.21.2
		 * It shall be permissible for an initiator to
		 * reserve a logic unit that is currently reserved
		 * by that initiator
		 */
		trans_send_complete(cmd, STATUS_GOOD);

	} else {

		/*
		 * Take the reservation: every other open ITL nexus for
		 * this LU gets the "reserved" command table so its
		 * commands are rejected until the reservation is dropped.
		 */
		lu = avl_first(&cmd->c_lu->l_common->l_all_open);
		do {
			if (lu != cmd->c_lu)
				lu->l_cmd = sbc_cmd_reserved;
			lu = AVL_NEXT(&cmd->c_lu->l_common->l_all_open, lu);
		} while (lu != NULL);
		p->d_reserve_owner = cmd->c_lu;
		trans_send_complete(cmd, STATUS_GOOD);
	}
}
/*
 * []----
 * | sbc_release -- RELEASE(6): drop the LU reservation
 * |
 * | Restores the normal command table on every ITL nexus for this LU.
 * | Non-owners never reach this code: while reserved they run the
 * | sbc_cmd_reserved table, so arriving here implies ownership.
 * []----
 */
/*ARGSUSED*/
static void
sbc_release(t10_cmd_t *cmd, uint8_t *cdb, size_t cdb_len)
{
	disk_params_t	*p = (disk_params_t *)T10_PARAMS_AREA(cmd);
	t10_lu_impl_t	*lu;

	if (p == NULL)
		return;

	/* Reserved bits/bytes and the CONTROL byte must be zero. */
	if (cdb[1] & 0xe0 || cdb[3] || cdb[4] ||
	    SAM_CONTROL_BYTE_RESERVED(cdb[5])) {
		spc_sense_create(cmd, KEY_ILLEGAL_REQUEST, 0);
		spc_sense_ascq(cmd, SPC_ASC_INVALID_CDB, 0x00);
		trans_send_complete(cmd, STATUS_CHECK);
		return;
	}

	if (p->d_reserve_owner == NULL) {

		/*
		 * If nobody is the owner this command is successful.
		 */
		trans_send_complete(cmd, STATUS_GOOD);
		return;
	}

	/*
	 * At this point the only way to get in here is to be the owner
	 * of the reservation.
	 */
	lu = avl_first(&cmd->c_lu->l_common->l_all_open);
	do {
		lu->l_cmd = sbc_cmd;
		lu = AVL_NEXT(&cmd->c_lu->l_common->l_all_open, lu);
	} while (lu != NULL);
	p->d_reserve_owner = NULL;
	trans_send_complete(cmd, STATUS_GOOD);
}
/*
 * []----
 * | sbc_write_data -- data-out completion hook for disk WRITEs
 * |
 * | AIO-backed LUs queue an asynchronous write of the received buffer.
 * | mmap-backed LUs already have the data in place (the transport wrote
 * | straight into the mapping), so only an optional fsync and the
 * | completion callback remain.
 * []----
 */
/*ARGSUSED*/
void
sbc_write_data(t10_cmd_t *cmd, emul_handle_t id, size_t offset, char *data,
    size_t data_len)
{
	disk_io_t	*io = (disk_io_t *)id;
	disk_params_t	*d;

	if (cmd->c_lu->l_common->l_mmap == MAP_FAILED) {
		/* AIO path: write at LBA * 512 plus the chunk offset. */
		trans_aiowrite(cmd, data, data_len,
		    (io->da_lba * 512) + (off_t)io->da_offset,
		    (aio_result_t *)io);
	} else {
		if ((d = (disk_params_t *)T10_PARAMS_AREA(cmd)) == NULL)
			return;
		if (d->d_fast_write == False) {

			/*
			 * We only need to worry about sync'ing the blocks
			 * in the mmap case because if the fast cache isn't
			 * enabled for AIO the file will be opened with F_SYNC
			 * which performs the correct action.
			 */
			if (fsync(cmd->c_lu->l_common->l_fd) == -1) {
				spc_sense_create(cmd, KEY_HARDWARE_ERROR, 0);
				trans_send_complete(cmd, STATUS_CHECK);
				return;
			}
		}

		/*
		 * Since the data has already been transfered from the
		 * transport to the mmap area we just need to call
		 * the complete routine.
		 */
		sbc_write_cmplt(id);
	}
}
/*
 * []----
 * | sbc_io_alloc -- return a disk_io_t structure
 * |
 * | If the call to calloc fails we use the structure that was allocated
 * | during the initial common initialization call. This will allow the
 * | daemon to at least make progress.  Only one thread may use the
 * | reserved structure at a time; contenders block on d_io_cond until
 * | the current user returns it.
 * []----
 */
static disk_io_t *
sbc_io_alloc(t10_cmd_t *c)
{
	disk_io_t	*io;
	disk_params_t	*d = T10_PARAMS_AREA(c);

	if ((io = (disk_io_t *)calloc(1, sizeof (*io))) == NULL) {

		/*
		 * Memory is exhausted: fall back to the single reserved
		 * structure, waiting for it if it is currently in use.
		 * d_io_need tells the releaser someone is waiting.
		 */
		(void) pthread_mutex_lock(&d->d_mutex);
		if (d->d_io_used == True) {
			d->d_io_need = True;
			while (d->d_io_used == True)
				pthread_cond_wait(&d->d_io_cond, &d->d_mutex);
			d->d_io_need = False;
		}
		d->d_io_used = True;
		io = d->d_io_reserved;
		(void) pthread_mutex_unlock(&d->d_mutex);
	}

	io->da_cmd	= c;
	io->da_params	= d;

	return (io);
}
/*
 * []----
 * | raw_write -- emulate WRITE(6)/(10)/(16) for a pass-through LUN
 * |
 * | Decodes the LBA and transfer length from the CDB variant, range
 * | checks against the cached capacity, then requests data-out from
 * | the transport in chunks no larger than the transport maximum.
 * | Subsequent chunks re-enter this routine via cmd->c_emul_id.
 * []----
 */
/*ARGSUSED*/
static void
raw_write(t10_cmd_t *cmd, uint8_t *cdb, size_t cdb_len)
{
	/*LINTED*/
	union scsi_cdb	*cdbp = (union scsi_cdb *)cdb;
	off_t		addr;
	uint64_t	err_blkno;
	uint32_t	cnt;
	uchar_t		addl_sense_len;
	char		debug[80];	/* debug */
	raw_params_t	*r;
	raw_io_t	*io;
	size_t		max_out;

	if ((r = (raw_params_t *)T10_PARAMS_AREA(cmd)) == NULL)
		return;

	if (r->r_dtype == DTYPE_SEQUENTIAL) {
		raw_write_tape(cmd, cdb, cdb_len);
		return;
	}

	/* Decode the LBA and transfer length from the CDB variant. */
	switch (cdb[0]) {
	case SCMD_WRITE:
		/*
		 * SBC-2 revision 16, section 5.24
		 * Reserve bit checks.
		 */
		if ((cdb[1] & 0xe0) || (cdb[5] & 0x38)) {
			spc_sense_create(cmd, KEY_ILLEGAL_REQUEST, 0);
			spc_sense_ascq(cmd, 0x24, 0x00);
			trans_send_complete(cmd, STATUS_CHECK);
			return;
		}

		addr = (off_t)cdbp->g0_addr2 << 16 |
		    (off_t)cdbp->g0_addr1 << 8 | (off_t)cdbp->g0_addr0;
		cnt = cdbp->g0_count0;

		/*
		 * SBC-2 Revision 16/Section 5.24 WRITE(6)
		 * A TRANSFER LENGTH of 0 indicates that 256 logical blocks
		 * shall be written.
		 */
		if (cnt == 0)
			cnt = 256;
		break;

	case SCMD_WRITE_G1:
		/*
		 * SBC-2 revision 16, section 5.25
		 * Reserve bit checks.
		 */
		if ((cdb[1] & 0x6) || cdb[6] || (cdb[9] & 0x38)) {
			spc_sense_create(cmd, KEY_ILLEGAL_REQUEST, 0);
			spc_sense_ascq(cmd, 0x24, 0x00);
			trans_send_complete(cmd, STATUS_CHECK);
			return;
		}

		addr = (off_t)cdbp->g1_addr3 << 24 |
		    (off_t)cdbp->g1_addr2 << 16 |
		    (off_t)cdbp->g1_addr1 << 8 | (off_t)cdbp->g1_addr0;
		cnt = cdbp->g1_count1 << 8 | cdbp->g1_count0;
		break;

	case SCMD_WRITE_G4:
		/*
		 * SBC-2 revision 16, section 5.27
		 * Reserve bit checks.
		 */
		if ((cdb[1] & 0x6) || cdb[14] || (cdb[15] & 0x38)) {
			spc_sense_create(cmd, KEY_ILLEGAL_REQUEST, 0);
			spc_sense_ascq(cmd, 0x24, 0x00);
			trans_send_complete(cmd, STATUS_CHECK);
			return;
		}

		addr = (off_t)(cdbp->g4_addr3 & 0xff) << 56 |
		    (off_t)(cdbp->g4_addr2 & 0xff) << 48 |
		    (off_t)(cdbp->g4_addr1 & 0xff) << 40 |
		    (off_t)(cdbp->g4_addr0 & 0xff) << 32 |
		    (off_t)(cdbp->g4_addtl_cdb_data3 & 0xff) << 24 |
		    (off_t)(cdbp->g4_addtl_cdb_data2 & 0xff) << 16 |
		    (off_t)(cdbp->g4_addtl_cdb_data1 & 0xff) << 8 |
		    (off_t)(cdbp->g4_addtl_cdb_data0 & 0xff);
		cnt = cdbp->g4_count3 << 24 | cdbp->g4_count2 << 16 |
		    cdbp->g4_count1 << 8 | cdbp->g4_count0;
		break;

	default:
		queue_str(mgmtq, Q_STE_ERRS, msg_log, "Unprocessed WRITE type");
		spc_sense_create(cmd, KEY_ILLEGAL_REQUEST, 0);
		spc_sense_ascq(cmd, 0x24, 0x00);
		trans_send_complete(cmd, STATUS_CHECK);
		return;
	}

	if ((addr < 0) || ((addr + cnt) > r->r_size)) {

		/*
		 * request exceed the capacity of disk
		 * set error block number to capacity + 1
		 */
		err_blkno = r->r_size + 1;

		/*
		 * XXX: What's SBC-2 say about ASC/ASCQ here. Solaris
		 * doesn't care about these values when key is set
		 * to KEY_ILLEGAL_REQUEST.
		 */
		if (err_blkno > FIXED_SENSE_ADDL_INFO_LEN)
			addl_sense_len = INFORMATION_SENSE_DESCR;
		else
			addl_sense_len = 0;
		spc_sense_create(cmd, KEY_ILLEGAL_REQUEST, addl_sense_len);
		spc_sense_info(cmd, err_blkno);
		spc_sense_ascq(cmd, 0x21, 0x00);
		trans_send_complete(cmd, STATUS_CHECK);
		(void) snprintf(debug, sizeof (debug),
		    "RAW%d WRITE Illegal sector (0x%llx + 0x%x) > 0x%llx",
		    cmd->c_lu->l_common->l_num, addr, cnt, r->r_size);
		queue_str(mgmtq, Q_STE_ERRS, msg_log, debug);
		return;
	}

	if (cnt == 0) {
		/* Nothing to transfer; just acknowledge. */
		trans_send_complete(cmd, STATUS_GOOD);
		return;
	}

	/*
	 * c_emul_id is non-NULL on the second and later passes through
	 * this routine for the same command (multi-chunk transfers).
	 */
	io = (raw_io_t *)cmd->c_emul_id;
	if (io == NULL) {
		if ((io = (raw_io_t *)calloc(1, sizeof (*io))) == NULL) {
			spc_sense_create(cmd, KEY_HARDWARE_ERROR, 0);
			trans_send_complete(cmd, STATUS_CHECK);
			return;
		}
		io->r_lba		= addr;
		io->r_lba_cnt		= cnt;
		io->r_cmd		= cmd;
		io->r_aio.a_aio_cmplt	= raw_write_cmplt;
		io->r_aio.a_id		= io;

		/*
		 * Only update the statistics the first time through
		 * for this particular command. If the requested transfer
		 * is larger than the transport can handle this routine
		 * will be called many times.
		 */
		cmd->c_lu->l_cmds_write++;
		cmd->c_lu->l_sects_write += cnt;
	}

	/*
	 * If a transport sets the maximum output value to zero we'll
	 * just request the entire amount. Otherwise, transfer no more
	 * than the maximum output or the remainder, whichever is less.
	 */
	max_out = cmd->c_lu->l_targ->s_maxout;
	io->r_data_len = max_out ?
	    MIN(max_out, (cnt * 512) - io->r_offset) : (cnt * 512);

#ifdef FULL_DEBUG
	(void) snprintf(debug, sizeof (debug),
	    "RAW%d blk 0x%llx, cnt %d, offset 0x%llx, size %d",
	    cmd->c_lu->l_common->l_num, addr, cnt, io->r_offset,
	    io->r_data_len);
	queue_str(mgmtq, Q_STE_IO, msg_log, debug);
#endif

	if ((io->r_data = (char *)malloc(io->r_data_len)) == NULL) {

		/*
		 * NOTE: May need a different ASC code
		 */
		err_blkno = addr + ((io->r_offset + 511) / 512);
		if (err_blkno > FIXED_SENSE_ADDL_INFO_LEN)
			addl_sense_len = INFORMATION_SENSE_DESCR;
		else
			addl_sense_len = 0;
		spc_sense_create(cmd, KEY_HARDWARE_ERROR, addl_sense_len);
		spc_sense_info(cmd, err_blkno);
		trans_send_complete(cmd, STATUS_CHECK);
		return;
	}

	if (trans_rqst_dataout(cmd, io->r_data, io->r_data_len, io->r_offset,
	    io, raw_free_io) == False) {
		spc_sense_create(cmd, KEY_HARDWARE_ERROR, 0);
		trans_send_complete(cmd, STATUS_CHECK);
	}
}
/*
 * []----
 * | raw_read -- emulate READ(6)/(10)/(16) for a pass-through LUN
 * |
 * | Decodes the LBA and transfer length, range checks against the
 * | cached capacity, then issues asynchronous reads in chunks no
 * | larger than the transport maximum.  Every chunk but the last runs
 * | under a duplicated command handle so completions can be delivered
 * | independently.
 * []----
 */
/*ARGSUSED*/
static void
raw_read(t10_cmd_t *cmd, uint8_t *cdb, size_t cdb_len)
{
	/*LINTED*/
	union scsi_cdb	*u = (union scsi_cdb *)cdb;
	diskaddr_t	addr;
	off_t		offset = 0;
	uint32_t	cnt;
	uint32_t	min;
	raw_io_t	*io;
	uint64_t	err_blkno;
	int		sense_len;
	char		debug[80];
	raw_params_t	*r;
	uchar_t		addl_sense_len;
	t10_cmd_t	*c;

	if ((r = (raw_params_t *)T10_PARAMS_AREA(cmd)) == NULL)
		return;

	if (r->r_dtype == DTYPE_SEQUENTIAL) {
		raw_read_tape(cmd, cdb, cdb_len);
		return;
	}

	switch (u->scc_cmd) {
	case SCMD_READ:
		/*
		 * SBC-2 Revision 16, section 5.5
		 * Reserve bit checks
		 */
		if ((cdb[1] & 0xe0) || (cdb[5] & 0x38)) {
			spc_sense_create(cmd, KEY_ILLEGAL_REQUEST, 0);
			spc_sense_ascq(cmd, 0x24, 0x00);
			trans_send_complete(cmd, STATUS_CHECK);
			return;
		}

		addr = (diskaddr_t)(uint32_t)GETG0ADDR(u);
		cnt = GETG0COUNT(u);

		/*
		 * SBC-2 Revision 16
		 * Section: 5.5 READ(6) command
		 * A TRANSFER LENGTH field set to zero specifies
		 * that 256 logical blocks shall be read.
		 */
		if (cnt == 0)
			cnt = 256;
		break;

	case SCMD_READ_G1:
		/*
		 * SBC-2 Revision 16, section 5.6
		 * Reserve bit checks.
		 */
		if ((cdb[1] & 6) || cdb[6] || (cdb[9] & 0x38)) {
			spc_sense_create(cmd, KEY_ILLEGAL_REQUEST, 0);
			spc_sense_ascq(cmd, 0x24, 0x00);
			trans_send_complete(cmd, STATUS_CHECK);
			return;
		}

		addr = (diskaddr_t)(uint32_t)GETG1ADDR(u);
		cnt = GETG1COUNT(u);
		break;

	case SCMD_READ_G4:
		/*
		 * SBC-2 Revision 16, section 5.8
		 * Reserve bit checks
		 */
		if ((cdb[1] & 0x6) || (cdb[10] & 6) || cdb[14] ||
		    (cdb[15] & 0x38)) {
			spc_sense_create(cmd, KEY_ILLEGAL_REQUEST, 0);
			spc_sense_ascq(cmd, 0x24, 0x00);
			trans_send_complete(cmd, STATUS_CHECK);
			return;
		}

		addr = GETG4LONGADDR(u);
		cnt = GETG4COUNT(u);
		break;

	default:
		spc_sense_create(cmd, KEY_ILLEGAL_REQUEST, 0);
		trans_send_complete(cmd, STATUS_CHECK);
		return;
	}

	if ((addr + cnt) > r->r_size) {

		/*
		 * request exceed the capacity of disk
		 * set error block number to capacity + 1
		 */
		err_blkno = r->r_size + 1;

		/*
		 * XXX: What's SBC-2 say about ASC/ASCQ here. Solaris
		 * doesn't care about these values when key is set
		 * to KEY_ILLEGAL_REQUEST.
		 */
		if (err_blkno > FIXED_SENSE_ADDL_INFO_LEN)
			addl_sense_len = INFORMATION_SENSE_DESCR;
		else
			addl_sense_len = 0;
		spc_sense_create(cmd, KEY_ILLEGAL_REQUEST, addl_sense_len);
		spc_sense_info(cmd, err_blkno);
		spc_sense_ascq(cmd, 0x21, 0x00);
		trans_send_complete(cmd, STATUS_CHECK);
		(void) snprintf(debug, sizeof (debug),
		    "RAW%d READ Illegal sector (0x%llx + 0x%x) > 0x%llx",
		    cmd->c_lu->l_common->l_num, addr, cnt, r->r_size);
		queue_str(mgmtq, Q_STE_ERRS, msg_log, debug);
		return;
	}

	cmd->c_lu->l_cmds_read++;
	cmd->c_lu->l_sects_read += cnt;

	if (cnt == 0) {
		trans_send_complete(cmd, STATUS_GOOD);
		return;
	}

	/*
	 * Break the request into transport-sized chunks.  All but the
	 * final chunk use a duplicated command handle.
	 */
	do {
		min = MIN((cnt * 512) - offset, T10_MAX_OUT(cmd));
		if ((offset + min) < (cnt * 512LL))
			c = trans_cmd_dup(cmd);
		else
			c = cmd;
		if ((io = (raw_io_t *)calloc(1, sizeof (*io))) == NULL) {

			/*
			 * We're pretty much dead in the water. If we can't
			 * allocate memory. It's unlikey we'll be able to
			 * allocate a sense buffer or queue the command
			 * up to be sent back to the transport for delivery.
			 */
			spc_sense_create(c, KEY_HARDWARE_ERROR, 0);
			trans_send_complete(c, STATUS_CHECK);
			return;
		}
		io->r_cmd		= c;
		io->r_lba		= addr;
		io->r_lba_cnt		= cnt;
		io->r_offset		= offset;
		io->r_data_len		= min;
		io->r_aio.a_aio_cmplt	= raw_read_cmplt;
		io->r_aio.a_id		= io;

#ifdef FULL_DEBUG
		(void) snprintf(debug, sizeof (debug),
		    "RAW%d blk 0x%llx, cnt %d, offset 0x%llx, size %d",
		    c->c_lu->l_common->l_num, addr, cnt, io->r_offset, min);
		queue_str(mgmtq, Q_STE_IO, msg_log, debug);
#endif
		if ((io->r_data = (char *)malloc(min)) == NULL) {
			err_blkno = addr + ((offset + 511) / 512);
			if (err_blkno > FIXED_SENSE_ADDL_INFO_LEN)
				sense_len = INFORMATION_SENSE_DESCR;
			else
				sense_len = 0;
			spc_sense_create(c, KEY_HARDWARE_ERROR, sense_len);
			spc_sense_info(c, err_blkno);
			trans_send_complete(c, STATUS_CHECK);
			return;
		}
		trans_aioread(c, io->r_data, min,
		    (addr * 512LL) + (off_t)io->r_offset, &io->r_aio);
		offset += min;
	} while (offset < (off_t)(cnt * 512));
}
/*
 * []----
 * | sbc_msense -- MODE SENSE(6) for the disk emulation
 * |
 * | Builds the requested mode page(s) into a freshly allocated buffer
 * | and sends back at most the CDB's ALLOCATION LENGTH (cdb[4]) bytes.
 * |
 * | Fixes relative to the previous version:
 * |   - MODE_SENSE_SEND_ALL no longer uses 'np' uninitialized when
 * |     the LU has no geometry (heads/cylinders/sectors all zero).
 * |   - The undersized SEND_ALL branch now frees the I/O structure and
 * |     returns instead of falling through to trans_send_datain() after
 * |     the command was already completed with CHECK CONDITION.
 * |   - The default (unsupported page) branch no longer leaks 'io'.
 * []----
 */
/*ARGSUSED*/
void
sbc_msense(t10_cmd_t *cmd, uint8_t *cdb, size_t cdb_len)
{
	struct mode_header	*mode_hdr;
	char			*np;
	disk_params_t		*d;
	disk_io_t		*io;

	if ((d = (disk_params_t *)T10_PARAMS_AREA(cmd)) == NULL)
		return;

	/*
	 * SPC-3 Revision 21c section 6.8
	 * Reserve bit checks
	 */
	if ((cdb[1] & ~SPC_MODE_SENSE_DBD) ||
	    SAM_CONTROL_BYTE_RESERVED(cdb[5])) {
		spc_sense_create(cmd, KEY_ILLEGAL_REQUEST, 0);
		spc_sense_ascq(cmd, SPC_ASC_INVALID_CDB, 0x00);
		trans_send_complete(cmd, STATUS_CHECK);
		return;
	}

	/*
	 * Zero allocation length causes a simple ack to occur.
	 */
	if (cdb[4] == 0) {
		trans_send_complete(cmd, STATUS_GOOD);
		return;
	}

	io = sbc_io_alloc(cmd);

	/*
	 * Make sure that we have enough room in the data buffer. We'll
	 * only send back the amount requested though
	 */
	io->da_data_len = MAX(cdb[4], sizeof (struct mode_format) +
	    sizeof (struct mode_geometry) +
	    sizeof (struct mode_control_scsi3) +
	    sizeof (struct mode_cache_scsi3) +
	    sizeof (struct mode_info_ctrl) + (MODE_BLK_DESC_LENGTH * 5));
	if ((io->da_data = (char *)calloc(1, io->da_data_len)) == NULL) {
		sbc_io_free(io);
		trans_send_complete(cmd, STATUS_BUSY);
		return;
	}
	io->da_clear_overlap	= False;
	io->da_data_alloc	= True;
	mode_hdr = (struct mode_header *)io->da_data;

	switch (cdb[2]) {
	case MODE_SENSE_PAGE3_CODE:
		/* Format page is only valid when geometry is known. */
		if ((d->d_heads == 0) && (d->d_cyl == 0) && (d->d_spt == 0)) {
			sbc_io_free(io);
			spc_unsupported(cmd, cdb, cdb_len);
			return;
		}
		mode_hdr->length	= sizeof (struct mode_format);
		mode_hdr->bdesc_length	= MODE_BLK_DESC_LENGTH;
		(void) sense_page3(d,
		    io->da_data + sizeof (*mode_hdr) + mode_hdr->bdesc_length);
		break;

	case MODE_SENSE_PAGE4_CODE:
		/* Geometry page is only valid when geometry is known. */
		if ((d->d_heads == 0) && (d->d_cyl == 0) && (d->d_spt == 0)) {
			sbc_io_free(io);
			spc_unsupported(cmd, cdb, cdb_len);
			return;
		}
		mode_hdr->length	= sizeof (struct mode_geometry);
		mode_hdr->bdesc_length	= MODE_BLK_DESC_LENGTH;
		(void) sense_page4(d,
		    io->da_data + sizeof (*mode_hdr) + mode_hdr->bdesc_length);
		break;

	case MODE_SENSE_CACHE:
		mode_hdr->length	= sizeof (struct mode_cache_scsi3);
		mode_hdr->bdesc_length	= MODE_BLK_DESC_LENGTH;
		(void) sense_cache(d,
		    io->da_data + sizeof (*mode_hdr) + mode_hdr->bdesc_length);
		break;

	case MODE_SENSE_CONTROL:
		mode_hdr->length	= sizeof (struct mode_control_scsi3);
		mode_hdr->bdesc_length	= MODE_BLK_DESC_LENGTH;
		(void) sense_mode_control(cmd->c_lu,
		    io->da_data + sizeof (*mode_hdr) + mode_hdr->bdesc_length);
		break;

	case MODE_SENSE_INFO_CTRL:
		(void) sense_info_ctrl(io->da_data);
		break;

	case MODE_SENSE_SEND_ALL:
		/*
		 * SPC-3 revision 21c
		 * Section 6.9.1 Table 97
		 * "Return all subpage 00h mode pages in page_0 format"
		 */
		if (io->da_data_len < (sizeof (struct mode_format) +
		    sizeof (struct mode_geometry) +
		    sizeof (struct mode_control_scsi3) +
		    sizeof (struct mode_info_ctrl))) {

			/*
			 * Believe it or not, there's an initiator out
			 * there which sends a mode sense request for all
			 * of the pages, without always sending a data-in
			 * size which is large enough.
			 * NOTE: Need to check the error key returned
			 * here and see if something else should be used.
			 *
			 * The command has been completed above, so free
			 * the I/O structure and return rather than fall
			 * through to trans_send_datain() and answer the
			 * same command twice.
			 */
			spc_sense_create(cmd, KEY_ILLEGAL_REQUEST, 0);
			trans_send_complete(cmd, STATUS_CHECK);
			sbc_io_free(io);
			return;
		} else {

			/*
			 * Lay the pages down starting at the front of
			 * the buffer.  If we don't have geometry then
			 * don't attempt to report that information, but
			 * still return the remaining pages.  ('np' was
			 * previously left uninitialized in that case.)
			 */
			np = io->da_data;
			if (d->d_heads && d->d_cyl && d->d_spt) {
				np = sense_page3(d, np);
				np = sense_page4(d, np);
			}
			np = sense_cache(d, np);
			np = sense_mode_control(cmd->c_lu, np);
			(void) sense_info_ctrl(np);
		}
		break;

	case 0x00:
		/*
		 * SPC-3 Revision 21c, section 6.9.1
		 * Table 97 -- Mode page code usage for all devices
		 * Page Code 00 == Vendor specific. We are going to return
		 * zeros.
		 */
		break;

	default:
		queue_prt(mgmtq, Q_STE_ERRS,
		    "SBC%x LUN%d Unsupported mode_sense request 0x%x",
		    cmd->c_lu->l_targ->s_targ_num, cmd->c_lu->l_common->l_num,
		    cdb[2]);
		spc_sense_create(cmd, KEY_ILLEGAL_REQUEST, 0);
		spc_sense_ascq(cmd, SPC_ASC_INVALID_CDB, 0x00);
		trans_send_complete(cmd, STATUS_CHECK);
		sbc_io_free(io);	/* previously leaked on this path */
		return;
	}

	/* Send back only the amount the initiator asked for. */
	if (trans_send_datain(cmd, io->da_data, cdb[4], 0, sbc_io_free,
	    True, io) == False) {
		trans_send_complete(cmd, STATUS_BUSY);
	}
}
/*
 * []----
 * | sbc_recap -- READ CAPACITY(10)
 * |
 * | Returns the last addressable LBA and the block size, both in
 * | network (big-endian) byte order.  Capacities that don't fit in
 * | 32 bits are reported as 0xffffffff, directing the initiator to
 * | READ CAPACITY(16).
 * |
 * | Fix: the PMI branch previously stored (capacity - 1) without
 * | htonl(), leaking a host-endian value onto the wire while every
 * | other branch byte-swapped.
 * []----
 */
/*ARGSUSED*/
void
sbc_recap(t10_cmd_t *cmd, uint8_t *cdb, size_t cdb_len)
{
	uint64_t		capacity;
	int			len;
	uint32_t		lba;
	struct scsi_capacity	*cap;
	disk_params_t		*d;
	disk_io_t		*io;

	if ((d = (disk_params_t *)T10_PARAMS_AREA(cmd)) == NULL)
		return;

	capacity = d->d_size;
	len = sizeof (struct scsi_capacity);

	/*
	 * SBC-2 Revision 16, section 5.10.1
	 * Any of the following conditions will generate an error.
	 * (1) PMI bit is zero and LOGICAL block address is non-zero
	 * (2) Reserved bytes are not zero
	 * (3) Reserved bits are not zero
	 * (4) Reserved CONTROL bits are not zero
	 */
	if ((((cdb[8] & SBC_CAPACITY_PMI) == 0) &&
	    (cdb[2] || cdb[3] || cdb[4] || cdb[5])) ||
	    cdb[1] || cdb[6] || cdb[7] || (cdb[8] & ~SBC_CAPACITY_PMI) ||
	    SAM_CONTROL_BYTE_RESERVED(cdb[9])) {
		spc_sense_create(cmd, KEY_ILLEGAL_REQUEST, 0);
		spc_sense_ascq(cmd, SPC_ASC_INVALID_CDB, 0x00);
		trans_send_complete(cmd, STATUS_CHECK);
		return;
	}

	/*
	 * If the device capacity is larger than 32 bits then report
	 * all 0xf's; the initiator must then use READ CAPACITY(16).
	 * NOTE: the adjustment to subtract one from the capacity is
	 * done below.
	 */
	if (capacity & 0xFFFFFFFF00000000ULL)
		capacity = 0xFFFFFFFF;

	io = sbc_io_alloc(cmd);

	if ((cap = (struct scsi_capacity *)calloc(1, len)) == NULL) {
		sbc_io_free(io);
		trans_send_complete(cmd, STATUS_BUSY);
		return;
	}
	io->da_data		= (char *)cap;
	io->da_data_alloc	= True;
	io->da_clear_overlap	= False;
	io->da_data_len		= len;

	if (capacity != 0xFFFFFFFF) {
		/*
		 * Look at the PMI information
		 */
		if (cdb[8] & SBC_CAPACITY_PMI) {
			lba = cdb[2] << 24 | cdb[3] << 16 |
			    cdb[4] << 8 | cdb[5];
			if (lba >= capacity)
				cap->capacity = htonl(0xffffffff);
			else
				/*
				 * Fix: this value must be big-endian
				 * like every other branch; htonl() was
				 * previously missing here.
				 */
				cap->capacity = htonl(capacity - 1);
		} else {
			cap->capacity = htonl(capacity - 1);
		}
	} else {
		cap->capacity = htonl(capacity);
	}
	cap->lbasize = htonl(d->d_bytes_sect);

	if (trans_send_datain(cmd, io->da_data, io->da_data_len, 0,
	    sbc_io_free, True, io) == False) {
		trans_send_complete(cmd, STATUS_BUSY);
	}
}
/*
 * []----
 * | sbc_write -- emulate WRITE(6)/(10)/(16) for the disk emulation
 * |
 * | Decodes the LBA and transfer length, range checks against the LU
 * | size, then requests data-out in transport-sized chunks.  Later
 * | chunks re-enter via cmd->c_emul_id.  mmap-backed LUs receive data
 * | directly into the mapping; otherwise a buffer is malloc'd.
 * |
 * | Fix: the diagnostic format strings used "0x%ullx" (parsed as %u
 * | followed by the literal "llx", misaligning the 64-bit d_size
 * | vararg) and "0x%x" for the 64-bit addr; both now use 0x%llx.
 * []----
 */
/*ARGSUSED*/
static void
sbc_write(t10_cmd_t *cmd, uint8_t *cdb, size_t cdb_len)
{
	union scsi_cdb	*u;
	diskaddr_t	addr;
	uint64_t	err_blkno;
	uint32_t	cnt;
	uchar_t		addl_sense_len;
	disk_params_t	*d;
	disk_io_t	*io;
	size_t		max_out;
	void		*mmap_area;

	if ((d = (disk_params_t *)T10_PARAMS_AREA(cmd)) == NULL)
		return;

	/*LINTED*/
	u = (union scsi_cdb *)cdb;

	switch (u->scc_cmd) {
	case SCMD_WRITE:
		/*
		 * SBC-2 revision 16, section 5.24
		 * Reserve bit checks.
		 */
		if ((cdb[1] & 0xe0) || SAM_CONTROL_BYTE_RESERVED(cdb[5])) {
			spc_sense_create(cmd, KEY_ILLEGAL_REQUEST, 0);
			spc_sense_ascq(cmd, SPC_ASC_INVALID_CDB, 0x00);
			trans_send_complete(cmd, STATUS_CHECK);
			return;
		}

		addr = (diskaddr_t)(uint32_t)GETG0ADDR(u);
		cnt = GETG0COUNT(u);

		/*
		 * SBC-2 Revision 16/Section 5.24 WRITE(6)
		 * A TRANSFER LENGTH of 0 indicates that 256 logical blocks
		 * shall be written.
		 */
		if (cnt == 0)
			cnt = 256;
		break;

	case SCMD_WRITE_G1:
		/*
		 * SBC-2 revision 16, section 5.25
		 * Reserve bit checks.
		 */
		if ((cdb[1] & 0x6) || cdb[6] ||
		    SAM_CONTROL_BYTE_RESERVED(cdb[9])) {
			spc_sense_create(cmd, KEY_ILLEGAL_REQUEST, 0);
			spc_sense_ascq(cmd, SPC_ASC_INVALID_CDB, 0x00);
			trans_send_complete(cmd, STATUS_CHECK);
			return;
		}

		addr = (diskaddr_t)(uint32_t)GETG1ADDR(u);
		cnt = GETG1COUNT(u);
		break;

	case SCMD_WRITE_G4:
		/*
		 * SBC-2 revision 16, section 5.27
		 * Reserve bit checks.
		 */
		if ((cdb[1] & 0x6) || cdb[14] ||
		    SAM_CONTROL_BYTE_RESERVED(cdb[15])) {
			spc_sense_create(cmd, KEY_ILLEGAL_REQUEST, 0);
			spc_sense_ascq(cmd, SPC_ASC_INVALID_CDB, 0x00);
			trans_send_complete(cmd, STATUS_CHECK);
			return;
		}

		addr = (diskaddr_t)GETG4LONGADDR(u);
		cnt = GETG4COUNT(u);
		break;

	default:
		queue_prt(mgmtq, Q_STE_ERRS, "Unprocessed WRITE type");
		spc_sense_create(cmd, KEY_ILLEGAL_REQUEST, 0);
		spc_sense_ascq(cmd, SPC_ASC_INVALID_CDB, 0x00);
		trans_send_complete(cmd, STATUS_CHECK);
		return;
	}

	if ((addr + cnt) > d->d_size) {
		if (addr > d->d_size)
			err_blkno = addr;
		else
			err_blkno = d->d_size;

		/*
		 * XXX: What's SBC-2 say about ASC/ASCQ here. Solaris
		 * doesn't care about these values when key is set
		 * to KEY_ILLEGAL_REQUEST.
		 */
		if (err_blkno > FIXED_SENSE_ADDL_INFO_LEN)
			addl_sense_len = INFORMATION_SENSE_DESCR;
		else
			addl_sense_len = 0;
		spc_sense_create(cmd, KEY_ILLEGAL_REQUEST, addl_sense_len);
		spc_sense_info(cmd, err_blkno);
		spc_sense_ascq(cmd, 0x21, 0x00);
		trans_send_complete(cmd, STATUS_CHECK);
		queue_prt(mgmtq, Q_STE_ERRS,
		    "SBC%x LUN%d WRITE Illegal sector "
		    "(0x%llx + 0x%x) > 0x%llx", cmd->c_lu->l_targ->s_targ_num,
		    cmd->c_lu->l_common->l_num, addr, cnt, d->d_size);
		return;
	}

	if (cnt == 0) {
		queue_prt(mgmtq, Q_STE_NONIO,
		    "SBC%x LUN%d WRITE zero block count for addr 0x%llx",
		    cmd->c_lu->l_targ->s_targ_num, cmd->c_lu->l_common->l_num,
		    addr);
		trans_send_complete(cmd, STATUS_GOOD);
		return;
	}

	/* Non-NULL on the second and later chunks of a large transfer. */
	io = (disk_io_t *)cmd->c_emul_id;
	if (io == NULL) {
		io = sbc_io_alloc(cmd);
		io->da_lba		= addr;
		io->da_lba_cnt		= cnt;
		io->da_clear_overlap	= False;
		io->da_aio.a_aio_cmplt	= sbc_write_cmplt;
		io->da_aio.a_id		= io;

		/*
		 * Only update the statistics the first time through
		 * for this particular command. If the requested transfer
		 * is larger than the transport can handle this routine
		 * will be called many times.
		 */
		cmd->c_lu->l_cmds_write++;
		cmd->c_lu->l_sects_write += cnt;
	}

#ifdef FULL_DEBUG
	queue_prt(mgmtq, Q_STE_IO,
	    "SBC%x LUN%d blk 0x%llx, cnt %d, offset 0x%llx, size %d",
	    cmd->c_lu->l_targ->s_targ_num, cmd->c_lu->l_common->l_num,
	    addr, cnt, io->da_offset, io->da_data_len);
#endif

	/*
	 * If a transport sets the maximum output value to zero we'll
	 * just request the entire amount. Otherwise, transfer no more
	 * than the maximum output or the remainder, whichever is less.
	 */
	max_out = cmd->c_lu->l_targ->s_maxout;
	io->da_data_len = max_out ?
	    MIN(max_out, (cnt * 512) - io->da_offset) : (cnt * 512);

	mmap_area = T10_MMAP_AREA(cmd);
	if (mmap_area != MAP_FAILED) {
		/* Receive straight into the file mapping. */
		io->da_data_alloc = False;
		io->da_data = (char *)mmap_area + (addr * 512LL) +
		    io->da_offset;
		sbc_overlap_check(io);
	} else if ((io->da_data = (char *)malloc(io->da_data_len)) == NULL) {
		trans_send_complete(cmd, STATUS_BUSY);
		return;
	} else {
		io->da_data_alloc = True;
	}
	if (trans_rqst_dataout(cmd, io->da_data, io->da_data_len,
	    io->da_offset, io) == False) {
		trans_send_complete(cmd, STATUS_BUSY);
	}
}
/*
 * []----
 * | sbc_read -- emulate READ(6)/(10)/(16) for the disk emulation
 * |
 * | Decodes the LBA and transfer length, range checks, then either
 * | sends directly out of the file mapping (mmap-backed LUs) or
 * | issues asynchronous reads, in transport-sized chunks.
 * |
 * | Fix: the error diagnostic used "0x%ullx" (parsed as %u followed
 * | by the literal "llx", misaligning the 64-bit d_size vararg); it
 * | now uses 0x%llx.
 * []----
 */
/*ARGSUSED*/
static void
sbc_read(t10_cmd_t *cmd, uint8_t *cdb, size_t cdb_len)
{
	/*LINTED*/
	union scsi_cdb	*u = (union scsi_cdb *)cdb;
	diskaddr_t	addr;
	off_t		offset = 0;
	uint32_t	cnt, min;
	disk_io_t	*io;
	void		*mmap_data = T10_MMAP_AREA(cmd);
	uint64_t	err_blkno;
	disk_params_t	*d;
	uchar_t		addl_sense_len;

	if ((d = (disk_params_t *)T10_PARAMS_AREA(cmd)) == NULL)
		return;

	switch (u->scc_cmd) {
	case SCMD_READ:
		/*
		 * SBC-2 Revision 16, section 5.5
		 * Reserve bit checks
		 */
		if ((cdb[1] & 0xe0) || SAM_CONTROL_BYTE_RESERVED(cdb[5])) {
			spc_sense_create(cmd, KEY_ILLEGAL_REQUEST, 0);
			spc_sense_ascq(cmd, SPC_ASC_INVALID_CDB, 0x00);
			trans_send_complete(cmd, STATUS_CHECK);
			return;
		}

		addr = (diskaddr_t)(uint32_t)GETG0ADDR(u);
		cnt = GETG0COUNT(u);

		/*
		 * SBC-2 Revision 16
		 * Section: 5.5 READ(6) command
		 * A TRANSFER LENGTH field set to zero specifies
		 * that 256 logical blocks shall be read.
		 */
		if (cnt == 0)
			cnt = 256;
		break;

	case SCMD_READ_G1:
		/*
		 * SBC-2 Revision 16, section 5.6
		 * Reserve bit checks.
		 */
		if ((cdb[1] & 6) || cdb[6] ||
		    SAM_CONTROL_BYTE_RESERVED(cdb[9])) {
			spc_sense_create(cmd, KEY_ILLEGAL_REQUEST, 0);
			spc_sense_ascq(cmd, SPC_ASC_INVALID_CDB, 0x00);
			trans_send_complete(cmd, STATUS_CHECK);
			return;
		}

		addr = (diskaddr_t)(uint32_t)GETG1ADDR(u);
		cnt = GETG1COUNT(u);
		break;

	case SCMD_READ_G4:
		/*
		 * SBC-2 Revision 16, section 5.8
		 * Reserve bit checks
		 */
		if ((cdb[1] & 0x6) || cdb[14] ||
		    SAM_CONTROL_BYTE_RESERVED(cdb[15])) {
			spc_sense_create(cmd, KEY_ILLEGAL_REQUEST, 0);
			spc_sense_ascq(cmd, SPC_ASC_INVALID_CDB, 0x00);
			trans_send_complete(cmd, STATUS_CHECK);
			return;
		}

		addr = GETG4LONGADDR(u);
		cnt = GETG4COUNT(u);
		break;

	default:
		spc_sense_create(cmd, KEY_ILLEGAL_REQUEST, 0);
		trans_send_complete(cmd, STATUS_CHECK);
		return;
	}

	if ((addr + cnt) > d->d_size) {
		if (addr > d->d_size)
			err_blkno = addr;
		else
			err_blkno = d->d_size;

		/*
		 * XXX: What's SBC-2 say about ASC/ASCQ here. Solaris
		 * doesn't care about these values when key is set
		 * to KEY_ILLEGAL_REQUEST.
		 */
		if (err_blkno > FIXED_SENSE_ADDL_INFO_LEN)
			addl_sense_len = INFORMATION_SENSE_DESCR;
		else
			addl_sense_len = 0;
		spc_sense_create(cmd, KEY_ILLEGAL_REQUEST, addl_sense_len);
		spc_sense_info(cmd, err_blkno);
		spc_sense_ascq(cmd, 0x21, 0x00);
		trans_send_complete(cmd, STATUS_CHECK);
		queue_prt(mgmtq, Q_STE_ERRS,
		    "SBC%x LUN%d READ Illegal sector "
		    "(0x%llx + 0x%x) > 0x%llx", cmd->c_lu->l_targ->s_targ_num,
		    cmd->c_lu->l_common->l_num, addr, cnt, d->d_size);
		return;
	}

	cmd->c_lu->l_cmds_read++;
	cmd->c_lu->l_sects_read += cnt;

	if (cnt == 0) {
		trans_send_complete(cmd, STATUS_GOOD);
		return;
	}

	/* Break the request into transport-sized chunks. */
	do {
		io = sbc_io_alloc(cmd);
		min = MIN((cnt * 512) - offset, T10_MAX_OUT(cmd));
		io->da_lba	= addr;
		io->da_lba_cnt	= cnt;
		io->da_offset	= offset;
		io->da_data_len	= min;

#ifdef FULL_DEBUG
		queue_prt(mgmtq, Q_STE_IO,
		    "SBC%x LUN%d blk 0x%llx, cnt %d, offset 0x%llx, size %d",
		    cmd->c_lu->l_targ->s_targ_num, cmd->c_lu->l_common->l_num,
		    addr, cnt, io->da_offset, min);
#endif
		if (mmap_data != MAP_FAILED) {

			/*
			 * mmap-backed: send straight from the mapping and
			 * synthesize a successful AIO completion.
			 */
			io->da_clear_overlap		= True;
			io->da_data_alloc		= False;
			io->da_aio.a_aio.aio_return	= min;
			io->da_data = (char *)mmap_data + (addr * 512LL) +
			    io->da_offset;
			sbc_overlap_store(io);
			sbc_read_cmplt((emul_handle_t)io);

		} else {
			if ((io->da_data = (char *)malloc(min)) == NULL) {
				trans_send_complete(cmd, STATUS_BUSY);
				return;
			}
			io->da_clear_overlap	= False;
			io->da_data_alloc	= True;
			io->da_aio.a_aio_cmplt	= sbc_read_cmplt;
			io->da_aio.a_id		= io;
			trans_aioread(cmd, io->da_data, min,
			    (addr * 512LL) + (off_t)io->da_offset,
			    (aio_result_t *)io);
		}
		offset += min;
	} while (offset < (off_t)(cnt * 512));
}
/*
 * []----
 * | sbc_read_capacity16 -- READ CAPACITY(16) (SERVICE ACTION IN(16))
 * |
 * | Returns the 64-bit last LBA and the block size in big-endian
 * | order, honoring the PMI bit and the CDB's ALLOCATION LENGTH.
 * |
 * | Fixes relative to the previous version:
 * |   - The disk_io_t from sbc_io_alloc() was leaked when the calloc
 * |     of the response buffer failed (sbc_recap frees it here).
 * |   - rep_size -- the MIN of the ALLOCATION LENGTH and the structure
 * |     size, which the original comment says should bound the
 * |     transfer -- was computed but never used; it now sets
 * |     da_data_len so no more than the requested bytes are returned.
 * []----
 */
/*ARGSUSED*/
static void
sbc_read_capacity16(t10_cmd_t *cmd, uint8_t *cdb, size_t cdb_len)
{
	uint64_t	capacity, lba;
	int		rep_size;	/* response data size */
	struct scsi_capacity_16	*cap16;
	disk_params_t	*d;
	disk_io_t	*io;

	if ((d = (disk_params_t *)T10_PARAMS_AREA(cmd)) == NULL)
		return;

	capacity = d->d_size;

	/*
	 * READ_CAPACITY(16) command: ALLOCATION LENGTH is bytes 10-13.
	 */
	rep_size = cdb[10] << 24 | cdb[11] << 16 | cdb[12] << 8 | cdb[13];
	if (rep_size == 0) {

		/*
		 * A zero length field means we're done.
		 */
		trans_send_complete(cmd, STATUS_GOOD);
		return;
	}
	rep_size = MIN(rep_size, sizeof (*cap16));

	/*
	 * Reserve bit checks.
	 */
	if ((cdb[1] & ~SPC_GROUP4_SERVICE_ACTION_MASK) ||
	    (cdb[14] & ~SBC_CAPACITY_PMI) ||
	    SAM_CONTROL_BYTE_RESERVED(cdb[15])) {
		spc_sense_create(cmd, KEY_ILLEGAL_REQUEST, 0);
		spc_sense_ascq(cmd, SPC_ASC_INVALID_CDB, 0x00);
		trans_send_complete(cmd, STATUS_CHECK);
		return;
	}

	/* LOGICAL BLOCK ADDRESS field, only meaningful with PMI set. */
	lba = (uint64_t)cdb[2] << 56 | (uint64_t)cdb[3] << 48 |
	    (uint64_t)cdb[4] << 40 | (uint64_t)cdb[5] << 32 |
	    (uint64_t)cdb[6] << 24 | (uint64_t)cdb[7] << 16 |
	    (uint64_t)cdb[8] << 8 | (uint64_t)cdb[9];

	io = sbc_io_alloc(cmd);

	/*
	 * We'll malloc enough space for the structure so that we can
	 * set the values as we please. However, we'll set the transfer
	 * length to the minimum of the requested size and our structure.
	 * This is per SBC-2 revision 16, section 5.11.1 regarding
	 * ALLOCATION LENGTH.
	 */
	if ((cap16 = (struct scsi_capacity_16 *)calloc(1, sizeof (*cap16)))
	    == NULL) {
		sbc_io_free(io);	/* previously leaked on this path */
		trans_send_complete(cmd, STATUS_BUSY);
		return;
	}
	io->da_data		= (char *)cap16;
	io->da_data_len		= rep_size;
	io->da_data_alloc	= True;
	io->da_clear_overlap	= False;

	if (cdb[14] & SBC_CAPACITY_PMI) {
		if (lba >= capacity)
			cap16->sc_capacity = htonll(0xffffffffffffffffULL);
		else
			cap16->sc_capacity = htonll(capacity - 1);
	} else {
		cap16->sc_capacity = htonll(capacity - 1);
	}
	cap16->sc_lbasize = htonl(d->d_bytes_sect);

	if (trans_send_datain(cmd, io->da_data, io->da_data_len, 0,
	    sbc_io_free, True, io) == False) {
		trans_send_complete(cmd, STATUS_BUSY);
	}
}