static void
raw_inquiry(t10_cmd_t *cmd, uint8_t *cdb, size_t cdb_len)
{
	raw_io_t		*io;
	uint32_t		len;
	struct scsi_inquiry	inq;
	raw_params_t		*r;

	if ((r = (raw_params_t *)T10_PARAMS_AREA(cmd)) == NULL)
		return;

	len = (cdb[3] << 8) | cdb[4];
	if (((io = do_datain(cmd, cdb, CDB_GROUP0, len)) == NULL) ||
	    (io->r_status != STATUS_GOOD)) {
		if (io != NULL)
			raw_free_io(io);
		trans_send_complete(cmd, STATUS_CHECK);
		return;
	}

	if ((cdb[1] & 1) == 0) {
		bcopy(io->r_data, &inq, sizeof (inq));
		r->r_dtype = inq.inq_dtype;
	}

	if (trans_send_datain(cmd, io->r_data, io->r_data_len, 0,
	    raw_free_io, True, io) == False) {
		spc_sense_create(cmd, KEY_HARDWARE_ERROR, 0);
		trans_send_complete(cmd, STATUS_CHECK);
	}
}

static void
raw_msense(t10_cmd_t *cmd, uint8_t *cdb, size_t cdb_len)
{
	raw_io_t	*io;
	int		len = 0;

	/*
	 * Only MODE SENSE(6) and MODE SENSE(10) are dispatched here;
	 * len is initialized above so it stays defined in any other case.
	 */
	switch (cdb[0]) {
	case SCMD_MODE_SENSE:
		len = cdb[4];
		break;
	case SCMD_MODE_SENSE_G1:
		len = (cdb[7] << 8) | cdb[8];
		break;
	}

	if (((io = do_datain(cmd, cdb, CDB_GROUP0, len)) == NULL) ||
	    (io->r_status != STATUS_GOOD)) {
		if (io != NULL)
			raw_free_io(io);
		trans_send_complete(cmd, STATUS_CHECK);
		return;
	}

	if (trans_send_datain(cmd, io->r_data, io->r_data_len, 0,
	    raw_free_io, True, io) == False) {
		spc_sense_create(cmd, KEY_HARDWARE_ERROR, 0);
		trans_send_complete(cmd, STATUS_CHECK);
	}
}

static void
raw_service_actiong4(t10_cmd_t *cmd, uint8_t *cdb, size_t cdb_len)
{
	raw_io_t		*io;
	uint32_t		len;
	struct scsi_capacity_16	cap16;
	raw_params_t		*r;

	if ((r = (raw_params_t *)T10_PARAMS_AREA(cmd)) == NULL)
		return;

	len = (cdb[10] << 24) | (cdb[11] << 16) | (cdb[12] << 8) | cdb[13];
	if (((io = do_datain(cmd, cdb, CDB_GROUP4, len)) == NULL) ||
	    (io->r_status != STATUS_GOOD)) {
		if (io != NULL)
			raw_free_io(io);
		trans_send_complete(cmd, STATUS_CHECK);
		return;
	}

	bcopy(io->r_data, &cap16, sizeof (cap16));

	/*
	 * Currently there's a bug in ZFS which doesn't report a capacity
	 * for any of the volumes. This means that when using ZFS the
	 * administrator must supply the device size.
	 */
	if (cap16.sc_capacity != 0)
		r->r_size = cap16.sc_capacity;

	if (trans_send_datain(cmd, io->r_data, io->r_data_len, 0,
	    raw_free_io, True, io) == False) {
		spc_sense_create(cmd, KEY_HARDWARE_ERROR, 0);
		trans_send_complete(cmd, STATUS_CHECK);
	}
}

/*
 * []----
 * | raw_read_cmplt -- Once we have the data, need to send it along.
 * []----
 */
static void
raw_read_cmplt(emul_handle_t id)
{
	raw_io_t	*io = (raw_io_t *)id;
	int		sense_len;
	uint64_t	err_blkno;
	t10_cmd_t	*cmd = io->r_cmd;
	Boolean_t	last;

	if (io->r_aio.a_aio.aio_return != io->r_data_len) {
		err_blkno = io->r_lba + ((io->r_offset + 511) / 512);
		cmd->c_resid = (io->r_lba_cnt * 512) - io->r_offset;
		if (err_blkno > FIXED_SENSE_ADDL_INFO_LEN)
			sense_len = INFORMATION_SENSE_DESCR;
		else
			sense_len = 0;
		spc_sense_create(cmd, KEY_HARDWARE_ERROR, sense_len);
		spc_sense_info(cmd, err_blkno);
		trans_send_complete(cmd, STATUS_CHECK);
		raw_free_io(io);
		return;
	}

	last = ((io->r_offset + io->r_data_len) < (io->r_lba_cnt * 512LL)) ?
	    False : True;
	if (trans_send_datain(cmd, io->r_data, io->r_data_len, io->r_offset,
	    raw_free_io, last, io) == False) {
		raw_free_io(io);
		spc_sense_create(cmd, KEY_HARDWARE_ERROR, 0);
		trans_send_complete(cmd, STATUS_CHECK);
	}
}

static void
raw_request_sense(t10_cmd_t *cmd, uint8_t *cdb, size_t cdb_len)
{
	raw_io_t	*io;

	if (((io = do_datain(cmd, cdb, CDB_GROUP0, cdb[4])) == NULL) ||
	    (io->r_status != STATUS_GOOD)) {
		if (io != NULL)
			raw_free_io(io);
		trans_send_complete(cmd, STATUS_CHECK);
	} else {
		if (trans_send_datain(cmd, io->r_data, io->r_data_len, 0,
		    raw_free_io, True, io) == False) {
			spc_sense_create(cmd, KEY_HARDWARE_ERROR, 0);
			trans_send_complete(cmd, STATUS_CHECK);
		}
	}
}

/*
 * []----
 * | osd_list -- return a list of objects
 * []----
 */
static void
osd_list(t10_cmd_t *cmd, uint8_t *cdb, size_t cdb_len)
{
	osd_cmd_list_t		*o = (osd_cmd_list_t *)cdb;
	osd_obj_id_t		part;
	osd_list_param_t	*data;
	uint64_t		len, alloc_len;

	part = (uint64_t)o->ocdb_partition_id[0] << 56 |
	    (uint64_t)o->ocdb_partition_id[1] << 48 |
	    (uint64_t)o->ocdb_partition_id[2] << 40 |
	    (uint64_t)o->ocdb_partition_id[3] << 32 |
	    (uint64_t)o->ocdb_partition_id[4] << 24 |
	    (uint64_t)o->ocdb_partition_id[5] << 16 |
	    (uint64_t)o->ocdb_partition_id[6] << 8 |
	    (uint64_t)o->ocdb_partition_id[7];
	len = (uint64_t)o->ocdb_length[0] << 56 |
	    (uint64_t)o->ocdb_length[1] << 48 |
	    (uint64_t)o->ocdb_length[2] << 40 |
	    (uint64_t)o->ocdb_length[3] << 32 |
	    (uint64_t)o->ocdb_length[4] << 24 |
	    (uint64_t)o->ocdb_length[5] << 16 |
	    (uint64_t)o->ocdb_length[6] << 8 |
	    (uint64_t)o->ocdb_length[7];

	if (len == 0) {
		trans_send_complete(cmd, STATUS_GOOD);
		return;
	}

	queue_prt(mgmtq, Q_STE_NONIO, "part=0x%llx, len=0x%llx", part, len);

	alloc_len = MAX(sizeof (*data), len);
	if ((data = calloc(1, alloc_len)) == NULL) {
		trans_send_complete(cmd, STATUS_BUSY);
		return;
	}

	data->op_length[7] = sizeof (*data) - 8;
	if (part == OSD_PARTITION_ROOT)
		data->op_root = 1;

	(void) trans_send_datain(cmd, (char *)data, sizeof (*data), 0,
	    free, True, (emul_handle_t)data);
}

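/*
 * Illustrative sketch only (not referenced by the code above): the
 * 8-byte, big-endian partition-id and length fields that osd_list()
 * decodes with open-coded shifts could equally be pulled out with a
 * small helper along these lines.  The name get_be64() is hypothetical
 * and does not exist elsewhere in this source.
 */
static uint64_t
get_be64(const uint8_t *p)
{
	uint64_t	v = 0;
	int		i;

	/* CDB fields are big-endian: most significant byte first */
	for (i = 0; i < 8; i++)
		v = (v << 8) | p[i];
	return (v);
}
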
static void
raw_report_tpgs(t10_cmd_t *cmd, uint8_t *cdb, size_t cdb_len)
{
	raw_io_t	*io;
	uint32_t	len;

	len = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
	if (((io = do_datain(cmd, cdb, CDB_GROUP5, len)) == NULL) ||
	    (io->r_status != STATUS_GOOD)) {
		if (io != NULL)
			raw_free_io(io);
		trans_send_complete(cmd, STATUS_CHECK);
		return;
	}

	if (trans_send_datain(cmd, io->r_data, io->r_data_len, 0,
	    raw_free_io, True, io) == False) {
		spc_sense_create(cmd, KEY_HARDWARE_ERROR, 0);
		trans_send_complete(cmd, STATUS_CHECK);
	}
}

static void
raw_read_limits(t10_cmd_t *cmd, uint8_t *cdb, size_t cdb_len)
{
	raw_io_t	*io;

	/*
	 * The spec defines this command to return 6 bytes of data.
	 */
	if (((io = do_datain(cmd, cdb, CDB_GROUP0, 6)) == NULL) ||
	    (io->r_status != STATUS_GOOD)) {
		if (io != NULL)
			raw_free_io(io);
		trans_send_complete(cmd, STATUS_CHECK);
		return;
	}

	if (trans_send_datain(cmd, io->r_data, io->r_data_len, 0,
	    raw_free_io, True, io) == False) {
		spc_sense_create(cmd, KEY_HARDWARE_ERROR, 0);
		trans_send_complete(cmd, STATUS_CHECK);
	}
}

/*ARGSUSED*/
static void
raw_read_tape(t10_cmd_t *cmd, uint8_t *cdb, size_t cdb_len)
{
	size_t		req_len;
	size_t		xfer;
	off_t		offset = 0;
	raw_io_t	*io;
	Boolean_t	last;
	t10_cmd_t	*c;

	/*
	 * With the FIXED bit set the transfer length is a count of
	 * fixed-size blocks (assumed to be 512 bytes here).
	 */
	req_len = (cdb[2] << 16) | (cdb[3] << 8) | cdb[4];
	if (cdb[1] & 0x1)
		req_len *= 512;

	if (((io = do_datain(cmd, cdb, CDB_GROUP0, req_len)) == NULL) ||
	    (io->r_status != STATUS_GOOD)) {
		if (io != NULL)
			raw_free_io(io);
		trans_send_complete(cmd, STATUS_CHECK);
		return;
	}

	while (offset < io->r_data_len) {
		xfer = min(T10_MAX_OUT(cmd), io->r_data_len - offset);
		last = ((offset + xfer) >= io->r_data_len) ? True : False;
		if (last == True)
			c = cmd;
		else
			c = trans_cmd_dup(cmd);
		if (trans_send_datain(c, io->r_data + offset, xfer, offset,
		    raw_free_io, last, io) == False) {
			raw_free_io(io);
			spc_sense_create(c, KEY_HARDWARE_ERROR, 0);
			trans_send_complete(c, STATUS_CHECK);
			return;
		}
		offset += xfer;
	}
}

/*ARGSUSED*/
void
sbc_msense(t10_cmd_t *cmd, uint8_t *cdb, size_t cdb_len)
{
	struct mode_header	*mode_hdr;
	char			*np;
	disk_params_t		*d;
	disk_io_t		*io;

	if ((d = (disk_params_t *)T10_PARAMS_AREA(cmd)) == NULL)
		return;

	/*
	 * SPC-3 Revision 21c, section 6.8
	 * Reserved bit checks
	 */
	if ((cdb[1] & ~SPC_MODE_SENSE_DBD) ||
	    SAM_CONTROL_BYTE_RESERVED(cdb[5])) {
		spc_sense_create(cmd, KEY_ILLEGAL_REQUEST, 0);
		spc_sense_ascq(cmd, SPC_ASC_INVALID_CDB, 0x00);
		trans_send_complete(cmd, STATUS_CHECK);
		return;
	}

	/*
	 * Zero length causes a simple ack to occur.
	 */
	if (cdb[4] == 0) {
		trans_send_complete(cmd, STATUS_GOOD);
		return;
	}

	io = sbc_io_alloc(cmd);

	/*
	 * Make sure that we have enough room in the data buffer. We'll
	 * only send back the amount requested, though.
	 */
	io->da_data_len = MAX(cdb[4], sizeof (struct mode_format) +
	    sizeof (struct mode_geometry) +
	    sizeof (struct mode_control_scsi3) +
	    sizeof (struct mode_cache_scsi3) +
	    sizeof (struct mode_info_ctrl) + (MODE_BLK_DESC_LENGTH * 5));
	if ((io->da_data = (char *)calloc(1, io->da_data_len)) == NULL) {
		sbc_io_free(io);
		trans_send_complete(cmd, STATUS_BUSY);
		return;
	}
	io->da_clear_overlap	= False;
	io->da_data_alloc	= True;
	mode_hdr = (struct mode_header *)io->da_data;

	switch (cdb[2]) {
	case MODE_SENSE_PAGE3_CODE:
		if ((d->d_heads == 0) && (d->d_cyl == 0) && (d->d_spt == 0)) {
			sbc_io_free(io);
			spc_unsupported(cmd, cdb, cdb_len);
			return;
		}
		mode_hdr->length	= sizeof (struct mode_format);
		mode_hdr->bdesc_length	= MODE_BLK_DESC_LENGTH;
		(void) sense_page3(d,
		    io->da_data + sizeof (*mode_hdr) + mode_hdr->bdesc_length);
		break;

	case MODE_SENSE_PAGE4_CODE:
		if ((d->d_heads == 0) && (d->d_cyl == 0) && (d->d_spt == 0)) {
			sbc_io_free(io);
			spc_unsupported(cmd, cdb, cdb_len);
			return;
		}
		mode_hdr->length	= sizeof (struct mode_geometry);
		mode_hdr->bdesc_length	= MODE_BLK_DESC_LENGTH;
		(void) sense_page4(d,
		    io->da_data + sizeof (*mode_hdr) + mode_hdr->bdesc_length);
		break;

	case MODE_SENSE_CACHE:
		mode_hdr->length	= sizeof (struct mode_cache_scsi3);
		mode_hdr->bdesc_length	= MODE_BLK_DESC_LENGTH;
		(void) sense_cache(d,
		    io->da_data + sizeof (*mode_hdr) + mode_hdr->bdesc_length);
		break;

	case MODE_SENSE_CONTROL:
		mode_hdr->length	= sizeof (struct mode_control_scsi3);
		mode_hdr->bdesc_length	= MODE_BLK_DESC_LENGTH;
		(void) sense_mode_control(cmd->c_lu,
		    io->da_data + sizeof (*mode_hdr) + mode_hdr->bdesc_length);
		break;

	case MODE_SENSE_INFO_CTRL:
		(void) sense_info_ctrl(io->da_data);
		break;

	case MODE_SENSE_SEND_ALL:
		/*
		 * SPC-3 revision 21c
		 * Section 6.9.1 Table 97
		 * "Return all subpage 00h mode pages in page_0 format"
		 */
		if (io->da_data_len < (sizeof (struct mode_format) +
		    sizeof (struct mode_geometry) +
		    sizeof (struct mode_control_scsi3) +
		    sizeof (struct mode_info_ctrl))) {
			/*
			 * Believe it or not, there's an initiator out
			 * there which sends a mode sense request for all
			 * of the pages, without always sending a data-in
			 * size which is large enough.
			 * NOTE: Need to check the error key returned
			 * here and see if something else should be used.
			 */
			spc_sense_create(cmd, KEY_ILLEGAL_REQUEST, 0);
			trans_send_complete(cmd, STATUS_CHECK);
		} else {
			/*
			 * If we don't have geometry then don't attempt
			 * to report that information.  Start np at the
			 * head of the buffer so it is always defined.
			 */
			np = io->da_data;
			if (d->d_heads && d->d_cyl && d->d_spt) {
				np = sense_page3(d, np);
				np = sense_page4(d, np);
			}
			np = sense_cache(d, np);
			np = sense_mode_control(cmd->c_lu, np);
			(void) sense_info_ctrl(np);
		}
		break;

	case 0x00:
		/*
		 * SPC-3 Revision 21c, section 6.9.1
		 * Table 97 -- Mode page code usage for all devices
		 * Page Code 00 == Vendor specific. We are going to return
		 * zeros.
		 */
		break;

	default:
		queue_prt(mgmtq, Q_STE_ERRS,
		    "SBC%x LUN%d Unsupported mode_sense request 0x%x",
		    cmd->c_lu->l_targ->s_targ_num, cmd->c_lu->l_common->l_num,
		    cdb[2]);
		spc_sense_create(cmd, KEY_ILLEGAL_REQUEST, 0);
		spc_sense_ascq(cmd, SPC_ASC_INVALID_CDB, 0x00);
		trans_send_complete(cmd, STATUS_CHECK);
		return;
	}

	if (trans_send_datain(cmd, io->da_data, cdb[4], 0, sbc_io_free,
	    True, io) == False) {
		trans_send_complete(cmd, STATUS_BUSY);
	}
}

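/*
 * Illustrative sketch only (assumed layout, not called above): the
 * MODE SENSE(6) data-in built by sbc_msense() is a mode parameter
 * header, an optional block descriptor, and then the mode page(s).
 * The hypothetical helper below shows where the first page starts;
 * it is the same arithmetic the cases above perform inline.
 */
static char *
msense_first_page(struct mode_header *hdr)
{
	/* Pages follow the header plus any block descriptor(s) */
	return ((char *)hdr + sizeof (*hdr) + hdr->bdesc_length);
}
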
/*ARGSUSED*/
void
sbc_recap(t10_cmd_t *cmd, uint8_t *cdb, size_t cdb_len)
{
	uint64_t		capacity;
	int			len;
	uint32_t		lba;
	struct scsi_capacity	*cap;
	disk_params_t		*d;
	disk_io_t		*io;

	if ((d = (disk_params_t *)T10_PARAMS_AREA(cmd)) == NULL)
		return;

	capacity = d->d_size;
	len = sizeof (struct scsi_capacity);

	/*
	 * SBC-2 Revision 16, section 5.10.1
	 * Any of the following conditions will generate an error:
	 * (1) PMI bit is zero and the LOGICAL BLOCK ADDRESS is non-zero
	 * (2) Reserved bytes are not zero
	 * (3) Reserved bits are not zero
	 * (4) Reserved CONTROL bits are not zero
	 */
	if ((((cdb[8] & SBC_CAPACITY_PMI) == 0) &&
	    (cdb[2] || cdb[3] || cdb[4] || cdb[5])) ||
	    cdb[1] || cdb[6] || cdb[7] ||
	    (cdb[8] & ~SBC_CAPACITY_PMI) ||
	    SAM_CONTROL_BYTE_RESERVED(cdb[9])) {
		spc_sense_create(cmd, KEY_ILLEGAL_REQUEST, 0);
		spc_sense_ascq(cmd, SPC_ASC_INVALID_CDB, 0x00);
		trans_send_complete(cmd, STATUS_CHECK);
		return;
	}

	/*
	 * If the device capacity is larger than 32 bits, report the
	 * capacity as all 0xf's.  A device with LBAs larger than 32 bits
	 * should be queried with the READ CAPACITY(16) command to get
	 * the full capacity.
	 * NOTE: the adjustment to subtract one from the capacity is
	 * done below.
	 */
	if (capacity & 0xFFFFFFFF00000000ULL)
		capacity = 0xFFFFFFFF;

	io = sbc_io_alloc(cmd);

	if ((cap = (struct scsi_capacity *)calloc(1, len)) == NULL) {
		sbc_io_free(io);
		trans_send_complete(cmd, STATUS_BUSY);
		return;
	}
	io->da_data		= (char *)cap;
	io->da_data_alloc	= True;
	io->da_clear_overlap	= False;
	io->da_data_len		= len;

	if (capacity != 0xFFFFFFFF) {
		/*
		 * Look at the PMI information
		 */
		if (cdb[8] & SBC_CAPACITY_PMI) {
			lba = cdb[2] << 24 | cdb[3] << 16 |
			    cdb[4] << 8 | cdb[5];
			if (lba >= capacity)
				cap->capacity = htonl(0xffffffff);
			else
				cap->capacity = htonl(capacity - 1);
		} else {
			cap->capacity = htonl(capacity - 1);
		}
	} else {
		cap->capacity = htonl(capacity);
	}
	cap->lbasize = htonl(d->d_bytes_sect);

	if (trans_send_datain(cmd, io->da_data, io->da_data_len, 0,
	    sbc_io_free, True, io) == False) {
		trans_send_complete(cmd, STATUS_BUSY);
	}
}

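/*
 * Illustrative sketch only (not called above): the RETURNED LOGICAL
 * BLOCK ADDRESS field that sbc_recap() builds holds the last addressable
 * LBA (capacity - 1), clamped to 0xFFFFFFFF when the capacity will not
 * fit in 32 bits so that the initiator falls back to READ CAPACITY(16).
 * The helper name recap10_last_lba() is hypothetical; byte-order
 * conversion (htonl) is left to the caller, as in the code above.
 */
static uint32_t
recap10_last_lba(uint64_t nblocks)
{
	/* Capacities beyond 32 bits are reported as all f's */
	if (nblocks > 0xFFFFFFFFULL)
		return (0xFFFFFFFFU);
	return ((uint32_t)(nblocks - 1));
}
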
/*ARGSUSED*/
static void
sbc_read_capacity16(t10_cmd_t *cmd, uint8_t *cdb, size_t cdb_len)
{
	uint64_t		capacity, lba;
	int			rep_size;	/* response data size */
	struct scsi_capacity_16	*cap16;
	disk_params_t		*d;
	disk_io_t		*io;

	if ((d = (disk_params_t *)T10_PARAMS_AREA(cmd)) == NULL)
		return;

	capacity = d->d_size;

	/*
	 * READ_CAPACITY(16) command
	 */
	rep_size = cdb[10] << 24 | cdb[11] << 16 | cdb[12] << 8 | cdb[13];
	if (rep_size == 0) {
		/*
		 * A zero length field means we're done.
		 */
		trans_send_complete(cmd, STATUS_GOOD);
		return;
	}
	rep_size = MIN(rep_size, sizeof (*cap16));

	/*
	 * Reserved bit checks.
	 */
	if ((cdb[1] & ~SPC_GROUP4_SERVICE_ACTION_MASK) ||
	    (cdb[14] & ~SBC_CAPACITY_PMI) ||
	    SAM_CONTROL_BYTE_RESERVED(cdb[15])) {
		spc_sense_create(cmd, KEY_ILLEGAL_REQUEST, 0);
		spc_sense_ascq(cmd, SPC_ASC_INVALID_CDB, 0x00);
		trans_send_complete(cmd, STATUS_CHECK);
		return;
	}

	lba = (uint64_t)cdb[2] << 56 | (uint64_t)cdb[3] << 48 |
	    (uint64_t)cdb[4] << 40 | (uint64_t)cdb[5] << 32 |
	    (uint64_t)cdb[6] << 24 | (uint64_t)cdb[7] << 16 |
	    (uint64_t)cdb[8] << 8 | (uint64_t)cdb[9];

	io = sbc_io_alloc(cmd);

	/*
	 * We'll malloc enough space for the structure so that we can
	 * set the values as we please.  However, we'll set the transfer
	 * length to the minimum of the requested size and our structure.
	 * This is per SBC-2 revision 16, section 5.11.1 regarding
	 * ALLOCATION LENGTH.
	 */
	if ((cap16 = (struct scsi_capacity_16 *)calloc(1, sizeof (*cap16)))
	    == NULL) {
		sbc_io_free(io);
		trans_send_complete(cmd, STATUS_BUSY);
		return;
	}
	io->da_data		= (char *)cap16;
	io->da_data_len		= sizeof (*cap16);
	io->da_data_alloc	= True;
	io->da_clear_overlap	= False;

	if (cdb[14] & SBC_CAPACITY_PMI) {
		if (lba >= capacity)
			cap16->sc_capacity = htonll(0xffffffffffffffffULL);
		else
			cap16->sc_capacity = htonll(capacity - 1);
	} else {
		cap16->sc_capacity = htonll(capacity - 1);
	}
	cap16->sc_lbasize = htonl(d->d_bytes_sect);

	if (trans_send_datain(cmd, io->da_data, io->da_data_len, 0,
	    sbc_io_free, True, io) == False) {
		trans_send_complete(cmd, STATUS_BUSY);
	}
}

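/*
 * Illustrative sketch only: htonll() above is assumed to come from the
 * platform headers (Solaris provides it).  On a platform that lacks it,
 * an equivalent host-to-big-endian conversion could be built from
 * htonl(), as in the hypothetical my_htonll() below.
 */
#include <arpa/inet.h>	/* would normally sit at the top of the file */

static uint64_t
my_htonll(uint64_t v)
{
	/* If htonl() is the identity, the host is already big-endian */
	if (htonl(1) == 1)
		return (v);
	/* Otherwise swap the two 32-bit halves and each half's bytes */
	return (((uint64_t)htonl((uint32_t)(v & 0xFFFFFFFFULL)) << 32) |
	    (uint64_t)htonl((uint32_t)(v >> 32)));
}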