/*
 * []------------------------------------------------------------------[]
 * | SCSI Object-Based Storage Device Commands                          |
 * | T10/1355-D                                                         |
 * | The following functions implement the emulation of OSD type        |
 * | commands.                                                          |
 * []------------------------------------------------------------------[]
 */

/*
 * Decode the 16-bit service action from an OSD generic CDB and dispatch
 * it.  Every service action this emulation implements is funneled to
 * osd_list(); everything else is reported as unsupported.
 */
static void
osd_service_action(t10_cmd_t *cmd, uint8_t *cdb, size_t cdb_len)
{
	osd_generic_cdb_t	*gcdb;
	uint16_t		action;

	/*
	 * Debug builds drop core here so the bad caller is obvious; the
	 * runtime check below keeps production builds from misbehaving
	 * if someone doesn't play right.
	 */
	assert(cdb_len == sizeof (*gcdb));
	if (cdb_len != sizeof (*gcdb)) {
		spc_sense_create(cmd, KEY_ILLEGAL_REQUEST, 0);
		spc_sense_ascq(cmd, SPC_ASC_INVALID_CDB, SPC_ASCQ_INVALID_CDB);
		trans_send_complete(cmd, STATUS_CHECK);
		return;
	}

	gcdb = (osd_generic_cdb_t *)cdb;

	/* Service action is stored big-endian in the basic CDB header. */
	action = (uint16_t)((gcdb->ocdb_basic.b_service_action[0] << 8) |
	    gcdb->ocdb_basic.b_service_action[1]);

	queue_prt(mgmtq, Q_STE_NONIO,
	    "OSD%x LUN%d service=0x%x, options=0x%x, specific_opts=0x%x,"
	    " fmt=0x%x", cmd->c_lu->l_targ->s_targ_num,
	    cmd->c_lu->l_common->l_num, action, gcdb->ocdb_options,
	    gcdb->ocdb_specific_opts, gcdb->ocdb_fmt);

	switch (action) {
	case OSD_APPEND:
	case OSD_CREATE:
	case OSD_CREATE_AND_WRITE:
	case OSD_CREATE_COLLECTION:
	case OSD_CREATE_PARTITION:
	case OSD_FLUSH:
	case OSD_FLUSH_COLLECTION:
	case OSD_FLUSH_OSD:
	case OSD_FLUSH_PARTITION:
	case OSD_FORMAT_OSD:
	case OSD_GET_ATTR:
	case OSD_LIST:
		/* All implemented actions share the osd_list() handler. */
		osd_list(cmd, cdb, cdb_len);
		break;

	case OSD_LIST_COLLECTION:
	case OSD_PERFORM_SCSI:
	case OSD_TASK_MGMT:
	default:
		spc_unsupported(cmd, cdb, cdb_len);
		break;
	}
}
/*
 * sbc_msense -- emulate MODE SENSE(6) for the SBC (direct-access) LUN.
 *
 * Validates the CDB's reserved bits, allocates a data-in buffer large
 * enough for the biggest possible response, fills in the requested mode
 * page(s), and sends back exactly cdb[4] (allocation length) bytes.
 *
 * Fixes over the previous revision:
 *   - MODE_SENSE_SEND_ALL no longer passes an uninitialized pointer to
 *     sense_cache() when the LUN has no geometry (undefined behavior).
 *   - The unsupported-page-code path frees the disk_io_t it allocated
 *     instead of leaking it (and its calloc'd buffer).
 *   - The SEND_ALL "buffer too small" path returns after completing the
 *     command, instead of falling through to trans_send_datain() and
 *     completing the same command twice.
 */
/*ARGSUSED*/
void
sbc_msense(t10_cmd_t *cmd, uint8_t *cdb, size_t cdb_len)
{
	struct mode_header	*mode_hdr;
	char			*np;
	disk_params_t		*d;
	disk_io_t		*io;

	if ((d = (disk_params_t *)T10_PARAMS_AREA(cmd)) == NULL)
		return;

	/*
	 * SPC-3 Revision 21c section 6.8
	 * Reserved bit checks: only the DBD bit may be set in byte 1 and
	 * the reserved bits of the control byte must be clear.
	 */
	if ((cdb[1] & ~SPC_MODE_SENSE_DBD) ||
	    SAM_CONTROL_BYTE_RESERVED(cdb[5])) {
		spc_sense_create(cmd, KEY_ILLEGAL_REQUEST, 0);
		spc_sense_ascq(cmd, SPC_ASC_INVALID_CDB, 0x00);
		trans_send_complete(cmd, STATUS_CHECK);
		return;
	}

	/*
	 * Zero allocation length means no data is transferred; just ack
	 * the command.
	 */
	if (cdb[4] == 0) {
		trans_send_complete(cmd, STATUS_GOOD);
		return;
	}

	io = sbc_io_alloc(cmd);

	/*
	 * Make sure that we have enough room in the data buffer for the
	 * largest possible response even when the initiator asked for
	 * less; only the requested cdb[4] bytes are sent back.
	 */
	io->da_data_len = MAX(cdb[4], sizeof (struct mode_format) +
	    sizeof (struct mode_geometry) +
	    sizeof (struct mode_control_scsi3) +
	    sizeof (struct mode_cache_scsi3) +
	    sizeof (struct mode_info_ctrl) + (MODE_BLK_DESC_LENGTH * 5));
	if ((io->da_data = (char *)calloc(1, io->da_data_len)) == NULL) {
		sbc_io_free(io);
		trans_send_complete(cmd, STATUS_BUSY);
		return;
	}
	io->da_clear_overlap	= False;
	io->da_data_alloc	= True;
	mode_hdr = (struct mode_header *)io->da_data;

	switch (cdb[2]) {
	case MODE_SENSE_PAGE3_CODE:
		/* Format page is meaningless without geometry. */
		if ((d->d_heads == 0) && (d->d_cyl == 0) && (d->d_spt == 0)) {
			sbc_io_free(io);
			spc_unsupported(cmd, cdb, cdb_len);
			return;
		}
		mode_hdr->length	= sizeof (struct mode_format);
		mode_hdr->bdesc_length	= MODE_BLK_DESC_LENGTH;
		(void) sense_page3(d, io->da_data + sizeof (*mode_hdr) +
		    mode_hdr->bdesc_length);
		break;

	case MODE_SENSE_PAGE4_CODE:
		/* Rigid disk geometry page also requires geometry. */
		if ((d->d_heads == 0) && (d->d_cyl == 0) && (d->d_spt == 0)) {
			sbc_io_free(io);
			spc_unsupported(cmd, cdb, cdb_len);
			return;
		}
		mode_hdr->length	= sizeof (struct mode_geometry);
		mode_hdr->bdesc_length	= MODE_BLK_DESC_LENGTH;
		(void) sense_page4(d, io->da_data + sizeof (*mode_hdr) +
		    mode_hdr->bdesc_length);
		break;

	case MODE_SENSE_CACHE:
		mode_hdr->length	= sizeof (struct mode_cache_scsi3);
		mode_hdr->bdesc_length	= MODE_BLK_DESC_LENGTH;
		(void) sense_cache(d, io->da_data + sizeof (*mode_hdr) +
		    mode_hdr->bdesc_length);
		break;

	case MODE_SENSE_CONTROL:
		mode_hdr->length	= sizeof (struct mode_control_scsi3);
		mode_hdr->bdesc_length	= MODE_BLK_DESC_LENGTH;
		(void) sense_mode_control(cmd->c_lu, io->da_data +
		    sizeof (*mode_hdr) + mode_hdr->bdesc_length);
		break;

	case MODE_SENSE_INFO_CTRL:
		(void) sense_info_ctrl(io->da_data);
		break;

	case MODE_SENSE_SEND_ALL:
		/*
		 * SPC-3 revision 21c
		 * Section 6.9.1 Table 97
		 * "Return all subpage 00h mode pages in page_0 format"
		 */
		if (io->da_data_len < (sizeof (struct mode_format) +
		    sizeof (struct mode_geometry) +
		    sizeof (struct mode_control_scsi3) +
		    sizeof (struct mode_info_ctrl))) {
			/*
			 * Believe it or not, there's an initiator out
			 * there which sends a mode sense request for all
			 * of the pages, without always sending a data-in
			 * size which is large enough.
			 * NOTE: Need to check the error key returned
			 * here and see if something else should be used.
			 * NOTE(review): da_data_len was sized with MAX()
			 * above, so this test can never fire as written;
			 * the intent was probably to test cdb[4] -- confirm
			 * before changing, since that alters behavior.
			 */
			sbc_io_free(io);
			spc_sense_create(cmd, KEY_ILLEGAL_REQUEST, 0);
			trans_send_complete(cmd, STATUS_CHECK);
			/*
			 * Command already completed; must not fall
			 * through to trans_send_datain() below.
			 */
			return;
		} else {
			/*
			 * Start writing at the head of the buffer so np
			 * is always initialized, even when the LUN has
			 * no geometry and pages 3/4 are skipped.
			 */
			np = io->da_data;
			if (d->d_heads && d->d_cyl && d->d_spt) {
				np = sense_page3(d, np);
				np = sense_page4(d, np);
			}
			np = sense_cache(d, np);
			np = sense_mode_control(cmd->c_lu, np);
			(void) sense_info_ctrl(np);
		}
		break;

	case 0x00:
		/*
		 * SPC-3 Revision 21c, section 6.9.1
		 * Table 97 -- Mode page code usage for all devices
		 * Page Code 00 == Vendor specific. We are going to return
		 * zeros.
		 */
		break;

	default:
		queue_prt(mgmtq, Q_STE_ERRS,
		    "SBC%x LUN%d Unsupported mode_sense request 0x%x",
		    cmd->c_lu->l_targ->s_targ_num, cmd->c_lu->l_common->l_num,
		    cdb[2]);
		sbc_io_free(io);	/* previously leaked on this path */
		spc_sense_create(cmd, KEY_ILLEGAL_REQUEST, 0);
		spc_sense_ascq(cmd, SPC_ASC_INVALID_CDB, 0x00);
		trans_send_complete(cmd, STATUS_CHECK);
		return;
	}

	/*
	 * Send back exactly the amount the initiator asked for; the
	 * sbc_io_free callback releases io (and da_data) when done.
	 */
	if (trans_send_datain(cmd, io->da_data, cdb[4], 0, sbc_io_free,
	    True, io) == False) {
		trans_send_complete(cmd, STATUS_BUSY);
	}
}