/*
 * Build a PERSISTENT RESERVE IN CDB in the supplied CTL I/O.
 *
 * io        - CTL I/O union to fill in (cleared first).
 * data_ptr  - buffer that will receive the reservation data.
 * data_len  - size of that buffer; also placed in the CDB allocation length.
 * action    - service action (READ KEYS, READ RESERVATION, ...).
 * tag_type  - SCSI tag type for the command.
 * control   - CDB control byte.
 */
void
ctl_scsi_persistent_res_in(union ctl_io *io, uint8_t *data_ptr,
    uint32_t data_len, int action, ctl_tag_type tag_type, uint8_t control)
{
    struct ctl_scsiio *ctsio;
    struct scsi_per_res_in *cdb;

    ctl_scsi_zero_io(io);

    ctsio = &io->scsiio;
    cdb = (struct scsi_per_res_in *)ctsio->cdb;

    /* Fill out the CDB itself. */
    cdb->opcode = PERSISTENT_RES_IN;
    cdb->action = action;
    scsi_ulto2b(data_len, cdb->length);
    cdb->control = control;

    /* Describe the data-in transfer to CTL. */
    io->io_hdr.io_type = CTL_IO_SCSI;
    io->io_hdr.flags = CTL_FLAG_DATA_IN;
    ctsio->tag_type = tag_type;
    ctsio->ext_data_ptr = data_ptr;
    ctsio->ext_data_len = data_len;
    ctsio->ext_sg_entries = 0;
    ctsio->ext_data_filled = 0;
    ctsio->sense_len = SSD_FULL_SIZE;
}
/*
 * Build an INQUIRY CDB in the supplied CTL I/O.
 *
 * io        - CTL I/O union to fill in (cleared first).
 * data_ptr  - buffer that will receive the inquiry data.
 * data_len  - size of that buffer; also the CDB allocation length.
 * byte2     - CDB byte 2 (e.g. EVPD bit).
 * page_code - VPD page to request when EVPD is set.
 * tag_type  - SCSI tag type for the command.
 * control   - CDB control byte.
 *
 * Fix: the original assigned io->io_hdr.io_type = CTL_IO_SCSI twice;
 * the redundant duplicate has been removed (no behavior change).
 */
void
ctl_scsi_inquiry(union ctl_io *io, uint8_t *data_ptr, int32_t data_len,
    uint8_t byte2, uint8_t page_code, ctl_tag_type tag_type, uint8_t control)
{
    struct ctl_scsiio *ctsio;
    struct scsi_inquiry *cdb;

    ctl_scsi_zero_io(io);

    ctsio = &io->scsiio;
    cdb = (struct scsi_inquiry *)ctsio->cdb;

    /* Fill out the CDB itself. */
    cdb->opcode = INQUIRY;
    cdb->byte2 = byte2;
    cdb->page_code = page_code;
    cdb->control = control;
    scsi_ulto2b(data_len, cdb->length);

    /* Describe the data-in transfer to CTL. */
    io->io_hdr.io_type = CTL_IO_SCSI;
    io->io_hdr.flags = CTL_FLAG_DATA_IN;
    ctsio->tag_type = tag_type;
    ctsio->cdb_len = sizeof(*cdb);
    ctsio->ext_data_len = data_len;
    ctsio->ext_data_ptr = data_ptr;
    ctsio->ext_sg_entries = 0;
    ctsio->ext_data_filled = 0;
    ctsio->sense_len = SSD_FULL_SIZE;
}
/*
 * Build a SYNCHRONIZE CACHE (10) or SYNCHRONIZE CACHE (16) CDB,
 * choosing the smaller form whenever the LBA and block count fit
 * and the caller has not demanded a larger CDB via minimum_cdb_size.
 *
 * io               - CTL I/O union to fill in (cleared first).
 * immed/reladr     - set the SSC_IMMED / SSC_RELADR bits in byte 2.
 * minimum_cdb_size - force at least this CDB size (>= 16 forces the 16-byte form).
 * starting_lba     - first LBA to synchronize.
 * block_count      - number of blocks (0 has special meaning per SBC).
 * tag_type/control - SCSI tag type and CDB control byte.
 */
void
ctl_scsi_sync_cache(union ctl_io *io, int immed, int reladr,
    int minimum_cdb_size, uint64_t starting_lba, uint32_t block_count,
    ctl_tag_type tag_type, uint8_t control)
{
    int need_16;

    ctl_scsi_zero_io(io);

    /*
     * The 10-byte form holds only a 32-bit LBA and a 16-bit block
     * count; fall back to the 16-byte form if either overflows or
     * the caller insists on a bigger CDB.
     */
    need_16 = (minimum_cdb_size >= 16) ||
        ((block_count & 0xffff) != block_count) ||
        ((starting_lba & 0xffffffff) != starting_lba);

    if (!need_16) {
        struct scsi_sync_cache *cdb;

        cdb = (struct scsi_sync_cache *)io->scsiio.cdb;
        cdb->opcode = SYNCHRONIZE_CACHE;
        if (reladr)
            cdb->byte2 |= SSC_RELADR;
        if (immed)
            cdb->byte2 |= SSC_IMMED;
        scsi_ulto4b(starting_lba, cdb->begin_lba);
        scsi_ulto2b(block_count, cdb->lb_count);
        cdb->control = control;
    } else {
        struct scsi_sync_cache_16 *cdb;

        cdb = (struct scsi_sync_cache_16 *)io->scsiio.cdb;
        cdb->opcode = SYNCHRONIZE_CACHE_16;
        if (reladr)
            cdb->byte2 |= SSC_RELADR;
        if (immed)
            cdb->byte2 |= SSC_IMMED;
        scsi_u64to8b(starting_lba, cdb->begin_lba);
        scsi_ulto4b(block_count, cdb->lb_count);
        cdb->control = control;
    }

    /* No data phase for SYNCHRONIZE CACHE. */
    io->io_hdr.io_type = CTL_IO_SCSI;
    io->io_hdr.flags = CTL_FLAG_DATA_NONE;
    io->scsiio.tag_type = tag_type;
    io->scsiio.ext_data_ptr = NULL;
    io->scsiio.ext_data_len = 0;
    io->scsiio.ext_sg_entries = 0;
    io->scsiio.ext_data_filled = 0;
    io->scsiio.sense_len = SSD_FULL_SIZE;
}
void ctl_scsi_mode_sense(union ctl_io *io, uint8_t *data_ptr, uint32_t data_len, int dbd, int llbaa, uint8_t page_code, uint8_t pc, uint8_t subpage, int minimum_cdb_size, ctl_tag_type tag_type, uint8_t control) { ctl_scsi_zero_io(io); if ((minimum_cdb_size < 10) && (llbaa == 0) && (data_len < 256)) { struct scsi_mode_sense_6 *cdb; cdb = (struct scsi_mode_sense_6 *)io->scsiio.cdb; cdb->opcode = MODE_SENSE_6; if (dbd) cdb->byte2 |= SMS_DBD; cdb->page = page_code | pc; cdb->subpage = subpage; cdb->length = data_len; cdb->control = control; } else { struct scsi_mode_sense_10 *cdb; cdb = (struct scsi_mode_sense_10 *)io->scsiio.cdb; cdb->opcode = MODE_SENSE_10; if (dbd) cdb->byte2 |= SMS_DBD; if (llbaa) cdb->byte2 |= SMS10_LLBAA; cdb->page = page_code | pc; cdb->subpage = subpage; scsi_ulto2b(data_len, cdb->length); cdb->control = control; } io->io_hdr.io_type = CTL_IO_SCSI; io->io_hdr.flags = CTL_FLAG_DATA_IN; io->scsiio.tag_type = tag_type; io->scsiio.ext_data_ptr = data_ptr; io->scsiio.ext_data_len = data_len; io->scsiio.ext_sg_entries = 0; io->scsiio.ext_data_filled = 0; io->scsiio.sense_len = SSD_FULL_SIZE; }
/* * Compose a SMP PHY CONTROL request and put it into a CCB. This is * current as of SPL Revision 7. */ void smp_phy_control(struct ccb_smpio *smpio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), struct smp_phy_control_request *request, int request_len, uint8_t *response, int response_len, int long_response, uint32_t expected_exp_change_count, int phy, int phy_op, int update_pp_timeout_val, uint64_t attached_device_name, int prog_min_prl, int prog_max_prl, int slumber_partial, int pp_timeout_value, uint32_t timeout) { cam_fill_smpio(smpio, retries, cbfcnp, /*flags*/CAM_DIR_BOTH, (uint8_t *)request, request_len - SMP_CRC_LEN, response, response_len, timeout); bzero(request, sizeof(*request)); request->frame_type = SMP_FRAME_TYPE_REQUEST; request->function = SMP_FUNC_PHY_CONTROL; request->response_len = long_response ? SMP_PC_RESPONSE_LEN : 0; request->request_len = long_response ? SMP_PC_REQUEST_LEN : 0; scsi_ulto2b(expected_exp_change_count, request->expected_exp_chg_cnt); request->phy = phy; request->phy_operation = phy_op; if (update_pp_timeout_val != 0) request->update_pp_timeout |= SMP_PC_UPDATE_PP_TIMEOUT; scsi_u64to8b(attached_device_name, request->attached_device_name); request->prog_min_phys_link_rate = (prog_min_prl << SMP_PC_PROG_MIN_PL_RATE_SHIFT) & SMP_PC_PROG_MIN_PL_RATE_MASK; request->prog_max_phys_link_rate = (prog_max_prl << SMP_PC_PROG_MAX_PL_RATE_SHIFT) & SMP_PC_PROG_MAX_PL_RATE_MASK; request->config_bits0 = slumber_partial; request->pp_timeout_value = pp_timeout_value; }
/******************************************************************************** * Perform a SCSI INQUIRY command and return pointers to the relevant data. */ int mlx_scsi_inquiry(int unit, int channel, int target, char **vendor, char **device, char **revision) { struct mlx_usercommand cmd; static struct { struct mlx_dcdb dcdb; union { struct scsi_inquiry_data inq; u_int8_t pad[SHORT_INQUIRY_LENGTH]; } d; } __attribute__ ((packed)) dcdb_cmd; struct scsi_inquiry *inq_cmd = (struct scsi_inquiry *)&dcdb_cmd.dcdb.dcdb_cdb[0]; /* build the command */ cmd.mu_datasize = sizeof(dcdb_cmd); cmd.mu_buf = &dcdb_cmd; cmd.mu_command[0] = MLX_CMD_DIRECT_CDB; /* build the DCDB */ bzero(&dcdb_cmd, sizeof(dcdb_cmd)); dcdb_cmd.dcdb.dcdb_channel = channel; dcdb_cmd.dcdb.dcdb_target = target; dcdb_cmd.dcdb.dcdb_flags = MLX_DCDB_DATA_IN | MLX_DCDB_TIMEOUT_10S; dcdb_cmd.dcdb.dcdb_datasize = SHORT_INQUIRY_LENGTH; dcdb_cmd.dcdb.dcdb_cdb_length = 6; dcdb_cmd.dcdb.dcdb_sense_length = SSD_FULL_SIZE; /* build the cdb */ inq_cmd->opcode = INQUIRY; scsi_ulto2b(SHORT_INQUIRY_LENGTH, inq_cmd->length); /* hand it off for processing */ mlx_perform(unit, mlx_command, &cmd); if (cmd.mu_status == 0) { *vendor = &dcdb_cmd.d.inq.vendor[0]; *device = &dcdb_cmd.d.inq.product[0]; *revision = &dcdb_cmd.d.inq.revision[0]; } return(cmd.mu_status); }
/*
 * Build a READ or WRITE CDB (6/10/12/16-byte form) in the supplied CTL
 * I/O, selecting the smallest CDB that can encode the requested LBA and
 * block count, subject to minimum_cdb_size.
 *
 * io               - CTL I/O union to fill in (cleared first).
 * data_ptr/data_len- data buffer and its length.
 * read_op          - non-zero for READ, zero for WRITE.
 * byte2            - CDB byte 2 flags (forces at least the 10-byte form
 *                    when non-zero, since READ(6)/WRITE(6) have no flags).
 * minimum_cdb_size - force at least this CDB size (useful for testing
 *                    and for devices lacking the 6-byte commands).
 * lba/num_blocks   - starting LBA and transfer length in blocks.
 * tag_type/control - SCSI tag type and CDB control byte.
 */
void
ctl_scsi_read_write(union ctl_io *io, uint8_t *data_ptr, uint32_t data_len,
    int read_op, uint8_t byte2, int minimum_cdb_size, uint64_t lba,
    uint32_t num_blocks, ctl_tag_type tag_type, uint8_t control)
{
    struct ctl_scsiio *ctsio;

    ctl_scsi_zero_io(io);

    io->io_hdr.io_type = CTL_IO_SCSI;

    ctsio = &io->scsiio;

    /*
     * Pick out the smallest CDB that will hold the user's request.
     * minimum_cdb_size allows cranking the CDB size up, even for
     * requests that would not normally need a large CDB.  This can be
     * useful for testing (e.g. to make sure READ_16 support works without
     * having an array larger than 2TB) and for compatibility -- e.g.
     * if your device doesn't support READ_6.  (ATAPI drives don't.)
     */
    /* 6-byte: 21-bit LBA, 8-bit count, no flag byte. */
    if ((minimum_cdb_size < 10)
     && ((lba & 0x1fffff) == lba)
     && ((num_blocks & 0xff) == num_blocks)
     && (byte2 == 0)) {
        struct scsi_rw_6 *cdb;

        /*
         * Note that according to SBC-2, the target should return 256
         * blocks if the transfer length in a READ(6) or WRITE(6) CDB
         * is set to 0.  Since it's possible that some targets
         * won't do the right thing, we only send a READ(6) or
         * WRITE(6) for transfer sizes up to and including 255 blocks.
         */
        cdb = (struct scsi_rw_6 *)ctsio->cdb;

        cdb->opcode = (read_op) ? READ_6 : WRITE_6;
        scsi_ulto3b(lba, cdb->addr);
        cdb->length = num_blocks & 0xff;
        cdb->control = control;

        ctsio->cdb_len = sizeof(*cdb);
    /* 10-byte: 32-bit LBA, 16-bit count. */
    } else if ((minimum_cdb_size < 12)
            && ((num_blocks & 0xffff) == num_blocks)
            && ((lba & 0xffffffff) == lba)) {
        struct scsi_rw_10 *cdb;

        cdb = (struct scsi_rw_10 *)ctsio->cdb;

        cdb->opcode = (read_op) ? READ_10 : WRITE_10;
        cdb->byte2 = byte2;
        scsi_ulto4b(lba, cdb->addr);
        cdb->reserved = 0;
        scsi_ulto2b(num_blocks, cdb->length);
        cdb->control = control;

        ctsio->cdb_len = sizeof(*cdb);
    /*
     * 12-byte: 32-bit LBA, 32-bit count.  (num_blocks is a uint32_t,
     * so its mask check is always true; the LBA check is the real gate.)
     */
    } else if ((minimum_cdb_size < 16)
            && ((num_blocks & 0xffffffff) == num_blocks)
            && ((lba & 0xffffffff) == lba)) {
        struct scsi_rw_12 *cdb;

        cdb = (struct scsi_rw_12 *)ctsio->cdb;

        cdb->opcode = (read_op) ? READ_12 : WRITE_12;
        cdb->byte2 = byte2;
        scsi_ulto4b(lba, cdb->addr);
        scsi_ulto4b(num_blocks, cdb->length);
        cdb->reserved = 0;
        cdb->control = control;

        ctsio->cdb_len = sizeof(*cdb);
    /* 16-byte: 64-bit LBA, 32-bit count — always fits. */
    } else {
        struct scsi_rw_16 *cdb;

        cdb = (struct scsi_rw_16 *)ctsio->cdb;

        cdb->opcode = (read_op) ? READ_16 : WRITE_16;
        cdb->byte2 = byte2;
        scsi_u64to8b(lba, cdb->addr);
        scsi_ulto4b(num_blocks, cdb->length);
        cdb->reserved = 0;
        cdb->control = control;

        ctsio->cdb_len = sizeof(*cdb);
    }

    io->io_hdr.io_type = CTL_IO_SCSI;
    /* Data direction follows the operation. */
    if (read_op != 0)
        io->io_hdr.flags = CTL_FLAG_DATA_IN;
    else
        io->io_hdr.flags = CTL_FLAG_DATA_OUT;
    ctsio->tag_type = tag_type;
    ctsio->ext_data_ptr = data_ptr;
    ctsio->ext_data_len = data_len;
    ctsio->ext_sg_entries = 0;
    ctsio->ext_data_filled = 0;
    ctsio->sense_len = SSD_FULL_SIZE;
}
/*
 * Emulate a minimal SES-2 device on top of AHCI SGPIO enclosure LEDs.
 *
 * Decodes the ATA SEP ATTN (SEMB) command carried in the CCB and
 * services the subset of SES pages needed by enclosure tooling:
 * IDENTIFY, Configuration (0x01), Enclosure Status/Control (0x02) and
 * Element Descriptor (0x07).  Results are written directly into the
 * CCB's data buffer; the CCB is always completed via xpt_done().
 *
 * NOTE(review): the strncpy() calls below deliberately fill fixed-width,
 * space-padded SCSI fields and are not expected to NUL-terminate.
 */
static void
ahci_em_emulate_ses_on_led(device_t dev, union ccb *ccb)
{
    struct ahci_enclosure *enc;
    struct ses_status_page *page;
    struct ses_status_array_dev_slot *ads, *ads0;
    struct ses_elm_desc_hdr *elmd;
    uint8_t *buf;
    int i;

    enc = device_get_softc(dev);
    buf = ccb->ataio.data_ptr;

    /* General request validation: SEMB command with a buffer large
     * enough for the advertised sector count (512-byte-ward units of 4
     * per the check below — presumably dwords; confirm against caller). */
    if (ccb->ataio.cmd.command != ATA_SEP_ATTN ||
        ccb->ataio.dxfer_len < ccb->ataio.cmd.sector_count * 4) {
        ccb->ccb_h.status = CAM_REQ_INVALID;
        goto out;
    }

    /* SEMB IDENTIFY */
    if (ccb->ataio.cmd.features == 0xEC &&
        ccb->ataio.cmd.sector_count >= 16) {
        bzero(buf, ccb->ataio.dxfer_len);
        buf[0] = 64;   /* Valid bytes. */
        buf[2] = 0x30; /* NAA Locally Assigned. */
        strncpy(&buf[3], device_get_nameunit(dev), 7);
        strncpy(&buf[10], "AHCI ", SID_VENDOR_SIZE);
        strncpy(&buf[18], "SGPIO Enclosure ", SID_PRODUCT_SIZE);
        strncpy(&buf[34], "1.00", SID_REVISION_SIZE);
        strncpy(&buf[39], "0001", 4);
        strncpy(&buf[43], "S-E-S ", 6);
        strncpy(&buf[49], "2.00", 4);
        ccb->ccb_h.status = CAM_REQ_CMP;
        goto out;
    }

    /* SEMB RECEIVE DIAGNOSTIC RESULT (0): list of supported pages. */
    page = (struct ses_status_page *)buf;
    if (ccb->ataio.cmd.lba_low == 0x02 &&
        ccb->ataio.cmd.features == 0x00 &&
        ccb->ataio.cmd.sector_count >= 2) {
        bzero(buf, ccb->ataio.dxfer_len);
        page->hdr.page_code = 0;
        scsi_ulto2b(4, page->hdr.length);
        /* Supported page codes: 0x00, 0x01, 0x02, 0x07. */
        buf[4] = 0;
        buf[5] = 1;
        buf[6] = 2;
        buf[7] = 7;
        ccb->ccb_h.status = CAM_REQ_CMP;
        goto out;
    }

    /* SEMB RECEIVE DIAGNOSTIC RESULT (1): configuration page — one
     * enclosure descriptor plus a single element type descriptor. */
    if (ccb->ataio.cmd.lba_low == 0x02 &&
        ccb->ataio.cmd.features == 0x01 &&
        ccb->ataio.cmd.sector_count >= 13) {
        struct ses_enc_desc *ed;
        struct ses_elm_type_desc *td;

        bzero(buf, ccb->ataio.dxfer_len);
        page->hdr.page_code = 0x01;
        scsi_ulto2b(4 + 4 + 36 + 4, page->hdr.length);
        ed = (struct ses_enc_desc *)&buf[8];
        ed->byte0 = 0x11;
        ed->subenc_id = 0;
        ed->num_types = 1;
        ed->length = 36;
        strncpy(ed->vendor_id, "AHCI ", SID_VENDOR_SIZE);
        strncpy(ed->product_id, "SGPIO Enclosure ", SID_PRODUCT_SIZE);
        strncpy(ed->product_rev, " ", SID_REVISION_SIZE);
        td = (struct ses_elm_type_desc *)ses_enc_desc_next(ed);
        td->etype_elm_type = 0x17; /* Array Device Slot elements. */
        td->etype_maxelt = enc->channels;
        td->etype_subenc = 0;
        td->etype_txt_len = 0;
        ccb->ccb_h.status = CAM_REQ_CMP;
        goto out;
    }

    /* SEMB RECEIVE DIAGNOSTIC RESULT (2): enclosure status page —
     * element 0 is the overall slot, elements 1..channels are per-slot. */
    if (ccb->ataio.cmd.lba_low == 0x02 &&
        ccb->ataio.cmd.features == 0x02 &&
        ccb->ataio.cmd.sector_count >= (3 + enc->channels)) {
        bzero(buf, ccb->ataio.dxfer_len);
        page->hdr.page_code = 0x02;
        scsi_ulto2b(4 + 4 * (1 + enc->channels), page->hdr.length);
        for (i = 0; i < enc->channels; i++) {
            ads = &page->elements[i + 1].array_dev_slot;
            /* Report the cached 4-byte LED/status state for the slot. */
            memcpy(ads, enc->status[i], 4);
            ads->common.bytes[0] |= (enc->ichannels & (1 << i)) ?
                SES_OBJSTAT_UNKNOWN : SES_OBJSTAT_NOTINSTALLED;
        }
        ccb->ccb_h.status = CAM_REQ_CMP;
        goto out;
    }

    /* SEMB SEND DIAGNOSTIC (2): enclosure control page.  A per-slot
     * element with SESCTL_CSEL takes precedence; otherwise the overall
     * element (index 0) applies to every slot. */
    if (ccb->ataio.cmd.lba_low == 0x82 &&
        ccb->ataio.cmd.features == 0x02 &&
        ccb->ataio.cmd.sector_count >= (3 + enc->channels)) {
        ads0 = &page->elements[0].array_dev_slot;
        for (i = 0; i < enc->channels; i++) {
            ads = &page->elements[i + 1].array_dev_slot;
            if (ads->common.bytes[0] & SESCTL_CSEL) {
                enc->status[i][0] = 0;
                enc->status[i][1] = ads->bytes[0] & 0x02;
                enc->status[i][2] = ads->bytes[1] & (0x80 | SESCTL_RQSID);
                enc->status[i][3] = ads->bytes[2] & SESCTL_RQSFLT;
                ahci_em_setleds(dev, i);
            } else if (ads0->common.bytes[0] & SESCTL_CSEL) {
                enc->status[i][0] = 0;
                enc->status[i][1] = ads0->bytes[0] & 0x02;
                enc->status[i][2] = ads0->bytes[1] & (0x80 | SESCTL_RQSID);
                enc->status[i][3] = ads0->bytes[2] & SESCTL_RQSFLT;
                ahci_em_setleds(dev, i);
            }
        }
        ccb->ccb_h.status = CAM_REQ_CMP;
        goto out;
    }

    /* SEMB RECEIVE DIAGNOSTIC RESULT (7): element descriptor page —
     * a 12-byte descriptor ("SLOT %03d") for each slot. */
    if (ccb->ataio.cmd.lba_low == 0x02 &&
        ccb->ataio.cmd.features == 0x07 &&
        ccb->ataio.cmd.sector_count >= (3 + 3 * enc->channels)) {
        bzero(buf, ccb->ataio.dxfer_len);
        page->hdr.page_code = 0x07;
        scsi_ulto2b(4 + 4 + 12 * enc->channels, page->hdr.length);
        for (i = 0; i < enc->channels; i++) {
            elmd = (struct ses_elm_desc_hdr *)&buf[8 + 4 + 12 * i];
            scsi_ulto2b(8, elmd->length);
            snprintf((char *)(elmd + 1), 9, "SLOT %03d", i);
        }
        ccb->ccb_h.status = CAM_REQ_CMP;
        goto out;
    }

    /* Unrecognized SEMB request. */
    ccb->ccb_h.status = CAM_REQ_INVALID;
out:
    xpt_done(ccb);
}