Example #1
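/*
 * []----
 * | do_dataout -- build a raw_io_t around the caller's CDB and request
 * |	the data-out phase from the transport; any failure is reported
 * |	as a HARDWARE ERROR with CHECK CONDITION status.
 * []----
 */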
static void
do_dataout(t10_cmd_t *cmd, uint8_t *cdb, size_t cdb_len, size_t opt_data_len)
{
	char		*opt_data	= NULL;
	raw_io_t	*io;

	if ((io = calloc(1, sizeof (*io))) == NULL) {
		spc_sense_create(cmd, KEY_HARDWARE_ERROR, 0);
		trans_send_complete(cmd, STATUS_CHECK);
		return;
	}
	if ((opt_data_len != 0) &&
	    ((opt_data = malloc(opt_data_len)) == NULL)) {
		free(io);
		spc_sense_create(cmd, KEY_HARDWARE_ERROR, 0);
		trans_send_complete(cmd, STATUS_CHECK);
		return;
	}
	io->r_cdb	= cdb;
	io->r_cdb_len	= cdb_len;
	io->r_data	= opt_data;
	io->r_data_len	= opt_data_len;
	if (trans_rqst_dataout(cmd, opt_data, opt_data_len, 0, io,
	    raw_free_io) == False) {
		spc_sense_create(cmd, KEY_HARDWARE_ERROR, 0);
		trans_send_complete(cmd, STATUS_CHECK);
	}
}
Example #2
/*
 * []----
 * | raw_read_cmplt -- Once we have the data, need to send it along.
 * []----
 */
static void
raw_read_cmplt(emul_handle_t id)
{
	raw_io_t	*io		= (raw_io_t *)id;
	int		sense_len;
	uint64_t	err_blkno;
	t10_cmd_t	*cmd		= io->r_cmd;
	Boolean_t	last;

	if (io->r_aio.a_aio.aio_return != io->r_data_len) {
		err_blkno = io->r_lba + ((io->r_offset + 511) / 512);
		cmd->c_resid = (io->r_lba_cnt * 512) - io->r_offset;
		if (err_blkno > FIXED_SENSE_ADDL_INFO_LEN)
			sense_len = INFORMATION_SENSE_DESCR;
		else
			sense_len = 0;
		spc_sense_create(cmd, KEY_HARDWARE_ERROR, sense_len);
		spc_sense_info(cmd, err_blkno);
		trans_send_complete(cmd, STATUS_CHECK);
		raw_free_io(io);
		return;
	}

	last = ((io->r_offset + io->r_data_len) < (io->r_lba_cnt * 512LL)) ?
	    False : True;
	if (trans_send_datain(cmd, io->r_data, io->r_data_len,
	    io->r_offset, raw_free_io, last, io) == False) {
		raw_free_io(io);
		spc_sense_create(cmd, KEY_HARDWARE_ERROR, 0);
		trans_send_complete(cmd, STATUS_CHECK);
	}
}
Example #3
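/*
 * []----
 * | raw_write_tape -- WRITE for sequential-access (tape) devices; size
 * |	the transfer from the CDB and request the data from the
 * |	transport.
 * []----
 */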
/*ARGSUSED*/
static void
raw_write_tape(t10_cmd_t *cmd, uint8_t *cdb, size_t cdb_len)
{
	size_t		request_len;
	size_t		xfer;
	raw_io_t	*io;

	request_len	= (cdb[2] << 16) | (cdb[3] << 8) | cdb[4];
	request_len	*= (cdb[1] & 0x1) ? 512 : 1;

	if ((io = calloc(1, sizeof (*io))) == NULL) {
		spc_sense_create(cmd, KEY_HARDWARE_ERROR, 0);
		trans_send_complete(cmd, STATUS_CHECK);
		return;
	}
	if ((io->r_data = malloc(request_len)) == NULL) {
		free(io);
		spc_sense_create(cmd, KEY_HARDWARE_ERROR, 0);
		trans_send_complete(cmd, STATUS_CHECK);
		return;
	}
	io->r_data_len		= request_len;
	io->r_cmd		= cmd;

	xfer = min(T10_MAX_OUT(cmd), request_len);
	(void) trans_rqst_dataout(cmd, io->r_data, xfer, io->r_offset, io,
	    raw_free_io);
}
Example #4
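/*
 * []----
 * | raw_msense -- pass a MODE SENSE request through to the device and
 * |	return the resulting data to the initiator.
 * []----
 */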
static void
raw_msense(t10_cmd_t *cmd, uint8_t *cdb, size_t cdb_len)
{
	raw_io_t	*io;
	int		len;

	switch (cdb[0]) {
	case SCMD_MODE_SENSE:
		len = cdb[4];
		break;

	case SCMD_MODE_SENSE_G1:
		len = (cdb[7] << 8) | cdb[8];
		break;

	default:
		/*
		 * Unexpected opcode; don't continue with an
		 * uninitialized allocation length.
		 */
		spc_sense_create(cmd, KEY_ILLEGAL_REQUEST, 0);
		trans_send_complete(cmd, STATUS_CHECK);
		return;
	}

	if (((io = do_datain(cmd, cdb, CDB_GROUP0, len)) == NULL) ||
	    (io->r_status != STATUS_GOOD)) {
		if (io != NULL)
			raw_free_io(io);
		trans_send_complete(cmd, STATUS_CHECK);
		return;
	}
	if (trans_send_datain(cmd, io->r_data, io->r_data_len, 0,
	    raw_free_io, True, io) == False) {
		spc_sense_create(cmd, KEY_HARDWARE_ERROR, 0);
		trans_send_complete(cmd, STATUS_CHECK);
	}
}
Example #5
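/*
 * []----
 * | raw_service_actiong4 -- pass the Group 4 service action through to
 * |	the device; the returned data is treated as READ CAPACITY(16)
 * |	and used to remember the device size.
 * []----
 */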
static void
raw_service_actiong4(t10_cmd_t *cmd, uint8_t *cdb, size_t cdb_len)
{
	raw_io_t	*io;
	uint32_t	len;
	struct scsi_capacity_16	cap16;
	raw_params_t		*r;

	if ((r = (raw_params_t *)T10_PARAMS_AREA(cmd)) == NULL)
		return;

	len = (cdb[10] << 24) | (cdb[11] << 16) | (cdb[12] << 8) | cdb[13];
	if (((io = do_datain(cmd, cdb, CDB_GROUP4, len)) == NULL) ||
	    (io->r_status != STATUS_GOOD)) {
		if (io != NULL)
			raw_free_io(io);
		trans_send_complete(cmd, STATUS_CHECK);
		return;
	}

	bcopy(io->r_data, &cap16, sizeof (cap16));
	/*
	 * Currently there's a bug in ZFS which doesn't report a capacity
	 * for any of the volumes. This means that when using ZFS the
	 * administrator must supply the device size.
	 */
	if (cap16.sc_capacity != 0)
		r->r_size = cap16.sc_capacity;
	if (trans_send_datain(cmd, io->r_data, io->r_data_len, 0,
	    raw_free_io, True, io) == False) {
		spc_sense_create(cmd, KEY_HARDWARE_ERROR, 0);
		trans_send_complete(cmd, STATUS_CHECK);
	}
}
Example #6
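/*
 * []----
 * | raw_inquiry -- pass an INQUIRY through to the device; for a
 * |	standard (non-EVPD) inquiry remember the reported device type.
 * []----
 */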
static void
raw_inquiry(t10_cmd_t *cmd, uint8_t *cdb, size_t cdb_len)
{
	raw_io_t		*io;
	uint32_t		len;
	struct scsi_inquiry	inq;
	raw_params_t		*r;

	if ((r = (raw_params_t *)T10_PARAMS_AREA(cmd)) == NULL)
		return;

	len = (cdb[3] << 8) | cdb[4];
	if (((io = do_datain(cmd, cdb, CDB_GROUP0, len)) == NULL) ||
	    (io->r_status != STATUS_GOOD)) {
		if (io != NULL)
			raw_free_io(io);
		trans_send_complete(cmd, STATUS_CHECK);
		return;
	}

	if ((cdb[1] & 1) == 0) {
		bcopy(io->r_data, &inq, sizeof (inq));
		r->r_dtype = inq.inq_dtype;
	}
	if (trans_send_datain(cmd, io->r_data, io->r_data_len, 0,
	    raw_free_io, True, io) == False) {
		spc_sense_create(cmd, KEY_HARDWARE_ERROR, 0);
		trans_send_complete(cmd, STATUS_CHECK);
	}
}
Example #7
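/*
 * []----
 * | do_datain -- helper that allocates a raw_io_t, runs the CDB through
 * |	do_uscsi and returns the I/O structure (NULL on allocation
 * |	failure) so the caller can forward the data.
 * []----
 */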
static raw_io_t *
do_datain(t10_cmd_t *cmd, uint8_t *cdb, size_t cdb_len, size_t data_len)
{
	raw_io_t	*io;

	if ((io = calloc(1, sizeof (*io))) == NULL) {
		spc_sense_create(cmd, KEY_HARDWARE_ERROR, 0);
		return (NULL);
	}

	io->r_cdb	= cdb;
	io->r_cdb_len	= cdb_len;
	io->r_data_len	= data_len;
	if ((data_len != 0) && ((io->r_data = malloc(data_len)) == NULL)) {
		spc_sense_create(cmd, KEY_HARDWARE_ERROR, 0);
		free(io);
		return (NULL);
	}
	(void) do_uscsi(cmd, io, data_len == 0 ? NoData : RawDataFromDevice);
	return (io);
}
Example #8
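/*
 * []----
 * | sbc_synccache -- SYNCHRONIZE CACHE; fsync the backing store and
 * |	return status, immediately if the IMMED bit is set.
 * []----
 */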
/*ARGSUSED*/
void
sbc_synccache(t10_cmd_t *cmd, uint8_t *cdb, size_t cdb_len)
{
	/*
	 * SBC-2 revision 16, section 5.18
	 * Reserve bit checks
	 */
	if ((cdb[1] & ~SBC_SYNC_CACHE_IMMED) || cdb[6] ||
	    SAM_CONTROL_BYTE_RESERVED(cdb[9])) {
		spc_sense_create(cmd, KEY_ILLEGAL_REQUEST, 0);
		spc_sense_ascq(cmd, SPC_ASC_INVALID_CDB, 0x00);
		trans_send_complete(cmd, STATUS_CHECK);
	} else {
		/*
		 * SBC-3, revision 16, section 5.18
		 * An IMMED bit set to one specifies that the device server
		 * shall return status as soon as the CDB has been validated.
		 */
		if (cdb[1] & SBC_SYNC_CACHE_IMMED) {

			/*
			 * Immediately return a status of GOOD. If an error
			 * occurs with the fsync the next command will pick
			 * up an error.
			 */
			trans_send_complete(cmd, STATUS_GOOD);
			if (fsync(cmd->c_lu->l_common->l_fd) == -1) {
				cmd->c_lu->l_status	= KEY_HARDWARE_ERROR;
				cmd->c_lu->l_asc	= 0x00;
				cmd->c_lu->l_ascq	= 0x00;
			}
		} else {
			if (fsync(cmd->c_lu->l_common->l_fd) == -1) {
				spc_sense_create(cmd, KEY_HARDWARE_ERROR, 0);
				trans_send_complete(cmd, STATUS_CHECK);
			} else
				trans_send_complete(cmd, STATUS_GOOD);
		}
	}
}
Example #9
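/*
 * []----
 * | sbc_service_actiong4 -- decode the Group 4 service action and
 * |	dispatch READ CAPACITY(16); anything else is an illegal request.
 * []----
 */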
/*ARGSUSED*/
void
sbc_service_actiong4(t10_cmd_t *cmd, uint8_t *cdb, size_t cdb_len)
{
	switch (cdb[1] & SPC_GROUP4_SERVICE_ACTION_MASK) {
	case SSVC_ACTION_READ_CAPACITY_G4:
		sbc_read_capacity16(cmd, cdb, cdb_len);
		break;
	default:
		spc_sense_create(cmd, KEY_ILLEGAL_REQUEST, 0);
		spc_sense_ascq(cmd, 0x20, 0x00);
		trans_send_complete(cmd, STATUS_CHECK);
		break;
	}
}
Example #10
/*
 * []------------------------------------------------------------------[]
 * | SCSI Object-Based Storage Device Commands				|
 * | T10/1355-D								|
 * | The following functions implement the emulation of OSD type	|
 * | commands.								|
 * []------------------------------------------------------------------[]
 */
static void
osd_service_action(t10_cmd_t *cmd, uint8_t *cdb, size_t cdb_len)
{
	osd_generic_cdb_t	*o;
	uint16_t		service_action;

	/*
	 * The assert is for debug builds only -- there's no need to drop
	 * core in production if someone doesn't play right; the check
	 * below handles it gracefully.
	 */
	assert(cdb_len == sizeof (*o));
	if (cdb_len != sizeof (*o)) {
		spc_sense_create(cmd, KEY_ILLEGAL_REQUEST, 0);
		spc_sense_ascq(cmd, SPC_ASC_INVALID_CDB, SPC_ASCQ_INVALID_CDB);
		trans_send_complete(cmd, STATUS_CHECK);
		return;
	}
	o = (osd_generic_cdb_t *)cdb;
	service_action = o->ocdb_basic.b_service_action[0] << 8 |
	    o->ocdb_basic.b_service_action[1];

	queue_prt(mgmtq, Q_STE_NONIO,
	    "OSD%x  LUN%d service=0x%x, options=0x%x, specific_opts=0x%x,"
	    " fmt=0x%x", cmd->c_lu->l_targ->s_targ_num,
	    cmd->c_lu->l_common->l_num, service_action, o->ocdb_options,
	    o->ocdb_specific_opts, o->ocdb_fmt);

	switch (service_action) {
	case OSD_APPEND:
	case OSD_CREATE:
	case OSD_CREATE_AND_WRITE:
	case OSD_CREATE_COLLECTION:
	case OSD_CREATE_PARTITION:
	case OSD_FLUSH:
	case OSD_FLUSH_COLLECTION:
	case OSD_FLUSH_OSD:
	case OSD_FLUSH_PARTITION:
	case OSD_FORMAT_OSD:
	case OSD_GET_ATTR:
	case OSD_LIST:
		osd_list(cmd, cdb, cdb_len);
		break;

	case OSD_LIST_COLLECTION:
	case OSD_PERFORM_SCSI:
	case OSD_TASK_MGMT:
	default:
		spc_unsupported(cmd, cdb, cdb_len);
		break;
	}
}
Example #11
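/*
 * []----
 * | raw_request_sense -- pass a REQUEST SENSE through to the device and
 * |	return whatever sense data it provides.
 * []----
 */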
static void
raw_request_sense(t10_cmd_t *cmd, uint8_t *cdb, size_t cdb_len)
{
	raw_io_t	*io;

	if (((io = do_datain(cmd, cdb, CDB_GROUP0, cdb[4])) == NULL) ||
	    (io->r_status != STATUS_GOOD)) {
		if (io != NULL)
			raw_free_io(io);
		trans_send_complete(cmd, STATUS_CHECK);
	} else {
		if (trans_send_datain(cmd, io->r_data, io->r_data_len, 0,
		    raw_free_io, True, io) == False) {
			spc_sense_create(cmd, KEY_HARDWARE_ERROR, 0);
			trans_send_complete(cmd, STATUS_CHECK);
		}
	}
}
Example #12
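/*
 * []----
 * | do_uscsi -- issue the CDB to the backing device via the USCSICMD
 * |	ioctl; on error translate the returned request-sense data (or a
 * |	generic HARDWARE ERROR) into sense data for the initiator.
 * []----
 */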
static int
do_uscsi(t10_cmd_t *cmd, raw_io_t *io, raw_direction_t dir)
{
	struct uscsi_cmd	u;
	uchar_t			sense_buf[128];

	bzero(&u, sizeof (u));
	u.uscsi_cdb	= (caddr_t)io->r_cdb;
	u.uscsi_cdblen	= io->r_cdb_len;
	u.uscsi_bufaddr	= io->r_data;
	u.uscsi_buflen	= io->r_data_len;
	u.uscsi_flags	= ((dir == RawDataToDevice) ? USCSI_WRITE :
	    (dir == RawDataFromDevice) ? USCSI_READ : 0) | USCSI_RQENABLE;
	u.uscsi_rqbuf	= (char *)sense_buf;
	u.uscsi_rqlen	= sizeof (sense_buf);

	if ((ioctl(cmd->c_lu->l_common->l_fd, USCSICMD, &u) == 0) &&
	    (u.uscsi_status == 0)) {
		io->r_status = 0;
		return (0);
	}
	queue_prt(mgmtq, Q_STE_ERRS,
	    "RAW%d  LUN%d USCSICMD errno %d, cmd_status %d, rqstatus %d, "
	    "rqresid %d",
	    cmd->c_lu->l_targ->s_targ_num, cmd->c_lu->l_common->l_num, errno,
	    u.uscsi_status, u.uscsi_rqstatus, u.uscsi_rqresid);

	if ((u.uscsi_rqlen - u.uscsi_rqresid) <
	    sizeof (struct scsi_extended_sense)) {
		queue_prt(mgmtq, Q_STE_ERRS,
		    "RAW%x  LUN%d -- No sense data, got=%d, needed=%d",
		    cmd->c_lu->l_targ->s_targ_num,
		    cmd->c_lu->l_common->l_num,
		    u.uscsi_rqlen - u.uscsi_rqresid,
		    sizeof (struct scsi_extended_sense));
		spc_sense_create(cmd, KEY_HARDWARE_ERROR, 0);
		io->r_status = STATUS_CHECK;
		return (STATUS_CHECK);
	} else {
		spc_sense_raw(cmd, sense_buf, u.uscsi_rqlen - u.uscsi_rqresid);
		io->r_status = u.uscsi_status;
		return (u.uscsi_status);
	}
}
Example #13
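/*
 * []----
 * | sbc_reserve -- RESERVE(6); reject the command if another session
 * |	holds the reservation, otherwise record this initiator as the
 * |	owner and route the other sessions to sbc_cmd_reserved.
 * []----
 */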
/*ARGSUSED*/
static void
sbc_reserve(t10_cmd_t *cmd, uint8_t *cdb, size_t cdb_len)
{
	disk_params_t	*p = (disk_params_t *)T10_PARAMS_AREA(cmd);
	t10_lu_impl_t	*lu;

	if (p == NULL)
		return;

	if (cdb[1] & 0xe0 || SAM_CONTROL_BYTE_RESERVED(cdb[5])) {
		spc_sense_create(cmd, KEY_ILLEGAL_REQUEST, 0);
		spc_sense_ascq(cmd, SPC_ASC_INVALID_CDB, 0x00);
		trans_send_complete(cmd, STATUS_CHECK);
		return;
	}

	if ((p->d_reserve_owner != NULL) &&
	    (p->d_reserve_owner != cmd->c_lu)) {

		trans_send_complete(cmd, STATUS_RESERVATION_CONFLICT);
		return;

	} else if (p->d_reserve_owner == cmd->c_lu) {

		/*
		 * According to SPC-2 revision 20, section 7.21.2,
		 * it shall be permissible for an initiator to
		 * reserve a logical unit that is currently reserved
		 * by that initiator.
		 */
		trans_send_complete(cmd, STATUS_GOOD);
	} else {

		lu = avl_first(&cmd->c_lu->l_common->l_all_open);
		do {
			if (lu != cmd->c_lu)
				lu->l_cmd = sbc_cmd_reserved;
			lu = AVL_NEXT(&cmd->c_lu->l_common->l_all_open, lu);
		} while (lu != NULL);
		p->d_reserve_owner = cmd->c_lu;
		trans_send_complete(cmd, STATUS_GOOD);
	}
}
Example #14
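/*
 * []----
 * | raw_report_tpgs -- pass a REPORT TARGET PORT GROUPS request through
 * |	to the device and return the data to the initiator.
 * []----
 */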
static void
raw_report_tpgs(t10_cmd_t *cmd, uint8_t *cdb, size_t cdb_len)
{
	raw_io_t	*io;
	uint32_t	len;

	len = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
	if (((io = do_datain(cmd, cdb, CDB_GROUP5, len)) == NULL) ||
	    (io->r_status != STATUS_GOOD)) {
		if (io != NULL)
			raw_free_io(io);
		trans_send_complete(cmd, STATUS_CHECK);
		return;
	}
	if (trans_send_datain(cmd, io->r_data, io->r_data_len, 0,
	    raw_free_io, True, io) == False) {
		spc_sense_create(cmd, KEY_HARDWARE_ERROR, 0);
		trans_send_complete(cmd, STATUS_CHECK);
	}
}
Example #15
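/*
 * []----
 * | raw_read_limits -- pass the read-limits command through to the
 * |	device and return its 6 bytes of data.
 * []----
 */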
static void
raw_read_limits(t10_cmd_t *cmd, uint8_t *cdb, size_t cdb_len)
{
	raw_io_t	*io;

	/*
	 * spec defines this command to return 6 bytes of data
	 */
	if (((io = do_datain(cmd, cdb, CDB_GROUP0, 6)) == NULL) ||
	    (io->r_status != STATUS_GOOD)) {
		if (io != NULL)
			raw_free_io(io);
		trans_send_complete(cmd, STATUS_CHECK);
		return;
	}
	if (trans_send_datain(cmd, io->r_data, io->r_data_len, 0,
	    raw_free_io, True, io) == False) {
		spc_sense_create(cmd, KEY_HARDWARE_ERROR, 0);
		trans_send_complete(cmd, STATUS_CHECK);
	}
}
Example #16
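/*
 * []----
 * | raw_read_tape -- READ for sequential-access (tape) devices; fetch
 * |	the data from the device and stream it back to the initiator in
 * |	transport-sized chunks.
 * []----
 */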
/*ARGSUSED*/
static void
raw_read_tape(t10_cmd_t *cmd, uint8_t *cdb, size_t cdb_len)
{
	size_t		req_len;
	size_t		xfer;
	off_t		offset		= 0;
	raw_io_t	*io;
	Boolean_t	last;
	t10_cmd_t	*c;

	req_len = (cdb[2] << 16) | (cdb[3] << 8) | cdb[4];
	if (cdb[1] & 0x1)
		req_len *= 512;

	if (((io = do_datain(cmd, cdb, CDB_GROUP0, req_len)) == NULL) ||
	    (io->r_status != STATUS_GOOD)) {
		if (io != NULL)
			raw_free_io(io);
		trans_send_complete(cmd, STATUS_CHECK);
		return;
	}

	while (offset < io->r_data_len) {
		xfer = min(T10_MAX_OUT(cmd), io->r_data_len - offset);
		last = ((offset + xfer) >= io->r_data_len) ? True : False;
		if (last == True)
			c = cmd;
		else
			c = trans_cmd_dup(cmd);

		if (trans_send_datain(c, io->r_data + offset,
		    xfer, offset, raw_free_io, last, io) == False) {
			raw_free_io(io);
			spc_sense_create(c, KEY_HARDWARE_ERROR, 0);
			trans_send_complete(c, STATUS_CHECK);
			return;
		}
		offset += xfer;
	}
}
Example #17
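/*
 * []----
 * | sbc_release -- RELEASE(6); clear the reservation owner and restore
 * |	the normal command table for all open sessions.
 * []----
 */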
/*ARGSUSED*/
static void
sbc_release(t10_cmd_t *cmd, uint8_t *cdb, size_t cdb_len)
{
	disk_params_t	*p = (disk_params_t *)T10_PARAMS_AREA(cmd);
	t10_lu_impl_t	*lu;

	if (p == NULL)
		return;

	if (cdb[1] & 0xe0 || cdb[3] || cdb[4] ||
	    SAM_CONTROL_BYTE_RESERVED(cdb[5])) {
		spc_sense_create(cmd, KEY_ILLEGAL_REQUEST, 0);
		spc_sense_ascq(cmd, SPC_ASC_INVALID_CDB, 0x00);
		trans_send_complete(cmd, STATUS_CHECK);
		return;
	}

	if (p->d_reserve_owner == NULL) {

		/*
		 * If nobody is the owner this command is successful.
		 */
		trans_send_complete(cmd, STATUS_GOOD);
		return;
	}

	/*
	 * At this point the only way to get in here is to be the owner
	 * of the reservation.
	 */
	lu = avl_first(&cmd->c_lu->l_common->l_all_open);
	do {
		lu->l_cmd = sbc_cmd;
		lu = AVL_NEXT(&cmd->c_lu->l_common->l_all_open, lu);
	} while (lu != NULL);
	p->d_reserve_owner = NULL;
	trans_send_complete(cmd, STATUS_GOOD);
}
Example #18
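/*
 * []----
 * | sbc_write_data -- data-out callback for WRITE; either queue an
 * |	async write of the received data or, in the mmap case, sync the
 * |	backing store if required and mark the transfer complete.
 * []----
 */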
/*ARGSUSED*/
void
sbc_write_data(t10_cmd_t *cmd, emul_handle_t id, size_t offset, char *data,
    size_t data_len)
{
	disk_io_t	*io = (disk_io_t *)id;
	disk_params_t	*d;

	if (cmd->c_lu->l_common->l_mmap == MAP_FAILED) {
		trans_aiowrite(cmd, data, data_len, (io->da_lba * 512) +
		    (off_t)io->da_offset, (aio_result_t *)io);
	} else {
		if ((d = (disk_params_t *)T10_PARAMS_AREA(cmd)) == NULL)
			return;

		if (d->d_fast_write == False) {

			/*
			 * We only need to worry about sync'ing the blocks
			 * in the mmap case because if the fast cache isn't
			 * enabled for AIO the file will be opened with F_SYNC
			 * which performs the correct action.
			 */
			if (fsync(cmd->c_lu->l_common->l_fd) == -1) {
				spc_sense_create(cmd, KEY_HARDWARE_ERROR, 0);
				trans_send_complete(cmd, STATUS_CHECK);
				return;
			}
		}

		/*
		 * Since the data has already been transferred from the
		 * transport to the mmap area we just need to call
		 * the complete routine.
		 */
		sbc_write_cmplt(id);
	}
}
Example #19
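/*
 * []----
 * | sbc_read_capacity16 -- build and return the READ CAPACITY(16)
 * |	response from the emulated disk parameters.
 * []----
 */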
/*ARGSUSED*/
static void
sbc_read_capacity16(t10_cmd_t *cmd, uint8_t *cdb, size_t cdb_len)
{
	uint64_t		capacity,
				lba;
	int			rep_size;	/* response data size */
	struct scsi_capacity_16	*cap16;
	disk_params_t		*d;
	disk_io_t		*io;

	if ((d = (disk_params_t *)T10_PARAMS_AREA(cmd)) == NULL)
		return;

	capacity = d->d_size;
	/*
	 * READ_CAPACITY(16) command
	 */
	rep_size = cdb[10] << 24 | cdb[11] << 16 | cdb[12] << 8 | cdb[13];
	if (rep_size == 0) {

		/*
		 * A zero length field means we're done.
		 */
		trans_send_complete(cmd, STATUS_GOOD);
		return;
	}
	rep_size = MIN(rep_size, sizeof (*cap16));

	/*
	 * Reserve bit checks.
	 */
	if ((cdb[1] & ~SPC_GROUP4_SERVICE_ACTION_MASK) ||
	    (cdb[14] & ~SBC_CAPACITY_PMI) ||
	    SAM_CONTROL_BYTE_RESERVED(cdb[15])) {
		spc_sense_create(cmd, KEY_ILLEGAL_REQUEST, 0);
		spc_sense_ascq(cmd, SPC_ASC_INVALID_CDB, 0x00);
		trans_send_complete(cmd, STATUS_CHECK);
		return;
	}

	lba = (uint64_t)cdb[2] << 56 | (uint64_t)cdb[3] << 48 |
		(uint64_t)cdb[4] << 40 | (uint64_t)cdb[5] << 32 |
		(uint64_t)cdb[6] << 24 | (uint64_t)cdb[7] << 16 |
		(uint64_t)cdb[8] << 8 | (uint64_t)cdb[9];

	io = sbc_io_alloc(cmd);

	/*
	 * We'll malloc enough space for the structure so that we can
	 * set the values as we please. However, we'll set the transfer
	 * length to the minimum of the requested size and our structure.
	 * This is per SBC-2 revision 16, section 5.11.1 regarding
	 * ALLOCATION LENGTH.
	 */
	if ((cap16 = (struct scsi_capacity_16 *)calloc(1, sizeof (*cap16))) ==
	    NULL) {
		trans_send_complete(cmd, STATUS_BUSY);
		return;
	}
	io->da_data		= (char *)cap16;
	io->da_data_len		= sizeof (*cap16);
	io->da_data_alloc	= True;
	io->da_clear_overlap	= False;

	if (cdb[14] & SBC_CAPACITY_PMI) {
		if (lba >= capacity)
			cap16->sc_capacity = htonll(0xffffffffffffffffULL);
		else
			cap16->sc_capacity = htonll(capacity - 1);
	} else {
		cap16->sc_capacity	= htonll(capacity - 1);
	}
	cap16->sc_lbasize	= htonl(d->d_bytes_sect);

	if (trans_send_datain(cmd, io->da_data, io->da_data_len, 0,
	    sbc_io_free, True, io) == False) {
		trans_send_complete(cmd, STATUS_BUSY);
	}
}
Example #20
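/*
 * []----
 * | raw_write -- WRITE(6/10/16) for direct-access devices; validate the
 * |	CDB and LBA range, then request the data-out transfer in chunks
 * |	the transport can handle.
 * []----
 */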
/*ARGSUSED*/
static void
raw_write(t10_cmd_t *cmd, uint8_t *cdb, size_t cdb_len)
{
	/*LINTED*/
	union scsi_cdb	*cdbp		= (union scsi_cdb *)cdb;
	off_t		addr;
	uint64_t	err_blkno;
	uint32_t	cnt;
	uchar_t		addl_sense_len;
	char		debug[80]; /* debug */
	raw_params_t	*r;
	raw_io_t	*io;
	size_t		max_out;

	if ((r = (raw_params_t *)T10_PARAMS_AREA(cmd)) == NULL)
		return;

	if (r->r_dtype == DTYPE_SEQUENTIAL) {
		raw_write_tape(cmd, cdb, cdb_len);
		return;
	}

	switch (cdb[0]) {
	case SCMD_WRITE:
		/*
		 * SBC-2 revision 16, section 5.24
		 * Reserve bit checks.
		 */
		if ((cdb[1] & 0xe0) || (cdb[5] & 0x38)) {
			spc_sense_create(cmd, KEY_ILLEGAL_REQUEST, 0);
			spc_sense_ascq(cmd, 0x24, 0x00);
			trans_send_complete(cmd, STATUS_CHECK);
			return;
		}
		addr = (off_t)cdbp->g0_addr2 << 16 |
		    (off_t)cdbp->g0_addr1 << 8 | (off_t)cdbp->g0_addr0;
		cnt = cdbp->g0_count0;
		/*
		 * SBC-2 Revision 16/Section 5.24 WRITE(6)
		 * A TRANSFER LENGTH of 0 indicates that 256 logical blocks
		 * shall be written.
		 */
		if (cnt == 0)
			cnt = 256;
		break;

	case SCMD_WRITE_G1:
		/*
		 * SBC-2 revision 16, section 5.25
		 * Reserve bit checks.
		 */
		if ((cdb[1] & 0x6) || cdb[6] || (cdb[9] & 0x38)) {
			spc_sense_create(cmd, KEY_ILLEGAL_REQUEST, 0);
			spc_sense_ascq(cmd, 0x24, 0x00);
			trans_send_complete(cmd, STATUS_CHECK);
			return;
		}
		addr = (off_t)cdbp->g1_addr3 << 24 |
		    (off_t)cdbp->g1_addr2 << 16 |
		    (off_t)cdbp->g1_addr1 << 8 |
		    (off_t)cdbp->g1_addr0;
		cnt = cdbp->g1_count1 << 8 | cdbp->g1_count0;
		break;

	case SCMD_WRITE_G4:
		/*
		 * SBC-2 revision 16, section 5.27
		 * Reserve bit checks.
		 */
		if ((cdb[1] & 0x6) || cdb[14] || (cdb[15] & 0x38)) {
			spc_sense_create(cmd, KEY_ILLEGAL_REQUEST, 0);
			spc_sense_ascq(cmd, 0x24, 0x00);
			trans_send_complete(cmd, STATUS_CHECK);
			return;
		}
		addr = (off_t)(cdbp->g4_addr3 & 0xff) << 56 |
		    (off_t)(cdbp->g4_addr2 & 0xff) << 48 |
		    (off_t)(cdbp->g4_addr1 & 0xff) << 40 |
		    (off_t)(cdbp->g4_addr0 & 0xff) << 32 |
		    (off_t)(cdbp->g4_addtl_cdb_data3 & 0xff) << 24 |
		    (off_t)(cdbp->g4_addtl_cdb_data2 & 0xff) << 16 |
		    (off_t)(cdbp->g4_addtl_cdb_data1 & 0xff) << 8 |
		    (off_t)(cdbp->g4_addtl_cdb_data0 & 0xff);
		cnt = cdbp->g4_count3 << 24 | cdbp->g4_count2 << 16 |
		    cdbp->g4_count1 << 8 | cdbp->g4_count0;
		break;

	default:
		queue_str(mgmtq, Q_STE_ERRS, msg_log,
		    "Unprocessed WRITE type");
		spc_sense_create(cmd, KEY_ILLEGAL_REQUEST, 0);
		spc_sense_ascq(cmd, 0x24, 0x00);
		trans_send_complete(cmd, STATUS_CHECK);
		return;

	}

	if ((addr < 0) || ((addr + cnt) > r->r_size)) {

		/*
		 * The request exceeds the capacity of the disk;
		 * set the error block number to capacity + 1.
		 */
		err_blkno = r->r_size + 1;

		/*
		 * XXX: What's SBC-2 say about ASC/ASCQ here. Solaris
		 * doesn't care about these values when key is set
		 * to KEY_ILLEGAL_REQUEST.
		 */
		if (err_blkno > FIXED_SENSE_ADDL_INFO_LEN)
			addl_sense_len = INFORMATION_SENSE_DESCR;
		else
			addl_sense_len = 0;

		spc_sense_create(cmd, KEY_ILLEGAL_REQUEST, addl_sense_len);
		spc_sense_info(cmd, err_blkno);
		spc_sense_ascq(cmd, 0x21, 0x00);
		trans_send_complete(cmd, STATUS_CHECK);

		(void) snprintf(debug, sizeof (debug),
		    "RAW%d  WRITE Illegal sector (0x%llx + 0x%x) > 0x%llx",
		    cmd->c_lu->l_common->l_num, addr, cnt, r->r_size);
		queue_str(mgmtq, Q_STE_ERRS, msg_log, debug);
		return;
	}

	if (cnt == 0) {
		trans_send_complete(cmd, STATUS_GOOD);
		return;
	}

	io = (raw_io_t *)cmd->c_emul_id;
	if (io == NULL) {
		if ((io = (raw_io_t *)calloc(1, sizeof (*io))) == NULL) {
			spc_sense_create(cmd, KEY_HARDWARE_ERROR, 0);
			trans_send_complete(cmd, STATUS_CHECK);
			return;
		}
		io->r_lba		= addr;
		io->r_lba_cnt		= cnt;
		io->r_cmd		= cmd;
		io->r_aio.a_aio_cmplt	= raw_write_cmplt;
		io->r_aio.a_id		= io;

		/*
		 * Only update the statistics the first time through
		 * for this particular command. If the requested transfer
		 * is larger than the transport can handle this routine
		 * will be called many times.
		 */
		cmd->c_lu->l_cmds_write++;
		cmd->c_lu->l_sects_write += cnt;
	}

	/*
	 * If a transport sets the maximum output value to zero we'll
	 * just request the entire amount. Otherwise, transfer no more
	 * than the maximum output or the remainder, whichever is less.
	 */
	max_out = cmd->c_lu->l_targ->s_maxout;
	io->r_data_len = max_out ? MIN(max_out,
	    (cnt * 512) - io->r_offset) : (cnt * 512);

#ifdef FULL_DEBUG
	(void) snprintf(debug, sizeof (debug),
	    "RAW%d  blk 0x%llx, cnt %d, offset 0x%llx, size %d",
	    cmd->c_lu->l_common->l_num, addr, cnt, io->r_offset,
	    io->r_data_len);
	queue_str(mgmtq, Q_STE_IO, msg_log, debug);
#endif

	if ((io->r_data = (char *)malloc(io->r_data_len)) == NULL) {

		/*
		 * NOTE: May need a different ASC code
		 */
		err_blkno = addr + ((io->r_offset + 511) / 512);
		if (err_blkno > FIXED_SENSE_ADDL_INFO_LEN)
			addl_sense_len = INFORMATION_SENSE_DESCR;
		else
			addl_sense_len = 0;

		spc_sense_create(cmd, KEY_HARDWARE_ERROR, addl_sense_len);
		spc_sense_info(cmd, err_blkno);
		trans_send_complete(cmd, STATUS_CHECK);
		return;

	}
	if (trans_rqst_dataout(cmd, io->r_data, io->r_data_len, io->r_offset,
	    io, raw_free_io) == False) {
		spc_sense_create(cmd, KEY_HARDWARE_ERROR, 0);
		trans_send_complete(cmd, STATUS_CHECK);
	}
}
Example #21
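/*
 * []----
 * | raw_read -- READ(6/10/16) for direct-access devices; validate the
 * |	CDB and LBA range, then issue asynchronous reads in chunks the
 * |	transport can handle.
 * []----
 */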
/*ARGSUSED*/
static void
raw_read(t10_cmd_t *cmd, uint8_t *cdb, size_t cdb_len)
{
	/*LINTED*/
	union scsi_cdb	*u		= (union scsi_cdb *)cdb;
	diskaddr_t	addr;
	off_t		offset		= 0;
	uint32_t	cnt;
	uint32_t	min;
	raw_io_t	*io;
	uint64_t	err_blkno;
	int		sense_len;
	char		debug[80];
	raw_params_t	*r;
	uchar_t		addl_sense_len;
	t10_cmd_t	*c;

	if ((r = (raw_params_t *)T10_PARAMS_AREA(cmd)) == NULL)
		return;

	if (r->r_dtype == DTYPE_SEQUENTIAL) {
		raw_read_tape(cmd, cdb, cdb_len);
		return;
	}

	switch (u->scc_cmd) {
	case SCMD_READ:
		/*
		 * SBC-2 Revision 16, section 5.5
		 * Reserve bit checks
		 */
		if ((cdb[1] & 0xe0) || (cdb[5] & 0x38)) {
			spc_sense_create(cmd, KEY_ILLEGAL_REQUEST, 0);
			spc_sense_ascq(cmd, 0x24, 0x00);
			trans_send_complete(cmd, STATUS_CHECK);
			return;
		}

		addr = (diskaddr_t)(uint32_t)GETG0ADDR(u);
		cnt = GETG0COUNT(u);

		/*
		 * SBC-2 Revision 16
		 * Section: 5.5 READ(6) command
		 *	A TRANSFER LENGTH field set to zero specifies
		 *	that 256 logical blocks shall be read.
		 */
		if (cnt == 0)
			cnt = 256;
		break;

	case SCMD_READ_G1:
		/*
		 * SBC-2 Revision 16, section 5.6
		 * Reserve bit checks.
		 */
		if ((cdb[1] & 6) || cdb[6] || (cdb[9] & 0x38)) {
			spc_sense_create(cmd, KEY_ILLEGAL_REQUEST, 0);
			spc_sense_ascq(cmd, 0x24, 0x00);
			trans_send_complete(cmd, STATUS_CHECK);
			return;
		}

		addr = (diskaddr_t)(uint32_t)GETG1ADDR(u);
		cnt = GETG1COUNT(u);
		break;

	case SCMD_READ_G4:
		/*
		 * SBC-2 Revision 16, section 5.8
		 * Reserve bit checks
		 */
		if ((cdb[1] & 0x6) || (cdb[10] & 6) || cdb[14] ||
		    (cdb[15] & 0x38)) {
			spc_sense_create(cmd, KEY_ILLEGAL_REQUEST, 0);
			spc_sense_ascq(cmd, 0x24, 0x00);
			trans_send_complete(cmd, STATUS_CHECK);
			return;
		}

		addr = GETG4LONGADDR(u);
		cnt = GETG4COUNT(u);
		break;

	default:
		spc_sense_create(cmd, KEY_ILLEGAL_REQUEST, 0);
		trans_send_complete(cmd, STATUS_CHECK);
		return;
	}

	if ((addr + cnt) > r->r_size) {

		/*
		 * The request exceeds the capacity of the disk;
		 * set the error block number to capacity + 1.
		 */
		err_blkno = r->r_size + 1;

		/*
		 * XXX: What's SBC-2 say about ASC/ASCQ here. Solaris
		 * doesn't care about these values when key is set
		 * to KEY_ILLEGAL_REQUEST.
		 */
		if (err_blkno > FIXED_SENSE_ADDL_INFO_LEN)
			addl_sense_len = INFORMATION_SENSE_DESCR;
		else
			addl_sense_len = 0;

		spc_sense_create(cmd, KEY_ILLEGAL_REQUEST, addl_sense_len);
		spc_sense_info(cmd, err_blkno);
		spc_sense_ascq(cmd, 0x21, 0x00);
		trans_send_complete(cmd, STATUS_CHECK);

		(void) snprintf(debug, sizeof (debug),
		    "RAW%d  READ Illegal sector (0x%llx + 0x%x) > 0x%llx",
		    cmd->c_lu->l_common->l_num, addr, cnt, r->r_size);
		queue_str(mgmtq, Q_STE_ERRS, msg_log, debug);
		return;
	}

	cmd->c_lu->l_cmds_read++;
	cmd->c_lu->l_sects_read += cnt;

	if (cnt == 0) {
		trans_send_complete(cmd, STATUS_GOOD);
		return;
	}

	do {
		min = MIN((cnt * 512) - offset, T10_MAX_OUT(cmd));
		if ((offset + min) < (cnt * 512LL))
			c = trans_cmd_dup(cmd);
		else
			c = cmd;
		if ((io = (raw_io_t *)calloc(1, sizeof (*io))) == NULL) {

			/*
			 * We're pretty much dead in the water if we can't
			 * allocate memory. It's unlikely we'll be able to
			 * allocate a sense buffer or queue the command
			 * up to be sent back to the transport for delivery.
			 */
			spc_sense_create(c, KEY_HARDWARE_ERROR, 0);
			trans_send_complete(c, STATUS_CHECK);
			return;
		}

		io->r_cmd		= c;
		io->r_lba		= addr;
		io->r_lba_cnt		= cnt;
		io->r_offset		= offset;
		io->r_data_len		= min;
		io->r_aio.a_aio_cmplt	= raw_read_cmplt;
		io->r_aio.a_id		= io;

#ifdef FULL_DEBUG
		(void) snprintf(debug, sizeof (debug),
		    "RAW%d  blk 0x%llx, cnt %d, offset 0x%llx, size %d",
		    c->c_lu->l_common->l_num, addr, cnt, io->r_offset, min);
		queue_str(mgmtq, Q_STE_IO, msg_log, debug);
#endif
		if ((io->r_data = (char *)malloc(min)) == NULL) {
			free(io);
			err_blkno = addr + ((offset + 511) / 512);
			if (err_blkno > FIXED_SENSE_ADDL_INFO_LEN)
				sense_len = INFORMATION_SENSE_DESCR;
			else
				sense_len = 0;
			spc_sense_create(c, KEY_HARDWARE_ERROR,
			    sense_len);
			spc_sense_info(c, err_blkno);
			trans_send_complete(c, STATUS_CHECK);
			return;
		}
		trans_aioread(c, io->r_data, min, (addr * 512LL) +
		    (off_t)io->r_offset, &io->r_aio);
		offset += min;
	} while (offset < (off_t)(cnt * 512));
}
Example #22
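/*
 * []----
 * | sbc_read -- READ(6/10/16) for the emulated disk; data comes either
 * |	straight from the mmap'd backing store or from asynchronous
 * |	reads into freshly allocated buffers.
 * []----
 */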
/*ARGSUSED*/
static void
sbc_read(t10_cmd_t *cmd, uint8_t *cdb, size_t cdb_len)
{
	/*LINTED*/
	union scsi_cdb	*u		= (union scsi_cdb *)cdb;
	diskaddr_t	addr;
	off_t		offset		= 0;
	uint32_t	cnt,
			min;
	disk_io_t	*io;
	void		*mmap_data	= T10_MMAP_AREA(cmd);
	uint64_t	err_blkno;
	disk_params_t	*d;
	uchar_t		addl_sense_len;

	if ((d = (disk_params_t *)T10_PARAMS_AREA(cmd)) == NULL)
		return;

	switch (u->scc_cmd) {
	case SCMD_READ:
		/*
		 * SBC-2 Revision 16, section 5.5
		 * Reserve bit checks
		 */
		if ((cdb[1] & 0xe0) || SAM_CONTROL_BYTE_RESERVED(cdb[5])) {
			spc_sense_create(cmd, KEY_ILLEGAL_REQUEST, 0);
			spc_sense_ascq(cmd, SPC_ASC_INVALID_CDB, 0x00);
			trans_send_complete(cmd, STATUS_CHECK);
			return;
		}

		addr = (diskaddr_t)(uint32_t)GETG0ADDR(u);
		cnt = GETG0COUNT(u);

		/*
		 * SBC-2 Revision 16
		 * Section: 5.5 READ(6) command
		 *	A TRANSFER LENGTH field set to zero specifies
		 *	that 256 logical blocks shall be read.
		 */
		if (cnt == 0)
			cnt = 256;
		break;

	case SCMD_READ_G1:
		/*
		 * SBC-2 Revision 16, section 5.6
		 * Reserve bit checks.
		 */
		if ((cdb[1] & 6) || cdb[6] ||
		    SAM_CONTROL_BYTE_RESERVED(cdb[9])) {
			spc_sense_create(cmd, KEY_ILLEGAL_REQUEST, 0);
			spc_sense_ascq(cmd, SPC_ASC_INVALID_CDB, 0x00);
			trans_send_complete(cmd, STATUS_CHECK);
			return;
		}

		addr = (diskaddr_t)(uint32_t)GETG1ADDR(u);
		cnt = GETG1COUNT(u);
		break;

	case SCMD_READ_G4:
		/*
		 * SBC-2 Revision 16, section 5.8
		 * Reserve bit checks
		 */
		if ((cdb[1] & 0x6) || cdb[14] ||
		    SAM_CONTROL_BYTE_RESERVED(cdb[15])) {
			spc_sense_create(cmd, KEY_ILLEGAL_REQUEST, 0);
			spc_sense_ascq(cmd, SPC_ASC_INVALID_CDB, 0x00);
			trans_send_complete(cmd, STATUS_CHECK);
			return;
		}

		addr = GETG4LONGADDR(u);
		cnt = GETG4COUNT(u);
		break;

	default:
		spc_sense_create(cmd, KEY_ILLEGAL_REQUEST, 0);
		trans_send_complete(cmd, STATUS_CHECK);
		return;
	}

	if ((addr + cnt) > d->d_size) {

		if (addr > d->d_size)
			err_blkno = addr;
		else
			err_blkno = d->d_size;

		/*
		 * XXX: What's SBC-2 say about ASC/ASCQ here. Solaris
		 * doesn't care about these values when key is set
		 * to KEY_ILLEGAL_REQUEST.
		 */
		if (err_blkno > FIXED_SENSE_ADDL_INFO_LEN)
			addl_sense_len = INFORMATION_SENSE_DESCR;
		else
			addl_sense_len = 0;

		spc_sense_create(cmd, KEY_ILLEGAL_REQUEST, addl_sense_len);
		spc_sense_info(cmd, err_blkno);
		spc_sense_ascq(cmd, 0x21, 0x00);
		trans_send_complete(cmd, STATUS_CHECK);

		queue_prt(mgmtq, Q_STE_ERRS,
		    "SBC%x  LUN%d READ Illegal sector "
		    "(0x%llx + 0x%x) > 0x%llx", cmd->c_lu->l_targ->s_targ_num,
		    cmd->c_lu->l_common->l_num, addr, cnt, d->d_size);
		return;
	}

	cmd->c_lu->l_cmds_read++;
	cmd->c_lu->l_sects_read += cnt;

	if (cnt == 0) {
		trans_send_complete(cmd, STATUS_GOOD);
		return;
	}

	do {
		io = sbc_io_alloc(cmd);
		min = MIN((cnt * 512) - offset, T10_MAX_OUT(cmd));

		io->da_lba	= addr;
		io->da_lba_cnt	= cnt;
		io->da_offset	= offset;
		io->da_data_len	= min;

#ifdef FULL_DEBUG
		queue_prt(mgmtq, Q_STE_IO,
		    "SBC%x  LUN%d blk 0x%llx, cnt %d, offset 0x%llx, size %d",
		    cmd->c_lu->l_targ->s_targ_num, cmd->c_lu->l_common->l_num,
		    addr, cnt, io->da_offset, min);
#endif
		if (mmap_data != MAP_FAILED) {

			io->da_clear_overlap		= True;
			io->da_data_alloc		= False;
			io->da_aio.a_aio.aio_return	= min;
			io->da_data = (char *)mmap_data + (addr * 512LL) +
			    io->da_offset;
			sbc_overlap_store(io);
			sbc_read_cmplt((emul_handle_t)io);

		} else {
			if ((io->da_data = (char *)malloc(min)) == NULL) {
				trans_send_complete(cmd, STATUS_BUSY);
				return;
			}
			io->da_clear_overlap	= False;
			io->da_data_alloc	= True;
			io->da_aio.a_aio_cmplt	= sbc_read_cmplt;
			io->da_aio.a_id		= io;
			trans_aioread(cmd, io->da_data, min, (addr * 512LL) +
			    (off_t)io->da_offset, (aio_result_t *)io);
		}
		offset += min;
	} while (offset < (off_t)(cnt * 512));
}
Example #23
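/*
 * []----
 * | sbc_write -- WRITE(6/10/16) for the emulated disk; validate the CDB
 * |	and LBA range, then request the data-out transfer into the mmap
 * |	area or an allocated buffer.
 * []----
 */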
/*ARGSUSED*/
static void
sbc_write(t10_cmd_t *cmd, uint8_t *cdb, size_t cdb_len)
{
	union scsi_cdb	*u;
	diskaddr_t	addr;
	uint64_t	err_blkno;
	uint32_t	cnt;
	uchar_t		addl_sense_len;
	disk_params_t	*d;
	disk_io_t	*io;
	size_t		max_out;
	void		*mmap_area;

	if ((d = (disk_params_t *)T10_PARAMS_AREA(cmd)) == NULL)
		return;

	/*LINTED*/
	u = (union scsi_cdb *)cdb;

	switch (u->scc_cmd) {
	case SCMD_WRITE:
		/*
		 * SBC-2 revision 16, section 5.24
		 * Reserve bit checks.
		 */
		if ((cdb[1] & 0xe0) || SAM_CONTROL_BYTE_RESERVED(cdb[5])) {
			spc_sense_create(cmd, KEY_ILLEGAL_REQUEST, 0);
			spc_sense_ascq(cmd, SPC_ASC_INVALID_CDB, 0x00);
			trans_send_complete(cmd, STATUS_CHECK);
			return;
		}
		addr = (diskaddr_t)(uint32_t)GETG0ADDR(u);
		cnt = GETG0COUNT(u);
		/*
		 * SBC-2 Revision 16/Section 5.24 WRITE(6)
		 * A TRANSFER LENGTH of 0 indicates that 256 logical blocks
		 * shall be written.
		 */
		if (cnt == 0)
			cnt = 256;
		break;

	case SCMD_WRITE_G1:
		/*
		 * SBC-2 revision 16, section 5.25
		 * Reserve bit checks.
		 */
		if ((cdb[1] & 0x6) || cdb[6] ||
		    SAM_CONTROL_BYTE_RESERVED(cdb[9])) {
			spc_sense_create(cmd, KEY_ILLEGAL_REQUEST, 0);
			spc_sense_ascq(cmd, SPC_ASC_INVALID_CDB, 0x00);
			trans_send_complete(cmd, STATUS_CHECK);
			return;
		}
		addr = (diskaddr_t)(uint32_t)GETG1ADDR(u);
		cnt = GETG1COUNT(u);
		break;

	case SCMD_WRITE_G4:
		/*
		 * SBC-2 revision 16, section 5.27
		 * Reserve bit checks.
		 */
		if ((cdb[1] & 0x6) || cdb[14] ||
		    SAM_CONTROL_BYTE_RESERVED(cdb[15])) {
			spc_sense_create(cmd, KEY_ILLEGAL_REQUEST, 0);
			spc_sense_ascq(cmd, SPC_ASC_INVALID_CDB, 0x00);
			trans_send_complete(cmd, STATUS_CHECK);
			return;
		}
		addr = (diskaddr_t)GETG4LONGADDR(u);
		cnt = GETG4COUNT(u);
		break;

	default:
		queue_prt(mgmtq, Q_STE_ERRS, "Unprocessed WRITE type");
		spc_sense_create(cmd, KEY_ILLEGAL_REQUEST, 0);
		spc_sense_ascq(cmd, SPC_ASC_INVALID_CDB, 0x00);
		trans_send_complete(cmd, STATUS_CHECK);
		return;

	}

	if ((addr + cnt) > d->d_size) {

		if (addr > d->d_size)
			err_blkno = addr;
		else
			err_blkno = d->d_size;

		/*
		 * XXX: What's SBC-2 say about ASC/ASCQ here. Solaris
		 * doesn't care about these values when key is set
		 * to KEY_ILLEGAL_REQUEST.
		 */
		if (err_blkno > FIXED_SENSE_ADDL_INFO_LEN)
			addl_sense_len = INFORMATION_SENSE_DESCR;
		else
			addl_sense_len = 0;

		spc_sense_create(cmd, KEY_ILLEGAL_REQUEST, addl_sense_len);
		spc_sense_info(cmd, err_blkno);
		spc_sense_ascq(cmd, 0x21, 0x00);
		trans_send_complete(cmd, STATUS_CHECK);

		queue_prt(mgmtq, Q_STE_ERRS,
		    "SBC%x  LUN%d WRITE Illegal sector "
		    "(0x%llx + 0x%x) > 0x%llx", cmd->c_lu->l_targ->s_targ_num,
		    cmd->c_lu->l_common->l_num, addr, cnt, d->d_size);
		return;
	}

	if (cnt == 0) {
		queue_prt(mgmtq, Q_STE_NONIO,
		    "SBC%x  LUN%d WRITE zero block count for addr 0x%llx",
		    cmd->c_lu->l_targ->s_targ_num, cmd->c_lu->l_common->l_num,
		    addr);
		trans_send_complete(cmd, STATUS_GOOD);
		return;
	}

	io = (disk_io_t *)cmd->c_emul_id;
	if (io == NULL) {
		io = sbc_io_alloc(cmd);
		io->da_lba		= addr;
		io->da_lba_cnt		= cnt;
		io->da_clear_overlap	= False;
		io->da_aio.a_aio_cmplt	= sbc_write_cmplt;
		io->da_aio.a_id		= io;

		/*
		 * Only update the statistics the first time through
		 * for this particular command. If the requested transfer
		 * is larger than the transport can handle this routine
		 * will be called many times.
		 */
		cmd->c_lu->l_cmds_write++;
		cmd->c_lu->l_sects_write += cnt;
	}

#ifdef FULL_DEBUG
	queue_prt(mgmtq, Q_STE_IO,
	    "SBC%x  LUN%d blk 0x%llx, cnt %d, offset 0x%llx, size %d",
	    cmd->c_lu->l_targ->s_targ_num, cmd->c_lu->l_common->l_num, addr,
	    cnt, io->da_offset, io->da_data_len);
#endif

	/*
	 * If a transport sets the maximum output value to zero we'll
	 * just request the entire amount. Otherwise, transfer no more
	 * than the maximum output or the remainder, whichever is less.
	 */
	max_out = cmd->c_lu->l_targ->s_maxout;
	io->da_data_len = max_out ? MIN(max_out,
	    (cnt * 512) - io->da_offset) : (cnt * 512);

	mmap_area = T10_MMAP_AREA(cmd);
	if (mmap_area != MAP_FAILED) {

		io->da_data_alloc	= False;
		io->da_data		= (char *)mmap_area + (addr * 512LL) +
		    io->da_offset;
		sbc_overlap_check(io);

	} else if ((io->da_data = (char *)malloc(io->da_data_len)) == NULL) {

		trans_send_complete(cmd, STATUS_BUSY);
		return;

	} else {

		io->da_data_alloc	= True;
	}
	if (trans_rqst_dataout(cmd, io->da_data, io->da_data_len,
	    io->da_offset, io, sbc_io_free) == False) {
		trans_send_complete(cmd, STATUS_BUSY);
	}
}
Example #24
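/*
 * []----
 * | sbc_startstop -- START STOP UNIT; validate the power condition
 * |	bits and, unless the IMMED bit is set, flush the backing store.
 * []----
 */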
/*ARGSUSED*/
void
sbc_startstop(t10_cmd_t *cmd, uint8_t *cdb, size_t cdb_len)
{
	/*
	 * SBC-2 revision 16, section 5.17
	 * Reserve bit checks
	 */
	if ((cdb[1] & 0xfe) || cdb[2] || cdb[3] ||
	    (cdb[4] & ~(SBC_PWR_MASK|SBC_PWR_LOEJ|SBC_PWR_START)) ||
	    SAM_CONTROL_BYTE_RESERVED(cdb[5])) {
		spc_sense_create(cmd, KEY_ILLEGAL_REQUEST, 0);
		spc_sense_ascq(cmd, SPC_ASC_INVALID_CDB, 0x00);
		trans_send_complete(cmd, STATUS_CHECK);
		return;
	}

	/*
	 * More reserve bit checks
	 */
	switch ((cdb[4] & SBC_PWR_MASK) >> SBC_PWR_SHFT) {
		case SBC_PWR_START_VALID:
			/*
			 * It's an error to ask that the media be ejected.
			 *
			 * NOTE: Look for method to pass the START bit
			 * along to underlying storage. If we're asked to
			 * stop the drive there's not much that we can do
			 * for the virtual storage, but maybe everything else
			 * has been requested to stop as well.
			 */
			if (cdb[4] & SBC_PWR_LOEJ) {
				goto send_error;
			}
			break;

		case SBC_PWR_ACTIVE:
		case SBC_PWR_IDLE:
		case SBC_PWR_STANDBY:
		case SBC_PWR_OBSOLETE:
			break;

		case SBC_PWR_LU_CONTROL:
		case SBC_PWR_FORCE_IDLE_0:
		case SBC_PWR_FORCE_STANDBY_0:
			break;

		default:
send_error:
			spc_sense_create(cmd, KEY_ILLEGAL_REQUEST, 0);
			spc_sense_ascq(cmd, SPC_ASC_INVALID_CDB, 0x00);
			trans_send_complete(cmd, STATUS_CHECK);
			return;
	}

	if ((cdb[1] & 1) == 0) {
		/*
		 * Immediate bit is not set, so go ahead and flush things.
		 */
		if (fsync(cmd->c_lu->l_common->l_fd) != 0) {
			spc_sense_create(cmd, KEY_MEDIUM_ERROR, 0);
			trans_send_complete(cmd, STATUS_CHECK);
			return;
		}
	}
	trans_send_complete(cmd, STATUS_GOOD);
}
Example #25
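/*
 * []----
 * | sbc_msense -- MODE SENSE(6); build the requested mode page (or all
 * |	pages) in a scratch buffer and return it to the initiator.
 * []----
 */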
/*ARGSUSED*/
void
sbc_msense(t10_cmd_t *cmd, uint8_t *cdb, size_t cdb_len)
{
	struct mode_header	*mode_hdr;
	char			*np;
	disk_params_t		*d;
	disk_io_t		*io;

	if ((d = (disk_params_t *)T10_PARAMS_AREA(cmd)) == NULL)
		return;

	/*
	 * SPC-3 Revision 21c section 6.8
	 * Reserve bit checks
	 */
	if ((cdb[1] & ~SPC_MODE_SENSE_DBD) ||
	    SAM_CONTROL_BYTE_RESERVED(cdb[5])) {
		spc_sense_create(cmd, KEY_ILLEGAL_REQUEST, 0);
		spc_sense_ascq(cmd, SPC_ASC_INVALID_CDB, 0x00);
		trans_send_complete(cmd, STATUS_CHECK);
		return;
	}

	/*
	 * Zero length causes a simple ack to occur.
	 */
	if (cdb[4] == 0) {
		trans_send_complete(cmd, STATUS_GOOD);
		return;
	}

	io = sbc_io_alloc(cmd);

	/*
	 * Make sure that we have enough room in the data buffer. We'll
	 * only send back the amount requested though
	 */
	io->da_data_len = MAX(cdb[4], sizeof (struct mode_format) +
	    sizeof (struct mode_geometry) +
	    sizeof (struct mode_control_scsi3) +
	    sizeof (struct mode_cache_scsi3) +
	    sizeof (struct mode_info_ctrl) + (MODE_BLK_DESC_LENGTH * 5));
	if ((io->da_data = (char *)calloc(1, io->da_data_len)) == NULL) {
		sbc_io_free(io);
		trans_send_complete(cmd, STATUS_BUSY);
		return;
	}
	io->da_clear_overlap	= False;
	io->da_data_alloc	= True;
	mode_hdr		= (struct mode_header *)io->da_data;

	switch (cdb[2]) {
	case MODE_SENSE_PAGE3_CODE:
		if ((d->d_heads == 0) && (d->d_cyl == 0) && (d->d_spt == 0)) {
			sbc_io_free(io);
			spc_unsupported(cmd, cdb, cdb_len);
			return;
		}
		mode_hdr->length	= sizeof (struct mode_format);
		mode_hdr->bdesc_length	= MODE_BLK_DESC_LENGTH;
		(void) sense_page3(d,
		    io->da_data + sizeof (*mode_hdr) + mode_hdr->bdesc_length);

		break;

	case MODE_SENSE_PAGE4_CODE:
		if ((d->d_heads == 0) && (d->d_cyl == 0) && (d->d_spt == 0)) {
			sbc_io_free(io);
			spc_unsupported(cmd, cdb, cdb_len);
			return;
		}
		mode_hdr->length	= sizeof (struct mode_geometry);
		mode_hdr->bdesc_length	= MODE_BLK_DESC_LENGTH;
		(void) sense_page4(d,
		    io->da_data + sizeof (*mode_hdr) + mode_hdr->bdesc_length);
		break;

	case MODE_SENSE_CACHE:
		mode_hdr->length	= sizeof (struct mode_cache_scsi3);
		mode_hdr->bdesc_length	= MODE_BLK_DESC_LENGTH;
		(void) sense_cache(d,
		    io->da_data + sizeof (*mode_hdr) + mode_hdr->bdesc_length);
		break;

	case MODE_SENSE_CONTROL:
		mode_hdr->length	= sizeof (struct mode_control_scsi3);
		mode_hdr->bdesc_length	= MODE_BLK_DESC_LENGTH;
		(void) sense_mode_control(cmd->c_lu,
		    io->da_data + sizeof (*mode_hdr) + mode_hdr->bdesc_length);
		break;

	case MODE_SENSE_INFO_CTRL:
		(void) sense_info_ctrl(io->da_data);
		break;

	case MODE_SENSE_SEND_ALL:
		/*
		 * SPC-3 revision 21c
		 * Section 6.9.1 Table 97
		 * "Return all subpage 00h mode pages in page_0 format"
		 */
		if (io->da_data_len < (sizeof (struct mode_format) +
		    sizeof (struct mode_geometry) +
		    sizeof (struct mode_control_scsi3) +
		    sizeof (struct mode_info_ctrl))) {

			/*
			 * Believe it or not, there's an initiator out
			 * there which sends a mode sense request for all
			 * of the pages, without always sending a data-in
			 * size which is large enough.
			 * NOTE: Need to check the error key returned
			 * here and see if something else should be used.
			 */
			spc_sense_create(cmd, KEY_ILLEGAL_REQUEST, 0);
			trans_send_complete(cmd, STATUS_CHECK);

		} else {

			/*
			 * If we don't have geometry then don't attempt to
			 * report that information.
			 */
			if (d->d_heads && d->d_cyl && d->d_spt) {
				np = sense_page3(d, io->da_data);
				np = sense_page4(d, np);
			} else {
				/* No geometry pages; start at the buffer. */
				np = io->da_data;
			}
			np = sense_cache(d, np);
			np = sense_mode_control(cmd->c_lu, np);
			(void) sense_info_ctrl(np);

		}
		break;

	case 0x00:
		/*
		 * SPC-3 Revision 21c, section 6.9.1
		 * Table 97 -- Mode page code usage for all devices
		 * Page Code 00 == Vendor specific. We are going to return
		 *    zeros.
		 */
		break;

	default:
		queue_prt(mgmtq, Q_STE_ERRS,
		    "SBC%x  LUN%d Unsupported mode_sense request 0x%x",
		    cmd->c_lu->l_targ->s_targ_num, cmd->c_lu->l_common->l_num,
		    cdb[2]);
		spc_sense_create(cmd, KEY_ILLEGAL_REQUEST, 0);
		spc_sense_ascq(cmd, SPC_ASC_INVALID_CDB, 0x00);
		trans_send_complete(cmd, STATUS_CHECK);
		return;
	}

	if (trans_send_datain(cmd, io->da_data, cdb[4], 0, sbc_io_free,
	    True, io) == False) {
		trans_send_complete(cmd, STATUS_BUSY);
	}
}
Example #26
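/*
 * []----
 * | sbc_recap -- READ CAPACITY(10); report the emulated disk capacity,
 * |	clamped to 32 bits, along with the block size.
 * []----
 */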
/*ARGSUSED*/
void
sbc_recap(t10_cmd_t *cmd, uint8_t *cdb, size_t cdb_len)
{
	uint64_t			capacity;
	int				len;
	uint32_t			lba;
	struct scsi_capacity *cap;
	disk_params_t			*d;
	disk_io_t			*io;

	if ((d = (disk_params_t *)T10_PARAMS_AREA(cmd)) == NULL)
		return;

	capacity = d->d_size;

	len = sizeof (struct scsi_capacity);

	/*
	 * SBC-2 Revision 16, section 5.10.1
	 * Any of the following conditions will generate an error.
	 *    (1) PMI bit is zero and LOGICAL BLOCK ADDRESS is non-zero
	 *    (2) Reserved bytes are not zero
	 *    (3) Reserved bits are not zero
	 *    (4) Reserved CONTROL bits are not zero
	 */
	if ((((cdb[8] & SBC_CAPACITY_PMI) == 0) &&
	    (cdb[2] || cdb[3] || cdb[4] || cdb[5])) ||
	    cdb[1] || cdb[6] || cdb[7] || (cdb[8] & ~SBC_CAPACITY_PMI) ||
	    SAM_CONTROL_BYTE_RESERVED(cdb[9])) {
		spc_sense_create(cmd, KEY_ILLEGAL_REQUEST, 0);
		spc_sense_ascq(cmd, SPC_ASC_INVALID_CDB, 0x00);
		trans_send_complete(cmd, STATUS_CHECK);
		return;
	}

	/*
	 * If the device capacity is larger than 32 bits then report
	 * the capacity as all 0xf's; an initiator that sees this value
	 * should use the READ CAPACITY(16) command to get the real
	 * capacity.
	 * NOTE: the adjustment to subtract one from the capacity is
	 * done below.
	 */
	if (capacity & 0xFFFFFFFF00000000ULL)
		capacity = 0xFFFFFFFF;

	io = sbc_io_alloc(cmd);

	if ((cap = (struct scsi_capacity *)calloc(1, len)) == NULL) {
		sbc_io_free(io);
		trans_send_complete(cmd, STATUS_BUSY);
		return;
	}
	io->da_data		= (char *)cap;
	io->da_data_alloc	= True;
	io->da_clear_overlap	= False;
	io->da_data_len		= len;

	if (capacity != 0xFFFFFFFF) {
		/*
		 * Look at the PMI information
		 */
		if (cdb[8] & SBC_CAPACITY_PMI) {
			lba = cdb[2] << 24 | cdb[3] << 16 |
			    cdb[4] << 8 | cdb[5];
			if (lba >= capacity)
				cap->capacity = htonl(0xffffffff);
			else
				cap->capacity = htonl(capacity - 1);
		} else {
			cap->capacity = htonl(capacity - 1);
		}
	} else {
		cap->capacity = htonl(capacity);
	}
	cap->lbasize = htonl(d->d_bytes_sect);

	if (trans_send_datain(cmd, io->da_data, io->da_data_len, 0,
	    sbc_io_free, True, io) == False) {
		trans_send_complete(cmd, STATUS_BUSY);
	}
}