void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
{
	int prot_count = scsi_prot_sg_count(iser_task->sc);

	if (iser_task->dir[ISER_DIR_IN]) {
		iser_unreg_rdma_mem(iser_task, ISER_DIR_IN);
		iser_dma_unmap_task_data(iser_task,
					 &iser_task->data[ISER_DIR_IN],
					 DMA_FROM_DEVICE);
		if (prot_count)
			iser_dma_unmap_task_data(iser_task,
						 &iser_task->prot[ISER_DIR_IN],
						 DMA_FROM_DEVICE);
	}

	if (iser_task->dir[ISER_DIR_OUT]) {
		iser_unreg_rdma_mem(iser_task, ISER_DIR_OUT);
		iser_dma_unmap_task_data(iser_task,
					 &iser_task->data[ISER_DIR_OUT],
					 DMA_TO_DEVICE);
		if (prot_count)
			iser_dma_unmap_task_data(iser_task,
						 &iser_task->prot[ISER_DIR_OUT],
						 DMA_TO_DEVICE);
	}
}
Example #2
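/*
 * iser_reg_rdma_mem - register the task's data buffer (and, for protected
 * commands, its protection buffer) for RDMA in the given direction. When
 * the buffer maps to a single DMA entry and registration is not forced,
 * the device's DMA key is used directly; otherwise a fastreg descriptor
 * is taken from the connection pool and returned to it on failure.
 */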
int iser_reg_rdma_mem(struct iscsi_iser_task *task,
		      enum iser_data_dir dir,
		      bool all_imm)
{
	struct ib_conn *ib_conn = &task->iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;
	struct iser_data_buf *mem = &task->data[dir];
	struct iser_mem_reg *reg = &task->rdma_reg[dir];
	struct iser_mem_reg *data_reg;
	struct iser_fr_desc *desc = NULL;
	bool use_dma_key;
	int err;

	use_dma_key = mem->dma_nents == 1 && (all_imm || !iser_always_reg) &&
		      scsi_get_prot_op(task->sc) == SCSI_PROT_NORMAL;

	if (!use_dma_key) {
		desc = device->reg_ops->reg_desc_get(ib_conn);
		reg->mem_h = desc;
	}

	if (scsi_get_prot_op(task->sc) == SCSI_PROT_NORMAL)
		data_reg = reg;
	else
		data_reg = &task->desc.data_reg;

	err = iser_reg_data_sg(task, mem, desc, use_dma_key, data_reg);
	if (unlikely(err))
		goto err_reg;

	if (scsi_get_prot_op(task->sc) != SCSI_PROT_NORMAL) {
		struct iser_mem_reg *prot_reg = &task->desc.prot_reg;

		if (scsi_prot_sg_count(task->sc)) {
			mem = &task->prot[dir];
			err = iser_reg_prot_sg(task, mem, desc,
					       use_dma_key, prot_reg);
			if (unlikely(err))
				goto err_reg;
		}

		err = iser_reg_sig_mr(task, desc->pi_ctx, data_reg,
				      prot_reg, reg);
		if (unlikely(err))
			goto err_reg;

		desc->pi_ctx->sig_protected = 1;
	}

	return 0;

err_reg:
	if (desc)
		device->reg_ops->reg_desc_put(ib_conn, desc);

	return err;
}
Example #3
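/*
 * iser_reg_sig_mr - build an IB_WR_REG_SIG_MR work request on the task's
 * tx descriptor that covers the already-registered data (and optional
 * protection) buffers with a signature-enabled MR, and report that MR's
 * keys back through sig_reg for use in the iSER header.
 */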
static int
iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
		struct iser_pi_context *pi_ctx,
		struct iser_mem_reg *data_reg,
		struct iser_mem_reg *prot_reg,
		struct iser_mem_reg *sig_reg)
{
	struct iser_tx_desc *tx_desc = &iser_task->desc;
	struct ib_sig_attrs *sig_attrs = &tx_desc->sig_attrs;
	struct ib_cqe *cqe = &iser_task->iser_conn->ib_conn.reg_cqe;
	struct ib_sig_handover_wr *wr;
	struct ib_mr *mr = pi_ctx->sig_mr;
	int ret;

	memset(sig_attrs, 0, sizeof(*sig_attrs));
	ret = iser_set_sig_attrs(iser_task->sc, sig_attrs);
	if (ret)
		goto err;

	iser_set_prot_checks(iser_task->sc, &sig_attrs->check_mask);

	if (pi_ctx->sig_mr_valid)
		iser_inv_rkey(iser_tx_next_wr(tx_desc), mr, cqe);

	ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));

	wr = container_of(iser_tx_next_wr(tx_desc), struct ib_sig_handover_wr,
			  wr);
	wr->wr.opcode = IB_WR_REG_SIG_MR;
	wr->wr.wr_cqe = cqe;
	wr->wr.sg_list = &data_reg->sge;
	wr->wr.num_sge = 1;
	wr->wr.send_flags = 0;
	wr->sig_attrs = sig_attrs;
	wr->sig_mr = mr;
	if (scsi_prot_sg_count(iser_task->sc))
		wr->prot = &prot_reg->sge;
	else
		wr->prot = NULL;
	wr->access_flags = IB_ACCESS_LOCAL_WRITE |
			   IB_ACCESS_REMOTE_READ |
			   IB_ACCESS_REMOTE_WRITE;
	pi_ctx->sig_mr_valid = 1;

	sig_reg->sge.lkey = mr->lkey;
	sig_reg->rkey = mr->rkey;
	sig_reg->sge.addr = 0;
	sig_reg->sge.length = scsi_transfer_length(iser_task->sc);

	iser_dbg("lkey=0x%x rkey=0x%x addr=0x%llx length=%u\n",
		 sig_reg->sge.lkey, sig_reg->rkey, sig_reg->sge.addr,
		 sig_reg->sge.length);
err:
	return ret;
}
/* Register user buffer memory and initialize passive rdma
 * dto descriptor. Data size is stored in
 * task->data[ISER_DIR_IN].data_len; protection size
 * is stored in task->prot[ISER_DIR_IN].data_len.
 */
static int iser_prepare_read_cmd(struct iscsi_task *task)
{
	struct iscsi_iser_task *iser_task = task->dd_data;
	struct iser_device  *device = iser_task->iser_conn->ib_conn.device;
	struct iser_regd_buf *regd_buf;
	int err;
	struct iser_hdr *hdr = &iser_task->desc.iser_header;
	struct iser_data_buf *buf_in = &iser_task->data[ISER_DIR_IN];

	err = iser_dma_map_task_data(iser_task,
				     buf_in,
				     ISER_DIR_IN,
				     DMA_FROM_DEVICE);
	if (err)
		return err;

	if (scsi_prot_sg_count(iser_task->sc)) {
		struct iser_data_buf *pbuf_in = &iser_task->prot[ISER_DIR_IN];

		err = iser_dma_map_task_data(iser_task,
					     pbuf_in,
					     ISER_DIR_IN,
					     DMA_FROM_DEVICE);
		if (err)
			return err;
	}

	err = device->iser_reg_rdma_mem(iser_task, ISER_DIR_IN);
	if (err) {
		iser_err("Failed to set up Data-IN RDMA\n");
		return err;
	}
	regd_buf = &iser_task->rdma_regd[ISER_DIR_IN];

	hdr->flags    |= ISER_RSV;
	hdr->read_stag = cpu_to_be32(regd_buf->reg.rkey);
	hdr->read_va   = cpu_to_be64(regd_buf->reg.va);

	iser_dbg("Cmd itt:%d READ tags RKEY:%#.4X VA:%#llX\n",
		 task->itt, regd_buf->reg.rkey,
		 (unsigned long long)regd_buf->reg.va);

	return 0;
}
Example #5
/* Register user buffer memory and initialize passive rdma
 * dto descriptor. Data size is stored in
 * task->data[ISER_DIR_IN].data_len; protection size
 * is stored in task->prot[ISER_DIR_IN].data_len.
 */
static int iser_prepare_read_cmd(struct iscsi_task *task)
{
	struct iscsi_iser_task *iser_task = task->dd_data;
	struct iser_mem_reg *mem_reg;
	int err;
	struct iser_ctrl *hdr = &iser_task->desc.iser_header;
	struct iser_data_buf *buf_in = &iser_task->data[ISER_DIR_IN];

	err = iser_dma_map_task_data(iser_task,
				     buf_in,
				     ISER_DIR_IN,
				     DMA_FROM_DEVICE);
	if (err)
		return err;

	if (scsi_prot_sg_count(iser_task->sc)) {
		struct iser_data_buf *pbuf_in = &iser_task->prot[ISER_DIR_IN];

		err = iser_dma_map_task_data(iser_task,
					     pbuf_in,
					     ISER_DIR_IN,
					     DMA_FROM_DEVICE);
		if (err)
			return err;
	}

	err = iser_reg_rdma_mem(iser_task, ISER_DIR_IN, false);
	if (err) {
		iser_err("Failed to set up Data-IN RDMA\n");
		return err;
	}
	mem_reg = &iser_task->rdma_reg[ISER_DIR_IN];

	hdr->flags    |= ISER_RSV;
	hdr->read_stag = cpu_to_be32(mem_reg->rkey);
	hdr->read_va   = cpu_to_be64(mem_reg->sge.addr);

	iser_dbg("Cmd itt:%d READ tags RKEY:%#.4X VA:%#llX\n",
		 task->itt, mem_reg->rkey,
		 (unsigned long long)mem_reg->sge.addr);

	return 0;
}
Example #6
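/*
 * Map the command's SCSI protection operation onto the firmware's DIF
 * option bits. Returns the number of protection scatter/gather entries,
 * or 0 when the guard type is not the supported T10 CRC.
 */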
static inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	uint8_t	guard = scsi_host_get_guard(cmd->device->host);

	/* We only support T10 DIF right now */
	if (guard != SHOST_DIX_GUARD_CRC) {
		ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3007,
		    "Unsupported guard: %d for cmd=%p.\n", guard, cmd);
		return 0;
	}

	/* We always use DIFF Bundling for best performance */
	*fw_prot_opts = 0;

	/* Translate SCSI opcode to a protection opcode */
	switch (scsi_get_prot_op(cmd)) {
	case SCSI_PROT_READ_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_WRITE_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_READ_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_WRITE_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_READ_PASS:
		*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	case SCSI_PROT_WRITE_PASS:
		*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	default:	/* Normal Request */
		*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	}

	return scsi_prot_sg_count(cmd);
}
Example #7
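/*
 * Derive the protection operation from the presence of target-side DIF
 * and host-side DIX metadata: both present means PASS, DIF without DIX
 * means the HBA inserts PI on writes and strips it on reads, DIX without
 * DIF the reverse.
 */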
static void sd_prot_op(struct scsi_cmnd *scmd, unsigned int dif)
{
	unsigned int prot_op = SCSI_PROT_NORMAL;
	unsigned int dix = scsi_prot_sg_count(scmd);

	if (scmd->sc_data_direction == DMA_FROM_DEVICE) {
		if (dif && dix)
			prot_op = SCSI_PROT_READ_PASS;
		else if (dif && !dix)
			prot_op = SCSI_PROT_READ_STRIP;
		else if (!dif && dix)
			prot_op = SCSI_PROT_READ_INSERT;
	} else {
		if (dif && dix)
			prot_op = SCSI_PROT_WRITE_PASS;
		else if (dif && !dix)
			prot_op = SCSI_PROT_WRITE_INSERT;
		else if (!dif && dix)
			prot_op = SCSI_PROT_WRITE_STRIP;
	}

	scsi_set_prot_op(scmd, prot_op);
	scsi_set_prot_type(scmd, dif);
}
/* Register user buffer memory and initialize passive rdma
 * dto descriptor. Data size is stored in
 * task->data[ISER_DIR_OUT].data_len; protection size
 * is stored in task->prot[ISER_DIR_OUT].data_len.
 */
static int
iser_prepare_write_cmd(struct iscsi_task *task,
		       unsigned int imm_sz,
		       unsigned int unsol_sz,
		       unsigned int edtl)
{
	struct iscsi_iser_task *iser_task = task->dd_data;
	struct iser_mem_reg *mem_reg;
	int err;
	struct iser_hdr *hdr = &iser_task->desc.iser_header;
	struct iser_data_buf *buf_out = &iser_task->data[ISER_DIR_OUT];
	struct ib_sge *tx_dsg = &iser_task->desc.tx_sg[1];

	err = iser_dma_map_task_data(iser_task,
				     buf_out,
				     ISER_DIR_OUT,
				     DMA_TO_DEVICE);
	if (err)
		return err;

	if (scsi_prot_sg_count(iser_task->sc)) {
		struct iser_data_buf *pbuf_out = &iser_task->prot[ISER_DIR_OUT];

		err = iser_dma_map_task_data(iser_task,
					     pbuf_out,
					     ISER_DIR_OUT,
					     DMA_TO_DEVICE);
		if (err)
			return err;
	}

	err = iser_reg_rdma_mem(iser_task, ISER_DIR_OUT);
	if (err != 0) {
		iser_err("Failed to register write cmd RDMA mem\n");
		return err;
	}

	mem_reg = &iser_task->rdma_reg[ISER_DIR_OUT];

	if (unsol_sz < edtl) {
		hdr->flags     |= ISER_WSV;
		hdr->write_stag = cpu_to_be32(mem_reg->rkey);
		hdr->write_va   = cpu_to_be64(mem_reg->sge.addr + unsol_sz);

		iser_dbg("Cmd itt:%d, WRITE tags, RKEY:%#.4X "
			 "VA:%#llX + unsol:%d\n",
			 task->itt, mem_reg->rkey,
			 (unsigned long long)mem_reg->sge.addr, unsol_sz);
	}

	if (imm_sz > 0) {
		iser_dbg("Cmd itt:%d, WRITE, adding imm.data sz: %d\n",
			 task->itt, imm_sz);
		tx_dsg->addr = mem_reg->sge.addr;
		tx_dsg->length = imm_sz;
		tx_dsg->lkey = mem_reg->sge.lkey;
		iser_task->desc.num_sge = 2;
	}

	return 0;
}
/**
 * iser_send_command - send command PDU
 * @conn: link to matching iscsi connection
 * @task: SCSI command task
 */
int iser_send_command(struct iscsi_conn *conn,
		      struct iscsi_task *task)
{
	struct iser_conn *iser_conn = conn->dd_data;
	struct iscsi_iser_task *iser_task = task->dd_data;
	unsigned long edtl;
	int err;
	struct iser_data_buf *data_buf, *prot_buf;
	struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
	struct scsi_cmnd *sc  =  task->sc;
	struct iser_tx_desc *tx_desc = &iser_task->desc;
	u8 sig_count = ++iser_conn->ib_conn.sig_count;

	edtl = ntohl(hdr->data_length);

	/* build the tx desc regd header and add it to the tx desc dto */
	tx_desc->type = ISCSI_TX_SCSI_COMMAND;
	iser_create_send_desc(iser_conn, tx_desc);

	if (hdr->flags & ISCSI_FLAG_CMD_READ) {
		data_buf = &iser_task->data[ISER_DIR_IN];
		prot_buf = &iser_task->prot[ISER_DIR_IN];
	} else {
		data_buf = &iser_task->data[ISER_DIR_OUT];
		prot_buf = &iser_task->prot[ISER_DIR_OUT];
	}

	if (scsi_sg_count(sc)) { /* using a scatter list */
		data_buf->sg = scsi_sglist(sc);
		data_buf->size = scsi_sg_count(sc);
	}
	data_buf->data_len = scsi_bufflen(sc);

	if (scsi_prot_sg_count(sc)) {
		prot_buf->sg  = scsi_prot_sglist(sc);
		prot_buf->size = scsi_prot_sg_count(sc);
		prot_buf->data_len = (data_buf->data_len >>
				     ilog2(sc->device->sector_size)) * 8;
	}

	if (hdr->flags & ISCSI_FLAG_CMD_READ) {
		err = iser_prepare_read_cmd(task);
		if (err)
			goto send_command_error;
	}
	if (hdr->flags & ISCSI_FLAG_CMD_WRITE) {
		err = iser_prepare_write_cmd(task,
					     task->imm_count,
					     task->imm_count +
					     task->unsol_r2t.data_length,
					     edtl);
		if (err)
			goto send_command_error;
	}

	iser_task->status = ISER_TASK_STATUS_STARTED;

	err = iser_post_send(&iser_conn->ib_conn, tx_desc,
			     iser_signal_comp(sig_count));
	if (!err)
		return 0;

send_command_error:
	iser_err("conn %p failed task->itt %d err %d\n",conn, task->itt, err);
	return err;
}
Example #10
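/*
 * iser_task_rdma_finalize - tear down a task's RDMA state: copy back and
 * free any bounce buffers used for unaligned scatterlists, unregister the
 * RDMA memory, and DMA-unmap the data and protection buffers that were
 * mapped directly.
 */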
void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
{
	int is_rdma_data_aligned = 1;
	int is_rdma_prot_aligned = 1;
	int prot_count = scsi_prot_sg_count(iser_task->sc);

	/* if we were reading, copy back to the unaligned sglist;
	 * in any case dma_unmap and free the copy
	 */
	if (iser_task->data[ISER_DIR_IN].orig_sg) {
		is_rdma_data_aligned = 0;
		iser_finalize_rdma_unaligned_sg(iser_task,
						&iser_task->data[ISER_DIR_IN],
						ISER_DIR_IN);
	}

	if (iser_task->data[ISER_DIR_OUT].orig_sg) {
		is_rdma_data_aligned = 0;
		iser_finalize_rdma_unaligned_sg(iser_task,
						&iser_task->data[ISER_DIR_OUT],
						ISER_DIR_OUT);
	}

	if (iser_task->prot[ISER_DIR_IN].orig_sg) {
		is_rdma_prot_aligned = 0;
		iser_finalize_rdma_unaligned_sg(iser_task,
						&iser_task->prot[ISER_DIR_IN],
						ISER_DIR_IN);
	}

	if (iser_task->prot[ISER_DIR_OUT].orig_sg) {
		is_rdma_prot_aligned = 0;
		iser_finalize_rdma_unaligned_sg(iser_task,
						&iser_task->prot[ISER_DIR_OUT],
						ISER_DIR_OUT);
	}

	if (iser_task->dir[ISER_DIR_IN]) {
		iser_unreg_rdma_mem(iser_task, ISER_DIR_IN);
		if (is_rdma_data_aligned)
			iser_dma_unmap_task_data(iser_task,
						 &iser_task->data[ISER_DIR_IN],
						 DMA_FROM_DEVICE);
		if (prot_count && is_rdma_prot_aligned)
			iser_dma_unmap_task_data(iser_task,
						 &iser_task->prot[ISER_DIR_IN],
						 DMA_FROM_DEVICE);
	}

	if (iser_task->dir[ISER_DIR_OUT]) {
		iser_unreg_rdma_mem(iser_task, ISER_DIR_OUT);
		if (is_rdma_data_aligned)
			iser_dma_unmap_task_data(iser_task,
						 &iser_task->data[ISER_DIR_OUT],
						 DMA_TO_DEVICE);
		if (prot_count && is_rdma_prot_aligned)
			iser_dma_unmap_task_data(iser_task,
						 &iser_task->prot[ISER_DIR_OUT],
						 DMA_TO_DEVICE);
	}
}
Example #11
void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
{
	struct iser_device *device = iser_task->iser_conn->ib_conn.device;
	int is_rdma_data_aligned = 1;
	int is_rdma_prot_aligned = 1;
	int prot_count = scsi_prot_sg_count(iser_task->sc);

	/* if we were reading, copy back to the unaligned sglist;
	 * in any case dma_unmap and free the copy
	 */
	if (iser_task->data_copy[ISER_DIR_IN].copy_buf != NULL) {
		is_rdma_data_aligned = 0;
		iser_finalize_rdma_unaligned_sg(iser_task,
						&iser_task->data[ISER_DIR_IN],
						&iser_task->data_copy[ISER_DIR_IN],
						ISER_DIR_IN);
	}

	if (iser_task->data_copy[ISER_DIR_OUT].copy_buf != NULL) {
		is_rdma_data_aligned = 0;
		iser_finalize_rdma_unaligned_sg(iser_task,
						&iser_task->data[ISER_DIR_OUT],
						&iser_task->data_copy[ISER_DIR_OUT],
						ISER_DIR_OUT);
	}

	if (iser_task->prot_copy[ISER_DIR_IN].copy_buf != NULL) {
		is_rdma_prot_aligned = 0;
		iser_finalize_rdma_unaligned_sg(iser_task,
						&iser_task->prot[ISER_DIR_IN],
						&iser_task->prot_copy[ISER_DIR_IN],
						ISER_DIR_IN);
	}

	if (iser_task->prot_copy[ISER_DIR_OUT].copy_buf != NULL) {
		is_rdma_prot_aligned = 0;
		iser_finalize_rdma_unaligned_sg(iser_task,
						&iser_task->prot[ISER_DIR_OUT],
						&iser_task->prot_copy[ISER_DIR_OUT],
						ISER_DIR_OUT);
	}

	if (iser_task->dir[ISER_DIR_IN]) {
		device->iser_unreg_rdma_mem(iser_task, ISER_DIR_IN);
		if (is_rdma_data_aligned)
			iser_dma_unmap_task_data(iser_task,
						 &iser_task->data[ISER_DIR_IN]);
		if (prot_count && is_rdma_prot_aligned)
			iser_dma_unmap_task_data(iser_task,
						 &iser_task->prot[ISER_DIR_IN]);
	}

	if (iser_task->dir[ISER_DIR_OUT]) {
		device->iser_unreg_rdma_mem(iser_task, ISER_DIR_OUT);
		if (is_rdma_data_aligned)
			iser_dma_unmap_task_data(iser_task,
						 &iser_task->data[ISER_DIR_OUT]);
		if (prot_count && is_rdma_prot_aligned)
			iser_dma_unmap_task_data(iser_task,
						 &iser_task->prot[ISER_DIR_OUT]);
	}
}
Example #12
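/*
 * Work item that hands a queued SCSI command from the loopback SCSI host
 * over to the target core, mapping its data, BIDI and protection
 * scatterlists, and completing the command early on any setup error.
 */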
static void tcm_loop_submission_work(struct work_struct *work)
{
	struct tcm_loop_cmd *tl_cmd =
		container_of(work, struct tcm_loop_cmd, work);
	struct se_cmd *se_cmd = &tl_cmd->tl_se_cmd;
	struct scsi_cmnd *sc = tl_cmd->sc;
	struct tcm_loop_nexus *tl_nexus;
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;
	struct scatterlist *sgl_bidi = NULL;
	u32 sgl_bidi_count = 0, transfer_length;
	int rc;

	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];

	/*
	 * Ensure that this tl_tpg reference from the incoming sc->device->id
	 * has already been configured via tcm_loop_make_naa_tpg().
	 */
	if (!tl_tpg->tl_hba) {
		set_host_byte(sc, DID_NO_CONNECT);
		goto out_done;
	}
	if (tl_tpg->tl_transport_status == TCM_TRANSPORT_OFFLINE) {
		set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
		goto out_done;
	}
	tl_nexus = tl_hba->tl_nexus;
	if (!tl_nexus) {
		scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus"
				" does not exist\n");
		set_host_byte(sc, DID_ERROR);
		goto out_done;
	}
	if (scsi_bidi_cmnd(sc)) {
		struct scsi_data_buffer *sdb = scsi_in(sc);

		sgl_bidi = sdb->table.sgl;
		sgl_bidi_count = sdb->table.nents;
		se_cmd->se_cmd_flags |= SCF_BIDI;

	}

	transfer_length = scsi_transfer_length(sc);
	if (!scsi_prot_sg_count(sc) &&
	    scsi_get_prot_op(sc) != SCSI_PROT_NORMAL) {
		se_cmd->prot_pto = true;
		/*
		 * loopback transport doesn't support
		 * WRITE_GENERATE, READ_STRIP protection
		 * information operations, go ahead unprotected.
		 */
		transfer_length = scsi_bufflen(sc);
	}

	rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd,
			&tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun,
			transfer_length, tcm_loop_sam_attr(sc),
			sc->sc_data_direction, 0,
			scsi_sglist(sc), scsi_sg_count(sc),
			sgl_bidi, sgl_bidi_count,
			scsi_prot_sglist(sc), scsi_prot_sg_count(sc));
	if (rc < 0) {
		set_host_byte(sc, DID_NO_CONNECT);
		goto out_done;
	}
	return;

out_done:
	sc->scsi_done(sc);
	return;
}