Example 1
static struct sas_task *sas_create_task(struct scsi_cmnd *cmd,
					       struct domain_device *dev,
					       gfp_t gfp_flags)
{
	struct sas_task *task = sas_alloc_task(gfp_flags);
	struct scsi_lun lun;

	if (!task)
		return NULL;

	task->uldd_task = cmd;
	ASSIGN_SAS_TASK(cmd, task);

	task->dev = dev;
	task->task_proto = task->dev->tproto; /* BUG_ON(!SSP) */

	task->ssp_task.retry_count = 1;
	int_to_scsilun(cmd->device->lun, &lun);
	memcpy(task->ssp_task.LUN, &lun.scsi_lun, 8);
	task->ssp_task.task_attr = TASK_ATTR_SIMPLE;
	memcpy(task->ssp_task.cdb, cmd->cmnd, 16);

	task->scatter = scsi_sglist(cmd);
	task->num_scatter = scsi_sg_count(cmd);
	task->total_xfer_len = scsi_bufflen(cmd);
	task->data_dir = cmd->sc_data_direction;

	task->task_done = sas_scsi_task_done;

	return task;
}
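For context (a caller-side sketch, not part of the original source): if submission fails after sas_create_task(), the task must be unwound. This assumes libsas' sas_free_task() helper and a placeholder submit function:

	struct sas_task *task = sas_create_task(cmd, dev, GFP_ATOMIC);

	if (!task)
		return SCSI_MLQUEUE_HOST_BUSY;

	/* submit_to_lldd() is a placeholder for the driver's execute hook */
	if (submit_to_lldd(task)) {
		ASSIGN_SAS_TASK(cmd, NULL);	/* drop the back-pointer set above */
		sas_free_task(task);		/* libsas counterpart of sas_alloc_task() */
		return SCSI_MLQUEUE_HOST_BUSY;
	}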
Example 2
static int mac53c94_queue_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
	struct fsc_state *state;

#if 0
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		int i;
		printk(KERN_DEBUG "mac53c94_queue %p: command is", cmd);
		for (i = 0; i < cmd->cmd_len; ++i)
			printk(KERN_CONT " %.2x", cmd->cmnd[i]);
		printk(KERN_CONT "\n");
		printk(KERN_DEBUG "use_sg=%d request_bufflen=%d request_buffer=%p\n",
		       scsi_sg_count(cmd), scsi_bufflen(cmd), scsi_sglist(cmd));
	}
#endif

	cmd->scsi_done = done;
	cmd->host_scribble = NULL;

	state = (struct fsc_state *) cmd->device->host->hostdata;

	if (state->request_q == NULL)
		state->request_q = cmd;
	else
		state->request_qtail->host_scribble = (void *) cmd;
	state->request_qtail = cmd;

	if (state->phase == idle)
		mac53c94_start(state);

	return 0;
}
Example 3
/*
 * copy data from device into scatter/gather buffer
 */
static int fill_from_dev_buffer(struct scsi_cmnd *cmd, const void *buf)
{
	int k, req_len, act_len, len, active;
	void *kaddr;
	struct scatterlist *sgpnt;
	unsigned int buflen;

	buflen = scsi_bufflen(cmd);
	if (!buflen)
		return 0;

	if (!scsi_sglist(cmd))
		return -1;

	active = 1;
	req_len = act_len = 0;
	scsi_for_each_sg(cmd, sgpnt, scsi_sg_count(cmd), k) {
		if (active) {
			kaddr = kmap_atomic(sg_page(sgpnt), KM_IRQ0);
			len = sgpnt->length;
			if ((req_len + len) > buflen) {
				active = 0;
				len = buflen - req_len;
			}
			memcpy(kaddr + sgpnt->offset, buf + req_len, len);
			flush_kernel_dcache_page(sg_page(sgpnt));
			kunmap_atomic(kaddr, KM_IRQ0);
			act_len += len;
		}
		req_len += sgpnt->length;
	}
	scsi_set_resid(cmd, req_len - act_len);
	return 0;
}
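A hedged usage sketch (assumed, not from the original source): since this variant of fill_from_dev_buffer() takes no length argument, it reads up to scsi_bufflen(cmd) bytes from the source buffer, so the caller's buffer must be at least that large, e.g. when answering an emulated INQUIRY:

	unsigned char inq[96];	/* hypothetical response buffer */

	/* fill_from_dev_buffer() copies scsi_bufflen(cmd) bytes from inq,
	 * so the local buffer must cover the whole request length. */
	BUG_ON(scsi_bufflen(cmd) > sizeof(inq));

	memset(inq, 0, sizeof(inq));
	inq[0] = TYPE_DISK;		/* peripheral device type */
	inq[4] = sizeof(inq) - 5;	/* ADDITIONAL LENGTH field */
	return fill_from_dev_buffer(cmd, inq);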
Example 4
/**************************************************************************
 *   qla2x00_print_scsi_cmd
 *	 Dumps out info about the scsi cmd and srb.
 *   Input
 *	 cmd : struct scsi_cmnd
 **************************************************************************/
void
qla2x00_print_scsi_cmd(struct scsi_cmnd * cmd)
{
	int i;
	struct scsi_qla_host *ha;
	srb_t *sp;

	ha = shost_priv(cmd->device->host);

	sp = (srb_t *) cmd->SCp.ptr;
	printk("SCSI Command @=0x%p, Handle=0x%p\n", cmd, cmd->host_scribble);
	printk("  chan=0x%02x, target=0x%02x, lun=0x%02x, cmd_len=0x%02x\n",
	    cmd->device->channel, cmd->device->id, cmd->device->lun,
	    cmd->cmd_len);
	printk(" CDB: ");
	for (i = 0; i < cmd->cmd_len; i++) {
		printk("0x%02x ", cmd->cmnd[i]);
	}
	printk("\n  seg_cnt=%d, allowed=%d, retries=%d\n",
	       scsi_sg_count(cmd), cmd->allowed, cmd->retries);
	printk("  request buffer=0x%p, request buffer len=0x%x\n",
	       scsi_sglist(cmd), scsi_bufflen(cmd));
	printk("  tag=%d, transfersize=0x%x\n",
	    cmd->tag, cmd->transfersize);
	printk("  serial_number=%lx, SP=%p\n", cmd->serial_number, sp);
	printk("  data direction=%d\n", cmd->sc_data_direction);

	if (!sp)
		return;

	printk("  sp flags=0x%x\n", sp->flags);
}
Example 5
/*
 * copy data from scatter/gather into device's buffer
 */
static int fetch_to_dev_buffer(struct scsi_cmnd *cmd, void *buf)
{
	int k, req_len, len, fin;
	void *kaddr;
	struct scatterlist *sgpnt;
	unsigned int buflen;

	buflen = scsi_bufflen(cmd);
	if (!buflen)
		return 0;

	if (!scsi_sglist(cmd))
		return -1;

	req_len = fin = 0;
	scsi_for_each_sg(cmd, sgpnt, scsi_sg_count(cmd), k) {
		kaddr = kmap_atomic(sg_page(sgpnt), KM_IRQ0);
		len = sgpnt->length;
		if ((req_len + len) > buflen) {
			len = buflen - req_len;
			fin = 1;
		}
		memcpy(buf + req_len, kaddr + sgpnt->offset, len);
		kunmap_atomic(kaddr, KM_IRQ0);
		if (fin)
			return req_len + len;
		req_len += sgpnt->length;
	}
	return req_len;
}
Example 6
static int srp_direct_data(struct scsi_cmnd *sc, struct srp_direct_buf *md,
                           enum dma_data_direction dir, srp_rdma_t rdma_io,
                           int dma_map, int ext_desc)
{
    struct iu_entry *iue = NULL;
    struct scatterlist *sg = NULL;
    int err, nsg = 0, len;

    if (dma_map) {
        iue = (struct iu_entry *) sc->SCp.ptr;
        sg = scsi_sglist(sc);

        dprintk("%p %u %u %d\n", iue, scsi_bufflen(sc),
                md->len, scsi_sg_count(sc));

        nsg = dma_map_sg(iue->target->dev, sg, scsi_sg_count(sc),
                         DMA_BIDIRECTIONAL);
        if (!nsg) {
            printk("fail to map %p %d\n", iue, scsi_sg_count(sc));
            return 0;
        }
        len = min(scsi_bufflen(sc), md->len);
    } else
        len = md->len;

    err = rdma_io(sc, sg, nsg, md, 1, dir, len);

    if (dma_map)
        dma_unmap_sg(iue->target->dev, sg, nsg, DMA_BIDIRECTIONAL);

    return err;
}
Example 7
/*
 * fnic_queue_wq_copy_desc
 * Routine to enqueue a wq copy desc
 */
static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
					  struct vnic_wq_copy *wq,
					  struct fnic_io_req *io_req,
					  struct scsi_cmnd *sc,
					  int sg_count)
{
	struct scatterlist *sg;
	struct fc_rport *rport = starget_to_rport(scsi_target(sc->device));
	struct fc_rport_libfc_priv *rp = rport->dd_data;
	struct host_sg_desc *desc;
	u8 pri_tag = 0;
	unsigned int i;
	unsigned long intr_flags;
	int flags;
	u8 exch_flags;
	struct scsi_lun fc_lun;
	char msg[2];

	if (sg_count) {
		/* For each SGE, create a device desc entry */
		desc = io_req->sgl_list;
		for_each_sg(scsi_sglist(sc), sg, sg_count, i) {
			desc->addr = cpu_to_le64(sg_dma_address(sg));
			desc->len = cpu_to_le32(sg_dma_len(sg));
			desc->_resvd = 0;
			desc++;
		}

		io_req->sgl_list_pa = pci_map_single
			(fnic->pdev,
			 io_req->sgl_list,
			 sizeof(io_req->sgl_list[0]) * sg_count,
			 PCI_DMA_TODEVICE);
	}
Example 8
/*
 * Called by struct target_core_fabric_ops->new_cmd_map()
 *
 * Always called in process context.  A non zero return value
 * here will signal to handle an exception based on the return code.
 */
static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
{
    struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
                                  struct tcm_loop_cmd, tl_se_cmd);
    struct scsi_cmnd *sc = tl_cmd->sc;
    struct scatterlist *sgl_bidi = NULL;
    u32 sgl_bidi_count = 0;
    int ret;
    /*
     * Allocate the necessary tasks to complete the received CDB+data
     */
    ret = transport_generic_allocate_tasks(se_cmd, sc->cmnd);
    if (ret != 0)
        return ret;
    /*
     * For BIDI commands, pass in the extra READ buffer
     * to transport_generic_map_mem_to_cmd() below..
     */
    if (se_cmd->se_cmd_flags & SCF_BIDI) {
        struct scsi_data_buffer *sdb = scsi_in(sc);

        sgl_bidi = sdb->table.sgl;
        sgl_bidi_count = sdb->table.nents;
    }
    /*
     * Because some userspace code via scsi-generic do not memset their
     * associated read buffers, go ahead and do that here for type
     * SCF_SCSI_CONTROL_SG_IO_CDB.  Also note that this is currently
     * guaranteed to be a single SGL for SCF_SCSI_CONTROL_SG_IO_CDB
     * by target core in transport_generic_allocate_tasks() ->
     * transport_generic_cmd_sequencer().
     */
    if (se_cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB &&
            se_cmd->data_direction == DMA_FROM_DEVICE) {
        struct scatterlist *sg = scsi_sglist(sc);
        unsigned char *buf = kmap(sg_page(sg)) + sg->offset;

        if (buf != NULL) {
            memset(buf, 0, sg->length);
            kunmap(sg_page(sg));
        }
    }

    /* Tell the core about our preallocated memory */
    return transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc),
                                            scsi_sg_count(sc), sgl_bidi, sgl_bidi_count);
}
Example 9
static void tcm_loop_submission_work(struct work_struct *work)
{
	struct tcm_loop_cmd *tl_cmd =
		container_of(work, struct tcm_loop_cmd, work);
	struct se_cmd *se_cmd = &tl_cmd->tl_se_cmd;
	struct scsi_cmnd *sc = tl_cmd->sc;
	struct tcm_loop_nexus *tl_nexus;
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;
	struct scatterlist *sgl_bidi = NULL;
	u32 sgl_bidi_count = 0;
	int rc;

	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];

	/*
	 * Ensure that this tl_tpg reference from the incoming sc->device->id
	 * has already been configured via tcm_loop_make_naa_tpg().
	 */
	if (!tl_tpg->tl_hba) {
		set_host_byte(sc, DID_NO_CONNECT);
		goto out_done;
	}
	if (tl_tpg->tl_transport_status == TCM_TRANSPORT_OFFLINE) {
		set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
		goto out_done;
	}
	tl_nexus = tl_hba->tl_nexus;
	if (!tl_nexus) {
		scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus"
				" does not exist\n");
		set_host_byte(sc, DID_ERROR);
		goto out_done;
	}
	if (scsi_bidi_cmnd(sc)) {
		struct scsi_data_buffer *sdb = scsi_in(sc);

		sgl_bidi = sdb->table.sgl;
		sgl_bidi_count = sdb->table.nents;
		se_cmd->se_cmd_flags |= SCF_BIDI;

	}
	rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd,
			&tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun,
			scsi_bufflen(sc), tcm_loop_sam_attr(sc),
			sc->sc_data_direction, 0,
			scsi_sglist(sc), scsi_sg_count(sc),
			sgl_bidi, sgl_bidi_count);
	if (rc < 0) {
		set_host_byte(sc, DID_NO_CONNECT);
		goto out_done;
	}
	return;

out_done:
	sc->scsi_done(sc);
	return;
}
Example 10
/**
 * zfcp_qdio_sbals_from_scsicmnd - fill SBALs from scsi command
 * @fsf_req: request to be processed
 * @sbtype: SBALE flags
 * @scsi_cmnd: either scatter-gather list or buffer contained herein is used
 *	to fill SBALs
 */
int
zfcp_qdio_sbals_from_scsicmnd(struct zfcp_fsf_req *fsf_req,
			      unsigned long sbtype, struct scsi_cmnd *scsi_cmnd)
{
	return zfcp_qdio_sbals_from_sg(fsf_req,	sbtype, scsi_sglist(scsi_cmnd),
				       scsi_sg_count(scsi_cmnd),
				       ZFCP_MAX_SBALS_PER_REQ);
}
Example 11
/**
 * scsi_dma_unmap - unmap command's sg lists mapped by scsi_dma_map
 * @cmd:	scsi command
 */
void scsi_dma_unmap(struct scsi_cmnd *cmd)
{
	if (scsi_sg_count(cmd)) {
		struct device *dev = cmd->device->host->shost_gendev.parent;

		dma_unmap_sg(dev, scsi_sglist(cmd), scsi_sg_count(cmd),
			     cmd->sc_data_direction);
	}
}
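For context (a minimal sketch, not from the original source): scsi_dma_map() and scsi_dma_unmap() bracket the DMA lifetime of the command's scatterlist, typically across a driver's queuecommand/completion pair:

	int i, nseg;
	struct scatterlist *sg;

	nseg = scsi_dma_map(cmd);	/* 0 means no data to transfer */
	if (nseg < 0)
		return SCSI_MLQUEUE_HOST_BUSY;

	scsi_for_each_sg(cmd, sg, nseg, i) {
		/* hand sg_dma_address(sg) / sg_dma_len(sg) to the hardware */
	}

	/* ... later, in the completion path ... */
	scsi_dma_unmap(cmd);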
Example 12
static void
bfa_ioim_sgpg_setup(struct bfa_ioim_s *ioim)
{
	int             sgeid, nsges, i;
	struct bfi_sge_s      *sge;
	struct bfa_sgpg_s *sgpg;
	u32        pgcumsz;
	u64        addr;
	struct scatterlist *sg;
	struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;

	sgeid = BFI_SGE_INLINE;
	ioim->sgpg = sgpg = bfa_q_first(&ioim->sgpg_q);

	sg = scsi_sglist(cmnd);
	sg = sg_next(sg);

	do {
		sge = sgpg->sgpg->sges;
		nsges = ioim->nsges - sgeid;
		if (nsges > BFI_SGPG_DATA_SGES)
			nsges = BFI_SGPG_DATA_SGES;

		pgcumsz = 0;
		for (i = 0; i < nsges; i++, sge++, sgeid++, sg = sg_next(sg)) {
			addr = bfa_os_sgaddr(sg_dma_address(sg));
			sge->sga = *(union bfi_addr_u *) &addr;
			sge->sg_len = sg_dma_len(sg);
			pgcumsz += sge->sg_len;

			/**
			 * set flags
			 */
			if (i < (nsges - 1))
				sge->flags = BFI_SGE_DATA;
			else if (sgeid < (ioim->nsges - 1))
				sge->flags = BFI_SGE_DATA_CPL;
			else
				sge->flags = BFI_SGE_DATA_LAST;
		}

		sgpg = (struct bfa_sgpg_s *) bfa_q_next(sgpg);

		/**
		 * set the link element of each page
		 */
		if (sgeid == ioim->nsges) {
			sge->flags = BFI_SGE_PGDLEN;
			sge->sga.a32.addr_lo = 0;
			sge->sga.a32.addr_hi = 0;
		} else {
			sge->flags = BFI_SGE_LINK;
			sge->sga = sgpg->sgpg_pa;
		}
		sge->sg_len = pgcumsz;
	} while (sgeid < ioim->nsges);
}
Example 13
//----- usb_stor_bulk_srb() ---------------------
int usb_stor_bulk_srb(struct us_data* us, unsigned int pipe, struct scsi_cmnd* srb)
{
	unsigned int partial;
	int result = usb_stor_bulk_transfer_sglist(us, pipe, scsi_sglist(srb),
				      scsi_sg_count(srb), scsi_bufflen(srb),
				      &partial);

	scsi_set_resid(srb, scsi_bufflen(srb) - partial);
	return result;
}
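A hedged caller sketch (assumed, not from the original source): a usb-storage transport routine usually picks the bulk pipe from the command's data direction and maps a failed data stage to a transport error, using the standard usb-storage result codes:

	if (scsi_bufflen(srb)) {
		unsigned int pipe = srb->sc_data_direction == DMA_FROM_DEVICE ?
				us->recv_bulk_pipe : us->send_bulk_pipe;

		if (usb_stor_bulk_srb(us, pipe, srb) != USB_STOR_XFER_GOOD)
			return USB_STOR_TRANSPORT_ERROR;
	}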
Example 14
/**
 * iser_send_command - send command PDU
 */
int iser_send_command(struct iscsi_conn *conn,
		      struct iscsi_task *task)
{
	struct iscsi_iser_conn *iser_conn = conn->dd_data;
	struct iscsi_iser_task *iser_task = task->dd_data;
	unsigned long edtl;
	int err;
	struct iser_data_buf *data_buf;
	struct iscsi_cmd *hdr =  (struct iscsi_cmd *)task->hdr;
	struct scsi_cmnd *sc  =  task->sc;
	struct iser_tx_desc *tx_desc = &iser_task->desc;

	edtl = ntohl(hdr->data_length);

	/* build the tx desc regd header and add it to the tx desc dto */
	tx_desc->type = ISCSI_TX_SCSI_COMMAND;
	iser_create_send_desc(iser_conn->ib_conn, tx_desc);

	if (hdr->flags & ISCSI_FLAG_CMD_READ)
		data_buf = &iser_task->data[ISER_DIR_IN];
	else
		data_buf = &iser_task->data[ISER_DIR_OUT];

	if (scsi_sg_count(sc)) { /* using a scatter list */
		data_buf->buf  = scsi_sglist(sc);
		data_buf->size = scsi_sg_count(sc);
	}

	data_buf->data_len = scsi_bufflen(sc);

	if (hdr->flags & ISCSI_FLAG_CMD_READ) {
		err = iser_prepare_read_cmd(task, edtl);
		if (err)
			goto send_command_error;
	}
	if (hdr->flags & ISCSI_FLAG_CMD_WRITE) {
		err = iser_prepare_write_cmd(task,
					     task->imm_count,
				             task->imm_count +
					     task->unsol_r2t.data_length,
					     edtl);
		if (err)
			goto send_command_error;
	}

	iser_task->status = ISER_TASK_STATUS_STARTED;

	err = iser_post_send(iser_conn->ib_conn, tx_desc);
	if (!err)
		return 0;

send_command_error:
	iser_err("conn %p failed task->itt %d err %d\n",conn, task->itt, err);
	return err;
}
Example 15
static void sd_zbc_report_zones_complete(struct scsi_cmnd *scmd,
					 unsigned int good_bytes)
{
	struct request *rq = scmd->request;
	struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
	struct sg_mapping_iter miter;
	struct blk_zone_report_hdr hdr;
	struct blk_zone zone;
	unsigned int offset, bytes = 0;
	unsigned long flags;
	u8 *buf;

	if (good_bytes < 64)
		return;

	memset(&hdr, 0, sizeof(struct blk_zone_report_hdr));

	sg_miter_start(&miter, scsi_sglist(scmd), scsi_sg_count(scmd),
		       SG_MITER_TO_SG | SG_MITER_ATOMIC);

	local_irq_save(flags);
	while (sg_miter_next(&miter) && bytes < good_bytes) {

		buf = miter.addr;
		offset = 0;

		if (bytes == 0) {
			/* Set the report header */
			hdr.nr_zones = min_t(unsigned int,
					 (good_bytes - 64) / 64,
					 get_unaligned_be32(&buf[0]) / 64);
			memcpy(buf, &hdr, sizeof(struct blk_zone_report_hdr));
			offset += 64;
			bytes += 64;
		}

		/* Parse zone descriptors */
		while (offset < miter.length && hdr.nr_zones) {
			WARN_ON(offset > miter.length);
			buf = miter.addr + offset;
			sd_zbc_parse_report(sdkp, buf, &zone);
			memcpy(buf, &zone, sizeof(struct blk_zone));
			offset += 64;
			bytes += 64;
			hdr.nr_zones--;
		}

		if (!hdr.nr_zones)
			break;

	}
	sg_miter_stop(&miter);
	local_irq_restore(flags);
}
Example 16
/**
 * scsi_dma_map - perform DMA mapping against command's sg lists
 * @cmd:	scsi command
 *
 * Returns the number of sg lists actually used, zero if the sg list
 * is NULL, or -ENOMEM if the mapping failed.
 */
int scsi_dma_map(struct scsi_cmnd *cmd)
{
	int nseg = 0;

	if (scsi_sg_count(cmd)) {
		struct device *dev = cmd->device->host->shost_gendev.parent;

		nseg = dma_map_sg(dev, scsi_sglist(cmd), scsi_sg_count(cmd),
				  cmd->sc_data_direction);
		if (unlikely(!nseg))
			return -ENOMEM;
	}
	return nseg;
}
Example 17
/*
 * Common used function. Transfer a complete command
 * via usb_stor_bulk_transfer_sglist() above. Set cmnd resid
 */
int usb_stor_bulk_srb(struct us_data* us, unsigned int pipe,
		      struct scsi_cmnd* srb)
{
	unsigned int partial;
	int result;

	/* ALPS00445134: extra debug message for CR debugging */
	US_DEBUGP("%s, line %d:\n", __func__, __LINE__);

	result = usb_stor_bulk_transfer_sglist(us, pipe, scsi_sglist(srb),
				      scsi_sg_count(srb), scsi_bufflen(srb),
				      &partial);

	scsi_set_resid(srb, scsi_bufflen(srb) - partial);
	return result;
}
Example 18
/*
 * Called by struct target_core_fabric_ops->new_cmd_map()
 *
 * Always called in process context.  A non zero return value
 * here will signal to handle an exception based on the return code.
 */
static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
				struct tcm_loop_cmd, tl_se_cmd);
	struct scsi_cmnd *sc = tl_cmd->sc;
	struct scatterlist *sgl_bidi = NULL;
	u32 sgl_bidi_count = 0;
	int ret;
	/*
	 * Allocate the necessary tasks to complete the received CDB+data
	 */
	ret = transport_generic_allocate_tasks(se_cmd, sc->cmnd);
	if (ret == -ENOMEM) {
		/* Out of Resources */
		return PYX_TRANSPORT_LU_COMM_FAILURE;
	} else if (ret == -EINVAL) {
		/*
		 * Handle case for SAM_STAT_RESERVATION_CONFLICT
		 */
		if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT)
			return PYX_TRANSPORT_RESERVATION_CONFLICT;
		/*
		 * Otherwise, return SAM_STAT_CHECK_CONDITION and return
		 * sense data.
		 */
		return PYX_TRANSPORT_USE_SENSE_REASON;
	}

	/*
	 * For BIDI commands, pass in the extra READ buffer
	 * to transport_generic_map_mem_to_cmd() below..
	 */
	if (se_cmd->t_tasks_bidi) {
		struct scsi_data_buffer *sdb = scsi_in(sc);

		sgl_bidi = sdb->table.sgl;
		sgl_bidi_count = sdb->table.nents;
	}

	/* Tell the core about our preallocated memory */
	ret = transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc),
			scsi_sg_count(sc), sgl_bidi, sgl_bidi_count);
	if (ret < 0)
		return PYX_TRANSPORT_LU_COMM_FAILURE;

	return 0;
}
Example 19
/**
 * scsi_dma_map - perform DMA mapping against command's sg lists
 * @cmd:	scsi command
 *
 * Returns the number of sg lists actually used, zero if the sg list
 * is NULL, or -ENOMEM if the mapping failed.
 */
int scsi_dma_map(struct scsi_cmnd *cmd)
{
	int nseg = 0;
	struct dma_attrs *attrs = &scsi_direct_attrs;

	if (scsi_sg_count(cmd)) {
		struct device *dev = cmd->device->host->dma_dev;

		if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
			attrs = (cmd->request->cmd_flags & REQ_KERNEL) ?
				&scsi_direct_attrs : NULL;
		nseg = dma_map_sg_attr(dev, scsi_sglist(cmd),
				scsi_sg_count(cmd),
				cmd->sc_data_direction, attrs);
		if (unlikely(!nseg))
			return -ENOMEM;
	}
	return nseg;
}
Example 20
static void
mts_build_transfer_context(struct scsi_cmnd *srb, struct mts_desc* desc)
{
	int pipe;
	struct scatterlist * sg;
	
	MTS_DEBUG_GOT_HERE();

	desc->context.instance = desc;
	desc->context.srb = srb;
	desc->context.fragment = 0;

	if (!scsi_bufflen(srb)) {
		desc->context.data = NULL;
		desc->context.data_length = 0;
		return;
	} else {
		sg = scsi_sglist(srb);
		desc->context.data = sg_virt(&sg[0]);
		desc->context.data_length = sg[0].length;
	}


	/* can't rely on srb->sc_data_direction */

	/* Brutally ripped from usb-storage */

	if (!memcmp(srb->cmnd, mts_read_image_sig, mts_read_image_sig_len)) {
		pipe = usb_rcvbulkpipe(desc->usb_dev, desc->ep_image);
		MTS_DEBUG("transferring from desc->ep_image == %d\n",
			  (int)desc->ep_image);
	} else if (MTS_DIRECTION_IS_IN(srb->cmnd[0])) {
		pipe = usb_rcvbulkpipe(desc->usb_dev, desc->ep_response);
		MTS_DEBUG("transferring from desc->ep_response == %d\n",
			  (int)desc->ep_response);
	} else {
		MTS_DEBUG("transferring to desc->ep_out == %d\n",
			  (int)desc->ep_out);
		pipe = usb_sndbulkpipe(desc->usb_dev, desc->ep_out);
	}
	desc->context.data_pipe = pipe;
}
Example 22
static void mts_do_sg (struct urb* transfer)
{
	struct scatterlist * sg;
	int status = transfer->status;
	MTS_INT_INIT();

	MTS_DEBUG("Processing fragment %d of %d\n", context->fragment,
	                                          scsi_sg_count(context->srb));

	if (unlikely(status)) {
		context->srb->result = (status == -ENOENT ? DID_ABORT : DID_ERROR) << 16;
		mts_transfer_cleanup(transfer);
		return;	/* the command is completed; don't submit the next fragment */
	}

	sg = scsi_sglist(context->srb);
	context->fragment++;
	mts_int_submit_urb(transfer,
			   context->data_pipe,
			   sg_virt(&sg[context->fragment]),
			   sg[context->fragment].length,
			   context->fragment + 1 == scsi_sg_count(context->srb) ?
			   mts_data_done : mts_do_sg);
}
Example 23
static int idescsi_map_sg(ide_drive_t *drive, struct ide_atapi_pc *pc)
{
	ide_hwif_t *hwif = drive->hwif;
	struct scatterlist *sg, *scsi_sg;
	int segments;

	if (!pc->req_xfer || pc->req_xfer % 1024)
		return 1;

	if (idescsi_set_direction(pc))
		return 1;

	sg = hwif->sg_table;
	scsi_sg = scsi_sglist(pc->scsi_cmd);
	segments = scsi_sg_count(pc->scsi_cmd);

	if (segments > hwif->sg_max_nents)
		return 1;

	hwif->sg_nents = segments;
	memcpy(sg, scsi_sg, sizeof(*sg) * segments);

	return 0;
}
Example 24
static void pvscsi_map_buffers(struct pvscsi_adapter *adapter,
			       struct pvscsi_ctx *ctx, struct scsi_cmnd *cmd,
			       struct PVSCSIRingReqDesc *e)
{
	unsigned count;
	unsigned bufflen = scsi_bufflen(cmd);
	struct scatterlist *sg;

	e->dataLen = bufflen;
	e->dataAddr = 0;
	if (bufflen == 0)
		return;

	sg = scsi_sglist(cmd);
	count = scsi_sg_count(cmd);
	if (count != 0) {
		int segs = scsi_dma_map(cmd);
		if (segs > 1) {
			pvscsi_create_sg(ctx, sg, segs);

			e->flags |= PVSCSI_FLAG_CMD_WITH_SG_LIST;
			ctx->sglPA = pci_map_single(adapter->dev, ctx->sgl,
						    SGL_SIZE, PCI_DMA_TODEVICE);
			e->dataAddr = ctx->sglPA;
		} else
			e->dataAddr = sg_dma_address(sg);
	} else {
		/*
		 * In case there is no S/G list, scsi_sglist points
		 * directly to the buffer.
		 */
		ctx->dataPA = pci_map_single(adapter->dev, sg, bufflen,
					     cmd->sc_data_direction);
		e->dataAddr = ctx->dataPA;
	}
}
Example 25
static int srp_indirect_data(struct scsi_cmnd *sc, struct srp_cmd *cmd,
                             struct srp_indirect_buf *id,
                             enum dma_data_direction dir, srp_rdma_t rdma_io,
                             int dma_map, int ext_desc)
{
    struct iu_entry *iue = NULL;
    struct srp_direct_buf *md = NULL;
    struct scatterlist dummy, *sg = NULL;
    dma_addr_t token = 0;
    int err = 0;
    int nmd, nsg = 0, len;

    if (dma_map || ext_desc) {
        iue = (struct iu_entry *) sc->SCp.ptr;
        sg = scsi_sglist(sc);

        dprintk("%p %u %u %d %d\n",
                iue, scsi_bufflen(sc), id->len,
                cmd->data_in_desc_cnt, cmd->data_out_desc_cnt);
    }

    nmd = id->table_desc.len / sizeof(struct srp_direct_buf);

    if ((dir == DMA_FROM_DEVICE && nmd == cmd->data_in_desc_cnt) ||
            (dir == DMA_TO_DEVICE && nmd == cmd->data_out_desc_cnt)) {
        md = &id->desc_list[0];
        goto rdma;
    }

    if (ext_desc && dma_map) {
        md = dma_alloc_coherent(iue->target->dev, id->table_desc.len,
                                &token, GFP_KERNEL);
        if (!md) {
            eprintk("Can't get dma memory %u\n", id->table_desc.len);
            return -ENOMEM;
        }

        sg_init_one(&dummy, md, id->table_desc.len);
        sg_dma_address(&dummy) = token;
        sg_dma_len(&dummy) = id->table_desc.len;
        err = rdma_io(sc, &dummy, 1, &id->table_desc, 1, DMA_TO_DEVICE,
                      id->table_desc.len);
        if (err) {
            eprintk("Error copying indirect table %d\n", err);
            goto free_mem;
        }
    } else {
        eprintk("This command uses external indirect buffer\n");
        return -EINVAL;
    }

rdma:
    if (dma_map) {
        nsg = dma_map_sg(iue->target->dev, sg, scsi_sg_count(sc),
                         DMA_BIDIRECTIONAL);
        if (!nsg) {
            eprintk("fail to map %p %d\n", iue, scsi_sg_count(sc));
            err = -EIO;
            goto free_mem;
        }
        len = min(scsi_bufflen(sc), id->len);
    } else
        len = id->len;

    err = rdma_io(sc, sg, nsg, md, nmd, dir, len);

    if (dma_map)
        dma_unmap_sg(iue->target->dev, sg, nsg, DMA_BIDIRECTIONAL);

free_mem:
    if (token && dma_map)
        dma_free_coherent(iue->target->dev, id->table_desc.len, md, token);

    return err;
}
Example 26
int esas2r_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
	struct esas2r_adapter *a =
		(struct esas2r_adapter *)cmd->device->host->hostdata;
	struct esas2r_request *rq;
	struct esas2r_sg_context sgc;
	unsigned bufflen;

	/* Assume success, if it fails we will fix the result later. */
	cmd->result = DID_OK << 16;

	if (unlikely(test_bit(AF_DEGRADED_MODE, &a->flags))) {
		cmd->result = DID_NO_CONNECT << 16;
		cmd->scsi_done(cmd);
		return 0;
	}

	rq = esas2r_alloc_request(a);
	if (unlikely(rq == NULL)) {
		esas2r_debug("esas2r_alloc_request failed");
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	rq->cmd = cmd;
	bufflen = scsi_bufflen(cmd);

	if (likely(bufflen != 0)) {
		if (cmd->sc_data_direction == DMA_TO_DEVICE)
			rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_WRD);
		else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
			rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_RDD);
	}

	memcpy(rq->vrq->scsi.cdb, cmd->cmnd, cmd->cmd_len);
	rq->vrq->scsi.length = cpu_to_le32(bufflen);
	rq->target_id = cmd->device->id;
	rq->vrq->scsi.flags |= cpu_to_le32(cmd->device->lun);
	rq->sense_buf = cmd->sense_buffer;
	rq->sense_len = SCSI_SENSE_BUFFERSIZE;

	esas2r_sgc_init(&sgc, a, rq, NULL);

	sgc.length = bufflen;
	sgc.cur_offset = NULL;

	sgc.cur_sgel = scsi_sglist(cmd);
	sgc.exp_offset = NULL;
	sgc.num_sgel = scsi_dma_map(cmd);
	sgc.sgel_count = 0;

	if (unlikely(sgc.num_sgel < 0)) {
		esas2r_free_request(a, rq);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	sgc.get_phys_addr = (PGETPHYSADDR)get_physaddr_from_sgc;

	if (unlikely(!esas2r_build_sg_list(a, rq, &sgc))) {
		scsi_dma_unmap(cmd);
		esas2r_free_request(a, rq);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	esas2r_debug("start request %p to %d:%d\n", rq, (int)cmd->device->id,
		     (int)cmd->device->lun);

	esas2r_start_request(a, rq);

	return 0;
}
Example 27
static int nsp_queuecommand(struct scsi_cmnd *SCpnt,
			    void (*done)(struct scsi_cmnd *))
{
#ifdef NSP_DEBUG
	/*unsigned int host_id = SCpnt->device->host->this_id;*/
	/*unsigned int base    = SCpnt->device->host->io_port;*/
	unsigned char target = scmd_id(SCpnt);
#endif
	nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata;

	nsp_dbg(NSP_DEBUG_QUEUECOMMAND,
		"SCpnt=0x%p target=%d lun=%d sglist=0x%p bufflen=%d sg_count=%d",
		SCpnt, target, SCpnt->device->lun, scsi_sglist(SCpnt),
		scsi_bufflen(SCpnt), scsi_sg_count(SCpnt));
	//nsp_dbg(NSP_DEBUG_QUEUECOMMAND, "before CurrentSC=0x%p", data->CurrentSC);

	SCpnt->scsi_done	= done;

	if (data->CurrentSC != NULL) {
		nsp_msg(KERN_DEBUG, "CurrentSC != NULL; this can't happen");
		SCpnt->result   = DID_BAD_TARGET << 16;
		nsp_scsi_done(SCpnt);
		return 0;
	}

#if 0
	/* XXX: pcmcia-cs generates SCSI command with "scsi_info" utility.
	        This makes kernel crash when suspending... */
	if (data->ScsiInfo->stop != 0) {
		nsp_msg(KERN_INFO, "suspending device. reject command.");
		SCpnt->result  = DID_BAD_TARGET << 16;
		nsp_scsi_done(SCpnt);
		return SCSI_MLQUEUE_HOST_BUSY;
	}
#endif

	show_command(SCpnt);

	data->CurrentSC		= SCpnt;

	SCpnt->SCp.Status	= CHECK_CONDITION;
	SCpnt->SCp.Message	= 0;
	SCpnt->SCp.have_data_in = IO_UNKNOWN;
	SCpnt->SCp.sent_command = 0;
	SCpnt->SCp.phase	= PH_UNDETERMINED;
	scsi_set_resid(SCpnt, scsi_bufflen(SCpnt));

	/* setup scratch area
	   SCp.ptr		: buffer pointer
	   SCp.this_residual	: buffer length
	   SCp.buffer		: next buffer
	   SCp.buffers_residual : left buffers in list
	   SCp.phase		: current state of the command */
	if (scsi_bufflen(SCpnt)) {
		SCpnt->SCp.buffer	    = scsi_sglist(SCpnt);
		SCpnt->SCp.ptr		    = BUFFER_ADDR;
		SCpnt->SCp.this_residual    = SCpnt->SCp.buffer->length;
		SCpnt->SCp.buffers_residual = scsi_sg_count(SCpnt) - 1;
	} else {
		SCpnt->SCp.ptr		    = NULL;
		SCpnt->SCp.this_residual    = 0;
		SCpnt->SCp.buffer	    = NULL;
		SCpnt->SCp.buffers_residual = 0;
	}

	if (nsphw_start_selection(SCpnt) == FALSE) {
		nsp_dbg(NSP_DEBUG_QUEUECOMMAND, "selection fail");
		SCpnt->result   = DID_BUS_BUSY << 16;
		nsp_scsi_done(SCpnt);
		return 0;
	}


	//nsp_dbg(NSP_DEBUG_QUEUECOMMAND, "out");
#ifdef NSP_DEBUG
	data->CmdId++;
#endif
	return 0;
}
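For context (a sketch, not part of the driver as quoted): during the data phase the SCp scratch cursor set up above is consumed roughly like this, advancing to the next scatterlist entry once the current one is drained (BUFFER_ADDR is the driver's own macro used above):

	if (SCpnt->SCp.this_residual == 0 && SCpnt->SCp.buffers_residual > 0) {
		SCpnt->SCp.buffer = sg_next(SCpnt->SCp.buffer);
		SCpnt->SCp.buffers_residual--;
		SCpnt->SCp.ptr = BUFFER_ADDR;
		SCpnt->SCp.this_residual = SCpnt->SCp.buffer->length;
	}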
Example 28
/*
 * Called by struct target_core_fabric_ops->new_cmd_map()
 *
 * Always called in process context.  A non zero return value
 * here will signal to handle an exception based on the return code.
 */
static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
				struct tcm_loop_cmd, tl_se_cmd);
	struct scsi_cmnd *sc = tl_cmd->sc;
	struct scatterlist *sgl_bidi = NULL;
	u32 sgl_bidi_count = 0;
	int ret;
	/*
	 * Allocate the necessary tasks to complete the received CDB+data
	 */
	ret = transport_generic_allocate_tasks(se_cmd, sc->cmnd);
	if (ret == -ENOMEM) {
		/* Out of Resources */
		return PYX_TRANSPORT_LU_COMM_FAILURE;
	} else if (ret == -EINVAL) {
		/*
		 * Handle case for SAM_STAT_RESERVATION_CONFLICT
		 */
		if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT)
			return PYX_TRANSPORT_RESERVATION_CONFLICT;
		/*
		 * Otherwise, return SAM_STAT_CHECK_CONDITION and return
		 * sense data.
		 */
		return PYX_TRANSPORT_USE_SENSE_REASON;
	}

	/*
	 * For BIDI commands, pass in the extra READ buffer
	 * to transport_generic_map_mem_to_cmd() below..
	 */
	if (se_cmd->t_tasks_bidi) {
		struct scsi_data_buffer *sdb = scsi_in(sc);

		sgl_bidi = sdb->table.sgl;
		sgl_bidi_count = sdb->table.nents;
	}
	/*
	 * Because some userspace code via scsi-generic do not memset their
	 * associated read buffers, go ahead and do that here for type
	 * SCF_SCSI_CONTROL_SG_IO_CDB.  Also note that this is currently
	 * guaranteed to be a single SGL for SCF_SCSI_CONTROL_SG_IO_CDB
	 * by target core in transport_generic_allocate_tasks() ->
	 * transport_generic_cmd_sequencer().
	 */
	if (se_cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB &&
	    se_cmd->data_direction == DMA_FROM_DEVICE) {
		struct scatterlist *sg = scsi_sglist(sc);
		unsigned char *buf = kmap(sg_page(sg)) + sg->offset;

		if (buf != NULL) {
			memset(buf, 0, sg->length);
			kunmap(sg_page(sg));
		}
	}

	/* Tell the core about our preallocated memory */
	ret = transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc),
			scsi_sg_count(sc), sgl_bidi, sgl_bidi_count);
	if (ret < 0)
		return PYX_TRANSPORT_LU_COMM_FAILURE;

	return 0;
}
Example 29
/**
 * Send I/O request to firmware.
 */
static bfa_boolean_t
bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
{
	struct bfa_itnim_s *itnim = ioim->itnim;
	struct bfi_ioim_req_s *m;
	static struct fcp_cmnd_s cmnd_z0 = { 0 };
	struct bfi_sge_s      *sge;
	u32        pgdlen = 0;
	u64 addr;
	struct scatterlist *sg;
	struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;

	/**
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(ioim->bfa, ioim->reqq);
	if (!m) {
		bfa_reqq_wait(ioim->bfa, ioim->reqq,
				  &ioim->iosp->reqq_wait);
		return BFA_FALSE;
	}

	/**
	 * build i/o request message next
	 */
	m->io_tag = bfa_os_htons(ioim->iotag);
	m->rport_hdl = ioim->itnim->rport->fw_handle;
	m->io_timeout = bfa_cb_ioim_get_timeout(ioim->dio);

	/**
	 * build inline IO SG element here
	 */
	sge = &m->sges[0];
	if (ioim->nsges) {
		sg = (struct scatterlist *)scsi_sglist(cmnd);
		addr = bfa_os_sgaddr(sg_dma_address(sg));
		sge->sga = *(union bfi_addr_u *) &addr;
		pgdlen = sg_dma_len(sg);
		sge->sg_len = pgdlen;
		sge->flags = (ioim->nsges > BFI_SGE_INLINE) ?
					BFI_SGE_DATA_CPL : BFI_SGE_DATA_LAST;
		bfa_sge_to_be(sge);
		sge++;
	}

	if (ioim->nsges > BFI_SGE_INLINE) {
		sge->sga = ioim->sgpg->sgpg_pa;
	} else {
		sge->sga.a32.addr_lo = 0;
		sge->sga.a32.addr_hi = 0;
	}
	sge->sg_len = pgdlen;
	sge->flags = BFI_SGE_PGDLEN;
	bfa_sge_to_be(sge);

	/**
	 * set up I/O command parameters
	 */
	bfa_os_assign(m->cmnd, cmnd_z0);
	m->cmnd.lun = bfa_cb_ioim_get_lun(ioim->dio);
	m->cmnd.iodir = bfa_cb_ioim_get_iodir(ioim->dio);
	bfa_os_assign(m->cmnd.cdb,
			*(struct scsi_cdb_s *)bfa_cb_ioim_get_cdb(ioim->dio));
	m->cmnd.fcp_dl = bfa_os_htonl(bfa_cb_ioim_get_size(ioim->dio));

	/**
	 * set up I/O message header
	 */
	switch (m->cmnd.iodir) {
	case FCP_IODIR_READ:
		bfi_h2i_set(m->mh, BFI_MC_IOIM_READ, 0, bfa_lpuid(ioim->bfa));
		bfa_stats(itnim, input_reqs);
		break;
	case FCP_IODIR_WRITE:
		bfi_h2i_set(m->mh, BFI_MC_IOIM_WRITE, 0, bfa_lpuid(ioim->bfa));
		bfa_stats(itnim, output_reqs);
		break;
	case FCP_IODIR_RW:
		bfa_stats(itnim, input_reqs);
		bfa_stats(itnim, output_reqs);
		/* fall through */
	default:
		bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa));
	}
	if (itnim->seq_rec ||
	    (bfa_cb_ioim_get_size(ioim->dio) & (sizeof(u32) - 1)))
		bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa));

#ifdef IOIM_ADVANCED
	m->cmnd.crn = bfa_cb_ioim_get_crn(ioim->dio);
	m->cmnd.priority = bfa_cb_ioim_get_priority(ioim->dio);
	m->cmnd.taskattr = bfa_cb_ioim_get_taskattr(ioim->dio);

	/**
	 * Handle large CDB (>16 bytes).
	 */
	m->cmnd.addl_cdb_len = (bfa_cb_ioim_get_cdblen(ioim->dio) -
					FCP_CMND_CDB_LEN) / sizeof(u32);
	if (m->cmnd.addl_cdb_len) {
		bfa_os_memcpy(&m->cmnd.cdb + 1, (struct scsi_cdb_s *)
				bfa_cb_ioim_get_cdb(ioim->dio) + 1,
				m->cmnd.addl_cdb_len * sizeof(u32));
		fcp_cmnd_fcpdl(&m->cmnd) =
				bfa_os_htonl(bfa_cb_ioim_get_size(ioim->dio));
	}
#endif

	/**
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(ioim->bfa, ioim->reqq);
	return BFA_TRUE;
}
Example 30
static void tcm_loop_submission_work(struct work_struct *work)
{
	struct tcm_loop_cmd *tl_cmd =
		container_of(work, struct tcm_loop_cmd, work);
	struct se_cmd *se_cmd = &tl_cmd->tl_se_cmd;
	struct scsi_cmnd *sc = tl_cmd->sc;
	struct tcm_loop_nexus *tl_nexus;
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;
	struct scatterlist *sgl_bidi = NULL;
	u32 sgl_bidi_count = 0;
	int ret;

	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];

	/*
	 * Ensure that this tl_tpg reference from the incoming sc->device->id
	 * has already been configured via tcm_loop_make_naa_tpg().
	 */
	if (!tl_tpg->tl_hba) {
		set_host_byte(sc, DID_NO_CONNECT);
		goto out_done;
	}

	tl_nexus = tl_hba->tl_nexus;
	if (!tl_nexus) {
		scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus"
				" does not exist\n");
		set_host_byte(sc, DID_ERROR);
		goto out_done;
	}

	transport_init_se_cmd(se_cmd, tl_tpg->tl_se_tpg.se_tpg_tfo,
			tl_nexus->se_sess,
			scsi_bufflen(sc), sc->sc_data_direction,
			tcm_loop_sam_attr(sc), &tl_cmd->tl_sense_buf[0]);

	if (scsi_bidi_cmnd(sc)) {
		struct scsi_data_buffer *sdb = scsi_in(sc);

		sgl_bidi = sdb->table.sgl;
		sgl_bidi_count = sdb->table.nents;
		se_cmd->se_cmd_flags |= SCF_BIDI;

	}

	if (transport_lookup_cmd_lun(se_cmd, tl_cmd->sc->device->lun) < 0) {
		kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
		set_host_byte(sc, DID_NO_CONNECT);
		goto out_done;
	}

	/*
	 * Because some userspace code via scsi-generic do not memset their
	 * associated read buffers, go ahead and do that here for type
	 * SCF_SCSI_CONTROL_SG_IO_CDB.  Also note that this is currently
	 * guaranteed to be a single SGL for SCF_SCSI_CONTROL_SG_IO_CDB
	 * by target core in target_setup_cmd_from_cdb() ->
	 * transport_generic_cmd_sequencer().
	 */
	if (se_cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB &&
	    se_cmd->data_direction == DMA_FROM_DEVICE) {
		struct scatterlist *sg = scsi_sglist(sc);
		unsigned char *buf = kmap(sg_page(sg)) + sg->offset;

		if (buf != NULL) {
			memset(buf, 0, sg->length);
			kunmap(sg_page(sg));
		}
	}

	ret = target_setup_cmd_from_cdb(se_cmd, sc->cmnd);
	if (ret == -ENOMEM) {
		transport_send_check_condition_and_sense(se_cmd,
				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
		transport_generic_free_cmd(se_cmd, 0);
		return;
	} else if (ret < 0) {
		if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT)
			tcm_loop_queue_status(se_cmd);
		else
			transport_send_check_condition_and_sense(se_cmd,
					se_cmd->scsi_sense_reason, 0);
		transport_generic_free_cmd(se_cmd, 0);
		return;
	}

	ret = transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc),
			scsi_sg_count(sc), sgl_bidi, sgl_bidi_count);
	if (ret) {
		transport_send_check_condition_and_sense(se_cmd,
					se_cmd->scsi_sense_reason, 0);
		transport_generic_free_cmd(se_cmd, 0);
		return;
	}
	transport_handle_cdb_direct(se_cmd);
	return;

out_done:
	sc->scsi_done(sc);
	return;
}