Example #1
static void
simscsi_sg_readwrite (struct scsi_cmnd *sc, int mode, unsigned long offset)
{
	int i;
	struct scatterlist *sl;
	struct disk_stat stat;
	struct disk_req req;

	stat.fd = desc[sc->device->id];

	scsi_for_each_sg(sc, sl, scsi_sg_count(sc), i) {
		req.addr = __pa(sg_virt(sl));
		req.len  = sl->length;
		if (DBG)
			printk("simscsi_sg_%s @ %lx (off %lx) use_sg=%d len=%d\n",
			       mode == SSC_READ ? "read":"write", req.addr, offset,
			       scsi_sg_count(sc) - i, sl->length);
		ia64_ssc(stat.fd, 1, __pa(&req), offset, mode);
		ia64_ssc(__pa(&stat), 0, 0, 0, SSC_WAIT_COMPLETION);

		/* should not happen in our case */
		if (stat.count != req.len) {
			sc->result = DID_ERROR << 16;
			return;
		}
		offset +=  sl->length;
	}
}
Example #2
static int srp_direct_data(struct scsi_cmnd *sc, struct srp_direct_buf *md,
                           enum dma_data_direction dir, srp_rdma_t rdma_io,
                           int dma_map, int ext_desc)
{
    struct iu_entry *iue = NULL;
    struct scatterlist *sg = NULL;
    int err, nsg = 0, len;

    if (dma_map) {
        iue = (struct iu_entry *) sc->SCp.ptr;
        sg = scsi_sglist(sc);

        dprintk("%p %u %u %d\n", iue, scsi_bufflen(sc),
                md->len, scsi_sg_count(sc));

        nsg = dma_map_sg(iue->target->dev, sg, scsi_sg_count(sc),
                         DMA_BIDIRECTIONAL);
        if (!nsg) {
            printk("fail to map %p %d\n", iue, scsi_sg_count(sc));
            return 0;
        }
        len = min(scsi_bufflen(sc), md->len);
    } else
        len = md->len;

    err = rdma_io(sc, sg, nsg, md, 1, dir, len);

    if (dma_map)
        dma_unmap_sg(iue->target->dev, sg, nsg, DMA_BIDIRECTIONAL);

    return err;
}
Example #3
/**
 * scsi_dma_unmap - unmap command's sg lists mapped by scsi_dma_map
 * @cmd:	scsi command
 */
void scsi_dma_unmap(struct scsi_cmnd *cmd)
{
	if (scsi_sg_count(cmd)) {
		struct device *dev = cmd->device->host->shost_gendev.parent;

		dma_unmap_sg(dev, scsi_sglist(cmd), scsi_sg_count(cmd),
			     cmd->sc_data_direction);
	}
}
Example #4
/**
 * iser_send_command - send command PDU
 */
int iser_send_command(struct iscsi_conn *conn,
		      struct iscsi_task *task)
{
	struct iscsi_iser_conn *iser_conn = conn->dd_data;
	struct iscsi_iser_task *iser_task = task->dd_data;
	unsigned long edtl;
	int err;
	struct iser_data_buf *data_buf;
	struct iscsi_cmd *hdr =  (struct iscsi_cmd *)task->hdr;
	struct scsi_cmnd *sc  =  task->sc;
	struct iser_tx_desc *tx_desc = &iser_task->desc;

	edtl = ntohl(hdr->data_length);

	/* build the tx desc regd header and add it to the tx desc dto */
	tx_desc->type = ISCSI_TX_SCSI_COMMAND;
	iser_create_send_desc(iser_conn->ib_conn, tx_desc);

	if (hdr->flags & ISCSI_FLAG_CMD_READ)
		data_buf = &iser_task->data[ISER_DIR_IN];
	else
		data_buf = &iser_task->data[ISER_DIR_OUT];

	if (scsi_sg_count(sc)) { /* using a scatter list */
		data_buf->buf  = scsi_sglist(sc);
		data_buf->size = scsi_sg_count(sc);
	}

	data_buf->data_len = scsi_bufflen(sc);

	if (hdr->flags & ISCSI_FLAG_CMD_READ) {
		err = iser_prepare_read_cmd(task, edtl);
		if (err)
			goto send_command_error;
	}
	if (hdr->flags & ISCSI_FLAG_CMD_WRITE) {
		err = iser_prepare_write_cmd(task,
					     task->imm_count,
				             task->imm_count +
					     task->unsol_r2t.data_length,
					     edtl);
		if (err)
			goto send_command_error;
	}

	iser_task->status = ISER_TASK_STATUS_STARTED;

	err = iser_post_send(iser_conn->ib_conn, tx_desc);
	if (!err)
		return 0;

send_command_error:
	iser_err("conn %p failed task->itt %d err %d\n",conn, task->itt, err);
	return err;
}
Example #5
/**
 * scsi_dma_map - perform DMA mapping against command's sg lists
 * @cmd:	scsi command
 *
 * Returns the number of sg lists actually used, zero if the sg list
 * is NULL, or -ENOMEM if the mapping failed.
 */
int scsi_dma_map(struct scsi_cmnd *cmd)
{
	int nseg = 0;

	if (scsi_sg_count(cmd)) {
		struct device *dev = cmd->device->host->shost_gendev.parent;

		nseg = dma_map_sg(dev, scsi_sglist(cmd), scsi_sg_count(cmd),
				  cmd->sc_data_direction);
		if (unlikely(!nseg))
			return -ENOMEM;
	}
	return nseg;
}
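
Examples #3 and #5 are the two halves of the mid-layer DMA contract: a low-level driver calls scsi_dma_map() once per command, programs its hardware scatter/gather descriptors from the mapped list, and calls scsi_dma_unmap() exactly once on completion or on any later failure. A minimal sketch of the consuming side follows; struct example_sg_desc and example_map_command are hypothetical names for illustration, not taken from any driver above.

#include <linux/scatterlist.h>
#include <linux/types.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

/* Hypothetical hardware S/G descriptor layout. */
struct example_sg_desc {
	__le64 addr;
	__le32 len;
};

static int example_map_command(struct scsi_cmnd *cmd,
			       struct example_sg_desc *desc)
{
	struct scatterlist *sg;
	int i, nseg;

	nseg = scsi_dma_map(cmd);	/* 0: no data; < 0: mapping failed */
	if (nseg < 0)
		return SCSI_MLQUEUE_HOST_BUSY;

	/* One hardware descriptor per DMA-mapped segment. */
	scsi_for_each_sg(cmd, sg, nseg, i) {
		desc[i].addr = cpu_to_le64(sg_dma_address(sg));
		desc[i].len  = cpu_to_le32(sg_dma_len(sg));
	}
	return 0;
}

Note that the loop bound is the value returned by scsi_dma_map(), not scsi_sg_count(): an IOMMU may coalesce entries, so the mapped count can be smaller than the original segment count.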
Example #6
/*
 * copy data from device into scatter/gather buffer
 */
static int fill_from_dev_buffer(struct scsi_cmnd *cmd, const void *buf)
{
	int k, req_len, act_len, len, active;
	void *kaddr;
	struct scatterlist *sgpnt;
	unsigned int buflen;

	buflen = scsi_bufflen(cmd);
	if (!buflen)
		return 0;

	if (!scsi_sglist(cmd))
		return -1;

	active = 1;
	req_len = act_len = 0;
	scsi_for_each_sg(cmd, sgpnt, scsi_sg_count(cmd), k) {
		if (active) {
			kaddr = kmap_atomic(sg_page(sgpnt), KM_IRQ0);
			len = sgpnt->length;
			if ((req_len + len) > buflen) {
				active = 0;
				len = buflen - req_len;
			}
			memcpy(kaddr + sgpnt->offset, buf + req_len, len);
			flush_kernel_dcache_page(sg_page(sgpnt));
			kunmap_atomic(kaddr, KM_IRQ0);
			act_len += len;
		}
		req_len += sgpnt->length;
	}
	scsi_set_resid(cmd, req_len - act_len);
	return 0;
}
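
The kmap_atomic()/memcpy() walk above predates the generic scatterlist helpers; on kernels that provide sg_copy_from_buffer() (lib/scatterlist.c), the same copy collapses to a few lines. A sketch, assuming the scatterlist exactly covers scsi_bufflen() so the residual can be computed against the buffer length rather than the summed segment lengths, and assuming a kernel recent enough that the helper takes a const buffer (older ones take void * and would need a cast):

#include <linux/scatterlist.h>
#include <scsi/scsi_cmnd.h>

/* Sketch: fill_from_dev_buffer() via the generic scatterlist helper. */
static int fill_from_dev_buffer_sg(struct scsi_cmnd *cmd, const void *buf)
{
	unsigned int buflen = scsi_bufflen(cmd);
	size_t copied;

	if (!buflen)
		return 0;
	if (!scsi_sglist(cmd))
		return -1;

	copied = sg_copy_from_buffer(scsi_sglist(cmd), scsi_sg_count(cmd),
				     buf, buflen);
	scsi_set_resid(cmd, buflen - copied);
	return 0;
}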
Example #7
/*
 * copy data from scatter/gather into device's buffer
 */
static int fetch_to_dev_buffer(struct scsi_cmnd *cmd, void *buf)
{
	int k, req_len, len, fin;
	void *kaddr;
	struct scatterlist *sgpnt;
	unsigned int buflen;

	buflen = scsi_bufflen(cmd);
	if (!buflen)
		return 0;

	if (!scsi_sglist(cmd))
		return -1;

	req_len = fin = 0;
	scsi_for_each_sg(cmd, sgpnt, scsi_sg_count(cmd), k) {
		kaddr = kmap_atomic(sg_page(sgpnt), KM_IRQ0);
		len = sgpnt->length;
		if ((req_len + len) > buflen) {
			len = buflen - req_len;
			fin = 1;
		}
		memcpy(buf + req_len, kaddr + sgpnt->offset, len);
		kunmap_atomic(kaddr, KM_IRQ0);
		if (fin)
			return req_len + len;
		req_len += sgpnt->length;
	}
	return req_len;
}
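
sg_copy_to_buffer() is the outbound mirror of the previous helper: it walks the scatterlist into a linear buffer and returns the byte count that fetch_to_dev_buffer() accumulates by hand. A sketch under the same assumptions as the previous one:

/* Sketch: fetch_to_dev_buffer() via sg_copy_to_buffer(). */
static int fetch_to_dev_buffer_sg(struct scsi_cmnd *cmd, void *buf)
{
	unsigned int buflen = scsi_bufflen(cmd);

	if (!buflen)
		return 0;
	if (!scsi_sglist(cmd))
		return -1;

	return sg_copy_to_buffer(scsi_sglist(cmd), scsi_sg_count(cmd),
				 buf, buflen);
}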
Example #8
/**************************************************************************
 *   qla2x00_print_scsi_cmd
 *	 Dumps out info about the scsi cmd and srb.
 *   Input
 *	 cmd : struct scsi_cmnd
 **************************************************************************/
void
qla2x00_print_scsi_cmd(struct scsi_cmnd * cmd)
{
	int i;
	struct scsi_qla_host *ha;
	srb_t *sp;

	ha = shost_priv(cmd->device->host);

	sp = (srb_t *) cmd->SCp.ptr;
	printk("SCSI Command @=0x%p, Handle=0x%p\n", cmd, cmd->host_scribble);
	printk("  chan=0x%02x, target=0x%02x, lun=0x%02x, cmd_len=0x%02x\n",
	    cmd->device->channel, cmd->device->id, cmd->device->lun,
	    cmd->cmd_len);
	printk(" CDB: ");
	for (i = 0; i < cmd->cmd_len; i++) {
		printk("0x%02x ", cmd->cmnd[i]);
	}
	printk("\n  seg_cnt=%d, allowed=%d, retries=%d\n",
	       scsi_sg_count(cmd), cmd->allowed, cmd->retries);
	printk("  request buffer=0x%p, request buffer len=0x%x\n",
	       scsi_sglist(cmd), scsi_bufflen(cmd));
	printk("  tag=%d, transfersize=0x%x\n",
	    cmd->tag, cmd->transfersize);
	printk("  serial_number=%lx, SP=%p\n", cmd->serial_number, sp);
	printk("  data direction=%d\n", cmd->sc_data_direction);

	if (!sp)
		return;

	printk("  sp flags=0x%x\n", sp->flags);
}
Example #9
static struct sas_task *sas_create_task(struct scsi_cmnd *cmd,
					       struct domain_device *dev,
					       gfp_t gfp_flags)
{
	struct sas_task *task = sas_alloc_task(gfp_flags);
	struct scsi_lun lun;

	if (!task)
		return NULL;

	task->uldd_task = cmd;
	ASSIGN_SAS_TASK(cmd, task);

	task->dev = dev;
	task->task_proto = task->dev->tproto; /* BUG_ON(!SSP) */

	task->ssp_task.retry_count = 1;
	int_to_scsilun(cmd->device->lun, &lun);
	memcpy(task->ssp_task.LUN, &lun.scsi_lun, 8);
	task->ssp_task.task_attr = TASK_ATTR_SIMPLE;
	memcpy(task->ssp_task.cdb, cmd->cmnd, 16);

	task->scatter = scsi_sglist(cmd);
	task->num_scatter = scsi_sg_count(cmd);
	task->total_xfer_len = scsi_bufflen(cmd);
	task->data_dir = cmd->sc_data_direction;

	task->task_done = sas_scsi_task_done;

	return task;
}
Example #10
static void pvscsi_unmap_buffers(const struct pvscsi_adapter *adapter,
				 struct pvscsi_ctx *ctx)
{
	struct scsi_cmnd *cmd;
	unsigned bufflen;

	cmd = ctx->cmd;
	bufflen = scsi_bufflen(cmd);

	if (bufflen != 0) {
		unsigned count = scsi_sg_count(cmd);

		if (count != 0) {
			scsi_dma_unmap(cmd);
			if (ctx->sglPA) {
				pci_unmap_single(adapter->dev, ctx->sglPA,
						 SGL_SIZE, PCI_DMA_TODEVICE);
				ctx->sglPA = 0;
			}
		} else
			pci_unmap_single(adapter->dev, ctx->dataPA, bufflen,
					 cmd->sc_data_direction);
	}
	if (cmd->sense_buffer)
		pci_unmap_single(adapter->dev, ctx->sensePA,
				 SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE);
}
Example #11
static int mac53c94_queue_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
	struct fsc_state *state;

#if 0
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		int i;
		printk(KERN_DEBUG "mac53c94_queue %p: command is", cmd);
		for (i = 0; i < cmd->cmd_len; ++i)
			printk(KERN_CONT " %.2x", cmd->cmnd[i]);
		printk(KERN_CONT "\n");
		printk(KERN_DEBUG "use_sg=%d request_bufflen=%d request_buffer=%p\n",
		       scsi_sg_count(cmd), scsi_bufflen(cmd), scsi_sglist(cmd));
	}
#endif

	cmd->scsi_done = done;
	cmd->host_scribble = NULL;

	state = (struct fsc_state *) cmd->device->host->hostdata;

	if (state->request_q == NULL)
		state->request_q = cmd;
	else
		state->request_qtail->host_scribble = (void *) cmd;
	state->request_qtail = cmd;

	if (state->phase == idle)
		mac53c94_start(state);

	return 0;
}
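
mac53c94_queue_lck() chains pending commands through cmd->host_scribble, treating it as an intrusive next pointer. The excerpt does not show the consuming side; a hypothetical dequeue that follows the same convention (example_dequeue is an invented name):

/* Sketch: pop the head of the singly linked request queue built above. */
static struct scsi_cmnd *example_dequeue(struct fsc_state *state)
{
	struct scsi_cmnd *cmd = state->request_q;

	if (cmd) {
		state->request_q = (struct scsi_cmnd *) cmd->host_scribble;
		cmd->host_scribble = NULL;
	}
	return cmd;
}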
Example #12
static void tcm_loop_submission_work(struct work_struct *work)
{
	struct tcm_loop_cmd *tl_cmd =
		container_of(work, struct tcm_loop_cmd, work);
	struct se_cmd *se_cmd = &tl_cmd->tl_se_cmd;
	struct scsi_cmnd *sc = tl_cmd->sc;
	struct tcm_loop_nexus *tl_nexus;
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;
	struct scatterlist *sgl_bidi = NULL;
	u32 sgl_bidi_count = 0;
	int rc;

	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];

	/*
	 * Ensure that this tl_tpg reference from the incoming sc->device->id
	 * has already been configured via tcm_loop_make_naa_tpg().
	 */
	if (!tl_tpg->tl_hba) {
		set_host_byte(sc, DID_NO_CONNECT);
		goto out_done;
	}
	if (tl_tpg->tl_transport_status == TCM_TRANSPORT_OFFLINE) {
		set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
		goto out_done;
	}
	tl_nexus = tl_hba->tl_nexus;
	if (!tl_nexus) {
		scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus"
				" does not exist\n");
		set_host_byte(sc, DID_ERROR);
		goto out_done;
	}
	if (scsi_bidi_cmnd(sc)) {
		struct scsi_data_buffer *sdb = scsi_in(sc);

		sgl_bidi = sdb->table.sgl;
		sgl_bidi_count = sdb->table.nents;
		se_cmd->se_cmd_flags |= SCF_BIDI;

	}
	rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd,
			&tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun,
			scsi_bufflen(sc), tcm_loop_sam_attr(sc),
			sc->sc_data_direction, 0,
			scsi_sglist(sc), scsi_sg_count(sc),
			sgl_bidi, sgl_bidi_count);
	if (rc < 0) {
		set_host_byte(sc, DID_NO_CONNECT);
		goto out_done;
	}
	return;

out_done:
	sc->scsi_done(sc);
	return;
}
Example #13
/**
 * zfcp_qdio_sbals_from_scsicmnd - fill SBALs from scsi command
 * @fsf_req: request to be processed
 * @sbtype: SBALE flags
 * @scsi_cmnd: either scatter-gather list or buffer contained herein is used
 *	to fill SBALs
 */
int
zfcp_qdio_sbals_from_scsicmnd(struct zfcp_fsf_req *fsf_req,
			      unsigned long sbtype, struct scsi_cmnd *scsi_cmnd)
{
	return zfcp_qdio_sbals_from_sg(fsf_req,	sbtype, scsi_sglist(scsi_cmnd),
				       scsi_sg_count(scsi_cmnd),
				       ZFCP_MAX_SBALS_PER_REQ);
}
Example #14
//----- usb_stor_bulk_srb() ---------------------
int usb_stor_bulk_srb(struct us_data* us, unsigned int pipe, struct scsi_cmnd* srb)
{
	unsigned int partial;
	int result = usb_stor_bulk_transfer_sglist(us, pipe, scsi_sglist(srb),
				      scsi_sg_count(srb), scsi_bufflen(srb),
				      &partial);

	scsi_set_resid(srb, scsi_bufflen(srb) - partial);
	return result;
}
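
usb_stor_bulk_srb() (repeated, with vendor debug noise, in Example #19) shows the standard residual bookkeeping: whatever the sglist transfer could not move is recorded with scsi_set_resid(), and upper layers read it back with scsi_get_resid(). A hypothetical completion-side underrun check:

/* Sketch: report a data underrun recorded via scsi_set_resid(). */
static void example_check_underrun(struct scsi_cmnd *cmd)
{
	int resid = scsi_get_resid(cmd);

	if (resid)
		scmd_printk(KERN_INFO, cmd,
			    "underrun: %d of %u bytes not transferred\n",
			    resid, scsi_bufflen(cmd));
}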
Example #15
/**
 * scsi_dma_map - perform DMA mapping against command's sg lists
 * @cmd:	scsi command
 *
 * Returns the number of sg lists actually used, zero if the sg list
 * is NULL, or -ENOMEM if the mapping failed.
 */
int scsi_dma_map(struct scsi_cmnd *cmd)
{
	int nseg = 0;
	struct dma_attrs *attrs = &scsi_direct_attrs;

	if (scsi_sg_count(cmd)) {
		struct device *dev = cmd->device->host->dma_dev;

		if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
			attrs = (cmd->request->cmd_flags & REQ_KERNEL) ?
				&scsi_direct_attrs : NULL;
		nseg = dma_map_sg_attr(dev, scsi_sglist(cmd),
				scsi_sg_count(cmd),
				cmd->sc_data_direction, attrs);
		if (unlikely(!nseg))
			return -ENOMEM;
	}
	return nseg;
}
Example #16
static void sd_zbc_report_zones_complete(struct scsi_cmnd *scmd,
					 unsigned int good_bytes)
{
	struct request *rq = scmd->request;
	struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
	struct sg_mapping_iter miter;
	struct blk_zone_report_hdr hdr;
	struct blk_zone zone;
	unsigned int offset, bytes = 0;
	unsigned long flags;
	u8 *buf;

	if (good_bytes < 64)
		return;

	memset(&hdr, 0, sizeof(struct blk_zone_report_hdr));

	sg_miter_start(&miter, scsi_sglist(scmd), scsi_sg_count(scmd),
		       SG_MITER_TO_SG | SG_MITER_ATOMIC);

	local_irq_save(flags);
	while (sg_miter_next(&miter) && bytes < good_bytes) {

		buf = miter.addr;
		offset = 0;

		if (bytes == 0) {
			/* Set the report header */
			hdr.nr_zones = min_t(unsigned int,
					 (good_bytes - 64) / 64,
					 get_unaligned_be32(&buf[0]) / 64);
			memcpy(buf, &hdr, sizeof(struct blk_zone_report_hdr));
			offset += 64;
			bytes += 64;
		}

		/* Parse zone descriptors */
		while (offset < miter.length && hdr.nr_zones) {
			WARN_ON(offset > miter.length);
			buf = miter.addr + offset;
			sd_zbc_parse_report(sdkp, buf, &zone);
			memcpy(buf, &zone, sizeof(struct blk_zone));
			offset += 64;
			bytes += 64;
			hdr.nr_zones--;
		}

		if (!hdr.nr_zones)
			break;

	}
	sg_miter_stop(&miter);
	local_irq_restore(flags);
}
Example #17
static void aha1542_free_cmd(struct scsi_cmnd *cmd)
{
	struct aha1542_cmd *acmd = scsi_cmd_priv(cmd);
	struct device *dev = cmd->device->host->dma_dev;
	size_t len = scsi_sg_count(cmd) * sizeof(struct chain);

	if (acmd->chain) {
		dma_unmap_single(dev, acmd->chain_handle, len, DMA_TO_DEVICE);
		kfree(acmd->chain);
	}

	acmd->chain = NULL;
	scsi_dma_unmap(cmd);
}
Example #18
static void mts_do_sg (struct urb* transfer)
{
	struct scatterlist * sg;
	int status = transfer->status;
	MTS_INT_INIT();

	MTS_DEBUG("Processing fragment %d of %d\n", context->fragment,
	                                          scsi_sg_count(context->srb));

	if (unlikely(status)) {
		context->srb->result = (status == -ENOENT ? DID_ABORT : DID_ERROR) << 16;
		mts_transfer_cleanup(transfer);
		return;
	}

	sg = scsi_sglist(context->srb);
	context->fragment++;
	mts_int_submit_urb(transfer,
			   context->data_pipe,
			   sg_virt(&sg[context->fragment]),
			   sg[context->fragment].length,
			   context->fragment + 1 == scsi_sg_count(context->srb) ?
			   mts_data_done : mts_do_sg);
}
Example #19
/*
 * Common used function. Transfer a complete command
 * via usb_stor_bulk_transfer_sglist() above. Set cmnd resid
 */
int usb_stor_bulk_srb(struct us_data* us, unsigned int pipe,
		      struct scsi_cmnd* srb)
{
	unsigned int partial;
	int result;

	/* ALPS00445134: extra debug message for CR debugging */
	US_DEBUGP("%s, line %d:\n", __func__, __LINE__);

	result = usb_stor_bulk_transfer_sglist(us, pipe, scsi_sglist(srb),
				      scsi_sg_count(srb), scsi_bufflen(srb),
				      &partial);

	scsi_set_resid(srb, scsi_bufflen(srb) - partial);
	return result;
}
Example #20
/*
 * Called by struct target_core_fabric_ops->new_cmd_map()
 *
 * Always called in process context.  A non zero return value
 * here will signal to handle an exception based on the return code.
 */
static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
{
    struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
                                  struct tcm_loop_cmd, tl_se_cmd);
    struct scsi_cmnd *sc = tl_cmd->sc;
    struct scatterlist *sgl_bidi = NULL;
    u32 sgl_bidi_count = 0;
    int ret;
    /*
     * Allocate the necessary tasks to complete the received CDB+data
     */
    ret = transport_generic_allocate_tasks(se_cmd, sc->cmnd);
    if (ret != 0)
        return ret;
    /*
     * For BIDI commands, pass in the extra READ buffer
     * to transport_generic_map_mem_to_cmd() below..
     */
    if (se_cmd->se_cmd_flags & SCF_BIDI) {
        struct scsi_data_buffer *sdb = scsi_in(sc);

        sgl_bidi = sdb->table.sgl;
        sgl_bidi_count = sdb->table.nents;
    }
    /*
     * Because some userspace code via scsi-generic do not memset their
     * associated read buffers, go ahead and do that here for type
     * SCF_SCSI_CONTROL_SG_IO_CDB.  Also note that this is currently
     * guaranteed to be a single SGL for SCF_SCSI_CONTROL_SG_IO_CDB
     * by target core in transport_generic_allocate_tasks() ->
     * transport_generic_cmd_sequencer().
     */
    if (se_cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB &&
            se_cmd->data_direction == DMA_FROM_DEVICE) {
        struct scatterlist *sg = scsi_sglist(sc);
        unsigned char *buf = kmap(sg_page(sg)) + sg->offset;

        if (buf != NULL) {
            memset(buf, 0, sg->length);
            kunmap(sg_page(sg));
        }
    }

    /* Tell the core about our preallocated memory */
    return transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc),
                                            scsi_sg_count(sc), sgl_bidi, sgl_bidi_count);
}
Example #21
/*
 * Called by struct target_core_fabric_ops->new_cmd_map()
 *
 * Always called in process context.  A non zero return value
 * here will signal to handle an exception based on the return code.
 */
static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
				struct tcm_loop_cmd, tl_se_cmd);
	struct scsi_cmnd *sc = tl_cmd->sc;
	struct scatterlist *sgl_bidi = NULL;
	u32 sgl_bidi_count = 0;
	int ret;
	/*
	 * Allocate the necessary tasks to complete the received CDB+data
	 */
	ret = transport_generic_allocate_tasks(se_cmd, sc->cmnd);
	if (ret == -ENOMEM) {
		/* Out of Resources */
		return PYX_TRANSPORT_LU_COMM_FAILURE;
	} else if (ret == -EINVAL) {
		/*
		 * Handle case for SAM_STAT_RESERVATION_CONFLICT
		 */
		if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT)
			return PYX_TRANSPORT_RESERVATION_CONFLICT;
		/*
		 * Otherwise, return SAM_STAT_CHECK_CONDITION and return
		 * sense data.
		 */
		return PYX_TRANSPORT_USE_SENSE_REASON;
	}

	/*
	 * For BIDI commands, pass in the extra READ buffer
	 * to transport_generic_map_mem_to_cmd() below..
	 */
	if (se_cmd->t_tasks_bidi) {
		struct scsi_data_buffer *sdb = scsi_in(sc);

		sgl_bidi = sdb->table.sgl;
		sgl_bidi_count = sdb->table.nents;
	}

	/* Tell the core about our preallocated memory */
	ret = transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc),
			scsi_sg_count(sc), sgl_bidi, sgl_bidi_count);
	if (ret < 0)
		return PYX_TRANSPORT_LU_COMM_FAILURE;

	return 0;
}
Example #22
static void mts_command_done( struct urb *transfer )
/* Interrupt context! */
{
	int status = transfer->status;
	MTS_INT_INIT();

	if ( unlikely(status) ) {
	        if (status == -ENOENT) {
		        /* We are being killed */
			MTS_DEBUG_GOT_HERE();
			context->srb->result = DID_ABORT<<16;
                } else {
		        /* A genuine error has occurred */
			MTS_DEBUG_GOT_HERE();

		        context->srb->result = DID_ERROR<<16;
                }
		mts_transfer_cleanup(transfer);

		return;
	}

	if (context->srb->cmnd[0] == REQUEST_SENSE) {
		mts_int_submit_urb(transfer,
				   context->data_pipe,
				   context->srb->sense_buffer,
				   context->data_length,
				   mts_data_done);
	} else { if ( context->data ) {
			mts_int_submit_urb(transfer,
					   context->data_pipe,
					   context->data,
					   context->data_length,
					   scsi_sg_count(context->srb) > 1 ?
					           mts_do_sg : mts_data_done);
		} else {
			mts_get_status(transfer);
		}
	}

	return;
}
Example #23
static void map_dma(unsigned int i, unsigned int j) {
    unsigned int data_len = 0;
    unsigned int k, pci_dir;
    int count;
    struct scatterlist *sg;
    struct mscp *cpp;
    struct scsi_cmnd *SCpnt;

    cpp = &HD(j)->cp[i];
    SCpnt = cpp->SCpnt;
    pci_dir = SCpnt->sc_data_direction;

    if (SCpnt->sense_buffer)
        cpp->sense_addr = H2DEV(pci_map_single(HD(j)->pdev, SCpnt->sense_buffer,
                                               SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE));

    cpp->sense_len = SCSI_SENSE_BUFFERSIZE;

    if (scsi_bufflen(SCpnt)) {
        count = scsi_dma_map(SCpnt);
        BUG_ON(count < 0);

        scsi_for_each_sg(SCpnt, sg, count, k) {
            cpp->sglist[k].address = H2DEV(sg_dma_address(sg));
            cpp->sglist[k].num_bytes = H2DEV(sg_dma_len(sg));
            data_len += sg->length;
        }

        cpp->sg = TRUE;
        cpp->use_sg = scsi_sg_count(SCpnt);
        cpp->data_address =
            H2DEV(pci_map_single(HD(j)->pdev, cpp->sglist,
                                 cpp->use_sg * sizeof(struct sg_list),
                                 pci_dir));
        cpp->data_len = H2DEV(data_len);

    } else {
Example #24
static void pvscsi_map_buffers(struct pvscsi_adapter *adapter,
			       struct pvscsi_ctx *ctx, struct scsi_cmnd *cmd,
			       struct PVSCSIRingReqDesc *e)
{
	unsigned count;
	unsigned bufflen = scsi_bufflen(cmd);
	struct scatterlist *sg;

	e->dataLen = bufflen;
	e->dataAddr = 0;
	if (bufflen == 0)
		return;

	sg = scsi_sglist(cmd);
	count = scsi_sg_count(cmd);
	if (count != 0) {
		int segs = scsi_dma_map(cmd);
		if (segs > 1) {
			pvscsi_create_sg(ctx, sg, segs);

			e->flags |= PVSCSI_FLAG_CMD_WITH_SG_LIST;
			ctx->sglPA = pci_map_single(adapter->dev, ctx->sgl,
						    SGL_SIZE, PCI_DMA_TODEVICE);
			e->dataAddr = ctx->sglPA;
		} else
			e->dataAddr = sg_dma_address(sg);
	} else {
		/*
		 * In case there is no S/G list, scsi_sglist points
		 * directly to the buffer.
		 */
		ctx->dataPA = pci_map_single(adapter->dev, sg, bufflen,
					     cmd->sc_data_direction);
		e->dataAddr = ctx->dataPA;
	}
}
Example #25
static int idescsi_map_sg(ide_drive_t *drive, struct ide_atapi_pc *pc)
{
	ide_hwif_t *hwif = drive->hwif;
	struct scatterlist *sg, *scsi_sg;
	int segments;

	if (!pc->req_xfer || pc->req_xfer % 1024)
		return 1;

	if (idescsi_set_direction(pc))
		return 1;

	sg = hwif->sg_table;
	scsi_sg = scsi_sglist(pc->scsi_cmd);
	segments = scsi_sg_count(pc->scsi_cmd);

	if (segments > hwif->sg_max_nents)
		return 1;

	hwif->sg_nents = segments;
	memcpy(sg, scsi_sg, sizeof(*sg) * segments);

	return 0;
}
Example #26
static void mac53c94_interrupt(int irq, void *dev_id)
{
	struct fsc_state *state = (struct fsc_state *) dev_id;
	struct mac53c94_regs __iomem *regs = state->regs;
	struct dbdma_regs __iomem *dma = state->dma;
	struct scsi_cmnd *cmd = state->current_req;
	int nb, stat, seq, intr;
	static int mac53c94_errors;

	/*
	 * Apparently, reading the interrupt register unlatches
	 * the status and sequence step registers.
	 */
	seq = readb(&regs->seqstep);
	stat = readb(&regs->status);
	intr = readb(&regs->interrupt);

#if 0
	printk(KERN_DEBUG "mac53c94_intr, intr=%x stat=%x seq=%x phase=%d\n",
	       intr, stat, seq, state->phase);
#endif

	if (intr & INTR_RESET) {
		/* SCSI bus was reset */
		printk(KERN_INFO "external SCSI bus reset detected\n");
		writeb(CMD_NOP, &regs->command);
		writel(RUN << 16, &dma->control);	/* stop dma */
		cmd_done(state, DID_RESET << 16);
		return;
	}
	if (intr & INTR_ILL_CMD) {
		printk(KERN_ERR "53c94: invalid cmd, intr=%x stat=%x seq=%x phase=%d\n",
		       intr, stat, seq, state->phase);
		cmd_done(state, DID_ERROR << 16);
		return;
	}
	if (stat & STAT_ERROR) {
#if 0
		/* XXX these seem to be harmless? */
		printk("53c94: bad error, intr=%x stat=%x seq=%x phase=%d\n",
		       intr, stat, seq, state->phase);
#endif
		++mac53c94_errors;
		writeb(CMD_NOP + CMD_DMA_MODE, &regs->command);
	}
	if (cmd == 0) {
		printk(KERN_DEBUG "53c94: interrupt with no command active?\n");
		return;
	}
	if (stat & STAT_PARITY) {
		printk(KERN_ERR "mac53c94: parity error\n");
		cmd_done(state, DID_PARITY << 16);
		return;
	}
	switch (state->phase) {
	case selecting:
		if (intr & INTR_DISCONNECT) {
			/* selection timed out */
			cmd_done(state, DID_BAD_TARGET << 16);
			return;
		}
		if (intr != INTR_BUS_SERV + INTR_DONE) {
			printk(KERN_DEBUG "got intr %x during selection\n", intr);
			cmd_done(state, DID_ERROR << 16);
			return;
		}
		if ((seq & SS_MASK) != SS_DONE) {
			printk(KERN_DEBUG "seq step %x after command\n", seq);
			cmd_done(state, DID_ERROR << 16);
			return;
		}
		writeb(CMD_NOP, &regs->command);
		/* set DMA controller going if any data to transfer */
		if ((stat & (STAT_MSG|STAT_CD)) == 0
		    && (scsi_sg_count(cmd) > 0 || scsi_bufflen(cmd))) {
			nb = cmd->SCp.this_residual;
			if (nb > 0xfff0)
				nb = 0xfff0;
			cmd->SCp.this_residual -= nb;
			writeb(nb, &regs->count_lo);
			writeb(nb >> 8, &regs->count_mid);
			writeb(CMD_DMA_MODE + CMD_NOP, &regs->command);
			writel(virt_to_phys(state->dma_cmds), &dma->cmdptr);
			writel((RUN << 16) | RUN, &dma->control);
			writeb(CMD_DMA_MODE + CMD_XFER_DATA, &regs->command);
			state->phase = dataing;
			break;
		} else if ((stat & STAT_PHASE) == STAT_CD + STAT_IO) {
Example #27
static int srp_indirect_data(struct scsi_cmnd *sc, struct srp_cmd *cmd,
                             struct srp_indirect_buf *id,
                             enum dma_data_direction dir, srp_rdma_t rdma_io,
                             int dma_map, int ext_desc)
{
    struct iu_entry *iue = NULL;
    struct srp_direct_buf *md = NULL;
    struct scatterlist dummy, *sg = NULL;
    dma_addr_t token = 0;
    int err = 0;
    int nmd, nsg = 0, len;

    if (dma_map || ext_desc) {
        iue = (struct iu_entry *) sc->SCp.ptr;
        sg = scsi_sglist(sc);

        dprintk("%p %u %u %d %d\n",
                iue, scsi_bufflen(sc), id->len,
                cmd->data_in_desc_cnt, cmd->data_out_desc_cnt);
    }

    nmd = id->table_desc.len / sizeof(struct srp_direct_buf);

    if ((dir == DMA_FROM_DEVICE && nmd == cmd->data_in_desc_cnt) ||
            (dir == DMA_TO_DEVICE && nmd == cmd->data_out_desc_cnt)) {
        md = &id->desc_list[0];
        goto rdma;
    }

    if (ext_desc && dma_map) {
        md = dma_alloc_coherent(iue->target->dev, id->table_desc.len,
                                &token, GFP_KERNEL);
        if (!md) {
            eprintk("Can't get dma memory %u\n", id->table_desc.len);
            return -ENOMEM;
        }

        sg_init_one(&dummy, md, id->table_desc.len);
        sg_dma_address(&dummy) = token;
        sg_dma_len(&dummy) = id->table_desc.len;
        err = rdma_io(sc, &dummy, 1, &id->table_desc, 1, DMA_TO_DEVICE,
                      id->table_desc.len);
        if (err) {
            eprintk("Error copying indirect table %d\n", err);
            goto free_mem;
        }
    } else {
        eprintk("This command uses external indirect buffer\n");
        return -EINVAL;
    }

rdma:
    if (dma_map) {
        nsg = dma_map_sg(iue->target->dev, sg, scsi_sg_count(sc),
                         DMA_BIDIRECTIONAL);
        if (!nsg) {
            eprintk("fail to map %p %d\n", iue, scsi_sg_count(sc));
            err = -EIO;
            goto free_mem;
        }
        len = min(scsi_bufflen(sc), id->len);
    } else
        len = id->len;

    err = rdma_io(sc, sg, nsg, md, nmd, dir, len);

    if (dma_map)
        dma_unmap_sg(iue->target->dev, sg, nsg, DMA_BIDIRECTIONAL);

free_mem:
    if (token && dma_map)
        dma_free_coherent(iue->target->dev, id->table_desc.len, md, token);

    return err;
}
Example #28
static int nsp_queuecommand(struct scsi_cmnd *SCpnt,
			    void (*done)(struct scsi_cmnd *))
{
#ifdef NSP_DEBUG
	/*unsigned int host_id = SCpnt->device->host->this_id;*/
	/*unsigned int base    = SCpnt->device->host->io_port;*/
	unsigned char target = scmd_id(SCpnt);
#endif
	nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata;

	nsp_dbg(NSP_DEBUG_QUEUECOMMAND,
		"SCpnt=0x%p target=%d lun=%d sglist=0x%p bufflen=%d sg_count=%d",
		SCpnt, target, SCpnt->device->lun, scsi_sglist(SCpnt),
		scsi_bufflen(SCpnt), scsi_sg_count(SCpnt));
	//nsp_dbg(NSP_DEBUG_QUEUECOMMAND, "before CurrentSC=0x%p", data->CurrentSC);

	SCpnt->scsi_done	= done;

	if (data->CurrentSC != NULL) {
		nsp_msg(KERN_DEBUG, "CurrentSC!=NULL this can't be happen");
		SCpnt->result   = DID_BAD_TARGET << 16;
		nsp_scsi_done(SCpnt);
		return 0;
	}

#if 0
	/* XXX: pcmcia-cs generates SCSI command with "scsi_info" utility.
	        This makes kernel crash when suspending... */
	if (data->ScsiInfo->stop != 0) {
		nsp_msg(KERN_INFO, "suspending device. reject command.");
		SCpnt->result  = DID_BAD_TARGET << 16;
		nsp_scsi_done(SCpnt);
		return SCSI_MLQUEUE_HOST_BUSY;
	}
#endif

	show_command(SCpnt);

	data->CurrentSC		= SCpnt;

	SCpnt->SCp.Status	= CHECK_CONDITION;
	SCpnt->SCp.Message	= 0;
	SCpnt->SCp.have_data_in = IO_UNKNOWN;
	SCpnt->SCp.sent_command = 0;
	SCpnt->SCp.phase	= PH_UNDETERMINED;
	scsi_set_resid(SCpnt, scsi_bufflen(SCpnt));

	/* setup scratch area
	   SCp.ptr		: buffer pointer
	   SCp.this_residual	: buffer length
	   SCp.buffer		: next buffer
	   SCp.buffers_residual : left buffers in list
	   SCp.phase		: current state of the command */
	if (scsi_bufflen(SCpnt)) {
		SCpnt->SCp.buffer	    = scsi_sglist(SCpnt);
		SCpnt->SCp.ptr		    = BUFFER_ADDR;
		SCpnt->SCp.this_residual    = SCpnt->SCp.buffer->length;
		SCpnt->SCp.buffers_residual = scsi_sg_count(SCpnt) - 1;
	} else {
		SCpnt->SCp.ptr		    = NULL;
		SCpnt->SCp.this_residual    = 0;
		SCpnt->SCp.buffer	    = NULL;
		SCpnt->SCp.buffers_residual = 0;
	}

	if (nsphw_start_selection(SCpnt) == FALSE) {
		nsp_dbg(NSP_DEBUG_QUEUECOMMAND, "selection fail");
		SCpnt->result   = DID_BUS_BUSY << 16;
		nsp_scsi_done(SCpnt);
		return 0;
	}


	//nsp_dbg(NSP_DEBUG_QUEUECOMMAND, "out");
#ifdef NSP_DEBUG
	data->CmdId++;
#endif
	return 0;
}
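
The scratch-area comment in nsp_queuecommand() documents the classic SCp convention: this_residual counts bytes left in the current segment, buffers_residual counts segments left after it. A hypothetical helper that steps to the next segment under that convention; it assumes segments are kernel-mapped so sg_virt() is valid, much as the driver's BUFFER_ADDR macro does:

/* Sketch: advance SCp to the next scatterlist segment once the
 * current one is drained. */
static void example_next_segment(struct scsi_cmnd *cmd)
{
	struct scsi_pointer *scp = &cmd->SCp;

	if (scp->this_residual == 0 && scp->buffers_residual > 0) {
		scp->buffer = sg_next(scp->buffer);
		scp->buffers_residual--;
		scp->this_residual = scp->buffer->length;
		scp->ptr = sg_virt(scp->buffer);
	}
}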
Example #29
static int map_data_for_request(struct vscsifrnt_info *info,
				struct scsi_cmnd *sc,
				struct vscsiif_request *ring_req,
				struct vscsifrnt_shadow *shadow)
{
	grant_ref_t gref_head;
	struct page *page;
	int err, ref, ref_cnt = 0;
	int grant_ro = (sc->sc_data_direction == DMA_TO_DEVICE);
	unsigned int i, off, len, bytes;
	unsigned int data_len = scsi_bufflen(sc);
	unsigned int data_grants = 0, seg_grants = 0;
	struct scatterlist *sg;
	unsigned long mfn;
	struct scsiif_request_segment *seg;

	ring_req->nr_segments = 0;
	if (sc->sc_data_direction == DMA_NONE || !data_len)
		return 0;

	scsi_for_each_sg(sc, sg, scsi_sg_count(sc), i)
		data_grants += PFN_UP(sg->offset + sg->length);

	if (data_grants > VSCSIIF_SG_TABLESIZE) {
		if (data_grants > info->host->sg_tablesize) {
			shost_printk(KERN_ERR, info->host, KBUILD_MODNAME
			     "Unable to map request_buffer for command!\n");
			return -E2BIG;
		}
		seg_grants = vscsiif_grants_sg(data_grants);
		shadow->sg = kcalloc(data_grants,
			sizeof(struct scsiif_request_segment), GFP_ATOMIC);
		if (!shadow->sg)
			return -ENOMEM;
	}
	seg = shadow->sg ? : ring_req->seg;

	err = gnttab_alloc_grant_references(seg_grants + data_grants,
					    &gref_head);
	if (err) {
		kfree(shadow->sg);
		shost_printk(KERN_ERR, info->host, KBUILD_MODNAME
			     "gnttab_alloc_grant_references() error\n");
		return -ENOMEM;
	}

	if (seg_grants) {
		page = virt_to_page(seg);
		off = (unsigned long)seg & ~PAGE_MASK;
		len = sizeof(struct scsiif_request_segment) * data_grants;
		while (len > 0) {
			bytes = min_t(unsigned int, len, PAGE_SIZE - off);

			ref = gnttab_claim_grant_reference(&gref_head);
			BUG_ON(ref == -ENOSPC);

			mfn = pfn_to_mfn(page_to_pfn(page));
			gnttab_grant_foreign_access_ref(ref,
				info->dev->otherend_id, mfn, 1);
			shadow->gref[ref_cnt] = ref;
			ring_req->seg[ref_cnt].gref   = ref;
			ring_req->seg[ref_cnt].offset = (uint16_t)off;
			ring_req->seg[ref_cnt].length = (uint16_t)bytes;

			page++;
			len -= bytes;
			off = 0;
			ref_cnt++;
		}
		BUG_ON(seg_grants < ref_cnt);
		seg_grants = ref_cnt;
	}

	scsi_for_each_sg(sc, sg, scsi_sg_count(sc), i) {
		page = sg_page(sg);
		off = sg->offset;
		len = sg->length;

		while (len > 0 && data_len > 0) {
			/*
			 * sg sends a scatterlist that is larger than
			 * the data_len it wants transferred for certain
			 * IO sizes.
			 */
			bytes = min_t(unsigned int, len, PAGE_SIZE - off);
			bytes = min(bytes, data_len);

			ref = gnttab_claim_grant_reference(&gref_head);
			BUG_ON(ref == -ENOSPC);

			mfn = pfn_to_mfn(page_to_pfn(page));
			gnttab_grant_foreign_access_ref(ref,
				info->dev->otherend_id, mfn, grant_ro);

			shadow->gref[ref_cnt] = ref;
			seg->gref   = ref;
			seg->offset = (uint16_t)off;
			seg->length = (uint16_t)bytes;

			page++;
			seg++;
			len -= bytes;
			data_len -= bytes;
			off = 0;
			ref_cnt++;
		}
	}
Example #30
static void tcm_loop_submission_work(struct work_struct *work)
{
	struct tcm_loop_cmd *tl_cmd =
		container_of(work, struct tcm_loop_cmd, work);
	struct se_cmd *se_cmd = &tl_cmd->tl_se_cmd;
	struct scsi_cmnd *sc = tl_cmd->sc;
	struct tcm_loop_nexus *tl_nexus;
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;
	struct scatterlist *sgl_bidi = NULL;
	u32 sgl_bidi_count = 0;
	int ret;

	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];

	/*
	 * Ensure that this tl_tpg reference from the incoming sc->device->id
	 * has already been configured via tcm_loop_make_naa_tpg().
	 */
	if (!tl_tpg->tl_hba) {
		set_host_byte(sc, DID_NO_CONNECT);
		goto out_done;
	}

	tl_nexus = tl_hba->tl_nexus;
	if (!tl_nexus) {
		scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus"
				" does not exist\n");
		set_host_byte(sc, DID_ERROR);
		goto out_done;
	}

	transport_init_se_cmd(se_cmd, tl_tpg->tl_se_tpg.se_tpg_tfo,
			tl_nexus->se_sess,
			scsi_bufflen(sc), sc->sc_data_direction,
			tcm_loop_sam_attr(sc), &tl_cmd->tl_sense_buf[0]);

	if (scsi_bidi_cmnd(sc)) {
		struct scsi_data_buffer *sdb = scsi_in(sc);

		sgl_bidi = sdb->table.sgl;
		sgl_bidi_count = sdb->table.nents;
		se_cmd->se_cmd_flags |= SCF_BIDI;

	}

	if (transport_lookup_cmd_lun(se_cmd, tl_cmd->sc->device->lun) < 0) {
		kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
		set_host_byte(sc, DID_NO_CONNECT);
		goto out_done;
	}

	/*
	 * Because some userspace code via scsi-generic do not memset their
	 * associated read buffers, go ahead and do that here for type
	 * SCF_SCSI_CONTROL_SG_IO_CDB.  Also note that this is currently
	 * guaranteed to be a single SGL for SCF_SCSI_CONTROL_SG_IO_CDB
	 * by target core in target_setup_cmd_from_cdb() ->
	 * transport_generic_cmd_sequencer().
	 */
	if (se_cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB &&
	    se_cmd->data_direction == DMA_FROM_DEVICE) {
		struct scatterlist *sg = scsi_sglist(sc);
		unsigned char *buf = kmap(sg_page(sg)) + sg->offset;

		if (buf != NULL) {
			memset(buf, 0, sg->length);
			kunmap(sg_page(sg));
		}
	}

	ret = target_setup_cmd_from_cdb(se_cmd, sc->cmnd);
	if (ret == -ENOMEM) {
		transport_send_check_condition_and_sense(se_cmd,
				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
		transport_generic_free_cmd(se_cmd, 0);
		return;
	} else if (ret < 0) {
		if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT)
			tcm_loop_queue_status(se_cmd);
		else
			transport_send_check_condition_and_sense(se_cmd,
					se_cmd->scsi_sense_reason, 0);
		transport_generic_free_cmd(se_cmd, 0);
		return;
	}

	ret = transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc),
			scsi_sg_count(sc), sgl_bidi, sgl_bidi_count);
	if (ret) {
		transport_send_check_condition_and_sense(se_cmd,
					se_cmd->scsi_sense_reason, 0);
		transport_generic_free_cmd(se_cmd, 0);
		return;
	}
	transport_handle_cdb_direct(se_cmd);
	return;

out_done:
	sc->scsi_done(sc);
	return;
}