Example #1
static int stex_map_sg(struct st_hba *hba,
	struct req_msg *req, struct st_ccb *ccb)
{
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	struct st_sgtable *dst;
	int i, nseg;

	cmd = ccb->cmd;
	dst = (struct st_sgtable *)req->variable;
	dst->max_sg_count = cpu_to_le16(ST_MAX_SG);
	dst->sz_in_byte = cpu_to_le32(scsi_bufflen(cmd));

	nseg = scsi_dma_map(cmd);
	if (nseg < 0)
		return -EIO;
	if (nseg) {
		ccb->sg_count = nseg;
		dst->sg_count = cpu_to_le16((u16)nseg);

		scsi_for_each_sg(cmd, sg, nseg, i) {
			dst->table[i].count = cpu_to_le32((u32)sg_dma_len(sg));
			dst->table[i].addr =
				cpu_to_le32(sg_dma_address(sg) & 0xffffffff);
			/* two 16-bit shifts: a single >> 32 is undefined when
			 * dma_addr_t is only 32 bits wide */
			dst->table[i].addr_hi =
				cpu_to_le32((sg_dma_address(sg) >> 16) >> 16);
			dst->table[i].ctrl = SG_CF_64B | SG_CF_HOST;
		}
		dst->table[--i].ctrl |= SG_CF_EOT;
	}
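
This and the following examples all share the same basic shape: map the command's scatterlist with scsi_dma_map(), bail out (or unmap) on error, then walk the mapped segments with scsi_for_each_sg() and copy each segment's DMA address and length into the controller's descriptor format. A minimal sketch of that shared pattern follows; the descriptor layout (struct my_sg_entry), the helper name my_build_sgl() and the max_sg limit are hypothetical and not taken from any of the drivers quoted here.

#include <linux/errno.h>
#include <linux/scatterlist.h>
#include <linux/types.h>
#include <asm/byteorder.h>
#include <scsi/scsi_cmnd.h>

/* Hypothetical hardware SG descriptor, little-endian on the wire. */
struct my_sg_entry {
	__le64	addr;
	__le32	len;
};

static int my_build_sgl(struct scsi_cmnd *cmd, struct my_sg_entry *tbl,
			int max_sg)
{
	struct scatterlist *sg;
	int i, nseg;

	nseg = scsi_dma_map(cmd);	/* < 0 means the mapping failed */
	if (nseg < 0)
		return -EIO;
	if (!nseg)			/* no data phase, nothing to describe */
		return 0;
	if (nseg > max_sg) {		/* more segments than the table holds */
		scsi_dma_unmap(cmd);
		return -EINVAL;
	}

	scsi_for_each_sg(cmd, sg, nseg, i) {
		tbl[i].addr = cpu_to_le64(sg_dma_address(sg));
		tbl[i].len  = cpu_to_le32(sg_dma_len(sg));
	}
	return nseg;			/* caller records the segment count */
}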
Example #2
static int sym_scatter(struct sym_hcb *np, struct sym_ccb *cp, struct scsi_cmnd *cmd)
{
	int segment;
	int use_sg;

	cp->data_len = 0;

	use_sg = scsi_dma_map(cmd);
	if (use_sg > 0) {
		struct scatterlist *sg;
		struct sym_tcb *tp = &np->target[cp->target];
		struct sym_tblmove *data;

		if (use_sg > SYM_CONF_MAX_SG) {
			scsi_dma_unmap(cmd);
			return -1;
		}

		data = &cp->phys.data[SYM_CONF_MAX_SG - use_sg];

		scsi_for_each_sg(cmd, sg, use_sg, segment) {
			dma_addr_t baddr = sg_dma_address(sg);
			unsigned int len = sg_dma_len(sg);

			/* wide transfers (EWS set) must not end on an odd
			 * byte; pad the segment and note the adjustment */
			if ((len & 1) && (tp->head.wval & EWS)) {
				len++;
				cp->odd_byte_adjustment++;
			}

			sym_build_sge(np, &data[segment], baddr, len);
			cp->data_len += len;
		}
	} else {
Example #3
/**
 * megaraid_mbox_mksgl - make the scatter-gather list
 * @adapter	: controller's soft state
 * @scb		: scsi control block
 *
 * Prepare the scatter-gather list.
 */
static int
megaraid_mbox_mksgl(adapter_t *adapter, scb_t *scb)
{
	struct scatterlist	*sgl;
	mbox_ccb_t		*ccb;
	struct scsi_cmnd	*scp;
	int			sgcnt;
	int			i;


	scp	= scb->scp;
	ccb	= (mbox_ccb_t *)scb->ccb;

	sgcnt = scsi_dma_map(scp);
	BUG_ON(sgcnt < 0 || sgcnt > adapter->sglen);

	// no mapping required if no data to be transferred
	if (!sgcnt)
		return 0;

	scb->dma_type = MRAID_DMA_WSG;

	scsi_for_each_sg(scp, sgl, sgcnt, i) {
		ccb->sgl64[i].address	= sg_dma_address(sgl);
		ccb->sgl64[i].length	= sg_dma_len(sgl);
	}
Example #4
static int hptiop_buildsgl(struct scsi_cmnd *scp, struct hpt_iopsg *psg)
{
	struct Scsi_Host *host = scp->device->host;
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
	struct scatterlist *sg;
	int idx, nseg;

	nseg = scsi_dma_map(scp);
	BUG_ON(nseg < 0);
	if (!nseg)
		return 0;

	HPT_SCP(scp)->sgcnt = nseg;
	HPT_SCP(scp)->mapped = 1;

	BUG_ON(HPT_SCP(scp)->sgcnt > hba->max_sg_descriptors);

	scsi_for_each_sg(scp, sg, HPT_SCP(scp)->sgcnt, idx) {
		psg[idx].pci_address = cpu_to_le64(sg_dma_address(sg));
		psg[idx].size = cpu_to_le32(sg_dma_len(sg));
		psg[idx].eot = (idx == HPT_SCP(scp)->sgcnt - 1) ?
			cpu_to_le32(1) : 0;
	}
Example #5
static void map_dma(unsigned int i, unsigned int j) {
    unsigned int data_len = 0;
    unsigned int k, pci_dir;
    int count;
    struct scatterlist *sg;
    struct mscp *cpp;
    struct scsi_cmnd *SCpnt;

    cpp = &HD(j)->cp[i];
    SCpnt = cpp->SCpnt;
    pci_dir = SCpnt->sc_data_direction;

    if (SCpnt->sense_buffer)
        cpp->sense_addr = H2DEV(pci_map_single(HD(j)->pdev, SCpnt->sense_buffer,
                                               SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE));

    cpp->sense_len = SCSI_SENSE_BUFFERSIZE;

    if (scsi_bufflen(SCpnt)) {
        count = scsi_dma_map(SCpnt);
        BUG_ON(count < 0);

        scsi_for_each_sg(SCpnt, sg, count, k) {
            cpp->sglist[k].address = H2DEV(sg_dma_address(sg));
            cpp->sglist[k].num_bytes = H2DEV(sg_dma_len(sg));
            data_len += sg->length;
        }

        cpp->sg = TRUE;
        cpp->use_sg = scsi_sg_count(SCpnt);
        cpp->data_address =
            H2DEV(pci_map_single(HD(j)->pdev, cpp->sglist,
                                 cpp->use_sg * sizeof(struct sg_list),
                                 pci_dir));
        cpp->data_len = H2DEV(data_len);

    } else {
Example #6
static void pvscsi_map_buffers(struct pvscsi_adapter *adapter,
			       struct pvscsi_ctx *ctx, struct scsi_cmnd *cmd,
			       struct PVSCSIRingReqDesc *e)
{
	unsigned count;
	unsigned bufflen = scsi_bufflen(cmd);
	struct scatterlist *sg;

	e->dataLen = bufflen;
	e->dataAddr = 0;
	if (bufflen == 0)
		return;

	sg = scsi_sglist(cmd);
	count = scsi_sg_count(cmd);
	if (count != 0) {
		int segs = scsi_dma_map(cmd);
		if (segs > 1) {
			pvscsi_create_sg(ctx, sg, segs);

			e->flags |= PVSCSI_FLAG_CMD_WITH_SG_LIST;
			ctx->sglPA = pci_map_single(adapter->dev, ctx->sgl,
						    SGL_SIZE, PCI_DMA_TODEVICE);
			e->dataAddr = ctx->sglPA;
		} else
			e->dataAddr = sg_dma_address(sg);
	} else {
		/*
		 * In case there is no S/G list, scsi_sglist points
		 * directly to the buffer.
		 */
		ctx->dataPA = pci_map_single(adapter->dev, sg, bufflen,
					     cmd->sc_data_direction);
		e->dataAddr = ctx->dataPA;
	}
}
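
The snippets in this collection show the mapping side only. On the completion side, the mapping set up by scsi_dma_map() is released with scsi_dma_unmap() before the command is handed back to the midlayer. A minimal sketch, with a hypothetical my_complete_io() helper and the cmd->scsi_done() convention these drivers already use:

static void my_complete_io(struct scsi_cmnd *cmd, int result)
{
	scsi_dma_unmap(cmd);		/* undo scsi_dma_map() */
	cmd->result = result;		/* e.g. DID_OK << 16 */
	cmd->scsi_done(cmd);		/* return the command to the midlayer */
}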
Example #7
int esas2r_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
	struct esas2r_adapter *a =
		(struct esas2r_adapter *)cmd->device->host->hostdata;
	struct esas2r_request *rq;
	struct esas2r_sg_context sgc;
	unsigned bufflen;

	/* Assume success, if it fails we will fix the result later. */
	cmd->result = DID_OK << 16;

	if (unlikely(test_bit(AF_DEGRADED_MODE, &a->flags))) {
		cmd->result = DID_NO_CONNECT << 16;
		cmd->scsi_done(cmd);
		return 0;
	}

	rq = esas2r_alloc_request(a);
	if (unlikely(rq == NULL)) {
		esas2r_debug("esas2r_alloc_request failed");
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	rq->cmd = cmd;
	bufflen = scsi_bufflen(cmd);

	if (likely(bufflen != 0)) {
		if (cmd->sc_data_direction == DMA_TO_DEVICE)
			rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_WRD);
		else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
			rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_RDD);
	}

	memcpy(rq->vrq->scsi.cdb, cmd->cmnd, cmd->cmd_len);
	rq->vrq->scsi.length = cpu_to_le32(bufflen);
	rq->target_id = cmd->device->id;
	rq->vrq->scsi.flags |= cpu_to_le32(cmd->device->lun);
	rq->sense_buf = cmd->sense_buffer;
	rq->sense_len = SCSI_SENSE_BUFFERSIZE;

	esas2r_sgc_init(&sgc, a, rq, NULL);

	sgc.length = bufflen;
	sgc.cur_offset = NULL;

	sgc.cur_sgel = scsi_sglist(cmd);
	sgc.exp_offset = NULL;
	sgc.num_sgel = scsi_dma_map(cmd);
	sgc.sgel_count = 0;

	if (unlikely(sgc.num_sgel < 0)) {
		esas2r_free_request(a, rq);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	sgc.get_phys_addr = (PGETPHYSADDR)get_physaddr_from_sgc;

	if (unlikely(!esas2r_build_sg_list(a, rq, &sgc))) {
		scsi_dma_unmap(cmd);
		esas2r_free_request(a, rq);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	esas2r_debug("start request %p to %d:%d\n", rq, (int)cmd->device->id,
		     (int)cmd->device->lun);

	esas2r_start_request(a, rq);

	return 0;
}
Example #8
/* Build a SCB and send it to the card */
static int wd719x_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
{
	int i, count_sg;
	unsigned long flags;
	struct wd719x_scb *scb = scsi_cmd_priv(cmd);
	struct wd719x *wd = shost_priv(sh);

	scb->cmd = cmd;

	scb->CDB_tag = 0;	/* Tagged queueing not supported yet */
	scb->devid = cmd->device->id;
	scb->lun = cmd->device->lun;

	/* copy the command */
	memcpy(scb->CDB, cmd->cmnd, cmd->cmd_len);

	/* map SCB */
	scb->phys = dma_map_single(&wd->pdev->dev, scb, sizeof(*scb),
				   DMA_BIDIRECTIONAL);

	if (dma_mapping_error(&wd->pdev->dev, scb->phys))
		goto out_error;

	/* map sense buffer */
	scb->sense_buf_length = SCSI_SENSE_BUFFERSIZE;
	cmd->SCp.dma_handle = dma_map_single(&wd->pdev->dev, cmd->sense_buffer,
			SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(&wd->pdev->dev, cmd->SCp.dma_handle))
		goto out_unmap_scb;
	scb->sense_buf = cpu_to_le32(cmd->SCp.dma_handle);

	/* request autosense */
	scb->SCB_options |= WD719X_SCB_FLAGS_AUTO_REQUEST_SENSE;

	/* check direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE)
		scb->SCB_options |= WD719X_SCB_FLAGS_CHECK_DIRECTION
				 |  WD719X_SCB_FLAGS_PCI_TO_SCSI;
	else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
		scb->SCB_options |= WD719X_SCB_FLAGS_CHECK_DIRECTION;

	/* Scatter/gather */
	count_sg = scsi_dma_map(cmd);
	if (count_sg < 0)
		goto out_unmap_sense;
	BUG_ON(count_sg > WD719X_SG);

	if (count_sg) {
		struct scatterlist *sg;

		scb->data_length = cpu_to_le32(count_sg *
					       sizeof(struct wd719x_sglist));
		scb->data_p = cpu_to_le32(scb->phys +
					  offsetof(struct wd719x_scb, sg_list));

		scsi_for_each_sg(cmd, sg, count_sg, i) {
			scb->sg_list[i].ptr = cpu_to_le32(sg_dma_address(sg));
			scb->sg_list[i].length = cpu_to_le32(sg_dma_len(sg));
		}
		scb->SCB_options |= WD719X_SCB_FLAGS_DO_SCATTER_GATHER;
	} else { /* zero length */
Example #9
static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
{
	struct aha1542_cmd *acmd = scsi_cmd_priv(cmd);
	struct aha1542_hostdata *aha1542 = shost_priv(sh);
	u8 direction;
	u8 target = cmd->device->id;
	u8 lun = cmd->device->lun;
	unsigned long flags;
	int bufflen = scsi_bufflen(cmd);
	int mbo, sg_count;
	struct mailbox *mb = aha1542->mb;
	struct ccb *ccb = aha1542->ccb;

	if (*cmd->cmnd == REQUEST_SENSE) {
		/* Don't do the command - we have the sense data already */
		cmd->result = 0;
		cmd->scsi_done(cmd);
		return 0;
	}
#ifdef DEBUG
	{
		int i = -1;
		if (*cmd->cmnd == READ_10 || *cmd->cmnd == WRITE_10)
			i = xscsi2int(cmd->cmnd + 2);
		else if (*cmd->cmnd == READ_6 || *cmd->cmnd == WRITE_6)
			i = scsi2int(cmd->cmnd + 2);
		shost_printk(KERN_DEBUG, sh, "aha1542_queuecommand: dev %d cmd %02x pos %d len %d",
						target, *cmd->cmnd, i, bufflen);
		print_hex_dump_bytes("command: ", DUMP_PREFIX_NONE, cmd->cmnd, cmd->cmd_len);
	}
#endif
	sg_count = scsi_dma_map(cmd);
	if (sg_count) {
		size_t len = sg_count * sizeof(struct chain);

		acmd->chain = kmalloc(len, GFP_DMA);
		if (!acmd->chain)
			goto out_unmap;
		acmd->chain_handle = dma_map_single(sh->dma_dev, acmd->chain,
				len, DMA_TO_DEVICE);
		if (dma_mapping_error(sh->dma_dev, acmd->chain_handle))
			goto out_free_chain;
	}

	/* Use the outgoing mailboxes in a round-robin fashion, because this
	   is how the host adapter will scan for them */

	spin_lock_irqsave(sh->host_lock, flags);
	mbo = aha1542->aha1542_last_mbo_used + 1;
	if (mbo >= AHA1542_MAILBOXES)
		mbo = 0;

	do {
		if (mb[mbo].status == 0 && aha1542->int_cmds[mbo] == NULL)
			break;
		mbo++;
		if (mbo >= AHA1542_MAILBOXES)
			mbo = 0;
	} while (mbo != aha1542->aha1542_last_mbo_used);

	if (mb[mbo].status || aha1542->int_cmds[mbo])
		panic("Unable to find empty mailbox for aha1542.\n");

	aha1542->int_cmds[mbo] = cmd;	/* This will effectively prevent someone else from
					   screwing with this cdb. */

	aha1542->aha1542_last_mbo_used = mbo;

#ifdef DEBUG
	shost_printk(KERN_DEBUG, sh, "Sending command (%d %p)...", mbo, cmd->scsi_done);
#endif

	/* This gets trashed for some reason */
	any2scsi(mb[mbo].ccbptr, aha1542->ccb_handle + mbo * sizeof(*ccb));

	memset(&ccb[mbo], 0, sizeof(struct ccb));

	ccb[mbo].cdblen = cmd->cmd_len;

	direction = 0;
	if (*cmd->cmnd == READ_10 || *cmd->cmnd == READ_6)
		direction = 8;
	else if (*cmd->cmnd == WRITE_10 || *cmd->cmnd == WRITE_6)
		direction = 16;

	memcpy(ccb[mbo].cdb, cmd->cmnd, ccb[mbo].cdblen);

	if (bufflen) {
		struct scatterlist *sg;
		int i;

		ccb[mbo].op = 2;	/* SCSI Initiator Command  w/scatter-gather */
		scsi_for_each_sg(cmd, sg, sg_count, i) {
			any2scsi(acmd->chain[i].dataptr, sg_dma_address(sg));
			any2scsi(acmd->chain[i].datalen, sg_dma_len(sg));
		}
		any2scsi(ccb[mbo].datalen, sg_count * sizeof(struct chain));
		any2scsi(ccb[mbo].dataptr, acmd->chain_handle);
#ifdef DEBUG
		shost_printk(KERN_DEBUG, sh, "cptr %p: ", acmd->chain);
		print_hex_dump_bytes("cptr: ", DUMP_PREFIX_NONE, acmd->chain, 18);
#endif
	} else {