Example #1
/* Prototype: fasdmatype_t powertecscsi_dma_setup(host, SCpnt, direction, min_type)
 * Purpose  : initialises DMA/PIO
 * Params   : host      - host
 *	      SCpnt     - command
 *	      direction - DMA on to/off of card
 *	      min_type  - minimum DMA support that we must have for this transfer
 * Returns  : type of transfer to be performed
 */
static fasdmatype_t
powertecscsi_dma_setup(struct Scsi_Host *host, Scsi_Pointer *SCp,
		       fasdmadir_t direction, fasdmatype_t min_type)
{
	struct powertec_info *info = (struct powertec_info *)host->hostdata;
	int dmach = host->dma_channel;

	if (info->info.ifcfg.capabilities & FASCAP_DMA &&
	    min_type == fasdma_real_all) {
		int bufs, map_dir, dma_dir;

		bufs = copy_SCp_to_sg(&info->sg[0], SCp, NR_SG);

		if (direction == DMA_OUT)
			map_dir = PCI_DMA_TODEVICE,
			dma_dir = DMA_MODE_WRITE;
		else
			map_dir = PCI_DMA_FROMDEVICE,
			dma_dir = DMA_MODE_READ;

		pci_map_sg(NULL, info->sg, bufs, map_dir);

		disable_dma(dmach);
		set_dma_sg(dmach, info->sg, bufs);
		set_dma_mode(dmach, dma_dir);
		enable_dma(dmach);
		return fasdma_real_all;
	}

	/*
	 * If we're not doing DMA,
	 *  we'll do slow PIO
	 */
	return fasdma_pio;
}
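For comparison with the ARM driver above, which passes a NULL pci_dev to pci_map_sg() and never checks the returned count or unmaps, the sketch below shows the usual round trip with the same old PCI DMA API. It is only an illustration of the pattern the later examples follow, not part of any driver in this list.

static int example_sg_round_trip(struct pci_dev *pdev, struct scatterlist *sg,
				 int nents, int direction)
{
	dma_addr_t bus;
	unsigned int len;
	int i, count;

	/* The platform may coalesce entries; use the returned count below. */
	count = pci_map_sg(pdev, sg, nents, direction);
	if (count == 0)
		return -ENOMEM;

	for (i = 0; i < count; i++) {
		bus = sg_dma_address(&sg[i]);
		len = sg_dma_len(&sg[i]);
		/* ... program descriptor i of the controller with bus/len ... */
	}

	/* After the transfer completes, unmap with the original nents. */
	pci_unmap_sg(pdev, sg, nents, direction);
	return 0;
}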
Example #2
/* Prototype: fasdmatype_t eesoxscsi_dma_setup(host, SCpnt, direction, min_type)
 * Purpose  : initialises DMA/PIO
 * Params   : host      - host
 *	      SCpnt     - command
 *	      direction - DMA on to/off of card
 *	      min_type  - minimum DMA support that we must have for this transfer
 * Returns  : type of transfer to be performed
 */
static fasdmatype_t
eesoxscsi_dma_setup(struct Scsi_Host *host, Scsi_Pointer *SCp,
		       fasdmadir_t direction, fasdmatype_t min_type)
{
	struct eesoxscsi_info *info = (struct eesoxscsi_info *)host->hostdata;
	int dmach = host->dma_channel;

	if (dmach != NO_DMA &&
	    (min_type == fasdma_real_all || SCp->this_residual >= 512)) {
		int bufs, map_dir, dma_dir;

		bufs = copy_SCp_to_sg(&info->sg[0], SCp, NR_SG);

		if (direction == DMA_OUT)
			map_dir = PCI_DMA_TODEVICE,
			dma_dir = DMA_MODE_WRITE;
		else
			map_dir = PCI_DMA_FROMDEVICE,
			dma_dir = DMA_MODE_READ;

		pci_map_sg(NULL, info->sg, bufs, map_dir);

		disable_dma(dmach);
		set_dma_sg(dmach, info->sg, bufs);
		set_dma_mode(dmach, dma_dir);
		enable_dma(dmach);
		return fasdma_real_all;
	}
	/*
	 * We don't do DMA, we only do slow PIO
	 *
	 * Some day, we will do Pseudo DMA
	 */
	return fasdma_pseudo;
}
Example #3
int videobuf_dma_pci_map(struct pci_dev *dev, struct videobuf_dmabuf *dma)
{
	MAGIC_CHECK(dma->magic,MAGIC_DMABUF);
	BUG_ON(0 == dma->nr_pages);
	
	if (dma->pages) {
		dma->sglist = videobuf_pages_to_sg(dma->pages, dma->nr_pages,
						   dma->offset);
	}
	if (dma->vmalloc) {
		dma->sglist = videobuf_vmalloc_to_sg
			(dma->vmalloc,dma->nr_pages);
	}
	if (dma->bus_addr) {
		dma->sglist = kmalloc(sizeof(struct scatterlist), GFP_KERNEL);
		if (NULL != dma->sglist) {
			dma->sglen  = 1;
			sg_dma_address(&dma->sglist[0]) = dma->bus_addr & PAGE_MASK;
			dma->sglist[0].offset           = dma->bus_addr & ~PAGE_MASK;
			sg_dma_len(&dma->sglist[0])     = dma->nr_pages * PAGE_SIZE;
		}
	}
	if (NULL == dma->sglist) {
		dprintk(1,"scatterlist is NULL\n");
		return -ENOMEM;
	}

	if (!dma->bus_addr)
		dma->sglen = pci_map_sg(dev,dma->sglist,dma->nr_pages,
					dma->direction);
	return 0;
}
Example #4
/****************************************************************
 *	Name:	BuildSgList		:LOCAL
 *
 *	Description:	Build the scatter gather list for controller.
 *
 *	Parameters:		SCpnt	 - Pointer to SCSI command structure.
 *					padapter - Pointer to adapter information structure.
 *					pdev	 - Pointer to adapter device structure.
 *
 *	Returns:		Non-zero if not scatter gather.
 *
 ****************************************************************/
static int BuildSgList (Scsi_Cmnd *SCpnt, PADAPTER2000 padapter, PDEV2000 pdev)
	{
	int					 z;
	int					 zc;
	struct scatterlist	*sg;

	if ( SCpnt->use_sg )
		{
		sg = (struct scatterlist *)SCpnt->request_buffer;
		zc = pci_map_sg (padapter->pdev, sg, SCpnt->use_sg, scsi_to_pci_dma_dir (SCpnt->sc_data_direction));
		for ( z = 0;  z < zc;  z++ )
			{
			pdev->scatGath[z].address = cpu_to_le32 (sg_dma_address (sg));
			pdev->scatGath[z].length = cpu_to_le32 (sg_dma_len (sg++));
			}
		outl (pdev->scatGathDma, padapter->mb2);
		outl ((zc << 24) | SCpnt->request_bufflen, padapter->mb3);
		return FALSE;
		}
	if ( !SCpnt->request_bufflen)
		{
		outl (0, padapter->mb2);
		outl (0, padapter->mb3);
		return TRUE;
		}
	SCpnt->SCp.have_data_in = pci_map_single (padapter->pdev, SCpnt->request_buffer, SCpnt->request_bufflen, scsi_to_pci_dma_dir (SCpnt->sc_data_direction));
	outl (SCpnt->SCp.have_data_in, padapter->mb2);
	outl (SCpnt->request_bufflen, padapter->mb3);
	return TRUE;
	}
Example #5
int ivtv_udma_setup(struct ivtv *itv, unsigned long ivtv_dest_addr,
		       void __user *userbuf, int size_in_bytes)
{
	struct ivtv_dma_page_info user_dma;
	struct ivtv_user_dma *dma = &itv->udma;
	int i, err;

	IVTV_DEBUG_DMA("ivtv_udma_setup, dst: 0x%08x\n", (unsigned int)ivtv_dest_addr);

	/* Still in USE */
	if (dma->SG_length || dma->page_count) {
		IVTV_DEBUG_WARN("ivtv_udma_setup: SG_length %d page_count %d still full?\n",
			   dma->SG_length, dma->page_count);
		return -EBUSY;
	}

	ivtv_udma_get_page_info(&user_dma, (unsigned long)userbuf, size_in_bytes);

	if (user_dma.page_count <= 0) {
		IVTV_DEBUG_WARN("ivtv_udma_setup: Error %d page_count from %d bytes %d offset\n",
			   user_dma.page_count, size_in_bytes, user_dma.offset);
		return -EINVAL;
	}

	/* Get user pages for DMA Xfer */
	down_read(&current->mm->mmap_sem);
	err = get_user_pages(current, current->mm,
			user_dma.uaddr, user_dma.page_count, 0, 1, dma->map, NULL);
	up_read(&current->mm->mmap_sem);

	if (user_dma.page_count != err) {
		IVTV_DEBUG_WARN("failed to map user pages, returned %d instead of %d\n",
			   err, user_dma.page_count);
		return -EINVAL;
	}

	dma->page_count = user_dma.page_count;

	/* Fill SG List with new values */
	if (ivtv_udma_fill_sg_list(dma, &user_dma, 0) < 0) {
		for (i = 0; i < dma->page_count; i++) {
			put_page(dma->map[i]);
		}
		dma->page_count = 0;
		return -ENOMEM;
	}

	/* Map SG List */
	dma->SG_length = pci_map_sg(itv->pdev, dma->SGlist, dma->page_count, PCI_DMA_TODEVICE);

	/* Fill SG Array with new values */
	ivtv_udma_fill_sg_array (dma, ivtv_dest_addr, 0, -1);

	/* Tag SG Array with Interrupt Bit */
	dma->SGarray[dma->SG_length - 1].size |= cpu_to_le32(0x80000000);

	ivtv_udma_sync_for_device(itv);
	return dma->page_count;
}
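One detail worth noting: ivtv_udma_setup() goes on to index SGarray[dma->SG_length - 1] without checking whether pci_map_sg() returned zero. A defensive variant of that one step might look like the sketch below; the clean-up mirrors the fill_sg_list failure path earlier in the same function, and the -EIO choice follows Example #13, so treat it as an assumption rather than the driver's actual behaviour.

	/* Map SG List */
	dma->SG_length = pci_map_sg(itv->pdev, dma->SGlist, dma->page_count,
				    PCI_DMA_TODEVICE);
	if (dma->SG_length == 0) {
		/* Mapping failed: release the user pages taken above. */
		for (i = 0; i < dma->page_count; i++)
			put_page(dma->map[i]);
		dma->page_count = 0;
		return -EIO;
	}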
Example #6
/**
 * 	"Copied from drivers/ide/ide-dma.c"
 *	sgiioc4_ide_build_sglist - map IDE scatter gather for DMA I/O
 *	@hwif: the interface to build the DMA table for
 *	@rq: the request holding the sg list
 *	@ddir: data direction
 *
 *	Perform the PCI mapping magic necessary to access the source
 *	or target buffers of a request via PCI DMA. The lower layers
 *	of the kernel provide the necessary cache management so that
 *	we can operate in a portable fashion.
 *
 *	This code is identical to ide_build_sglist in ide-dma.c,
 *	however that is not exported, and even if it were it would create
 *	dependency problems for modular drivers.
 */
static int
sgiioc4_ide_build_sglist(ide_hwif_t * hwif, struct request *rq, int ddir)
{
	struct buffer_head *bh;
	struct scatterlist *sg = hwif->sg_table;
	unsigned long lastdataend = ~0UL;
	int nents = 0;

	if (hwif->sg_dma_active)
		BUG();

	bh = rq->bh;
	do {
		int contig = 0;

		if (bh->b_page) {
			if (bh_phys(bh) == lastdataend)
				contig = 1;
		} else {
			if ((unsigned long) bh->b_data == lastdataend)
				contig = 1;
		}

		if (contig) {
			sg[nents - 1].length += bh->b_size;
			lastdataend += bh->b_size;
			continue;
		}

		if (nents >= PRD_ENTRIES)
			return 0;

		memset(&sg[nents], 0, sizeof (*sg));

		if (bh->b_page) {
			sg[nents].page = bh->b_page;
			sg[nents].offset = bh_offset(bh);
			lastdataend = bh_phys(bh) + bh->b_size;
		} else {
			if ((unsigned long) bh->b_data < PAGE_SIZE)
				BUG();

			sg[nents].address = bh->b_data;
			lastdataend = (unsigned long) bh->b_data + bh->b_size;
		}

		sg[nents].length = bh->b_size;
		nents++;
	} while ((bh = bh->b_reqnext) != NULL);

	if (nents == 0)
		BUG();

	hwif->sg_dma_direction = ddir;
	return pci_map_sg(hwif->pci_dev, sg, nents, ddir);
}
Example #7
File: TW68-core.c  Project: neiderm/tw6869
/**   struct pci_dev *pci,
 * dma_region_alloc - allocate a buffer and map it to the IOMMU
 */
int dma_field_alloc(struct dma_region *dma, unsigned long n_bytes,
		    struct pci_dev *dev, int direction)
{
	unsigned int i;

	/* round up to page size */
	/*  to align the pointer to the (next) page boundary
	   #define PAGE_ALIGN(addr)        (((addr) + PAGE_SIZE - 1) & PAGE_MASK)
	   this worked as PAGE_SIZE and PAGE_MASK were available in page.h.
	 */

	n_bytes = PAGE_ALIGN(n_bytes);

	dma->n_pages = n_bytes >> PAGE_SHIFT;

	dma->kvirt = vmalloc_32(n_bytes);
	if (!dma->kvirt) {
		goto err;
	}

	memset(dma->kvirt, 0, n_bytes);

	/* allocate scatter/gather list */
	dma->sglist = vmalloc(dma->n_pages * sizeof(*dma->sglist));
	if (!dma->sglist) {
		goto err;
	}

	sg_init_table(dma->sglist, dma->n_pages);

	/* fill scatter/gather list with pages */
	for (i = 0; i < dma->n_pages; i++) {
		unsigned long va =
		    (unsigned long)dma->kvirt + (i << PAGE_SHIFT);

		sg_set_page(&dma->sglist[i], vmalloc_to_page((void *)va),
			    PAGE_SIZE, 0);
	}

	/* map sglist to the IOMMU */
	dma->n_dma_pages =
	    pci_map_sg(dev, dma->sglist, dma->n_pages, direction);

	if (dma->n_dma_pages == 0) {
		goto err;
	}

	dma->dev = dev;
	dma->direction = direction;

	return 0;

err:
	dma_field_free(dma);
	return -ENOMEM;
}
Example #8
int dma_region_alloc(struct dma_region *dma, unsigned long n_bytes,
		     struct pci_dev *dev, int direction)
{
	unsigned int i;

	/* round up to page size */
	n_bytes = PAGE_ALIGN(n_bytes);

	dma->n_pages = n_bytes >> PAGE_SHIFT;

	dma->kvirt = vmalloc_32(n_bytes);
	if (!dma->kvirt) {
		printk(KERN_ERR "dma_region_alloc: vmalloc_32() failed\n");
		goto err;
	}

	/* Clear the ram out, no junk to the user */
	memset(dma->kvirt, 0, n_bytes);

	/* allocate scatter/gather list */
	dma->sglist = vmalloc(dma->n_pages * sizeof(*dma->sglist));
	if (!dma->sglist) {
		printk(KERN_ERR "dma_region_alloc: vmalloc(sglist) failed\n");
		goto err;
	}

	/* just to be safe - this will become unnecessary once sglist->address goes away */
	memset(dma->sglist, 0, dma->n_pages * sizeof(*dma->sglist));

	/* fill scatter/gather list with pages */
	for (i = 0; i < dma->n_pages; i++) {
		unsigned long va =
		    (unsigned long)dma->kvirt + (i << PAGE_SHIFT);

		dma->sglist[i].page = vmalloc_to_page((void *)va);
		dma->sglist[i].length = PAGE_SIZE;
	}

	/* map sglist to the IOMMU */
	dma->n_dma_pages =
	    pci_map_sg(dev, dma->sglist, dma->n_pages, direction);

	if (dma->n_dma_pages == 0) {
		printk(KERN_ERR "dma_region_alloc: pci_map_sg() failed\n");
		goto err;
	}

	dma->dev = dev;
	dma->direction = direction;

	return 0;

      err:
	dma_region_free(dma);
	return -ENOMEM;
}
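Both error paths above funnel into dma_region_free(), whose body is not shown in this list. A minimal sketch of what such a teardown has to undo, assuming the same struct dma_region fields; it must tolerate a partially initialised region because the allocator can jump to err before every field is set.

void dma_region_free(struct dma_region *dma)
{
	if (dma->n_dma_pages) {
		/* Undo the IOMMU mapping created by pci_map_sg(). */
		pci_unmap_sg(dma->dev, dma->sglist, dma->n_pages,
			     dma->direction);
		dma->n_dma_pages = 0;
	}

	/* vfree() accepts NULL, so a half-built region is handled too. */
	vfree(dma->sglist);
	dma->sglist = NULL;

	vfree(dma->kvirt);
	dma->kvirt = NULL;
	dma->n_pages = 0;
}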
Example #9
/**
 * vme_dma_setup() - Setup a DMA transfer
 * @desc: DMA channel to setup
 * @to_user:	1 if the transfer is to/from a user-space buffer.
 *		0 if it is to/from a kernel buffer.
 *
 *  Setup a DMA transfer.
 *
 *  Returns 0 on success, or a standard kernel error code on failure.
 */
static int vme_dma_setup(struct dma_channel *channel, int to_user)
{
	int rc = 0;
	struct vme_dma *desc = &channel->desc;
	unsigned int length = desc->length;
	unsigned int uaddr;
	int nr_pages;

	/* Create the scatter gather list */
	uaddr = (desc->dir == VME_DMA_TO_DEVICE) ?
		desc->src.addrl : desc->dst.addrl;

	/* Check for overflow */
	if ((uaddr + length) < uaddr)
		return -EINVAL;

	nr_pages = ((uaddr & ~PAGE_MASK) + length + ~PAGE_MASK) >> PAGE_SHIFT;

	if ((channel->sgl = kmalloc(nr_pages * sizeof(struct scatterlist),
				    GFP_KERNEL)) == NULL)
		return -ENOMEM;

	/* Map the user pages into the scatter gather list */
	channel->sg_pages = sgl_map_user_pages(channel->sgl, nr_pages, uaddr,
					       length,
					       (desc->dir==VME_DMA_FROM_DEVICE),
					       to_user);

	if (channel->sg_pages <= 0) {
		rc = channel->sg_pages;
		goto out_free_sgl;
	}

	/* Map the sg list entries onto the PCI bus */
	channel->sg_mapped = pci_map_sg(vme_bridge->pdev, channel->sgl,
					channel->sg_pages, desc->dir);

	rc = tsi148_dma_setup(channel);


	if (rc)
		goto out_unmap_sgl;

	return 0;

out_unmap_sgl:
	pci_unmap_sg(vme_bridge->pdev, channel->sgl, channel->sg_mapped,
		     desc->dir);

	sgl_unmap_user_pages(channel->sgl, channel->sg_pages, 0, to_user);

out_free_sgl:
	kfree(channel->sgl);

	return rc;
}
Example #10
File: dma.c  Project: 12019/hg556a_source
int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction direction)
{
	if (dev->bus == &pci_bus_type)
		return pci_map_sg(to_pci_dev(dev), sg, nents, (int)direction);
	if (dev->bus == &vio_bus_type)
		return vio_map_sg(to_vio_dev(dev), sg, nents, direction);
	BUG();
	return 0;
}
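A short usage sketch for the bus-dispatching wrapper above. The caller, its scatterlist and the symmetric dma_unmap_sg() wrapper are assumptions for illustration; the zero-return check mirrors how the pci_map_sg() callers elsewhere in this list treat a failed mapping.

static int example_start_io(struct device *dev, struct scatterlist *sgl,
			    int nents)
{
	int mapped;

	/* Dispatches to pci_map_sg() or vio_map_sg() depending on dev->bus. */
	mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
	if (mapped == 0)
		return -ENOMEM;

	/* ... program the device from sg_dma_address()/sg_dma_len() of the
	 * first 'mapped' entries, then wait for completion ... */

	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
	return 0;
}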
Example #11
/* Prototype: fasdmatype_t cumanascsi_2_dma_setup(host, SCpnt, direction, min_type)
 * Purpose  : initialises DMA/PIO
 * Params   : host      - host
 *	      SCpnt     - command
 *	      direction - DMA on to/off of card
 *	      min_type  - minimum DMA support that we must have for this transfer
 * Returns  : type of transfer to be performed
 */
static fasdmatype_t
cumanascsi_2_dma_setup(struct Scsi_Host *host, Scsi_Pointer *SCp,
		       fasdmadir_t direction, fasdmatype_t min_type)
{
	struct cumanascsi2_info *info = (struct cumanascsi2_info *)host->hostdata;
	int dmach = host->dma_channel;

	outb(ALATCH_DIS_DMA, info->alatch);

	if (dmach != NO_DMA &&
	    (min_type == fasdma_real_all || SCp->this_residual >= 512)) {
		int bufs, map_dir, dma_dir, alatch_dir;

		bufs = copy_SCp_to_sg(&info->sg[0], SCp, NR_SG);

		if (direction == DMA_OUT)
			map_dir = PCI_DMA_TODEVICE,
			dma_dir = DMA_MODE_WRITE,
			alatch_dir = ALATCH_DMA_OUT;
		else
			map_dir = PCI_DMA_FROMDEVICE,
			dma_dir = DMA_MODE_READ,
			alatch_dir = ALATCH_DMA_IN;

		pci_map_sg(NULL, info->sg, bufs, map_dir);

		disable_dma(dmach);
		set_dma_sg(dmach, info->sg, bufs);
		outb(alatch_dir, info->alatch);
		set_dma_mode(dmach, dma_dir);
		enable_dma(dmach);
		outb(ALATCH_ENA_DMA, info->alatch);
		outb(ALATCH_DIS_BIT32, info->alatch);
		return fasdma_real_all;
	}

	/*
	 * If we're not doing DMA,
	 *  we'll do pseudo DMA
	 */
	return fasdma_pio;
}
Example #12
/**
 * 	Copied from drivers/ide/ide-dma.c
 *	sgiioc4_ide_raw_build_sglist	-	map IDE scatter gather for DMA
 *	@hwif: the interface to build the DMA table for
 *	@rq: the request holding the sg list
 *
 *	Perform the PCI mapping magic necessary to access the source or
 *	target buffers of a taskfile request via PCI DMA. The lower layers
 *	of the kernel provide the necessary cache management so that we can
 *	operate in a portable fashion.
 *
 *	This code is identical to ide_raw_build_sglist in ide-dma.c,
 *	however that is not exported, and even if it were it would create
 *	dependency problems for modular drivers.
 */
static int
sgiioc4_ide_raw_build_sglist(ide_hwif_t * hwif, struct request *rq)
{
	struct scatterlist *sg = hwif->sg_table;
	int nents = 0;
	ide_task_t *args = rq->special;
	u8 *virt_addr = rq->buffer;
	int sector_count = rq->nr_sectors;

	if (args->command_type == IDE_DRIVE_TASK_RAW_WRITE)
		hwif->sg_dma_direction = PCI_DMA_TODEVICE;
	else
		hwif->sg_dma_direction = PCI_DMA_FROMDEVICE;
#if 1
	if (sector_count > 128) {
		memset(&sg[nents], 0, sizeof (*sg));
		sg[nents].address = virt_addr;
		sg[nents].length = 128 * SECTOR_SIZE;
		nents++;
		virt_addr = virt_addr + (128 * SECTOR_SIZE);
		sector_count -= 128;
	}
	memset(&sg[nents], 0, sizeof (*sg));
	sg[nents].address = virt_addr;
	sg[nents].length = sector_count * SECTOR_SIZE;
	nents++;
#else
	while (sector_count > 128) {
		memset(&sg[nents], 0, sizeof (*sg));
		sg[nents].address = virt_addr;
		sg[nents].length = 128 * SECTOR_SIZE;
		nents++;
		virt_addr = virt_addr + (128 * SECTOR_SIZE);
		sector_count -= 128;
	};
	memset(&sg[nents], 0, sizeof (*sg));
	sg[nents].address = virt_addr;
	sg[nents].length = sector_count * SECTOR_SIZE;
	nents++;
#endif
	return pci_map_sg(hwif->pci_dev, sg, nents, hwif->sg_dma_direction);
}
Example #13
int videobuf_dma_map(struct videobuf_queue* q,struct videobuf_dmabuf *dma)
{
	void                   *dev=q->dev;

	MAGIC_CHECK(dma->magic,MAGIC_DMABUF);
	BUG_ON(0 == dma->nr_pages);

	if (dma->pages) {
		dma->sglist = videobuf_pages_to_sg(dma->pages, dma->nr_pages,
						   dma->offset);
	}
	if (dma->vmalloc) {
		dma->sglist = videobuf_vmalloc_to_sg
						(dma->vmalloc,dma->nr_pages);
	}
	if (dma->bus_addr) {
		dma->sglist = kmalloc(sizeof(struct scatterlist), GFP_KERNEL);
		if (NULL != dma->sglist) {
			dma->sglen  = 1;
			sg_dma_address(&dma->sglist[0]) = dma->bus_addr & PAGE_MASK;
			dma->sglist[0].offset           = dma->bus_addr & ~PAGE_MASK;
			sg_dma_len(&dma->sglist[0])     = dma->nr_pages * PAGE_SIZE;
		}
	}
	if (NULL == dma->sglist) {
		dprintk(1,"scatterlist is NULL\n");
		return -ENOMEM;
	}
	if (!dma->bus_addr) {
		dma->sglen = pci_map_sg(dev,dma->sglist,
					dma->nr_pages, dma->direction);
		if (0 == dma->sglen) {
			printk(KERN_WARNING
			       "%s: videobuf_map_sg failed\n",__FUNCTION__);
			kfree(dma->sglist);
			dma->sglist = NULL;
			dma->sglen = 0;
			return -EIO;
		}
	}
	return 0;
}
Example #14
uint BCMFASTPATH
osl_dma_map(osl_t *osh, void *va, uint size, int direction, void *p, hnddma_seg_map_t *dmah)
{
	int dir;

	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
	dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;

#if defined(__ARM_ARCH_7A__) && defined(BCMDMASGLISTOSL)
	if (dmah != NULL) {
		int32 nsegs, i, totsegs = 0, totlen = 0;
		struct scatterlist *sg, _sg[16];
		struct sk_buff *skb;
		for (skb = (struct sk_buff *)p; skb != NULL; skb = PKTNEXT(osh, skb)) {
			sg = &_sg[totsegs];
			if (skb_is_nonlinear(skb)) {
				nsegs = skb_to_sgvec(skb, sg, 0, PKTLEN(osh, skb));
				ASSERT((nsegs > 0) && (nsegs <= 16));
				pci_map_sg(osh->pdev, sg, nsegs, dir);
			} else {
				nsegs = 1;
				sg->page_link = 0;
				sg_set_buf(sg, PKTDATA(osh, skb), PKTLEN(osh, skb));

				pci_map_single(osh->pdev, PKTDATA(osh, skb),
				    PKTISCTF(osh, skb) ? CTFMAPSZ : PKTLEN(osh, skb), dir);
			}
			totsegs += nsegs;
			totlen += PKTLEN(osh, skb);
		}
		dmah->nsegs = totsegs;
		dmah->origsize = totlen;
		for (i = 0, sg = _sg; i < totsegs; i++, sg++) {
			dmah->segs[i].addr = sg_phys(sg);
			dmah->segs[i].length = sg->length;
		}
		return dmah->segs[0].addr;
	}
#endif

	return (pci_map_single(osh->pdev, va, size, dir));
}
Example #15
File: eesox.c  Project: fgeraci/cs518-sched
/* Prototype: fasdmatype_t eesoxscsi_dma_setup(host, SCpnt, direction, min_type)
 * Purpose  : initialises DMA/PIO
 * Params   : host      - host
 *	      SCpnt     - command
 *	      direction - DMA on to/off of card
 *	      min_type  - minimum DMA support that we must have for this transfer
 * Returns  : type of transfer to be performed
 */
static fasdmatype_t
eesoxscsi_dma_setup(struct Scsi_Host *host, Scsi_Pointer *SCp,
		       fasdmadir_t direction, fasdmatype_t min_type)
{
	EESOXScsi_Info *info = (EESOXScsi_Info *)host->hostdata;
	int dmach = host->dma_channel;

	if (dmach != NO_DMA &&
	    (min_type == fasdma_real_all || SCp->this_residual >= 512)) {
		int bufs = SCp->buffers_residual;
		int pci_dir, dma_dir;

		if (bufs)
			memcpy(info->sg + 1, SCp->buffer + 1,
				sizeof(struct scatterlist) * bufs);
		info->sg[0].address = SCp->ptr;
		info->sg[0].page    = NULL;
		info->sg[0].length  = SCp->this_residual;

		if (direction == DMA_OUT)
			pci_dir = PCI_DMA_TODEVICE,
			dma_dir = DMA_MODE_WRITE;
		else
			pci_dir = PCI_DMA_FROMDEVICE,
			dma_dir = DMA_MODE_READ;

		pci_map_sg(NULL, info->sg, bufs + 1, pci_dir);

		disable_dma(dmach);
		set_dma_sg(dmach, info->sg, bufs + 1);
		set_dma_mode(dmach, dma_dir);
		enable_dma(dmach);
		return fasdma_real_all;
	}
	/*
	 * We don't do DMA, we only do slow PIO
	 *
	 * Some day, we will do Pseudo DMA
	 */
	return fasdma_pseudo;
}
Example #16
void *saa7146_vmalloc_build_pgtable(struct pci_dev *pci, long length, struct saa7146_pgtable *pt)
{
    int pages = (length+PAGE_SIZE-1)/PAGE_SIZE;
    void *mem = vmalloc_32(length);
    int slen = 0;

    if (NULL == mem)
        goto err_null;

    if (!(pt->slist = vmalloc_to_sg(mem, pages)))
        goto err_free_mem;

    if (saa7146_pgtable_alloc(pci, pt))
        goto err_free_slist;

    pt->nents = pages;
    slen = pci_map_sg(pci,pt->slist,pt->nents,PCI_DMA_FROMDEVICE);
    if (0 == slen)
        goto err_free_pgtable;

    if (0 != saa7146_pgtable_build_single(pci, pt, pt->slist, slen))
        goto err_unmap_sg;

    return mem;

err_unmap_sg:
    pci_unmap_sg(pci, pt->slist, pt->nents, PCI_DMA_FROMDEVICE);
err_free_pgtable:
    saa7146_pgtable_free(pci, pt);
err_free_slist:
    kfree(pt->slist);
    pt->slist = NULL;
err_free_mem:
    vfree(mem);
err_null:
    return NULL;
}
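The err_* labels above already spell out the release order, so the matching free path when the buffer is no longer needed is easy to sketch. The function name below is made up for illustration; the real saa7146 core provides its own helper for this.

/* Hypothetical helper; mirrors the error unwind of the builder above. */
static void example_vfree_destroy_pgtable(struct pci_dev *pci, void *mem,
					  struct saa7146_pgtable *pt)
{
	pci_unmap_sg(pci, pt->slist, pt->nents, PCI_DMA_FROMDEVICE);
	saa7146_pgtable_free(pci, pt);
	kfree(pt->slist);
	pt->slist = NULL;
	vfree(mem);
}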
Example #17
/**
 * crystalhd_map_dio - Map user address for DMA
 * @adp:	Adapter instance
 * @ubuff:	User buffer to map.
 * @ubuff_sz:	User buffer size.
 * @uv_offset:	UV buffer offset.
 * @en_422mode: TRUE:422 FALSE:420 Capture mode.
 * @dir_tx:	TRUE for Tx (To device from host)
 * @dio_hnd:	Handle to mapped DIO request.
 *
 * Return:
 *	Status.
 *
 * This routine maps user address and lock pages for DMA.
 *
 */
BC_STATUS crystalhd_map_dio(struct crystalhd_adp *adp, void *ubuff,
			  uint32_t ubuff_sz, uint32_t uv_offset,
			  bool en_422mode, bool dir_tx,
			  struct crystalhd_dio_req **dio_hnd)
{
	struct device *dev;
	struct crystalhd_dio_req	*dio;
	uint32_t start = 0, end = 0, count = 0;
	uint32_t spsz = 0;
	unsigned long uaddr = 0, uv_start = 0;
	int i = 0, rw = 0, res = 0, nr_pages = 0, skip_fb_sg = 0;

	if (!adp || !ubuff || !ubuff_sz || !dio_hnd) {
		printk(KERN_ERR "%s: Invalid arg\n", __func__);
		return BC_STS_INV_ARG;
	}

	dev = &adp->pdev->dev;

	/* Compute pages */
	uaddr = (unsigned long)ubuff;
	count = ubuff_sz;
	end = (uaddr + count + PAGE_SIZE - 1) >> PAGE_SHIFT;
	start = uaddr >> PAGE_SHIFT;
	nr_pages = end - start;

	if (!count || ((uaddr + count) < uaddr)) {
		dev_err(dev, "User addr overflow!!\n");
		return BC_STS_INV_ARG;
	}

	dio = crystalhd_alloc_dio(adp);
	if (!dio) {
		dev_err(dev, "dio pool empty..\n");
		return BC_STS_INSUFF_RES;
	}

	if (dir_tx) {
		rw = WRITE;
		dio->direction = DMA_TO_DEVICE;
	} else {
		rw = READ;
		dio->direction = DMA_FROM_DEVICE;
	}

	if (nr_pages > dio->max_pages) {
		dev_err(dev, "max_pages(%d) exceeded(%d)!!\n",
			dio->max_pages, nr_pages);
		crystalhd_unmap_dio(adp, dio);
		return BC_STS_INSUFF_RES;
	}

	if (uv_offset) {
		uv_start = (uaddr + uv_offset)  >> PAGE_SHIFT;
		dio->uinfo.uv_sg_ix = uv_start - start;
		dio->uinfo.uv_sg_off = ((uaddr + uv_offset) & ~PAGE_MASK);
	}

	dio->fb_size = ubuff_sz & 0x03;
	if (dio->fb_size) {
		res = copy_from_user(dio->fb_va,
				     (void *)(uaddr + count - dio->fb_size),
				     dio->fb_size);
		if (res) {
			dev_err(dev, "failed %d to copy %u fill bytes from %p\n",
				res, dio->fb_size,
				(void *)(uaddr + count-dio->fb_size));
			crystalhd_unmap_dio(adp, dio);
			return BC_STS_INSUFF_RES;
		}
	}

	down_read(&current->mm->mmap_sem);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,9,0)
	res = get_user_pages(uaddr, nr_pages, rw == READ ? FOLL_WRITE : 0,
			     dio->pages, NULL);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0)
	res = get_user_pages_remote(current, current->mm, uaddr, nr_pages, rw == READ,
			     0, dio->pages, NULL);
#else
	res = get_user_pages(current, current->mm, uaddr, nr_pages, rw == READ,
			     0, dio->pages, NULL);
#endif

	up_read(&current->mm->mmap_sem);

	/* Save for release..*/
	dio->sig = crystalhd_dio_locked;
	if (res < nr_pages) {
		dev_err(dev, "get pages failed: %d-%d\n", nr_pages, res);
		dio->page_cnt = res;
		crystalhd_unmap_dio(adp, dio);
		return BC_STS_ERROR;
	}

	dio->page_cnt = nr_pages;
	/* Get scatter/gather */
	crystalhd_init_sg(dio->sg, dio->page_cnt);
	crystalhd_set_sg(&dio->sg[0], dio->pages[0], 0, uaddr & ~PAGE_MASK);
	if (nr_pages > 1) {
		dio->sg[0].length = PAGE_SIZE - dio->sg[0].offset;

#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 23)
#ifdef CONFIG_X86_64
		dio->sg[0].dma_length = dio->sg[0].length;
#endif
#endif
		count -= dio->sg[0].length;
		for (i = 1; i < nr_pages; i++) {
			if (count < 4) {
				spsz = count;
				skip_fb_sg = 1;
			} else {
				spsz = (count < PAGE_SIZE) ?
					(count & ~0x03) : PAGE_SIZE;
			}
			crystalhd_set_sg(&dio->sg[i], dio->pages[i], spsz, 0);
			count -= spsz;
		}
	} else {
		if (count < 4) {
			dio->sg[0].length = count;
			skip_fb_sg = 1;
		} else {
			dio->sg[0].length = count - dio->fb_size;
		}
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 23)
#ifdef CONFIG_X86_64
		dio->sg[0].dma_length = dio->sg[0].length;
#endif
#endif
	}
	dio->sg_cnt = pci_map_sg(adp->pdev, dio->sg,
				 dio->page_cnt, dio->direction);
	if (dio->sg_cnt <= 0) {
		dev_err(dev, "sg map %d-%d\n", dio->sg_cnt, dio->page_cnt);
		crystalhd_unmap_dio(adp, dio);
		return BC_STS_ERROR;
	}
	if (dio->sg_cnt && skip_fb_sg)
		dio->sg_cnt -= 1;
	dio->sig = crystalhd_dio_sg_mapped;
	/* Fill in User info.. */
	dio->uinfo.xfr_len   = ubuff_sz;
	dio->uinfo.xfr_buff  = ubuff;
	dio->uinfo.uv_offset = uv_offset;
	dio->uinfo.b422mode  = en_422mode;
	dio->uinfo.dir_tx    = dir_tx;

	*dio_hnd = dio;

	return BC_STS_SUCCESS;
}
Example #18
File: skd_main.c  Project: GavinHwa/linux
static int skd_preop_sg_list(struct skd_device *skdev,
			     struct skd_request_context *skreq)
{
	struct request *req = skreq->req;
	int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
	int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;
	struct scatterlist *sg = &skreq->sg[0];
	int n_sg;
	int i;

	skreq->sg_byte_count = 0;

	/* SKD_ASSERT(skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD ||
		   skreq->sg_data_dir == SKD_DATA_DIR_CARD_TO_HOST); */

	n_sg = blk_rq_map_sg(skdev->queue, req, sg);
	if (n_sg <= 0)
		return -EINVAL;

	/*
	 * Map scatterlist to PCI bus addresses.
	 * Note PCI might change the number of entries.
	 */
	n_sg = pci_map_sg(skdev->pdev, sg, n_sg, pci_dir);
	if (n_sg <= 0)
		return -EINVAL;

	SKD_ASSERT(n_sg <= skdev->sgs_per_request);

	skreq->n_sg = n_sg;

	for (i = 0; i < n_sg; i++) {
		struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
		u32 cnt = sg_dma_len(&sg[i]);
		uint64_t dma_addr = sg_dma_address(&sg[i]);

		sgd->control = FIT_SGD_CONTROL_NOT_LAST;
		sgd->byte_count = cnt;
		skreq->sg_byte_count += cnt;
		sgd->host_side_addr = dma_addr;
		sgd->dev_side_addr = 0;
	}

	skreq->sksg_list[n_sg - 1].next_desc_ptr = 0LL;
	skreq->sksg_list[n_sg - 1].control = FIT_SGD_CONTROL_LAST;

	if (unlikely(skdev->dbg_level > 1)) {
		pr_debug("%s:%s:%d skreq=%x sksg_list=%p sksg_dma=%llx\n",
			 skdev->name, __func__, __LINE__,
			 skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
		for (i = 0; i < n_sg; i++) {
			struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
			pr_debug("%s:%s:%d   sg[%d] count=%u ctrl=0x%x "
				 "addr=0x%llx next=0x%llx\n",
				 skdev->name, __func__, __LINE__,
				 i, sgd->byte_count, sgd->control,
				 sgd->host_side_addr, sgd->next_desc_ptr);
		}
	}

	return 0;
}
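The mapping built here must be released once the request completes. A minimal sketch of the post-op counterpart, reusing the same fields; the real driver has its own postop routine whose exact body is not shown in this list, so the code below is an assumption.

static void example_postop_sg_list(struct skd_device *skdev,
				   struct skd_request_context *skreq)
{
	int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
	int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;

	/* Release the bus mapping created in skd_preop_sg_list(). */
	pci_unmap_sg(skdev->pdev, &skreq->sg[0], skreq->n_sg, pci_dir);
	skreq->n_sg = 0;
}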
Example #19
File: qla_iocb.c  Project: rcplay/snake-os
/**
 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_start_scsi(srb_t *sp)
{
    int		ret;
    unsigned long   flags;
    scsi_qla_host_t	*ha;
    struct scsi_cmnd *cmd;
    uint32_t	*clr_ptr;
    uint32_t        index;
    uint32_t	handle;
    struct cmd_type_7 *cmd_pkt;
    struct scatterlist *sg;
    uint16_t	cnt;
    uint16_t	req_cnt;
    uint16_t	tot_dsds;
    struct device_reg_24xx __iomem *reg;

    /* Setup device pointers. */
    ret = 0;
    ha = sp->ha;
    reg = &ha->iobase->isp24;
    cmd = sp->cmd;
    /* So we know we haven't pci_map'ed anything yet */
    tot_dsds = 0;

    /* Send marker if required */
    if (ha->marker_needed != 0) {
        if (qla2x00_marker(ha, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
            return QLA_FUNCTION_FAILED;
        }
        ha->marker_needed = 0;
    }

    /* Acquire ring specific lock */
    spin_lock_irqsave(&ha->hardware_lock, flags);

    /* Check for room in outstanding command list. */
    handle = ha->current_outstanding_cmd;
    for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
        handle++;
        if (handle == MAX_OUTSTANDING_COMMANDS)
            handle = 1;
        if (ha->outstanding_cmds[handle] == 0)
            break;
    }
    if (index == MAX_OUTSTANDING_COMMANDS)
        goto queuing_error;

    /* Map the sg table so we have an accurate count of sg entries needed */
    if (cmd->use_sg) {
        sg = (struct scatterlist *) cmd->request_buffer;
        tot_dsds = pci_map_sg(ha->pdev, sg, cmd->use_sg,
                              cmd->sc_data_direction);
        if (tot_dsds == 0)
            goto queuing_error;
    } else if (cmd->request_bufflen) {
        dma_addr_t      req_dma;

        req_dma = pci_map_single(ha->pdev, cmd->request_buffer,
                                 cmd->request_bufflen, cmd->sc_data_direction);
        if (dma_mapping_error(req_dma))
            goto queuing_error;

        sp->dma_handle = req_dma;
        tot_dsds = 1;
    }

    req_cnt = qla24xx_calc_iocbs(tot_dsds);
    if (ha->req_q_cnt < (req_cnt + 2)) {
        cnt = (uint16_t)RD_REG_DWORD_RELAXED(&reg->req_q_out);
        if (ha->req_ring_index < cnt)
            ha->req_q_cnt = cnt - ha->req_ring_index;
        else
            ha->req_q_cnt = ha->request_q_length -
                            (ha->req_ring_index - cnt);
    }
    if (ha->req_q_cnt < (req_cnt + 2))
        goto queuing_error;

    /* Build command packet. */
    ha->current_outstanding_cmd = handle;
    ha->outstanding_cmds[handle] = sp;
    sp->ha = ha;
    sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
    ha->req_q_cnt -= req_cnt;

    cmd_pkt = (struct cmd_type_7 *)ha->request_ring_ptr;
    cmd_pkt->handle = handle;

    /* Zero out remaining portion of packet. */
    /*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
    clr_ptr = (uint32_t *)cmd_pkt + 2;
    memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
    cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

    /* Set NPORT-ID and LUN number*/
    cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
    cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
    cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
    cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;

    int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
    host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

    /* Load SCSI command packet. */
    memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
    host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

    cmd_pkt->byte_count = cpu_to_le32((uint32_t)cmd->request_bufflen);

    /* Build IOCB segments */
    qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);

    /* Set total data segment count. */
    cmd_pkt->entry_count = (uint8_t)req_cnt;
    wmb();

    /* Adjust ring index. */
    ha->req_ring_index++;
    if (ha->req_ring_index == ha->request_q_length) {
        ha->req_ring_index = 0;
        ha->request_ring_ptr = ha->request_ring;
    } else
        ha->request_ring_ptr++;

    sp->flags |= SRB_DMA_VALID;
    sp->state = SRB_ACTIVE_STATE;

    /* Set chip new ring index. */
    WRT_REG_DWORD(&reg->req_q_in, ha->req_ring_index);
    RD_REG_DWORD_RELAXED(&reg->req_q_in);		/* PCI Posting. */

    /* Manage unprocessed RIO/ZIO commands in response queue. */
    if (ha->flags.process_response_queue &&
            ha->response_ring_ptr->signature != RESPONSE_PROCESSED)
        qla24xx_process_response_queue(ha);

    spin_unlock_irqrestore(&ha->hardware_lock, flags);
    return QLA_SUCCESS;

queuing_error:
    if (cmd->use_sg && tot_dsds) {
        sg = (struct scatterlist *) cmd->request_buffer;
        pci_unmap_sg(ha->pdev, sg, cmd->use_sg,
                     cmd->sc_data_direction);
    } else if (tot_dsds) {
        pci_unmap_single(ha->pdev, sp->dma_handle,
                         cmd->request_bufflen, cmd->sc_data_direction);
    }
    spin_unlock_irqrestore(&ha->hardware_lock, flags);

    return QLA_FUNCTION_FAILED;
}
Example #20
static int ivtv_yuv_prep_user_dma(struct ivtv *itv, struct ivtv_user_dma *dma,
				 struct ivtv_dma_frame *args)
{
	struct ivtv_dma_page_info y_dma;
	struct ivtv_dma_page_info uv_dma;

	int i;
	int y_pages, uv_pages;

	unsigned long y_buffer_offset, uv_buffer_offset;
	int y_decode_height, uv_decode_height, y_size;
	int frame = atomic_read(&itv->yuv_info.next_fill_frame);

	y_buffer_offset = IVTV_DEC_MEM_START + yuv_offset[frame];
	uv_buffer_offset = y_buffer_offset + IVTV_YUV_BUFFER_UV_OFFSET;

	y_decode_height = uv_decode_height = args->src.height + args->src.top;

	if (y_decode_height < 512-16)
		y_buffer_offset += 720 * 16;

	if (y_decode_height & 15)
		y_decode_height = (y_decode_height + 16) & ~15;

	if (uv_decode_height & 31)
		uv_decode_height = (uv_decode_height + 32) & ~31;

	y_size = 720 * y_decode_height;

	/* Still in USE */
	if (dma->SG_length || dma->page_count) {
		IVTV_DEBUG_WARN("prep_user_dma: SG_length %d page_count %d still full?\n",
				dma->SG_length, dma->page_count);
		return -EBUSY;
	}

	ivtv_udma_get_page_info (&y_dma, (unsigned long)args->y_source, 720 * y_decode_height);
	ivtv_udma_get_page_info (&uv_dma, (unsigned long)args->uv_source, 360 * uv_decode_height);

	/* Get user pages for DMA Xfer */
	down_read(&current->mm->mmap_sem);
	y_pages = get_user_pages(current, current->mm, y_dma.uaddr, y_dma.page_count, 0, 1, &dma->map[0], NULL);
	uv_pages = get_user_pages(current, current->mm, uv_dma.uaddr, uv_dma.page_count, 0, 1, &dma->map[y_pages], NULL);
	up_read(&current->mm->mmap_sem);

	dma->page_count = y_dma.page_count + uv_dma.page_count;

	if (y_pages + uv_pages != dma->page_count) {
		IVTV_DEBUG_WARN("failed to map user pages, returned %d instead of %d\n",
				y_pages + uv_pages, dma->page_count);

		for (i = 0; i < dma->page_count; i++) {
			put_page(dma->map[i]);
		}
		dma->page_count = 0;
		return -EINVAL;
	}

	/* Fill & map SG List */
	ivtv_udma_fill_sg_list (dma, &uv_dma, ivtv_udma_fill_sg_list (dma, &y_dma, 0));
	dma->SG_length = pci_map_sg(itv->dev, dma->SGlist, dma->page_count, PCI_DMA_TODEVICE);

	/* Fill SG Array with new values */
	ivtv_udma_fill_sg_array (dma, y_buffer_offset, uv_buffer_offset, y_size);

	/* If we've offset the y plane, ensure top area is blanked */
	if (args->src.height + args->src.top < 512-16) {
		if (itv->yuv_info.blanking_dmaptr) {
			dma->SGarray[dma->SG_length].size = cpu_to_le32(720*16);
			dma->SGarray[dma->SG_length].src = cpu_to_le32(itv->yuv_info.blanking_dmaptr);
			dma->SGarray[dma->SG_length].dst = cpu_to_le32(IVTV_DEC_MEM_START + yuv_offset[frame]);
			dma->SG_length++;
		}
	}

	/* Tag SG Array with Interrupt Bit */
	dma->SGarray[dma->SG_length - 1].size |= cpu_to_le32(0x80000000);

	ivtv_udma_sync_for_device(itv);
	return 0;
}
Example #21
File: ivtv-yuv.c  Project: acton393/linux
static int ivtv_yuv_prep_user_dma(struct ivtv *itv, struct ivtv_user_dma *dma,
				  struct ivtv_dma_frame *args)
{
	struct ivtv_dma_page_info y_dma;
	struct ivtv_dma_page_info uv_dma;
	struct yuv_playback_info *yi = &itv->yuv_info;
	u8 frame = yi->draw_frame;
	struct yuv_frame_info *f = &yi->new_frame_info[frame];
	int i;
	int y_pages, uv_pages;
	unsigned long y_buffer_offset, uv_buffer_offset;
	int y_decode_height, uv_decode_height, y_size;

	y_buffer_offset = IVTV_DECODER_OFFSET + yuv_offset[frame];
	uv_buffer_offset = y_buffer_offset + IVTV_YUV_BUFFER_UV_OFFSET;

	y_decode_height = uv_decode_height = f->src_h + f->src_y;

	if (f->offset_y)
		y_buffer_offset += 720 * 16;

	if (y_decode_height & 15)
		y_decode_height = (y_decode_height + 16) & ~15;

	if (uv_decode_height & 31)
		uv_decode_height = (uv_decode_height + 32) & ~31;

	y_size = 720 * y_decode_height;

	/* Still in USE */
	if (dma->SG_length || dma->page_count) {
		IVTV_DEBUG_WARN
		    ("prep_user_dma: SG_length %d page_count %d still full?\n",
		     dma->SG_length, dma->page_count);
		return -EBUSY;
	}

	ivtv_udma_get_page_info (&y_dma, (unsigned long)args->y_source, 720 * y_decode_height);
	ivtv_udma_get_page_info (&uv_dma, (unsigned long)args->uv_source, 360 * uv_decode_height);

	/* Get user pages for DMA Xfer */
	y_pages = get_user_pages_unlocked(y_dma.uaddr,
			y_dma.page_count, &dma->map[0], FOLL_FORCE);
	uv_pages = 0; /* silence gcc. value is set and consumed only if: */
	if (y_pages == y_dma.page_count) {
		uv_pages = get_user_pages_unlocked(uv_dma.uaddr,
				uv_dma.page_count, &dma->map[y_pages],
				FOLL_FORCE);
	}

	if (y_pages != y_dma.page_count || uv_pages != uv_dma.page_count) {
		int rc = -EFAULT;

		if (y_pages == y_dma.page_count) {
			IVTV_DEBUG_WARN
				("failed to map uv user pages, returned %d "
				 "expecting %d\n", uv_pages, uv_dma.page_count);

			if (uv_pages >= 0) {
				for (i = 0; i < uv_pages; i++)
					put_page(dma->map[y_pages + i]);
				rc = -EFAULT;
			} else {
				rc = uv_pages;
			}
		} else {
			IVTV_DEBUG_WARN
				("failed to map y user pages, returned %d "
				 "expecting %d\n", y_pages, y_dma.page_count);
		}
		if (y_pages >= 0) {
			for (i = 0; i < y_pages; i++)
				put_page(dma->map[i]);
			/*
			 * Inherit the -EFAULT from rc's
			 * initialization, but allow it to be
			 * overriden by uv_pages above if it was an
			 * actual errno.
			 */
		} else {
			rc = y_pages;
		}
		return rc;
	}

	dma->page_count = y_pages + uv_pages;

	/* Fill & map SG List */
	if (ivtv_udma_fill_sg_list (dma, &uv_dma, ivtv_udma_fill_sg_list (dma, &y_dma, 0)) < 0) {
		IVTV_DEBUG_WARN("could not allocate bounce buffers for highmem userspace buffers\n");
		for (i = 0; i < dma->page_count; i++) {
			put_page(dma->map[i]);
		}
		dma->page_count = 0;
		return -ENOMEM;
	}
	dma->SG_length = pci_map_sg(itv->pdev, dma->SGlist, dma->page_count, PCI_DMA_TODEVICE);

	/* Fill SG Array with new values */
	ivtv_udma_fill_sg_array(dma, y_buffer_offset, uv_buffer_offset, y_size);

	/* If we've offset the y plane, ensure top area is blanked */
	if (f->offset_y && yi->blanking_dmaptr) {
		dma->SGarray[dma->SG_length].size = cpu_to_le32(720*16);
		dma->SGarray[dma->SG_length].src = cpu_to_le32(yi->blanking_dmaptr);
		dma->SGarray[dma->SG_length].dst = cpu_to_le32(IVTV_DECODER_OFFSET + yuv_offset[frame]);
		dma->SG_length++;
	}

	/* Tag SG Array with Interrupt Bit */
	dma->SGarray[dma->SG_length - 1].size |= cpu_to_le32(0x80000000);

	ivtv_udma_sync_for_device(itv);
	return 0;
}
Example #22
/**
 * ixgbe_fcoe_ddp_setup - called to set up ddp context
 * @netdev: the corresponding net_device
 * @xid: the exchange id requesting ddp
 * @sgl: the scatter-gather list for this request
 * @sgc: the number of scatter-gather items
 *
 * Returns : 1 for success and 0 for no ddp
 */
static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
				struct scatterlist *sgl, unsigned int sgc,
				int target_mode)
{
	struct ixgbe_adapter *adapter;
	struct ixgbe_hw *hw;
	struct ixgbe_fcoe *fcoe;
	struct ixgbe_fcoe_ddp *ddp;
	struct scatterlist *sg;
	unsigned int i, j, dmacount;
	unsigned int len;
	static const unsigned int bufflen = IXGBE_FCBUFF_MIN;
	unsigned int firstoff = 0;
	unsigned int lastsize;
	unsigned int thisoff = 0;
	unsigned int thislen = 0;
	u32 fcbuff, fcdmarw, fcfltrw, fcrxctl;
	dma_addr_t addr = 0;

	if (!netdev || !sgl || !sgc)
		return 0;

	adapter = netdev_priv(netdev);
	if (xid >= IXGBE_FCOE_DDP_MAX) {
		e_warn(drv, "xid=0x%x out-of-range\n", xid);
		return 0;
	}

	/* no DDP if we are already down or resetting */
	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
	    test_bit(__IXGBE_RESETTING, &adapter->state))
		return 0;

	fcoe = &adapter->fcoe;
	if (!fcoe->pool) {
		e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid);
		return 0;
	}

	ddp = &fcoe->ddp[xid];
	if (ddp->sgl) {
		e_err(drv, "xid 0x%x w/ non-null sgl=%p nents=%d\n",
			xid, ddp->sgl, ddp->sgc);
		return 0;
	}
	ixgbe_fcoe_clear_ddp(ddp);

	/* setup dma from scsi command sgl */
	dmacount = pci_map_sg(adapter->pdev, sgl, sgc, DMA_FROM_DEVICE);
	if (dmacount == 0) {
		e_err(drv, "xid 0x%x DMA map error\n", xid);
		return 0;
	}

	/* alloc the udl from our ddp pool */
	ddp->udl = pci_pool_alloc(fcoe->pool, GFP_ATOMIC, &ddp->udp);
	if (!ddp->udl) {
		e_err(drv, "failed allocated ddp context\n");
		goto out_noddp_unmap;
	}
	ddp->sgl = sgl;
	ddp->sgc = sgc;

	j = 0;
	for_each_sg(sgl, sg, dmacount, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);
		while (len) {
			/* max number of buffers allowed in one DDP context */
			if (j >= IXGBE_BUFFCNT_MAX) {
				e_err(drv, "xid=%x:%d,%d,%d:addr=%llx "
				      "not enough descriptors\n",
					xid, i, j, dmacount, (u64)addr);
				goto out_noddp_free;
			}

			/* get the offset of length of current buffer */
			thisoff = addr & ((dma_addr_t)bufflen - 1);
			thislen = min((bufflen - thisoff), len);
			/*
			 * all but the 1st buffer (j == 0)
			 * must be aligned on bufflen
			 */
			if ((j != 0) && (thisoff))
				goto out_noddp_free;
			/*
			 * all but the last buffer
			 * ((i == (dmacount - 1)) && (thislen == len))
			 * must end at bufflen
			 */
			if (((i != (dmacount - 1)) || (thislen != len))
			    && ((thislen + thisoff) != bufflen))
				goto out_noddp_free;

			ddp->udl[j] = (u64)(addr - thisoff);
			/* only the first buffer may have a non-zero offset */
			if (j == 0)
				firstoff = thisoff;
			len -= thislen;
			addr += thislen;
			j++;
		}
	}
Example #23
/**
 * qla4xxx_send_command_to_isp - issues command to HBA
 * @ha: pointer to host adapter structure.
 * @srb: pointer to SCSI Request Block to be sent to ISP
 *
 * This routine is called by qla4xxx_queuecommand to build an ISP
 * command and pass it to the ISP for execution.
 **/
int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
{
	struct scsi_cmnd *cmd = srb->cmd;
	struct ddb_entry *ddb_entry;
	struct command_t3_entry *cmd_entry;
	struct scatterlist *sg = NULL;

	uint16_t tot_dsds;
	uint16_t req_cnt;

	unsigned long flags;
	uint16_t cnt;
	uint32_t index;
	char tag[2];

	/* Get real lun and adapter */
	ddb_entry = srb->ddb;

	/* Send marker(s) if needed. */
	if (ha->marker_needed == 1) {
		if (qla4xxx_send_marker_iocb(ha, ddb_entry,
					     cmd->device->lun) != QLA_SUCCESS)
			return QLA_ERROR;

		ha->marker_needed = 0;
	}
	tot_dsds = 0;

	/* Acquire hardware specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	index = (uint32_t)cmd->request->tag;

	/* Calculate the number of request entries needed. */
	if (cmd->use_sg) {
		sg = (struct scatterlist *)cmd->request_buffer;
		tot_dsds = pci_map_sg(ha->pdev, sg, cmd->use_sg,
				      cmd->sc_data_direction);
		if (tot_dsds == 0)
			goto queuing_error;
	} else if (cmd->request_bufflen) {
		dma_addr_t	req_dma;

		req_dma = pci_map_single(ha->pdev, cmd->request_buffer,
					 cmd->request_bufflen,
					 cmd->sc_data_direction);
		if (dma_mapping_error(req_dma))
			goto queuing_error;

		srb->dma_handle = req_dma;
		tot_dsds = 1;
	}
	req_cnt = qla4xxx_calc_request_entries(tot_dsds);

	if (ha->req_q_count < (req_cnt + 2)) {
		cnt = (uint16_t) le32_to_cpu(ha->shadow_regs->req_q_out);
		if (ha->request_in < cnt)
			ha->req_q_count = cnt - ha->request_in;
		else
			ha->req_q_count = REQUEST_QUEUE_DEPTH -
				(ha->request_in - cnt);
	}

	if (ha->req_q_count < (req_cnt + 2))
		goto queuing_error;

	/* total iocbs active */
	if ((ha->iocb_cnt + req_cnt) >= REQUEST_QUEUE_DEPTH)
		goto queuing_error;

	/* Build command packet */
	cmd_entry = (struct command_t3_entry *) ha->request_ptr;
	memset(cmd_entry, 0, sizeof(struct command_t3_entry));
	cmd_entry->hdr.entryType = ET_COMMAND;
	cmd_entry->handle = cpu_to_le32(index);
	cmd_entry->target = cpu_to_le16(ddb_entry->fw_ddb_index);
	cmd_entry->connection_id = cpu_to_le16(ddb_entry->connection_id);

	int_to_scsilun(cmd->device->lun, &cmd_entry->lun);
	cmd_entry->cmdSeqNum = cpu_to_le32(ddb_entry->CmdSn);
	cmd_entry->ttlByteCnt = cpu_to_le32(cmd->request_bufflen);
	memcpy(cmd_entry->cdb, cmd->cmnd, cmd->cmd_len);
	cmd_entry->dataSegCnt = cpu_to_le16(tot_dsds);
	cmd_entry->hdr.entryCount = req_cnt;

	/* Set data transfer direction control flags
	 * NOTE: Look at data_direction bits iff there is data to be
	 *	 transferred, as the data direction bit is sometimes filled
	 *	 in when there is no data to be transferred */
	cmd_entry->control_flags = CF_NO_DATA;
	if (cmd->request_bufflen) {
		if (cmd->sc_data_direction == DMA_TO_DEVICE)
			cmd_entry->control_flags = CF_WRITE;
		else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
			cmd_entry->control_flags = CF_READ;

		ha->bytes_xfered += cmd->request_bufflen;
		if (ha->bytes_xfered & ~0xFFFFF){
			ha->total_mbytes_xferred += ha->bytes_xfered >> 20;
			ha->bytes_xfered &= 0xFFFFF;
		}
	}
Example #24
static inline int asd_map_scatterlist(struct sas_task *task,
				      struct sg_el *sg_arr,
				      gfp_t gfp_flags)
{
	struct asd_ascb *ascb = task->lldd_task;
	struct asd_ha_struct *asd_ha = ascb->ha;
	struct scatterlist *sc;
	int num_sg, res;

	if (task->data_dir == PCI_DMA_NONE)
		return 0;

	if (task->num_scatter == 0) {
		void *p = task->scatter;
		dma_addr_t dma = pci_map_single(asd_ha->pcidev, p,
						task->total_xfer_len,
						task->data_dir);
		sg_arr[0].bus_addr = cpu_to_le64((u64)dma);
		sg_arr[0].size = cpu_to_le32(task->total_xfer_len);
		sg_arr[0].flags |= ASD_SG_EL_LIST_EOL;
		return 0;
	}

	num_sg = pci_map_sg(asd_ha->pcidev, task->scatter, task->num_scatter,
			    task->data_dir);
	if (num_sg == 0)
		return -ENOMEM;

	if (num_sg > 3) {
		int i;

		ascb->sg_arr = asd_alloc_coherent(asd_ha,
						  num_sg*sizeof(struct sg_el),
						  gfp_flags);
		if (!ascb->sg_arr) {
			res = -ENOMEM;
			goto err_unmap;
		}
		for (sc = task->scatter, i = 0; i < num_sg; i++, sc++) {
			struct sg_el *sg =
				&((struct sg_el *)ascb->sg_arr->vaddr)[i];
			sg->bus_addr = cpu_to_le64((u64)sg_dma_address(sc));
			sg->size = cpu_to_le32((u32)sg_dma_len(sc));
			if (i == num_sg-1)
				sg->flags |= ASD_SG_EL_LIST_EOL;
		}

		for (sc = task->scatter, i = 0; i < 2; i++, sc++) {
			sg_arr[i].bus_addr =
				cpu_to_le64((u64)sg_dma_address(sc));
			sg_arr[i].size = cpu_to_le32((u32)sg_dma_len(sc));
		}
		sg_arr[1].next_sg_offs = 2 * sizeof(*sg_arr);
		sg_arr[1].flags |= ASD_SG_EL_LIST_EOS;

		memset(&sg_arr[2], 0, sizeof(*sg_arr));
		sg_arr[2].bus_addr=cpu_to_le64((u64)ascb->sg_arr->dma_handle);
	} else {
		int i;
		for (sc = task->scatter, i = 0; i < num_sg; i++, sc++) {
			sg_arr[i].bus_addr =
				cpu_to_le64((u64)sg_dma_address(sc));
			sg_arr[i].size = cpu_to_le32((u32)sg_dma_len(sc));
		}
		sg_arr[i-1].flags |= ASD_SG_EL_LIST_EOL;
	}

	return 0;
err_unmap:
	pci_unmap_sg(asd_ha->pcidev, task->scatter, task->num_scatter,
		     task->data_dir);
	return res;
}
Example #25
/**
 * crystalhd_dioq_fetch_wait - Fetch element from Head.
 * @ioq: DIO queue instance
 * @to_secs: Wait timeout in seconds..
 *
 * Return:
 *	element from the head..
 *
 * Return element from head if Q is not empty. Wait for new element
 * if Q is empty for Timeout seconds.
 */
void *crystalhd_dioq_fetch_wait(struct crystalhd_hw *hw, uint32_t to_secs, uint32_t *sig_pend)
{
    struct device *dev = chddev();
    unsigned long flags = 0;
    int rc = 0;

    crystalhd_rx_dma_pkt *r_pkt = NULL;
    crystalhd_dioq_t *ioq = hw->rx_rdyq;
    uint32_t picYcomp = 0;

    unsigned long fetchTimeout = jiffies + msecs_to_jiffies(to_secs * 1000);

    if (!ioq || (ioq->sig != BC_LINK_DIOQ_SIG) || !to_secs || !sig_pend) {
        dev_err(dev, "%s: Invalid arg\n", __func__);
        return r_pkt;
    }

    spin_lock_irqsave(&ioq->lock, flags);
#ifndef __APPLE__
    while (!time_after_eq(jiffies, fetchTimeout)) {
#else
    while (fetchTimeout >= jiffies) {
#endif
        if(ioq->count == 0) {
            spin_unlock_irqrestore(&ioq->lock, flags);
            crystalhd_wait_on_event(&ioq->event, (ioq->count > 0),
                                    250, rc, false);
        }
        else
            spin_unlock_irqrestore(&ioq->lock, flags);
        if (rc == 0) {
            // Found a packet. Check if it is a repeated picture or not
            // Drop the picture if it is a repeated picture
            // Lock against checks from get status calls
            if(down_interruptible(&hw->fetch_sem))
                goto sem_error;
#ifndef __APPLE__
            r_pkt = crystalhd_dioq_fetch(ioq);
#else
            r_pkt = (crystalhd_rx_dma_pkt*)crystalhd_dioq_fetch(ioq);
#endif
            // If format change packet, then return with out checking anything
            if (r_pkt->flags & (COMP_FLAG_PIB_VALID | COMP_FLAG_FMT_CHANGE))
                goto sem_rel_return;
            if (hw->adp->pdev->device == BC_PCI_DEVID_LINK) {
                picYcomp = link_GetRptDropParam(hw, hw->PICHeight, hw->PICWidth, (void *)r_pkt);
            }
            else {
                // For Flea, we don't have the width and height handy since they
                // come in the PIB in the picture, so this function will also
                // populate the width and height
                picYcomp = flea_GetRptDropParam(hw, (void *)r_pkt);
                // For flea it is the above function that indicated format change
                if(r_pkt->flags & (COMP_FLAG_PIB_VALID | COMP_FLAG_FMT_CHANGE))
                    goto sem_rel_return;
            }
            if(!picYcomp || (picYcomp == hw->LastPicNo) ||
                    (picYcomp == hw->LastTwoPicNo)) {
                //Discard picture
                if(picYcomp != 0) {
                    hw->LastTwoPicNo = hw->LastPicNo;
                    hw->LastPicNo = picYcomp;
                }
                crystalhd_dioq_add(hw->rx_freeq, r_pkt, false, r_pkt->pkt_tag);
                r_pkt = NULL;
                up(&hw->fetch_sem);
            } else {
                if(hw->adp->pdev->device == BC_PCI_DEVID_LINK) {
                    if((picYcomp - hw->LastPicNo) > 1) {
                        dev_info(dev, "MISSING %u PICTURES\n", (picYcomp - hw->LastPicNo));
                    }
                }
                hw->LastTwoPicNo = hw->LastPicNo;
                hw->LastPicNo = picYcomp;
                goto sem_rel_return;
            }
        } else if (rc == -EINTR) {
            *sig_pend = 1;
            return NULL;
        }
        spin_lock_irqsave(&ioq->lock, flags);
    }
    dev_info(dev, "FETCH TIMEOUT\n");
    spin_unlock_irqrestore(&ioq->lock, flags);
    return r_pkt;
sem_error:
    return NULL;
sem_rel_return:
    up(&hw->fetch_sem);
    return r_pkt;
}

#ifdef __APPLE__
static bool CustomSegmentFunction(IODMACommand *target, IODMACommand::Segment64 segment, void *sglMem, UInt32 segmentIndex)
{
    struct scatterlist *sg = (scatterlist*)sglMem;
    sg[segmentIndex].dma_address = (uint32_t)segment.fIOVMAddr;
    sg[segmentIndex].dma_length = (unsigned int)segment.fLength;
    //MPCLOG(MPCLOG_DBG,"CustomSegmentFunction: 0x%X/%d/%d\n",(unsigned int)segment.fIOVMAddr,(unsigned int)segment.fLength, (unsigned int)segmentIndex);
    return true;
}
#endif
/**
 * crystalhd_map_dio - Map user address for DMA
 * @adp:	Adapter instance
 * @ubuff:	User buffer to map.
 * @ubuff_sz:	User buffer size.
 * @uv_offset:	UV buffer offset.
 * @en_422mode: TRUE:422 FALSE:420 Capture mode.
 * @dir_tx:	TRUE for Tx (To device from host)
 * @dio_hnd:	Handle to mapped DIO request.
 *
 * Return:
 *	Status.
 *
 * This routine maps user address and lock pages for DMA.
 *
 */
BC_STATUS crystalhd_map_dio(struct crystalhd_adp *adp, void *ubuff,
                            uint32_t ubuff_sz, uint32_t uv_offset,
                            bool en_422mode, bool dir_tx,
                            crystalhd_dio_req **dio_hnd)
{
    struct device *dev;
    crystalhd_dio_req	*dio;
    uint32_t start = 0, end = 0, count = 0;
#ifndef __APPLE__
    uint32_t spsz = 0;
    unsigned long uaddr = 0, uv_start = 0;
    int i = 0, rw = 0, res = 0, nr_pages = 0, skip_fb_sg = 0;
#else
    unsigned long uaddr = 0, uv_start = 0;
    int rw = 0;
    uint32_t nr_pages = 0;
#endif

    if (!adp || !ubuff || !ubuff_sz || !dio_hnd) {
        printk(KERN_ERR "%s: Invalid arg\n", __func__);
        return BC_STS_INV_ARG;
    }

    dev = &adp->pdev->dev;

    /* Compute pages */
    uaddr = (unsigned long)ubuff;
    count = ubuff_sz;
    end = (uaddr + count + PAGE_SIZE - 1) >> PAGE_SHIFT;
    start = uaddr >> PAGE_SHIFT;
    nr_pages = end - start;

    if (!count || ((uaddr + count) < uaddr)) {
        dev_err(dev, "User addr overflow!!\n");
        return BC_STS_INV_ARG;
    }

    dio = crystalhd_alloc_dio(adp);
    if (!dio) {
        dev_err(dev, "dio pool empty..\n");
        return BC_STS_INSUFF_RES;
    }

    if (dir_tx) {
        rw = WRITE;
        dio->direction = DMA_TO_DEVICE;
    } else {
        rw = READ;
        dio->direction = DMA_FROM_DEVICE;
    }

    if (nr_pages > dio->max_pages) {
        dev_err(dev, "max_pages(%d) exceeded(%d)!!\n",
                dio->max_pages, nr_pages);
        crystalhd_unmap_dio(adp, dio);
        return BC_STS_INSUFF_RES;
    }

#ifndef __APPLE__
    if (uv_offset) {
        uv_start = (uaddr + uv_offset)  >> PAGE_SHIFT;
        dio->uinfo.uv_sg_ix = uv_start - start;
        dio->uinfo.uv_sg_off = ((uaddr + uv_offset) & ~PAGE_MASK);
    }

    dio->fb_size = ubuff_sz & 0x03;
    if (dio->fb_size) {
        res = copy_from_user(dio->fb_va,
                             (void *)(uaddr + count - dio->fb_size),
                             dio->fb_size);
        if (res) {
            dev_err(dev, "failed %d to copy %u fill bytes from %p\n",
                    res, dio->fb_size,
                    (void *)(uaddr + count-dio->fb_size));
            crystalhd_unmap_dio(adp, dio);
            return BC_STS_INSUFF_RES;
        }
    }

    down_read(&current->mm->mmap_sem);
    res = get_user_pages(current, current->mm, uaddr, nr_pages, rw == READ,
                         0, dio->pages, NULL);
    up_read(&current->mm->mmap_sem);

    /* Save for release..*/
    dio->sig = crystalhd_dio_locked;
    if (res < nr_pages) {
        dev_err(dev, "get pages failed: %d-%d\n", nr_pages, res);
        dio->page_cnt = res;
        crystalhd_unmap_dio(adp, dio);
        return BC_STS_ERROR;
    }

    dio->page_cnt = nr_pages;
    /* Get scatter/gather */
    crystalhd_init_sg(dio->sg, dio->page_cnt);
    crystalhd_set_sg(&dio->sg[0], dio->pages[0], 0, uaddr & ~PAGE_MASK);
    if (nr_pages > 1) {
        dio->sg[0].length = PAGE_SIZE - dio->sg[0].offset;

#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 23)
#ifdef CONFIG_X86_64
        dio->sg[0].dma_length = dio->sg[0].length;
#endif
#endif
        count -= dio->sg[0].length;
        for (i = 1; i < nr_pages; i++) {
            if (count < 4) {
                spsz = count;
                skip_fb_sg = 1;
            } else {
                spsz = (count < PAGE_SIZE) ?
                       (count & ~0x03) : PAGE_SIZE;
            }
            crystalhd_set_sg(&dio->sg[i], dio->pages[i], spsz, 0);
            count -= spsz;
        }
    } else {
        if (count < 4) {
            dio->sg[0].length = count;
            skip_fb_sg = 1;
        } else {
            dio->sg[0].length = count - dio->fb_size;
        }
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 23)
#ifdef CONFIG_X86_64
        dio->sg[0].dma_length = dio->sg[0].length;
#endif
#endif
    }
    dio->sg_cnt = pci_map_sg(adp->pdev, dio->sg,
                             dio->page_cnt, dio->direction);
    if (dio->sg_cnt <= 0) {
        dev_err(dev, "sg map %d-%d\n", dio->sg_cnt, dio->page_cnt);
        crystalhd_unmap_dio(adp, dio);
        return BC_STS_ERROR;
    }
    if (dio->sg_cnt && skip_fb_sg)
        dio->sg_cnt -= 1;
#else
    IODMACommand          *dma_command;
    IOMemoryDescriptor    *mem_desc;
    IOReturn              result;

    if (uv_offset) {
        uv_start = (uaddr + uv_offset)  >> PAGE_SHIFT;
        dio->uinfo.uv_sg_ix = uv_start - start;
        dio->uinfo.uv_sg_off = ((uaddr + uv_offset) & PAGE_MASK);
    }

    dio->fb_size = ubuff_sz & 0x03;

    // map user memory into kernel memory
    mem_desc = IOMemoryDescriptor::withAddress(uaddr, count, dir_tx ? kIODirectionIn : kIODirectionOut,
               current_task() );
    if (mem_desc) {
        result = mem_desc->prepare();
        //IOLog("bc_link_map_dio:mem_desc=0x%X, prepare result 0x%X \n", (unsigned int)mem_desc, (int)result);
        dio->io_class = (void*)mem_desc;
    } else {
        dev_err(&adp->pdev->dev, "bc_link_map_dio:IOMemoryDescriptor::withAddress failed\n");
        crystalhd_free_dio(adp,dio);
        return BC_STS_INSUFF_RES;
    }

    // Save for release..
    dio->sig = crystalhd_dio_locked;

    // check transfer count, counts less than four are handled using only the fill byte page
    if (count > 3) {
        do {
            // 32 bit physical address generation using IODMACommand
            // any memory above 4Gb in the memory descriptor will be buffered
            // to memory below the 4G line, on machines without remapping HW support
            dma_command = IODMACommand::withSpecification(
                              // custom segment function
                              (IODMACommand::SegmentFunction)CustomSegmentFunction,
                              // numAddressBits
                              32,
                              // maxSegmentSize
                              PAGE_SIZE,
                              // mappingOptions - kMapped for DMA addresses
                              IODMACommand::kMapped,
                              // maxTransferSize - no restriction
                              0,
                              // alignment - no restriction
                              1 );
            if (!dma_command) {
                dev_err(&adp->pdev->dev, "IODMACommand::withSpecification failed\n");
                break;
            }

            //IOLog("bc_link_map_dio:dma_command=0x%X \n", (unsigned int)dma_command);
            // point IODMACommand at the memory descriptor, don't use auto prepare option
            result = dma_command->setMemoryDescriptor(mem_desc, false);
            if (kIOReturnSuccess != result) {
                dev_err(&adp->pdev->dev, "setMemoryDescriptor failed (0x%x)\n", result);
                break;
            }
            dio->io_class = (void*)dma_command;
            result = dma_command->prepare(0, count, true);
            //IOLog("bc_link_map_dio:dma_command->prepare() result 0x%X \n",(int)result);

            // generate scatter/gather list using custom segment function. This routine will make
            // sure that the first s/g entry will have the correct address and length for user
            // addresses that are not page aligned.
            UInt64 offset = 0;
            result = dma_command->gen32IOVMSegments(&offset, (IODMACommand::Segment32*)dio->sg, (UInt32*)&nr_pages);

            //IOLog("bc_link_map_dio:gen32IOVMSegments nr_pages %d, result %d\n", (int)nr_pages, (int)result);
            // if ending page is not end 4 byte aligned, decrease last page transfer length
            //  as those bytes will be handled using the fill byte page.
            if (dio->fb_size) {
                dio->sg[nr_pages-1].dma_length -= dio->fb_size;
                // check for last page == same size as dio->fb_size
                if (dio->sg[nr_pages-1].dma_length == 0) {
                    nr_pages--;
                }
            }
            // If a fill byte page is needed
            if (dio->fb_size) {
                UInt64          byte_count;
                UInt64          length;

                // manually copy those bytes into the fill byte page
                offset = count - dio->fb_size;
                length = dio->fb_size;
                byte_count = mem_desc->readBytes(offset, dio->fb_va, length);
            }
            dio->sg_cnt = nr_pages;
        } while(false);

        if (dio->sg_cnt <= 0) {
            dev_err(&adp->pdev->dev, "sg map %d\n", dio->sg_cnt);
            crystalhd_unmap_dio(adp, dio);
            return BC_STS_ERROR;
        }
    } else {
        // three bytes or less, handle this transfer using only the fill_byte page.
        UInt64          byte_count;
        UInt64          offset;
        UInt64          length;

        offset = 0;
        length = dio->fb_size;
        byte_count = mem_desc->readBytes(offset, dio->fb_va, length);
        dio->sg_cnt = 0;
        dio->sg[0].dma_length = count;
    }
#endif
    dio->sig = crystalhd_dio_sg_mapped;
    /* Fill in User info.. */
    dio->uinfo.xfr_len   = ubuff_sz;
#ifndef __APPLE__
    dio->uinfo.xfr_buff  = ubuff;
#else
    dio->uinfo.xfr_buff  = (uint8_t*)ubuff;
#endif
    dio->uinfo.uv_offset = uv_offset;
    dio->uinfo.b422mode  = en_422mode;
    dio->uinfo.dir_tx    = dir_tx;

    *dio_hnd = dio;

    return BC_STS_SUCCESS;
}
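
The Linux path of this example follows a common three-step pattern: pin the user pages, describe them with a scatterlist, and hand the list to pci_map_sg(). The sketch below shows just that skeleton in isolation; it is not part of the driver above, assumes the same era of kernel APIs used throughout these examples (the old get_user_pages() signature, pci_map_sg(), page_cache_release()) and, for brevity, a page-aligned buffer. The demo_* name is illustrative only.

#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/scatterlist.h>

/* Pin a page-aligned user buffer and map it for DMA.
 * Returns the number of mapped entries, or a negative error code. */
static int demo_map_user_buf(struct pci_dev *pdev, unsigned long uaddr,
                             int nr_pages, int to_device,
                             struct page **pages, struct scatterlist *sg)
{
    int i, got, mapped;
    int dir = to_device ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;

    /* Pin the user pages so they cannot be reclaimed during DMA. */
    down_read(&current->mm->mmap_sem);
    got = get_user_pages(current, current->mm, uaddr, nr_pages,
                         !to_device /* device writes into the buffer */,
                         0, pages, NULL);
    up_read(&current->mm->mmap_sem);
    if (got < nr_pages)
        goto err_release;

    /* One scatterlist entry per pinned page. */
    sg_init_table(sg, nr_pages);
    for (i = 0; i < nr_pages; i++)
        sg_set_page(&sg[i], pages[i], PAGE_SIZE, 0);

    /* The IOMMU may coalesce entries, so keep the returned count. */
    mapped = pci_map_sg(pdev, sg, nr_pages, dir);
    if (mapped <= 0)
        goto err_release;
    return mapped;

err_release:
    for (i = 0; i < got; i++)
        page_cache_release(pages[i]);
    return -EIO;
}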

/**
 * crystalhd_unmap_dio - Release mapped resources
 * @adp: Adapter instance
 * @dio: DIO request instance
 *
 * Return:
 *	Status.
 *
 * This routine unmaps and releases the pinned user buffer pages.
 */
BC_STATUS crystalhd_unmap_dio(struct crystalhd_adp *adp, crystalhd_dio_req *dio)
{
#ifndef __APPLE__
    struct page *page = NULL;
    int j = 0;

    if (!adp || !dio) {
        printk(KERN_ERR "%s: Invalid arg\n", __func__);
        return BC_STS_INV_ARG;
    }

    if ((dio->page_cnt > 0) && (dio->sig != crystalhd_dio_inv)) {
        for (j = 0; j < dio->page_cnt; j++) {
            page = dio->pages[j];
            if (page) {
                if (!PageReserved(page) &&
                        (dio->direction == DMA_FROM_DEVICE))
                    SetPageDirty(page);
                page_cache_release(page);
            }
        }
    }
    if (dio->sig == crystalhd_dio_sg_mapped)
        pci_unmap_sg(adp->pdev, dio->sg, dio->page_cnt, dio->direction);
#else
    IODMACommand		*dma_command;
    IOMemoryDescriptor	*mem_desc;

    if (!adp || !dio) {
        dev_err(chddev(), "bc_link_unmap_dio: Invalid arg\n");
        return BC_STS_INV_ARG;
    }
    dma_command = OSDynamicCast(IODMACommand, (OSMetaClassBase*)dio->io_class);
    //MPCLOG(MPCLOG_DBG, "bc_link_unmap_dio:dma_command=0x%X \n", (unsigned int)dma_command);
    if (dma_command) {
        // fetch current IOMemoryDescriptor before dma_command->clearMemoryDescriptor;
        mem_desc = (IOMemoryDescriptor*)dma_command->getMemoryDescriptor();
        dma_command->complete();
        dma_command->clearMemoryDescriptor();
        SAFE_RELEASE(dma_command);
        mem_desc->complete();
        SAFE_RELEASE(mem_desc);
        dio->io_class = NULL;
    }
#endif

    crystalhd_free_dio(adp, dio);

    return BC_STS_SUCCESS;
}
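
The teardown mirrors crystalhd_unmap_dio() above: unmap the scatterlist first, then dirty and release the pinned pages. Below is a minimal counterpart to the previous sketch, with the same headers, the same hypothetical demo_* naming and the same assumptions.

static void demo_unmap_user_buf(struct pci_dev *pdev, struct scatterlist *sg,
                                struct page **pages, int nr_pages,
                                int to_device)
{
    int i;
    int dir = to_device ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;

    /* Undo pci_map_sg(); pass the original nents, not the mapped count. */
    pci_unmap_sg(pdev, sg, nr_pages, dir);

    for (i = 0; i < nr_pages; i++) {
        /* The CPU never wrote these pages, so mark them dirty by hand
         * when the device filled them, then drop the pin. */
        if (!to_device && !PageReserved(pages[i]))
            SetPageDirty(pages[i]);
        page_cache_release(pages[i]);
    }
}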

/**
 * crystalhd_create_dio_pool - Allocate mem pool for DIO management.
 * @adp: Adapter instance
 * @max_pages: Max pages for size calculation.
 *
 * Return:
 *	0 on success or a negative error code.
 *
 * This routine creates a memory pool to hold dio contexts for
 * HW Direct IO operation.
 */
int crystalhd_create_dio_pool(struct crystalhd_adp *adp, uint32_t max_pages)
{
    struct device *dev;
    uint32_t asz = 0, i = 0;
    uint8_t	*temp;
    crystalhd_dio_req *dio;

    if (!adp || !max_pages) {
        printk(KERN_ERR "%s: Invalid arg\n", __func__);
        return -EINVAL;
    }

    dev = &adp->pdev->dev;

    /* Get dma memory for fill byte handling..*/
    adp->fill_byte_pool = pci_pool_create("crystalhd_fbyte",
                                          adp->pdev, 8, 8, 0);
    if (!adp->fill_byte_pool) {
        dev_err(dev, "failed to create fill byte pool\n");
        return -ENOMEM;
    }

#ifndef __APPLE__
    /* Get the max size from user based on 420/422 modes */
    asz =  (sizeof(*dio->pages) * max_pages) +
           (sizeof(*dio->sg) * max_pages) + sizeof(*dio);
#else
    asz = (sizeof(*dio->sg) * max_pages) + sizeof(*dio);
#endif

    dev_dbg(dev, "Initializing Dio pool %d %d %x %p\n",
            BC_LINK_SG_POOL_SZ, max_pages, asz, adp->fill_byte_pool);

    for (i = 0; i < BC_LINK_SG_POOL_SZ; i++) {
        temp = kzalloc(asz, GFP_KERNEL);
        if (!temp) {
            dev_err(dev, "Failed to alloc %d mem\n", asz);
            return -ENOMEM;
        }

        dio = (crystalhd_dio_req *)temp;
        temp += sizeof(*dio);
#ifndef __APPLE__
        dio->pages = (struct page **)temp;
        temp += (sizeof(*dio->pages) * max_pages);
#endif
        /* dio->sg sits right after the header (and, on Linux, the pages
         * array); asz reserves exactly that much. */
        dio->sg = (struct scatterlist *)temp;
        dio->max_pages = max_pages;
        dio->fb_va = pci_pool_alloc(adp->fill_byte_pool, GFP_KERNEL,
                                    &dio->fb_pa);
        if (!dio->fb_va) {
            dev_err(dev, "fill byte alloc failed.\n");
            return -ENOMEM;
        }

        crystalhd_free_dio(adp, dio);
    }

    return 0;
}
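
Besides the DIO bookkeeping, the function above also exercises the pci_pool API for the 8-byte fill-byte buffers. The sketch below is a minimal round trip through that API with an illustrative pool name; it makes no claim to match the driver's exact usage.

#include <linux/pci.h>

static int demo_fill_byte_pool(struct pci_dev *pdev)
{
    struct pci_pool *pool;
    dma_addr_t pa;
    void *va;

    /* Fixed-size pool: 8-byte blocks, 8-byte aligned, no boundary limit. */
    pool = pci_pool_create("demo_fbyte", pdev, 8, 8, 0);
    if (!pool)
        return -ENOMEM;

    va = pci_pool_alloc(pool, GFP_KERNEL, &pa);
    if (!va) {
        pci_pool_destroy(pool);
        return -ENOMEM;
    }

    /* va is the CPU address, pa the bus address for small DMA transfers. */

    pci_pool_free(pool, va, pa);
    pci_pool_destroy(pool);
    return 0;
}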

/**
 * crystalhd_destroy_dio_pool - Release DIO mem pool.
 * @adp: Adapter instance
 *
 * Return:
 *	none.
 *
 * This routine releases the dio memory pool during close.
 */
void crystalhd_destroy_dio_pool(struct crystalhd_adp *adp)
{
    crystalhd_dio_req *dio;
    int count = 0;

    if (!adp) {
        printk(KERN_ERR "%s: Invalid arg\n", __func__);
        return;
    }

    do {
        dio = crystalhd_alloc_dio(adp);
        if (dio) {
            if (dio->fb_va)
                pci_pool_free(adp->fill_byte_pool,
                              dio->fb_va, dio->fb_pa);
            count++;
            kfree(dio);
        }
    } while (dio);

    if (adp->fill_byte_pool) {
        pci_pool_destroy(adp->fill_byte_pool);
        adp->fill_byte_pool = NULL;
    }

    dev_dbg(&adp->pdev->dev, "Released dio pool %d\n", count);
}
예제 #26
0
/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
	int		ret;
	unsigned long   flags;
	scsi_qla_host_t	*ha;
	fc_lun_t	*fclun;
	struct scsi_cmnd *cmd;
	uint32_t	*clr_ptr;
	uint32_t        index;
	uint32_t	handle;
	uint16_t	cnt;
	cmd_entry_t	*cmd_pkt;
	uint32_t        timeout;
	struct scatterlist *sg;

	device_reg_t	*reg;

	/* Setup device pointers. */
	ret = 0;
	fclun = sp->lun_queue->fclun;
	ha = fclun->fcport->ha;
	cmd = sp->cmd;
	reg = ha->iobase;

	/* Send marker if required */
	if (ha->marker_needed != 0) {
		if (qla2x00_marker(ha, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
			return (QLA_FUNCTION_FAILED);
		}
		ha->marker_needed = 0;
	}

	/* Calculate number of segments and entries required. */
	if (sp->req_cnt == 0) {
		sp->tot_dsds = 0;
		if (cmd->use_sg) {
			sg = (struct scatterlist *) cmd->request_buffer;
			sp->tot_dsds = pci_map_sg(ha->pdev, sg, cmd->use_sg,
			    cmd->sc_data_direction);
		} else if (cmd->request_bufflen) {
		    sp->tot_dsds++;
		}
		sp->req_cnt = (ha->calc_request_entries)(sp->tot_dsds);
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	if (ha->req_q_cnt < (sp->req_cnt + 2)) {
		/* Calculate number of free request entries */
		cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
		if (ha->req_ring_index < cnt)
			ha->req_q_cnt = cnt - ha->req_ring_index;
		else
			ha->req_q_cnt = ha->request_q_length -
			    (ha->req_ring_index - cnt);
	}

	/* If no room for request in request ring */
	if (ha->req_q_cnt < (sp->req_cnt + 2)) {
		DEBUG5(printk("scsi(%ld): in-ptr=%x req_q_cnt=%x "
		    "tot_dsds=%x.\n",
		    ha->host_no, ha->req_ring_index, ha->req_q_cnt,
		    sp->tot_dsds));

		goto queuing_error;
	}

	/* Check for room in outstanding command list. */
	handle = ha->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (ha->outstanding_cmds[handle] == 0) {
			ha->current_outstanding_cmd = handle;
			break;
		}
	}
	if (index == MAX_OUTSTANDING_COMMANDS) {
		DEBUG5(printk("scsi(%ld): Unable to queue command -- NO ROOM "
		    "IN OUTSTANDING ARRAY (req_q_cnt=%x).\n",
		    ha->host_no, ha->req_q_cnt));
		goto queuing_error;
	}

	/* Build command packet */
	ha->outstanding_cmds[handle] = sp;
	sp->ha = ha;
	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	ha->req_q_cnt -= sp->req_cnt;

	cmd_pkt = (cmd_entry_t *)ha->request_ring_ptr;
	cmd_pkt->handle = handle;
	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(sp->tot_dsds);

	/* Set target ID */
	SET_TARGET_ID(ha, cmd_pkt->target, fclun->fcport->loop_id);

	/* Set LUN number*/
	cmd_pkt->lun = cpu_to_le16(fclun->lun);

	/* Update tagged queuing modifier */
	cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG);
	if (cmd->device->tagged_supported) {
		switch (cmd->tag) {
		case HEAD_OF_QUEUE_TAG:
			cmd_pkt->control_flags =
			    __constant_cpu_to_le16(CF_HEAD_TAG);
			break;
		case ORDERED_QUEUE_TAG:
			cmd_pkt->control_flags =
			    __constant_cpu_to_le16(CF_ORDERED_TAG);
			break;
		}
	}

	/*
	 * Allocate at least 5 (+ QLA_CMD_TIMER_DELTA) seconds for RISC timeout.
	 */
	timeout = (uint32_t)(cmd->timeout_per_command / HZ);
	if (timeout > 65535)
		cmd_pkt->timeout = __constant_cpu_to_le16(0);
	else if (timeout > 25)
		cmd_pkt->timeout = cpu_to_le16((uint16_t)timeout -
		    (5 + QLA_CMD_TIMER_DELTA));
	else
		cmd_pkt->timeout = cpu_to_le16((uint16_t)timeout);

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->byte_count = cpu_to_le32((uint32_t)cmd->request_bufflen);

	/* Build IOCB segments */
	(ha->build_scsi_iocbs)(sp, cmd_pkt, sp->tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)sp->req_cnt;

	/* Adjust ring index. */
	ha->req_ring_index++;
	if (ha->req_ring_index == ha->request_q_length) {
		ha->req_ring_index = 0;
		ha->request_ring_ptr = ha->request_ring;
	} else
		ha->request_ring_ptr++;

	ha->actthreads++;
	ha->total_ios++;
	sp->lun_queue->out_cnt++;
	sp->flags |= SRB_DMA_VALID;
	sp->state = SRB_ACTIVE_STATE;
	sp->u_start = jiffies;

	/* Set chip new ring index. */
	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), ha->req_ring_index);
	RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return (QLA_SUCCESS);

queuing_error:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (QLA_FUNCTION_FAILED);
}
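
The request-queue check above boils down to circular-ring arithmetic between the driver's producer index and the out pointer read back from the ISP. Restated as a standalone helper (hypothetical name, not part of the qla2xxx driver):

#include <linux/types.h>

/* Free entries on a circular request ring of ring_len slots, given the
 * producer (in) and consumer (out) indices; in == out is treated as an
 * empty ring, exactly as in the code above. */
static inline uint16_t demo_req_ring_space(uint16_t in, uint16_t out,
                                           uint16_t ring_len)
{
    return (in < out) ? out - in : ring_len - (in - out);
}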
예제 #27
0
/*****************************************************************************
 Function name  : inia100BuildSCB
 Description    : Build the SCB (and its scatter/gather list) corresponding
                  to a SCSI command.
 Input          : pHCB  -       Pointer to host adapter structure
                  pSCB  -       Pointer to SCSI control block
                  SCpnt -       Pointer to SCSI command
 Output         : None.
 Return         : None.
*****************************************************************************/
static void inia100BuildSCB(ORC_HCS * pHCB, ORC_SCB * pSCB, struct scsi_cmnd * SCpnt)
{   /* Create corresponding SCB     */
    struct scatterlist *pSrbSG;
    ORC_SG *pSG;		/* Pointer to SG list           */
    int i, count_sg;
    U32 TotalLen;
    ESCB *pEScb;

    pEScb = pSCB->SCB_EScb;
    pEScb->SCB_Srb = SCpnt;
    pSG = NULL;

    pSCB->SCB_Opcode = ORC_EXECSCSI;
    pSCB->SCB_Flags = SCF_NO_DCHK;	/* Clear done bit               */
    pSCB->SCB_Target = SCpnt->device->id;
    pSCB->SCB_Lun = SCpnt->device->lun;
    pSCB->SCB_Reserved0 = 0;
    pSCB->SCB_Reserved1 = 0;
    pSCB->SCB_SGLen = 0;

    if ((pSCB->SCB_XferLen = (U32) SCpnt->request_bufflen)) {
        pSG = (ORC_SG *) & pEScb->ESCB_SGList[0];
        if (SCpnt->use_sg) {
            TotalLen = 0;
            pSrbSG = (struct scatterlist *) SCpnt->request_buffer;
            count_sg = pci_map_sg(pHCB->pdev, pSrbSG, SCpnt->use_sg,
                                  SCpnt->sc_data_direction);
            pSCB->SCB_SGLen = (U32) (count_sg * 8);
            for (i = 0; i < count_sg; i++, pSG++, pSrbSG++) {
                pSG->SG_Ptr = (U32) sg_dma_address(pSrbSG);
                pSG->SG_Len = (U32) sg_dma_len(pSrbSG);
                TotalLen += (U32) sg_dma_len(pSrbSG);
            }
        } else if (SCpnt->request_bufflen != 0) {/* Non SG */
            pSCB->SCB_SGLen = 0x8;
            pSG->SG_Ptr = (U32) pci_map_single(pHCB->pdev,
                                               SCpnt->request_buffer, SCpnt->request_bufflen,
                                               SCpnt->sc_data_direction);
            SCpnt->host_scribble = (void *)pSG->SG_Ptr;
            pSG->SG_Len = (U32) SCpnt->request_bufflen;
        } else {
            pSCB->SCB_SGLen = 0;
            pSG->SG_Ptr = 0;
            pSG->SG_Len = 0;
        }
    }
    pSCB->SCB_SGPAddr = (U32) pSCB->SCB_SensePAddr;
    pSCB->SCB_HaStat = 0;
    pSCB->SCB_TaStat = 0;
    pSCB->SCB_Link = 0xFF;
    pSCB->SCB_SenseLen = SENSE_SIZE;
    pSCB->SCB_CDBLen = SCpnt->cmd_len;
    if (pSCB->SCB_CDBLen >= IMAX_CDB) {
        printk("max cdb length= %x\n", SCpnt->cmd_len);
        pSCB->SCB_CDBLen = IMAX_CDB;
    }
    pSCB->SCB_Ident = SCpnt->device->lun | DISC_ALLOW;
    if (SCpnt->device->tagged_supported) {	/* Tag Support                  */
        pSCB->SCB_TagMsg = SIMPLE_QUEUE_TAG;	/* Do simple tag only   */
    } else {
        pSCB->SCB_TagMsg = 0;	/* No tag support               */
    }
    memcpy(&pSCB->SCB_CDB[0], &SCpnt->cmnd, pSCB->SCB_CDBLen);
    return;
}
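
Both this example and the previous one share the same pci_map_sg() idiom: map the command's scatterlist once, then walk the mapped entries and copy each bus address/length pair into a controller-specific descriptor. Below is a generic sketch of that idiom, with a hypothetical 8-byte descriptor standing in for ORC_SG and a demo_* name that is illustrative only.

#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/types.h>

struct demo_sg_desc {
    u32 addr;
    u32 len;
};

/* Returns the number of mapped segments written into desc[]. */
static int demo_fill_sg_descs(struct pci_dev *pdev, struct scatterlist *sgl,
                              int use_sg, int direction,
                              struct demo_sg_desc *desc)
{
    struct scatterlist *sg = sgl;
    int i, nents;

    nents = pci_map_sg(pdev, sgl, use_sg, direction);
    for (i = 0; i < nents; i++, sg++) {
        /* Use the DMA (bus) address and length, not the page/offset pair. */
        desc[i].addr = (u32)sg_dma_address(sg);
        desc[i].len  = (u32)sg_dma_len(sg);
    }
    return nents;
}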