Example #1
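/*
 * Move one unit of data for the in-flight request: in CPU (PIO) mode a
 * single 32-bit word per call (xfer_index counts words), or one block
 * through the on-chip L2 buffer when L2_USING is defined.
 */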
static void mci_xfer(struct ak880xmci_host *host)
{
	int i = 0;
	struct mmc_data *data = host->cmd->data;
	unsigned int *sgbuffer;

	if (!data)
		return;

	if ((host->xfer_index << 2) >= host->total_len) {
		dbg("xfer overflow");
		return;
	}
	if (host->blocks >= data->blocks) {
		dbg("xfer overflow");
		return;
	}

	sgbuffer = sg_virt(data->sg);

	if (data->flags & MMC_DATA_READ) {
		unsigned long regval;
#ifdef L2_USING
		if (host->total_len >= 64) {
			dma_addr_t phy_addr = sg_phys(data->sg) + host->blocks*data->blksz;
			l2_dma(L2_SD_BUFX, phy_addr, data->blksz, 0);

			host->blocks++;
		} else  
#endif
		{
			regval = mci_read(host, MMC_CPU_MODE);
			if (regval) {
				/* dbg("[%d] r,MMC_CPU_MODE(0x%x)", host->xfer_index, regval); */
			}
			sgbuffer[host->xfer_index++] = regval;
		}
	} else if (data->flags & MMC_DATA_WRITE) {
#ifdef L2_USING
		dma_addr_t phy_addr;

		host->blocks++;
		if (host->blocks < data->blocks) {
			phy_addr = sg_phys(data->sg) + host->blocks*data->blksz;
			l2_dma(L2_SD_BUFX, phy_addr, data->blksz, 1);
		}
#else
		unsigned long val;
		val = sgbuffer[host->xfer_index++];
		mci_write(host, MMC_CPU_MODE, val);
		if (val)
			dbg("[%d], w,MMC_CPU_MODE(0x%x)", host->xfer_index, val);
#endif /* L2_USING */
	}
}
Example #2
/* Set up an indirect table of descriptors and add it to the queue. */
static int vring_add_indirect(struct vring_virtqueue *vq,
			      struct scatterlist sg[],
			      unsigned int out,
			      unsigned int in,
			      gfp_t gfp)
{
	struct vring_desc *desc;
	unsigned head;
	int i;

	desc = kmalloc((out + in) * sizeof(struct vring_desc), gfp);
	if (!desc)
		return -ENOMEM;

	/* Transfer entries from the sg list into the indirect page */
	for (i = 0; i < out; i++) {
		desc[i].flags = VRING_DESC_F_NEXT;
		desc[i].addr = sg_phys(sg);
		desc[i].len = sg->length;
		desc[i].next = i+1;
		sg++;
	}
	for (; i < (out + in); i++) {
		desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
		desc[i].addr = sg_phys(sg);
		desc[i].len = sg->length;
		desc[i].next = i+1;
		sg++;
	}

	/* Last one doesn't continue. */
	desc[i-1].flags &= ~VRING_DESC_F_NEXT;
	desc[i-1].next = 0;

	/* We're about to use a buffer */
	vq->num_free--;

	/* Use a single buffer which doesn't continue */
	head = vq->free_head;
	vq->vring.desc[head].flags = VRING_DESC_F_INDIRECT;
	vq->vring.desc[head].addr = virt_to_phys(desc);
	vq->vring.desc[head].len = i * sizeof(struct vring_desc);

	/* Update free pointer */
	vq->free_head = vq->vring.desc[head].next;

	return head;
}
Example #3
File: dma.c Project: Endika/linux
static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
			     int nents, enum dma_data_direction direction,
			     unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	/* FIXME this part of code is untested */
	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = sg_phys(sg);

		if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
			continue;

		__dma_sync(sg_phys(sg), sg->length, direction);
	}

	return nents;
}
Example #4
void dma_direct_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
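		/* Segments bounced through the swiotlb are synced via the bounce buffer; non-coherent devices also need arch cache maintenance */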
		if (unlikely(is_swiotlb_buffer(sg_phys(sg))))
			swiotlb_tbl_sync_single(dev, sg_phys(sg), sg->length,
					dir, SYNC_FOR_DEVICE);

		if (!dev_is_dma_coherent(dev))
			arch_sync_dma_for_device(dev, sg_phys(sg), sg->length,
					dir);
	}
}
Example #5
File: gem.c Project: JaneDu/ath
static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
{
	struct scatterlist *s;
	unsigned int i;

	bo->pages = drm_gem_get_pages(&bo->gem);
	if (IS_ERR(bo->pages))
		return PTR_ERR(bo->pages);

	bo->num_pages = bo->gem.size >> PAGE_SHIFT;

	bo->sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages);
	if (IS_ERR(bo->sgt))
		goto put_pages;

	/*
	 * Fake up the SG table so that dma_sync_sg_for_device() can be used
	 * to flush the pages associated with it.
	 *
	 * TODO: Replace this by drm_clflush_sg() once it can be implemented
	 * without relying on symbols that are not exported.
	 */
	for_each_sg(bo->sgt->sgl, s, bo->sgt->nents, i)
		sg_dma_address(s) = sg_phys(s);

	dma_sync_sg_for_device(drm->dev, bo->sgt->sgl, bo->sgt->nents,
			       DMA_TO_DEVICE);

	return 0;

put_pages:
	drm_gem_put_pages(&bo->gem, bo->pages, false, false);
	return PTR_ERR(bo->sgt);
}
Example #6
static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
{
	struct page *page;
	struct scatterlist sg;

	page = alloc_pages(pool->gfp_mask & ~__GFP_ZERO, pool->order);

	if (!page)
		return NULL;

	if (pool->gfp_mask & __GFP_ZERO)
		if (ion_heap_high_order_page_zero(
				page, pool->order, pool->should_invalidate))
			goto error_free_pages;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, page, PAGE_SIZE << pool->order, 0);
	sg_dma_address(&sg) = sg_phys(&sg);
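	/* No IOMMU involved here: the DMA address is simply the physical address, and the caches are flushed for the device below */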
	dma_sync_sg_for_device(NULL, &sg, 1, DMA_BIDIRECTIONAL);

	return page;
error_free_pages:
	__free_pages(page, pool->order);
	return NULL;
}
Example #7
ssize_t fmpfw_hash_update(struct fmp_info *info, struct hash_data *hdata,
				struct scatterlist *sg, size_t len)
{
	int ret = 0;
	unsigned long addr;
	struct device *dev = info->dev;
	struct hmac_sha256_fmpfw_info *fmpfw_info = hdata->fmpfw_info;

	fmpfw_info->s.step = UPDATE;
	fmpfw_info->s.input = (uint32_t)sg_phys(sg);
	__flush_dcache_area(sg_virt(sg), len);
	fmpfw_info->s.input_len = len;
	__flush_dcache_area(fmpfw_info, sizeof(*fmpfw_info));
	addr = virt_to_phys(fmpfw_info);

	reinit_completion(&hdata->async.result->completion);
	if (fmpfw_info->hmac_mode) {
		ret = exynos_smc(SMC_CMD_FMP, FMP_FW_HMAC_SHA2_TEST, addr, 0);
		if (unlikely(ret)) {
			dev_err(dev, "Fail to smc call for FMPFW HMAC SHA256 update. ret = 0x%x\n", ret);
			ret = -EFAULT;
		}
	} else {
		ret = exynos_smc(SMC_CMD_FMP, FMP_FW_SHA2_TEST, addr, 0);
		if (unlikely(ret)) {
			dev_err(dev, "Fail to smc call for FMPFW SHA256 update. ret = 0x%x\n", ret);
			ret = -EFAULT;
		}
	}

	return waitfor(info, hdata->async.result, ret);
}
Example #8
static void arc_dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sglist, int nelems,
		enum dma_data_direction dir)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nelems, i)
		_dma_cache_sync(sg_phys(sg), sg->length, dir);
}
Example #9
static int map_sg(struct device *dev, struct scatterlist *sgl,
                  int nents, enum dma_data_direction direction,
                  struct dma_attrs *attrs)
{
    struct scatterlist *sg;
    int i;

    for_each_sg(sgl, sg, nents, i) {
        sg->dma_address = sg_phys(sg);
    }

    return nents;
}
Example #10
/**
* @brief 	SD card write function with scatter list.
* @param 	sd[in]: Card information.
* @param 	sector[in]: Start sector.
* @param 	sg[in]: Scatter list pointer.
* @param 	ln[in]: Number of scatterlist entries.
* @return 	Actual number of sectors written, or ERROR_ID (<0).
*/
int gp_sdcard_write_scatter(gpSDInfo_t * sd, unsigned int sector, struct scatterlist *sg, unsigned int ln)
{
	int i;
	struct scatterlist *sg_ev;
	int ret = 0;
	int sector_num = 0;
#ifdef USE_DMA	
	unsigned sgln;
	sgln = dma_map_sg(NULL, sg, ln, DMA_TO_DEVICE);
	if(sgln!=ln)
	{
		dma_unmap_sg(NULL, sg, sgln, DMA_TO_DEVICE);
		DERROR("[%d]: SG map fail, sgln = %d, ln = %d\n", sd->device_id, sgln, ln);
		return -ENOMEM;
	}
#endif	
	if(gp_sdcard_writecmd(sd->device_id,sector)==SP_FALSE)
	{
		ret = -EIO;
		goto out_error;
	}
	DEBUG("SD: write sector %d\n", sector);
	
	for_each_sg(sg, sg_ev, ln, i) 
	{
		unsigned int number = sg_dma_len(sg_ev)>>9;
		/* ----- Start dma ----- */ 
	#ifdef USE_DMA
		if(gp_sdcard_dma_en(sd->device_id, (unsigned char*)sg_phys(sg_ev), sg_dma_len(sg_ev),1)==SP_FALSE)
	#else
		if(gp_sdcard_dma_en(sd->device_id, (unsigned char*)sg_virt(sg_ev), sg_dma_len(sg_ev),1)==SP_FALSE)
	#endif
		{
			ret = -ENOMEM;
			DERROR("[%d]:DMA Enable error\n", sd->device_id);
			goto out_error;
		}
		/* ----- Wait dma finish ----- */
		ret = gp_sdcard_dma_finish(sd->device_id, (sg_dma_len(sg_ev)>>9)*WRITE_TIMEOUT);
		if(ret!=0)
		{
			gp_apbdma0_stop(sd->handle_dma);
			DERROR("[%d]:DMA error: %d, SD status 0x%x\n", sd->device_id, ret,gpHalSDGetStatus(sd->device_id));
			goto out_error;
		}
	#ifdef USE_DMA	
		if(gpHalSDWaitDataComplete(sd->device_id) == SP_FALSE)
		{
			DERROR("[%d]: wait complete error: SD status 0x%x\n", sd->device_id, gpHalSDGetStatus(sd->device_id));
			ret = -ETIMEDOUT;
			goto out_error;
		}
	#endif
		sector_num += number;
	}
Example #11
File: dma.c Project: AllenWeb/linux
int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	       enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nents; sg++, i++) {
		sg->dma_address = sg_phys(sg);
		dma_sync_single_for_device(dev, sg->dma_address, sg->length, dir);
	}
	return nents;
}
Example #12
static int mn10300_dma_map_sg(struct device *dev, struct scatterlist *sglist,
		int nents, enum dma_data_direction direction,
		unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sglist, sg, nents, i) {
		BUG_ON(!sg_page(sg));

		sg->dma_address = sg_phys(sg);
	}

	return nents;
}
Example #13
static IMG_BOOL GetPhysAddrFromLCDInfo(struct fb_info *psLINFBInfo,
				IMG_UINT32 *pui32PageCount,
				IMG_SYS_PHYADDR **ppasSysPhysAddr)
{
	IMG_UINT32 i;
	IMG_UINT32 ui32PageCount = 0;
	struct scatterlist *psScatterList;
	struct scatterlist *psTemp;
	struct s3c_fb_win *pfb_win = (struct s3c_fb_win*)psLINFBInfo->par;
	IMG_SYS_PHYADDR *pasSysPhysAddr = NULL;
	psScatterList = pfb_win->dma_buf_data.sg_table->sgl;

	for (i=0;i<2;i++)
	{
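		/* Pass 0 only counts the pages; pass 1 allocates the array and fills in each page's physical address */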
		psTemp = psScatterList;
		if (i == 1)
		{
			pasSysPhysAddr = kmalloc(sizeof(IMG_SYS_PHYADDR) * ui32PageCount, GFP_KERNEL);
			if (pasSysPhysAddr == NULL)
			{
				printk("out of memory: cannot alloc mem for structure %s\n", __func__);
				goto exitFailAlloc;
			}
			ui32PageCount = 0;	/* Reset the page count as we use it for the index */
		}

		while(psTemp)
		{
			IMG_UINT32 j;

			for (j=0;j<psTemp->length;j+=PAGE_SIZE)
			{
				if (i == 1)
				{
					pasSysPhysAddr[ui32PageCount].uiAddr = sg_phys(psTemp) + j;
				}
				ui32PageCount++;
			}
			psTemp = sg_next(psTemp);
		}
	}
	printk("\n uiCount = %d\n", ui32PageCount);

	*pui32PageCount = ui32PageCount;
	*ppasSysPhysAddr = pasSysPhysAddr;

	return IMG_TRUE;

exitFailAlloc:
	kfree(pasSysPhysAddr);
	*ppasSysPhysAddr = IMG_NULL;
	return IMG_FALSE;
}
Example #14
static phys_addr_t get_phys_addr(struct scatterlist *sg)
{
    /*
     * Try sg_dma_address first so that we can
     * map carveout regions that do not have a
     * struct page associated with them.
     */
    phys_addr_t pa = sg_dma_address(sg);
    if (pa == 0)
        pa = sg_phys(sg);
    return pa;
}
Example #15
static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
			     int nents, enum dma_data_direction direction,
			     struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = sg_phys(sg) + get_dma_offset(dev);
		sg->dma_length = sg->length;
		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
	}

	return nents;
}
Example #16
static int gk20a_ltc_alloc_phys_cbc(struct gk20a *g,
				    size_t compbit_backing_size)
{
	struct gr_gk20a *gr = &g->gr;
	int order = ffs(compbit_backing_size >> PAGE_SHIFT);
	struct page *pages;
	struct sg_table *sgt;
	int err = 0;

	/* allocate few pages */
	pages = alloc_pages(GFP_KERNEL, order);
	if (!pages) {
		gk20a_dbg(gpu_dbg_pte, "alloc_pages failed\n");
		err = -ENOMEM;
		goto err_alloc_pages;
	}

	/* clean up the pages */
	memset(page_address(pages), 0, compbit_backing_size);

	/* allocate room for placing the pages pointer.. */
	gr->compbit_store.pages =
		kzalloc(sizeof(*gr->compbit_store.pages), GFP_KERNEL);
	if (!gr->compbit_store.pages) {
		gk20a_dbg(gpu_dbg_pte, "failed to allocate pages struct");
		err = -ENOMEM;
		goto err_alloc_compbit_store;
	}

	err = gk20a_get_sgtable_from_pages(&g->dev->dev, &sgt, &pages, 0,
					   compbit_backing_size);
	if (err) {
		gk20a_dbg(gpu_dbg_pte, "could not get sg table for pages\n");
		goto err_alloc_sg_table;
	}

	/* store the parameters to gr structure */
	*gr->compbit_store.pages = pages;
	gr->compbit_store.base_iova = sg_phys(sgt->sgl);
	gr->compbit_store.size = compbit_backing_size;
	gr->compbit_store.sgt = sgt;

	return 0;

err_alloc_sg_table:
	kfree(gr->compbit_store.pages);
	gr->compbit_store.pages = NULL;
err_alloc_compbit_store:
	__free_pages(pages, order);
err_alloc_pages:
	return err;
}
Example #17
static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
			     int nents, enum dma_data_direction direction,
			     struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	/* FIXME this part of code is untested */
	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = sg_phys(sg) + get_dma_direct_offset(dev);
		__dma_sync_page(page_to_phys(sg_page(sg)), sg->offset,
							sg->length, direction);
	}

	return nents;
}
Example #18
static int nios2_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction direction,
		struct dma_attrs *attrs)
{
	int i;

	for_each_sg(sg, sg, nents, i) {
		void *addr;

		addr = sg_virt(sg);
		if (addr) {
			__dma_sync_for_device(addr, sg->length, direction);
			sg->dma_address = sg_phys(sg);
		}
	}

	return nents;
}
Example #19
/* Map a set of buffers described by scatterlist in streaming
 * mode for DMA.  This is the scatter-gather version of the
 * above pci_map_single interface.  Here the scatter gather list
 * elements are each tagged with the appropriate dma address
 * and length.  They are obtained via sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for pci_map_single are
 * the same here.
 */
static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
	       int nents, int direction)
{
	struct scatterlist *s;
	int i;

	WARN_ON(nents == 0 || sg[0].length == 0);

	for_each_sg(sg, s, nents, i) {
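		/* No IOMMU: the DMA address is the physical address; check_addr() verifies it is reachable within the device's DMA mask */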
		BUG_ON(!sg_page(s));
		s->dma_address = sg_phys(s);
		if (!check_addr("map_sg", hwdev, s->dma_address, s->length))
			return 0;
		s->dma_length = s->length;
	}

	return nents;
}
Example #20
int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction direction)
{
	int i;

	BUG_ON(!valid_dma_direction(direction));

	for_each_sg(sg, sg, nents, i) {
		void *addr;

		addr = sg_virt(sg);
		if (addr) {
			__dma_sync_for_device(addr, sg->length, direction);
			sg->dma_address = sg_phys(sg);
		}
	}

	return nents;
}
Example #21
/*
 * Create scatter-list for the already allocated DMA buffer.
 * This function could be replaced by dma_common_get_sgtable
 * as soon as it becomes available.
 */
static int ion_cma_get_sgtable(struct device *dev, struct sg_table *sgt,
			       void *cpu_addr, dma_addr_t handle, size_t size)
{
	struct page *page = phys_to_page(dma_to_phys(dev, handle));
	struct scatterlist *sg;
	int ret;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (unlikely(ret))
		return ret;

	sg = sgt->sgl;
	sg_set_page(sg, page, PAGE_ALIGN(size), 0);
	sg_dma_address(sg) = sg_phys(sg);

	return 0;
}
Example #22
int d40_phy_sg_to_lli(struct scatterlist *sg,
		      int sg_len,
		      dma_addr_t target,
		      struct d40_phy_lli *lli,
		      dma_addr_t lli_phys,
		      u32 reg_cfg,
		      u32 data_width,
		      int psize,
		      bool term_int)
{
	int total_size = 0;
	int i;
	struct scatterlist *current_sg = sg;
	dma_addr_t next_lli_phys;
	dma_addr_t dst;
	int err = 0;

	for_each_sg(sg, current_sg, sg_len, i) {
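		/* Build one hardware LLI per scatterlist entry, chaining each to the physical address of the next descriptor */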

		total_size += sg_dma_len(current_sg);

		/* If this scatter list entry is the last one, no next link */
		if (sg_len - 1 == i)
			next_lli_phys = 0;
		else
			next_lli_phys = ALIGN(lli_phys + (i + 1) *
					      sizeof(struct d40_phy_lli),
					      D40_LLI_ALIGN);

		if (target)
			dst = target;
		else
			dst = sg_phys(current_sg);

		err = d40_phy_fill_lli(&lli[i],
				       dst,
				       sg_dma_len(current_sg),
				       psize,
				       next_lli_phys,
				       reg_cfg,
				       !next_lli_phys,
				       data_width,
				       target == dst);
		if (err)
			goto err;
	}

	return total_size;
err:
	return err;
}
Example #23
static int nommu_map_sg(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir,
			unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	WARN_ON(nents == 0 || sg[0].length == 0);

	for_each_sg(sg, s, nents, i) {
		BUG_ON(!sg_page(s));

		dma_cache_sync(dev, sg_virt(s), s->length, dir);

		s->dma_address = sg_phys(s);
		s->dma_length = s->length;
	}

	return nents;
}
Example #24
static int tile_dma_map_sg(struct device *dev, struct scatterlist *sglist,
			   int nents, enum dma_data_direction direction,
			   unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(!valid_dma_direction(direction));

	WARN_ON(nents == 0 || sglist->length == 0);

	for_each_sg(sglist, sg, nents, i) {
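		/* Identity map: the DMA address equals the physical address; __dma_prep_pa_range() prepares the range in the caches for the device */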
		sg->dma_address = sg_phys(sg);
		__dma_prep_pa_range(sg->dma_address, sg->length, direction);
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		sg->dma_length = sg->length;
#endif
	}

	return nents;
}
Example #25
/**
 * _vb2_ion_mmap_pfn_range() - map physical pages(vcm) to userspace
 * @vma:	virtual memory region for the mapping
 * @sg:		scatterlist to be mapped
 * @nents:	number of scatterlist to be mapped
 * @size:	size of the memory to be mapped
 * @vm_ops:	vm operations to be assigned to the created area
 * @priv:	private data to be associated with the area
 *
 * Returns 0 on success.
 */
static int _vb2_ion_mmap_pfn_range(struct vm_area_struct *vma,
				   struct scatterlist *sg,
				   int nents,
				   unsigned long size,
				   const struct vm_operations_struct *vm_ops,
				   void *priv)
{
	struct scatterlist *s;
	dma_addr_t addr;
	size_t len;
	unsigned long org_vm_start = vma->vm_start;
	int vma_size = vma->vm_end - vma->vm_start;
	resource_size_t remap_size;
	int mapped_size = 0;
	int remap_break = 0;
	int ret, i = 0;

	for_each_sg(sg, s, nents, i) {
		addr = sg_phys(s);
		len = sg_dma_len(s);
		if ((mapped_size + len) > vma_size) {
			remap_size = vma_size - mapped_size;
			remap_break = 1;
		} else {
			remap_size = len;
		}

		ret = remap_pfn_range(vma, vma->vm_start, addr >> PAGE_SHIFT,
				      remap_size, vma->vm_page_prot);
		if (ret) {
			pr_err("Remapping failed, error: %d\n", ret);
			return ret;
		}

		dbg(6, "%dth page vaddr(0x%08x), paddr(0x%08x),	size(0x%08x)\n",
			i++, (u32)vma->vm_start, addr, len);

		mapped_size += remap_size;
		vma->vm_start += len;

		if (remap_break)
			break;
	}
Example #26
static int hexagon_map_sg(struct device *hwdev, struct scatterlist *sg,
			  int nents, enum dma_data_direction dir,
			  struct dma_attrs *attrs)
{
	struct scatterlist *s;
	int i;

	WARN_ON(nents == 0 || sg[0].length == 0);

	for_each_sg(sg, s, nents, i) {
		s->dma_address = sg_phys(s);
		if (!check_addr("map_sg", hwdev, s->dma_address, s->length))
			return 0;

		s->dma_length = s->length;

		flush_dcache_range(PAGE_OFFSET + s->dma_address,
				   PAGE_OFFSET + s->dma_address + s->length);
	}

	return nents;
}
Example #27
uint BCMFASTPATH
osl_dma_map(osl_t *osh, void *va, uint size, int direction, void *p, hnddma_seg_map_t *dmah)
{
	int dir;

	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
	dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;

#if defined(__ARM_ARCH_7A__) && defined(BCMDMASGLISTOSL)
	if (dmah != NULL) {
		int32 nsegs, i, totsegs = 0, totlen = 0;
		struct scatterlist *sg, _sg[16];
		struct sk_buff *skb;
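		/* Walk the skb chain: build a scatterlist for each packet, DMA-map it, then record every segment's address and length in dmah */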
		for (skb = (struct sk_buff *)p; skb != NULL; skb = PKTNEXT(osh, skb)) {
			sg = &_sg[totsegs];
			if (skb_is_nonlinear(skb)) {
				nsegs = skb_to_sgvec(skb, sg, 0, PKTLEN(osh, skb));
				ASSERT((nsegs > 0) && (nsegs <= 16));
				pci_map_sg(osh->pdev, sg, nsegs, dir);
			} else {
				nsegs = 1;
				sg->page_link = 0;
				sg_set_buf(sg, PKTDATA(osh, skb), PKTLEN(osh, skb));

				pci_map_single(osh->pdev, PKTDATA(osh, skb),
				    PKTISCTF(osh, skb) ? CTFMAPSZ : PKTLEN(osh, skb), dir);
			}
			totsegs += nsegs;
			totlen += PKTLEN(osh, skb);
		}
		dmah->nsegs = totsegs;
		dmah->origsize = totlen;
		for (i = 0, sg = _sg; i < totsegs; i++, sg++) {
			dmah->segs[i].addr = sg_phys(sg);
			dmah->segs[i].length = sg->length;
		}
		return dmah->segs[0].addr;
	}
#endif

	return (pci_map_single(osh->pdev, va, size, dir));
}
Example #28
static int hexagon_map_sg(struct device *hwdev, struct scatterlist *sg,
			  int nents, enum dma_data_direction dir,
			  unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	WARN_ON(nents == 0 || sg[0].length == 0);

	for_each_sg(sg, s, nents, i) {
		s->dma_address = sg_phys(s);
		if (!check_addr("map_sg", hwdev, s->dma_address, s->length))
			return 0;

		s->dma_length = s->length;

		if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
			continue;

		flush_dcache_range(dma_addr_to_virt(s->dma_address),
				   dma_addr_to_virt(s->dma_address + s->length));
	}

	return nents;
}
Example #29
static int ps3_sb_map_sg(struct device *_dev, struct scatterlist *sgl,
	int nents, enum dma_data_direction direction, struct dma_attrs *attrs)
{
#if defined(CONFIG_PS3_DYNAMIC_DMA)
	BUG_ON("do");
	return -EPERM;
#else
	struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
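		/* Map each segment into the device's DMA region via the PS3 hypervisor interface */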
		int result = ps3_dma_map(dev->d_region, sg_phys(sg),
					sg->length, &sg->dma_address, 0);

		if (result) {
			pr_debug("%s:%d: ps3_dma_map failed (%d)\n",
				__func__, __LINE__, result);
			return -EINVAL;
		}

		sg->dma_length = sg->length;
	}

	return nents;
#endif
}
Example #30
static int rockchip_gem_get_pages(struct rockchip_gem_object *rk_obj)
{
	struct drm_device *drm = rk_obj->base.dev;
	int ret, i;
	struct scatterlist *s;

	rk_obj->pages = drm_gem_get_pages(&rk_obj->base);
	if (IS_ERR(rk_obj->pages))
		return PTR_ERR(rk_obj->pages);

	rk_obj->num_pages = rk_obj->base.size >> PAGE_SHIFT;

	rk_obj->sgt = drm_prime_pages_to_sg(rk_obj->pages, rk_obj->num_pages);
	if (IS_ERR(rk_obj->sgt)) {
		ret = PTR_ERR(rk_obj->sgt);
		goto err_put_pages;
	}

	/*
	 * Fake up the SG table so that dma_sync_sg_for_device() can be used
	 * to flush the pages associated with it.
	 *
	 * TODO: Replace this by drm_clflush_sg() once it can be implemented
	 * without relying on symbols that are not exported.
	 */
	for_each_sg(rk_obj->sgt->sgl, s, rk_obj->sgt->nents, i)
		sg_dma_address(s) = sg_phys(s);

	dma_sync_sg_for_device(drm->dev, rk_obj->sgt->sgl, rk_obj->sgt->nents,
			       DMA_TO_DEVICE);

	return 0;

err_put_pages:
	drm_gem_put_pages(&rk_obj->base, rk_obj->pages, false, false);
	return ret;
}