Example #1
static u64 ipath_dma_map_single(struct ib_device *dev,
			        void *cpu_addr, size_t size,
			        enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
	return (u64) cpu_addr;
}
Example #2
static u64 rvt_dma_map_single(struct ib_device *dev, void *cpu_addr,
			      size_t size, enum dma_data_direction direction)
{
	if (WARN_ON(!valid_dma_direction(direction)))
		return BAD_DMA_ADDRESS;

	return (u64)cpu_addr;
}
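Unlike Example #1, which brings the whole kernel down with BUG_ON when the direction is invalid, this variant warns and returns a sentinel the caller can test. A minimal sketch of such a check follows; rvt_post_buf is a hypothetical caller written for illustration, not part of the rvt driver:

static int rvt_post_buf(struct ib_device *dev, void *buf, size_t len)
{
	u64 handle = rvt_dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	/* BAD_DMA_ADDRESS is the sentinel for a refused mapping. */
	if (handle == BAD_DMA_ADDRESS)
		return -EINVAL;

	/* ... hand `handle` to the hardware ... */
	return 0;
}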
Example #3
/*
 * see if a buffer address is in an 'unsafe' range.  if it is
 * allocate a 'safe' buffer and copy the unsafe buffer into it.
 * substitute the safe buffer for the unsafe one.
 * (basically move the buffer from an unsafe area to a safe one)
 */
dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	dev_dbg(dev, "%s(ptr=%p,size=%zu,dir=%x)\n",
		__func__, ptr, size, dir);

	BUG_ON(!valid_dma_direction(dir));

	return map_single(dev, ptr, size, dir);
}
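The bounce itself happens in map_single(), which is not shown here. Below is a conceptual sketch of the mechanism the comment describes, assuming hypothetical helpers is_unsafe_range() and alloc_safe_buffer(); the real dmabounce internals differ:

static dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	dma_addr_t dma_addr = virt_to_dma(dev, ptr);

	if (is_unsafe_range(dev, dma_addr, size)) {
		/* Allocate a buffer the device can reach and substitute it. */
		void *safe = alloc_safe_buffer(dev, ptr, size, dir);

		if (!safe)
			return ~0;	/* error convention as in Example #11 */
		if (dir != DMA_FROM_DEVICE)
			memcpy(safe, ptr, size);
		dma_addr = virt_to_dma(dev, safe);
	}
	return dma_addr;
}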
Example #4
static inline dma_addr_t mshci_s3c_dma_map_page(struct device *dev,
		struct page *page, unsigned long offset, size_t size,
		enum dma_data_direction dir, int flush_type)
{
	BUG_ON(!valid_dma_direction(dir));

	mshci_s3c_dma_page_cpu_to_dev(page, offset, size, dir, flush_type);

	return page_to_dma(dev, page) + offset;
}
Example #5
dma_addr_t dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir)
{
	dev_dbg(dev, "%s(page=%p,off=%#lx,size=%zx,dir=%x)\n",
		__func__, page, offset, size, dir);

	BUG_ON(!valid_dma_direction(dir));

	return map_single_or_page(dev, NULL, page, offset, size, dir);
}
Example #6
/**
 * dma_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the dma_map_single interface.
 * Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}.
 *
 * Device ownership issues as mentioned for dma_map_single are the same
 * here.
 */
int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	       enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i, j;

	BUG_ON(!valid_dma_direction(dir));

	for_each_sg(sg, s, nents, i) {
		s->dma_address = __dma_map_page(dev, sg_page(s), s->offset,
						s->length, dir);
		if (dma_mapping_error(dev, s->dma_address))
			goto bad_mapping;
	}
	return nents;

 bad_mapping:
	for_each_sg(sg, s, i, j)
		__dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
	return 0;
}
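Callers of dma_map_sg() read the per-segment bus addresses back with sg_dma_address()/sg_dma_len() and must unmap with the original nents, not the returned count. A sketch of typical usage; my_start_dma is a hypothetical driver function, not taken from the examples above:

static int my_start_dma(struct device *dev, struct scatterlist *sgl, int nents)
{
	struct scatterlist *s;
	int i, mapped;

	/* Ownership of the buffers passes to the device here. */
	mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
	if (mapped == 0)
		return -ENOMEM;

	/* Program the device with each segment's bus address and length. */
	for_each_sg(sgl, s, mapped, i)
		pr_debug("seg %d: addr=%pad len=%u\n",
			 i, &sg_dma_address(s), sg_dma_len(s));

	/* ... start the transfer, wait for completion ... */

	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
	return 0;
}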
Example #7
static int siw_dma_map_sg(struct ib_device *dev, struct scatterlist *sgl,
			  int n_sge, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(!valid_dma_direction(dir));

	/* This is just a validity check */
	for_each_sg(sgl, sg, n_sge, i)
		if (page_address(sg_page(sg)) == NULL)
			return 0;

	return n_sge;
}
Example #8
static u64 siw_dma_map_page(struct ib_device *dev, struct page *page,
			    unsigned long offset, size_t size,
			    enum dma_data_direction dir)
{
	u64 kva = 0;

	BUG_ON(!valid_dma_direction(dir));

	if (offset + size <= PAGE_SIZE) {
		kva = (u64) page_address(page);
		if (kva)
			kva += offset;
	}
	return kva;
}
Example #9
static u64 rvt_dma_map_page(struct ib_device *dev, struct page *page,
			    unsigned long offset, size_t size,
			    enum dma_data_direction direction)
{
	u64 addr;

	if (WARN_ON(!valid_dma_direction(direction)))
		return BAD_DMA_ADDRESS;

	addr = (u64)page_address(page);
	if (addr)
		addr += offset;

	return addr;
}
Example #10
int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction direction)
{
	int i;

	BUG_ON(!valid_dma_direction(direction));

	for_each_sg(sg, sg, nents, i) {
		void *addr;

		addr = sg_virt(sg);
		if (addr) {
			__dma_sync_for_device(addr, sg->length, direction);
			sg->dma_address = sg_phys(sg);
		}
	}

	return nents;
}
Example #11
dma_addr_t dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir)
{
	dev_dbg(dev, "%s(page=%p,off=%#lx,size=%zx,dir=%x)\n",
		__func__, page, offset, size, dir);

	BUG_ON(!valid_dma_direction(dir));

	if (PageHighMem(page)) {
		dev_err(dev, "DMA buffer bouncing of HIGHMEM pages "
			     "is not supported\n");
		return ~0;
	}

	return map_single(dev, page_address(page) + offset, size, dir);
}
Example #12
static int ipath_map_sg(struct ib_device *dev, struct scatterlist *sgl,
			int nents, enum dma_data_direction direction)
{
	struct scatterlist *sg;
	u64 addr;
	int i;
	int ret = nents;

	BUG_ON(!valid_dma_direction(direction));

	for_each_sg(sgl, sg, nents, i) {
		addr = (u64) page_address(sg_page(sg));
		/* TODO: handle highmem pages */
		if (!addr) {
			ret = 0;
			break;
		}
	}
	return ret;
}
Example #13
static int tile_dma_map_sg(struct device *dev, struct scatterlist *sglist,
			   int nents, enum dma_data_direction direction,
			   unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(!valid_dma_direction(direction));

	WARN_ON(nents == 0 || sglist->length == 0);

	for_each_sg(sglist, sg, nents, i) {
		sg->dma_address = sg_phys(sg);
		__dma_prep_pa_range(sg->dma_address, sg->length, direction);
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		sg->dma_length = sg->length;
#endif
	}

	return nents;
}
Example #14
static int siw_dma_map_sg(struct ib_device *dev, struct scatterlist *sgl,
			  int n_sge, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(!valid_dma_direction(dir));

	for_each_sg(sgl, sg, n_sge, i) {
		/* This is just a validity check */
		if (unlikely(page_address(sg_page(sg)) == NULL)) {
			n_sge = 0;
			break;
		}
		sg->dma_address = (dma_addr_t) page_address(sg_page(sg));
		sg_dma_len(sg) = sg->length;
	}
	return n_sge;
}
Example #15
static void c6x_dma_sync(struct device *dev, phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));

	switch (dir) {
	case DMA_FROM_DEVICE:
		L2_cache_block_invalidate(paddr, paddr + size);
		break;
	case DMA_TO_DEVICE:
		L2_cache_block_writeback(paddr, paddr + size);
		break;
	case DMA_BIDIRECTIONAL:
		L2_cache_block_writeback_invalidate(paddr, paddr + size);
		break;
	default:
		break;
	}
}
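The switch encodes the standard streaming-DMA cache rules: invalidate before the device writes (DMA_FROM_DEVICE), write back before the device reads (DMA_TO_DEVICE), and both for DMA_BIDIRECTIONAL. On a cache-incoherent, IOMMU-less part like this, a map_page implementation can then be a thin wrapper around the sync helper; the sketch below is an assumption, not code from the examples:

static dma_addr_t c6x_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	phys_addr_t paddr = page_to_phys(page) + offset;

	/* Make CPU caches consistent before the device touches the buffer. */
	c6x_dma_sync(dev, paddr, size, dir);

	/* No IOMMU: the bus address is the physical address. */
	return paddr;
}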
Example #16
static int ipath_map_sg(struct ib_device *dev, struct scatterlist *sg, int nents,
			enum dma_data_direction direction)
{
	u64 addr;
	int i;
	int ret = nents;

	BUG_ON(!valid_dma_direction(direction));

	for (i = 0; i < nents; i++) {
		addr = (u64) page_address(sg[i].page);
		/* TODO: handle highmem pages */
		if (!addr) {
			ret = 0;
			break;
		}
	}
	return ret;
}
Example #17
static void c6x_dma_sync(dma_addr_t handle, size_t size,
			 enum dma_data_direction dir)
{
	unsigned long paddr = handle;

	BUG_ON(!valid_dma_direction(dir));

	switch (dir) {
	case DMA_FROM_DEVICE:
		L2_cache_block_invalidate(paddr, paddr + size);
		break;
	case DMA_TO_DEVICE:
		L2_cache_block_writeback(paddr, paddr + size);
		break;
	case DMA_BIDIRECTIONAL:
		L2_cache_block_writeback_invalidate(paddr, paddr + size);
		break;
	default:
		break;
	}
}
Example #18
static u64 qib_dma_map_page(struct ib_device *dev, struct page *page,
			    unsigned long offset, size_t size,
			    enum dma_data_direction direction)
{
	u64 addr;

	BUG_ON(!valid_dma_direction(direction));

	if (offset + size > PAGE_SIZE) {
		addr = BAD_DMA_ADDRESS;
		goto done;
	}

	addr = (u64) page_address(page);
	if (addr)
		addr += offset;
	/* TODO: handle highmem pages */

done:
	return addr;
}
Example #19
static int rvt_map_sg(struct ib_device *dev, struct scatterlist *sgl,
		      int nents, enum dma_data_direction direction)
{
	struct scatterlist *sg;
	u64 addr;
	int i;
	int ret = nents;

	if (WARN_ON(!valid_dma_direction(direction)))
		return 0;

	for_each_sg(sgl, sg, nents, i) {
		addr = (u64)page_address(sg_page(sg));
		if (!addr) {
			ret = 0;
			break;
		}
		sg->dma_address = addr + sg->offset;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		sg->dma_length = sg->length;
#endif
	}

	return ret;
}
Example #20
/**
 * dma_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the dma_map_single interface.
 * Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}.
 *
 * Device ownership issues as mentioned for dma_map_single are the same
 * here.
 */
int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	dma_addr_t dma_address;
	struct scatterlist *s;
	int i, j;

	BUG_ON(!valid_dma_direction(dir));

	for_each_sg(sg, s, nents, i) {
		dma_address = __dma_map_page(dev, sg_page(s), s->offset,
					     s->length, dir);

		/*
		 * When the page doesn't have a valid PFN, we assume that
		 * dma_address is already present.
		 */
		if (pfn_valid(page_to_pfn(sg_page(s))))
			s->dma_address = dma_address;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		s->dma_length = s->length;
#endif

		if (dma_mapping_error(dev, s->dma_address))
			goto bad_mapping;
	}
	return nents;

 bad_mapping:
	for_each_sg(sg, s, i, j)
		__dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
	return 0;
}
Example #21
static void ipath_dma_unmap_page(struct ib_device *dev,
				 u64 addr, size_t size,
				 enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
}
Example #22
static void ipath_unmap_sg(struct ib_device *dev,
			   struct scatterlist *sg, int nents,
			   enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
}