Example #1
static inline void unmap_single(struct device *dev, struct safe_buffer *buf,
		size_t size, enum dma_data_direction dir)
{
	BUG_ON(buf->size != size);
	BUG_ON(buf->direction != dir);

	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
		buf->safe, buf->safe_dma_addr);

	DO_STATS(dev->archdata.dmabounce->bounce_count++);

	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
		void *ptr = buf->ptr;

		dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %d\n",
			__func__, buf->safe, ptr, size);
		memcpy(ptr, buf->safe, size);

		/*
		 * Since we may have written to a page cache page,
		 * we need to ensure that the data will be coherent
		 * with user mappings.
		 */
		__cpuc_flush_dcache_area(ptr, size);
	}
	free_safe_buffer(dev->archdata.dmabounce, buf);
}
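DO_STATS() is used throughout these snippets but never shown; in the file these examples come from it is a small wrapper that compiles the statistics update away unless a debug switch is set. A plausible definition along those lines (the STATS symbol is an assumption, not confirmed by the snippets):

/* Sketch of a DO_STATS-style macro: the expression X is only evaluated
 * when statistics gathering is compiled in.
 */
#ifdef STATS
#define DO_STATS(X) do { X ; } while (0)
#else
#define DO_STATS(X) do { } while (0)
#endif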
Example #2
int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
		unsigned long off, size_t sz, enum dma_data_direction dir)
{
	struct safe_buffer *buf;

	dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
		__func__, addr, off, sz, dir);

	buf = find_safe_buffer_dev(dev, addr, __func__);
	if (!buf)
		return 1;

	BUG_ON(buf->direction != dir);

	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
		buf->safe, buf->safe_dma_addr);

	DO_STATS(dev->archdata.dmabounce->bounce_count++);

	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) {
		dev_dbg(dev, "%s: copy out unsafe %p to safe %p, size %d\n",
			__func__, buf->ptr + off, buf->safe + off, sz);
		memcpy(buf->safe + off, buf->ptr + off, sz);
	}
	return 0;
}
Example #3
static inline void unmap_single(struct device *dev, dma_addr_t dma_addr,
		size_t size, enum dma_data_direction dir)
{
	struct safe_buffer *buf = find_safe_buffer_dev(dev, dma_addr, "unmap");

	if (buf) {
		BUG_ON(buf->size != size);
		BUG_ON(buf->direction != dir);

		dev_dbg(dev,
			"%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
			__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
			buf->safe, buf->safe_dma_addr);

		DO_STATS(dev->archdata.dmabounce->bounce_count++);

		if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
			void *ptr = buf->ptr;

			dev_dbg(dev,
				"%s: copy back safe %p to unsafe %p size %d\n",
				__func__, buf->safe, ptr, size);
			memcpy(ptr, buf->safe, size);

			/*
			 * DMA buffers must have the same cache properties
			 * as if they were really used for DMA - which means
			 * data must be written back to RAM.  Note that
			 * we don't use dmac_flush_range() here for the
			 * bidirectional case because we know the cache
			 * lines will be coherent with the data written.
			 */
			dmac_clean_range(ptr, ptr + size);
			outer_clean_range(__pa(ptr), __pa(ptr) + size);
		}
		free_safe_buffer(dev->archdata.dmabounce, buf);
	}
}
Example #4
static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
	struct safe_buffer *buf;

	if (device_info)
		DO_STATS ( device_info->map_op_count++ );

	buf = alloc_safe_buffer(device_info, ptr, size, dir);
	if (buf == NULL) {
		dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
		       __func__, ptr);
		return ~0;
	}

	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
		buf->safe, buf->safe_dma_addr);

	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) {
		dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
			__func__, ptr, buf->safe, size);
		memcpy(buf->safe, ptr, size);
	}

	return buf->safe_dma_addr;
}
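For context, map_single() above is the bounce-side half of the ordinary streaming-DMA pattern a driver sees through dma_map_single()/dma_unmap_single(). A hedged caller-side sketch (example_tx and program_device are invented names; error handling omitted):

#include <linux/dma-mapping.h>

/* Hypothetical: stands in for whatever tells the hardware where to DMA. */
extern void program_device(dma_addr_t addr, size_t len);

static int example_tx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	/* For DMA_TO_DEVICE the bounce code copies buf into the safe buffer */
	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	program_device(handle, len);	/* device reads from 'handle' */

	/* Unmapping releases the safe buffer once the transfer is done */
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}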
Example #5
static inline void unmap_page(struct device *dev, dma_addr_t dma_addr,
		size_t size, enum dma_data_direction dir)
{
	struct safe_buffer *buf = find_safe_buffer_dev(dev, dma_addr, "unmap");

	if (buf) {
		BUG_ON(buf->size != size);
		BUG_ON(buf->direction != dir);
		BUG_ON(!buf->page);
		BUG_ON(buf->ptr);

		dev_dbg(dev,
			"%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
			__func__, page_address(buf->page),
			page_to_dma(dev, buf->page),
			buf->safe, buf->safe_dma_addr);

		DO_STATS(dev->archdata.dmabounce->bounce_count++);
		if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
			void *ptr;
			ptr = kmap_atomic(buf->page, KM_BOUNCE_READ) + buf->offset;
			memcpy(ptr, buf->safe, size);
			__cpuc_flush_dcache_area(ptr, size);
			kunmap_atomic(ptr - buf->offset, KM_BOUNCE_READ);
		}
		free_safe_buffer(dev->archdata.dmabounce, buf);
	} else {
		__dma_page_dev_to_cpu(dma_to_page(dev, dma_addr),
			      dma_addr & ~PAGE_MASK, size, dir);
	}
}
Example #6
static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
	dma_addr_t dma_addr;
	int needs_bounce = 0;

	if (device_info)
		DO_STATS ( device_info->map_op_count++ );

	dma_addr = virt_to_dma(dev, ptr);

	if (dev->dma_mask) {
		unsigned long mask = *dev->dma_mask;
		unsigned long limit;

		limit = (mask + 1) & ~mask;
		if (limit && size > limit) {
			dev_err(dev, "DMA mapping too big (requested %#x "
				"mask %#Lx)\n", size, *dev->dma_mask);
			return ~0;
		}

		/*
		 * Figure out if we need to bounce from the DMA mask.
		 */
		needs_bounce = (dma_addr | (dma_addr + size - 1)) & ~mask;
	}

	if (device_info && (needs_bounce || dma_needs_bounce(dev, dma_addr, size))) {
		struct safe_buffer *buf;

		buf = alloc_safe_buffer(device_info, ptr, size, dir);
		if (buf == 0) {
			dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
			       __func__, ptr);
			return 0;
		}

		dev_dbg(dev,
			"%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
			__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
			buf->safe, buf->safe_dma_addr);

		if ((dir == DMA_TO_DEVICE) ||
		    (dir == DMA_BIDIRECTIONAL)) {
			dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
				__func__, ptr, buf->safe, size);
			memcpy(buf->safe, ptr, size);
		}
		ptr = buf->safe;

		dma_addr = buf->safe_dma_addr;
	} else {
		/*
		 * We don't need to sync the DMA buffer since
		 * it was allocated via the coherent allocators.
		 */
		dma_cache_maint(ptr, size, dir);
	}

	return dma_addr;
}
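The bounce decision above leans on two bit tricks: (mask + 1) & ~mask yields the size of the window a contiguous low-bit mask can address (zero means no limit once the mask fills the word), and (dma_addr | (dma_addr + size - 1)) & ~mask is non-zero when either end of the transfer lies above the mask. A stand-alone sketch with made-up values that prints what the expressions evaluate to:

#include <stdio.h>
#include <stdint.h>

/* Illustration only: all values are invented for demonstration. */
int main(void)
{
	uint32_t mask = 0x00ffffff;		/* device addresses 16 MiB    */
	uint32_t limit = (mask + 1) & ~mask;	/* 0x01000000: window size    */

	uint32_t dma_addr = 0x01800000;		/* bus address of the buffer  */
	uint32_t size = 0x2000;			/* 8 KiB transfer             */

	/* non-zero when the first or last byte falls outside the mask */
	uint32_t needs_bounce = (dma_addr | (dma_addr + size - 1)) & ~mask;

	printf("limit        = %#x\n", limit);
	printf("needs_bounce = %#x (%s)\n", needs_bounce,
	       needs_bounce ? "bounce buffer required" : "direct DMA is fine");
	return 0;
}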
Example #7
static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
		const char *name, unsigned long size)
{
	pool->size = size;
	DO_STATS(pool->allocs = 0);
	pool->pool = dma_pool_create(name, dev, size,
				     0 /* byte alignment */,
				     0 /* no page-crossing issues */);

	return pool->pool ? 0 : -ENOMEM;
}
Example #8
static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
		const char *name, unsigned long size)
{
	pool->size = size;
	DO_STATS(pool->allocs = 0);
	pool->pool = dma_pool_create(name, dev, size,
				     0 /* byte alignment */,
				     0 /* no page-crossing issues */);

	return pool->pool ? 0 : -ENOMEM;
}
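Once such a pool exists, safe buffers are obtained from it with the regular dma_pool allocator calls. A hedged sketch of the allocate/release pairing (it assumes a struct dmabounce_pool laid out as in the examples, with ->pool filled in by dmabounce_init_pool()):

#include <linux/dmapool.h>

static void *example_alloc_safe(struct dmabounce_pool *pool, dma_addr_t *dma)
{
	/* GFP_ATOMIC: mappings are frequently created in atomic context */
	return dma_pool_alloc(pool->pool, GFP_ATOMIC, dma);
}

static void example_free_safe(struct dmabounce_pool *pool, void *vaddr,
			      dma_addr_t dma)
{
	dma_pool_free(pool->pool, vaddr, dma);
}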
Example #9
static inline void
unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		enum dma_data_direction dir)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
	struct safe_buffer *buf = NULL;

	/*
	 * Trying to unmap an invalid mapping
	 */
	if (dma_mapping_error(dma_addr)) {
		dev_err(dev, "Trying to unmap invalid mapping\n");
		return;
	}

	if (device_info)
		buf = find_safe_buffer(device_info, dma_addr);

	if (buf) {
		BUG_ON(buf->size != size);

		dev_dbg(dev,
			"%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
			__func__, buf->ptr, (void *) virt_to_dma(dev, buf->ptr),
			buf->safe, (void *) buf->safe_dma_addr);

		DO_STATS ( device_info->bounce_count++ );

		if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
			void *ptr = buf->ptr;

			dev_dbg(dev,
				"%s: copy back safe %p to unsafe %p size %d\n",
				__func__, buf->safe, ptr, size);
			memcpy(ptr, buf->safe, size);

			/*
			 * DMA buffers must have the same cache properties
			 * as if they were really used for DMA - which means
			 * data must be written back to RAM.  Note that
			 * we don't use dmac_flush_range() here for the
			 * bidirectional case because we know the cache
			 * lines will be coherent with the data written.
			 */
			dmac_clean_range(ptr, ptr + size);
			outer_clean_range(__pa(ptr), __pa(ptr) + size);
		}
		free_safe_buffer(device_info, buf);
	}
}
Example #10
static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
		const char *name, unsigned long size)
{
	unsigned int align = 0;
	if (!(*dev->dma_mask & 0x1))
		align = 1 << ffs(*dev->dma_mask);

	if (align & (align-1)) {
		dev_warn(dev, "invalid DMA mask %#llx\n", *dev->dma_mask);
		return -ENOMEM;
	}
	pool->size = size;
	DO_STATS(pool->allocs = 0);
	pool->pool = dma_pool_create(name, dev, size, align,
				     0 /* no page-crossing issues */);

	return pool->pool ? 0 : -ENOMEM;
}
Example #11
static inline void unmap_single(struct device *dev, dma_addr_t dma_addr,
		size_t size, enum dma_data_direction dir)
{
	struct safe_buffer *buf = find_safe_buffer_dev(dev, dma_addr, "unmap");

	if (buf) {
		BUG_ON(buf->size != size);
		BUG_ON(buf->direction != dir);
		BUG_ON(buf->page);
		BUG_ON(!buf->ptr);

		dev_dbg(dev,
			"%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
			__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
			buf->safe, buf->safe_dma_addr);

		DO_STATS(dev->archdata.dmabounce->bounce_count++);

		if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
			void *ptr = buf->ptr;

			dev_dbg(dev,
				"%s: copy back safe %p to unsafe %p size %d\n",
				__func__, buf->safe, ptr, size);
			memcpy(ptr, buf->safe, size);

			/*
			 * DMA buffers must have the same cache properties
			 * as if they were really used for DMA - which means
			 * data must be written back to RAM.  Note that
			 * we don't use dmac_flush_range() here for the
			 * bidirectional case because we know the cache
			 * lines will be coherent with the data written.
			 */
			dmac_clean_range(ptr, ptr + size);
			outer_clean_range(__pa(ptr), __pa(ptr) + size);
		}
		free_safe_buffer(dev->archdata.dmabounce, buf);
	} else {
		__dma_single_dev_to_cpu(dma_to_virt(dev, dma_addr), size, dir);
	}
}
Example #12
static inline dma_addr_t map_single_or_page(struct device *dev, void *ptr,
		struct page *page, unsigned long offset,  size_t size,
		enum dma_data_direction dir)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
	dma_addr_t dma_addr;
	int needs_bounce = 0;

	if (device_info)
		DO_STATS ( device_info->map_op_count++ );

	if (page)
		dma_addr = page_to_dma(dev, page) + offset;
	else
		dma_addr = virt_to_dma(dev, ptr);

	if (dev->dma_mask) {
		unsigned long mask = *dev->dma_mask;
		unsigned long limit;

		limit = (mask - 1) | mask;
		limit = (limit + 1) & ~limit;
		if (limit && size > limit) {
			dev_err(dev, "DMA mapping too big (requested %#x "
				"mask %#Lx)\n", size, *dev->dma_mask);
			return ~0;
		}

		/*
		 * Figure out if we need to bounce from the DMA mask.
		 */
		needs_bounce = (dma_addr & ~mask) ||
			(limit && (dma_addr + size > limit));
	}

	if (device_info && (needs_bounce || dma_needs_bounce(dev, dma_addr, size))) {
		struct safe_buffer *buf;

		buf = alloc_safe_buffer(device_info, ptr, page, offset, size, dir);
		if (buf == 0) {
			dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
			       __func__, ptr);
			return 0;
		}

		if (buf->page)
			dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped "
				"to %p (dma=%#x)\n", __func__,
				page_address(buf->page),
				page_to_dma(dev, buf->page),
				buf->safe, buf->safe_dma_addr);
		else
			dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped "
				"to %p (dma=%#x)\n", __func__,
				buf->ptr, virt_to_dma(dev, buf->ptr),
				buf->safe, buf->safe_dma_addr);

		if ((dir == DMA_TO_DEVICE) ||
		    (dir == DMA_BIDIRECTIONAL)) {
			if (page)
				ptr = kmap_atomic(page, KM_BOUNCE_READ) + offset;
			dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
				__func__, ptr, buf->safe, size);
			memcpy(buf->safe, ptr, size);
			wmb();
			if (page)
				kunmap_atomic(ptr - offset, KM_BOUNCE_READ);
		}
		dma_addr = buf->safe_dma_addr;
	} else {
		/*
		 * We don't need to sync the DMA buffer since
		 * it was allocated via the coherent allocators.
		 */
		if (page)
			__dma_page_cpu_to_dev(page, offset, size, dir);
		else
			__dma_single_cpu_to_dev(ptr, size, dir);
	}

	return dma_addr;
}
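This version derives the limit in two steps instead of (mask + 1) & ~mask: the first step fills in any unset low bits, so the window is taken from the highest set bit even for a mask whose low bits are not all ones. A stand-alone sketch with an invented mask showing the intermediate values:

#include <stdio.h>
#include <stdint.h>

/* Illustration only: the mask is deliberately chosen with unset low bits. */
int main(void)
{
	uint32_t mask = 0x0ffffff0;

	uint32_t limit = (mask - 1) | mask;	/* 0x0fffffff: low bits filled */
	limit = (limit + 1) & ~limit;		/* 0x10000000: window size     */

	printf("mask  = %#x\nlimit = %#x\n", mask, limit);
	return 0;
}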
Example #13
static inline void
sync_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		enum dma_data_direction dir)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
	struct safe_buffer *buf = NULL;

	if (device_info)
		buf = find_safe_buffer(device_info, dma_addr);

	if (buf) {
		/*
		 * Both of these checks from original code need to be
		 * commented out b/c some drivers rely on the following:
		 *
		 * 1) Drivers may map a large chunk of memory into DMA space
		 *    but only sync a small portion of it. Good example is
		 *    allocating a large buffer, mapping it, and then
		 *    breaking it up into small descriptors. No point
		 *    in syncing the whole buffer if you only have to
		 *    touch one descriptor.
		 *
		 * 2) Buffers that are mapped as DMA_BIDIRECTIONAL are
		 *    usually only synced in one dir at a time.
		 *
		 * See drivers/net/eepro100.c for examples of both cases.
		 *
		 * -ds
		 *
		 * BUG_ON(buf->size != size);
		 * BUG_ON(buf->direction != dir);
		 */

		dev_dbg(dev,
			"%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
			__func__, buf->ptr, (void *) virt_to_dma(dev, buf->ptr),
			buf->safe, (void *) buf->safe_dma_addr);

		DO_STATS ( device_info->bounce_count++ );

		switch (dir) {
		case DMA_FROM_DEVICE:
			dev_dbg(dev,
				"%s: copy back safe %p to unsafe %p size %d\n",
				__func__, buf->safe, buf->ptr, size);
			memcpy(buf->ptr, buf->safe, size);
			break;
		case DMA_TO_DEVICE:
			dev_dbg(dev,
				"%s: copy out unsafe %p to safe %p, size %d\n",
				__func__, buf->ptr, buf->safe, size);
			memcpy(buf->safe, buf->ptr, size);
			break;
		case DMA_BIDIRECTIONAL:
			BUG();	/* is this allowed?  what does it mean? */
		default:
			BUG();
		}
		/*
		 * No need to sync the safe buffer - it was allocated
		 * via the coherent allocators.
		 */
	} else {
		consistent_sync(dma_to_virt(dev, dma_addr), size, dir);
	}
}
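The long comment in this example describes drivers that map one large region and then sync it piecewise, one direction at a time. A hedged sketch of that pattern using the generic streaming-DMA sync calls (the buffer, its length, and the chunk size are illustrative):

#include <linux/dma-mapping.h>

static void example_partial_sync(struct device *dev, void *buf,
				 size_t buf_len, size_t chunk)
{
	dma_addr_t handle = dma_map_single(dev, buf, buf_len,
					   DMA_BIDIRECTIONAL);

	/* publish only the first chunk of the mapping to the device ... */
	dma_sync_single_for_device(dev, handle, chunk, DMA_TO_DEVICE);

	/* ... and later pull back only what the device actually wrote */
	dma_sync_single_for_cpu(dev, handle, chunk, DMA_FROM_DEVICE);

	dma_unmap_single(dev, handle, buf_len, DMA_BIDIRECTIONAL);
}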
Example #14
static inline dma_addr_t
map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
	dma_addr_t dma_addr;
	int needs_bounce = 0;

	if (device_info)
		DO_STATS ( device_info->map_op_count++ );

	dma_addr = virt_to_dma(dev, ptr);

	if (dev->dma_mask) {
		unsigned long mask = *dev->dma_mask;
		unsigned long limit;

		limit = (mask + 1) & ~mask;
		if (limit && size > limit) {
			dev_err(dev, "DMA mapping too big (requested %#x "
				"mask %#Lx)\n", size, *dev->dma_mask);
			return ~0;
		}

		/*
		 * Figure out if we need to bounce from the DMA mask.
		 */
		needs_bounce = (dma_addr | (dma_addr + size - 1)) & ~mask;
	}

	if (device_info && (needs_bounce || dma_needs_bounce(dev, dma_addr, size))) {
		struct safe_buffer *buf;

		buf = alloc_safe_buffer(device_info, ptr, size, dir);
		if (buf == 0) {
			dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
			       __func__, ptr);
			return 0;
		}

		dev_dbg(dev,
			"%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
			__func__, buf->ptr, (void *) virt_to_dma(dev, buf->ptr),
			buf->safe, (void *) buf->safe_dma_addr);

		if ((dir == DMA_TO_DEVICE) ||
		    (dir == DMA_BIDIRECTIONAL)) {
			dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
				__func__, ptr, buf->safe, size);
			memcpy(buf->safe, ptr, size);
		}
		ptr = buf->safe;

		dma_addr = buf->safe_dma_addr;
	} else {
		/*
		 * We don't need to sync the DMA buffer since
		 * it was allocated via the coherent allocators.
		 */
		consistent_sync(ptr, size, dir);
	}

	return dma_addr;
}