static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
	struct safe_buffer *buf;

	if (device_info)
		DO_STATS ( device_info->map_op_count++ );

	buf = alloc_safe_buffer(device_info, ptr, size, dir);
	if (buf == NULL) {
		dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
			__func__, ptr);
		return ~0;
	}

	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
		buf->safe, buf->safe_dma_addr);

	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) {
		dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
			__func__, ptr, buf->safe, size);
		memcpy(buf->safe, ptr, size);
	}

	return buf->safe_dma_addr;
}
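For orientation, a minimal sketch of the safe_buffer fields this excerpt relies on; the actual structure in dmabounce.c carries additional bookkeeping (list linkage, size, direction, pool pointer) that is not shown here.

struct safe_buffer {
	void		*ptr;		/* original, "unsafe" buffer passed in by the caller */
	void		*safe;		/* CPU address of the bounce buffer */
	dma_addr_t	safe_dma_addr;	/* DMA address of the bounce buffer */
	/* the real structure also carries list linkage, size, direction, ... */
};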
static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
	dma_addr_t dma_addr;
	int needs_bounce = 0;

	if (device_info)
		DO_STATS ( device_info->map_op_count++ );

	dma_addr = virt_to_dma(dev, ptr);

	if (dev->dma_mask) {
		unsigned long mask = *dev->dma_mask;
		unsigned long limit;

		limit = (mask + 1) & ~mask;
		if (limit && size > limit) {
			dev_err(dev, "DMA mapping too big (requested %#x "
				"mask %#Lx)\n", size, *dev->dma_mask);
			return ~0;
		}

		needs_bounce = (dma_addr | (dma_addr + size - 1)) & ~mask;
	}

	if (device_info && (needs_bounce || dma_needs_bounce(dev, dma_addr, size))) {
		struct safe_buffer *buf;

		buf = alloc_safe_buffer(device_info, ptr, size, dir);
		if (buf == 0) {
			dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
				__func__, ptr);
			return 0;
		}

		dev_dbg(dev,
			"%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
			__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
			buf->safe, buf->safe_dma_addr);

		if ((dir == DMA_TO_DEVICE) ||
		    (dir == DMA_BIDIRECTIONAL)) {
			dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
				__func__, ptr, buf->safe, size);
			memcpy(buf->safe, ptr, size);
		}
		ptr = buf->safe;

		dma_addr = buf->safe_dma_addr;
	} else {
		dma_cache_maint(ptr, size, dir);
	}

	return dma_addr;
}
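The bounce decision above is pure mask arithmetic. The following standalone sketch (hypothetical values, not taken from dmabounce.c) evaluates the same limit and needs_bounce expressions for a 24-bit DMA mask, showing that a buffer straddling the 16 MB boundary is flagged for bouncing.

#include <stdio.h>

int main(void)
{
	unsigned long mask = 0x00ffffff;	/* hypothetical 24-bit DMA mask */
	unsigned long dma_addr = 0x00fff000;	/* buffer starts 4 KB below 16 MB */
	unsigned long size = 0x2000;		/* 8 KB, so it crosses the boundary */

	/* same expression as map_single(): maximum mapping size, 16 MB here */
	unsigned long limit = (mask + 1) & ~mask;

	/* nonzero if either end of [dma_addr, dma_addr + size) lies above the mask */
	unsigned long needs_bounce = (dma_addr | (dma_addr + size - 1)) & ~mask;

	printf("limit        = %#lx\n", limit);		/* 0x1000000 */
	printf("needs_bounce = %#lx\n", needs_bounce);	/* nonzero -> bounce */
	return 0;
}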
static inline dma_addr_t map_single_or_page(struct device *dev, void *ptr,
		struct page *page, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
	dma_addr_t dma_addr;
	int needs_bounce = 0;

	if (device_info)
		DO_STATS ( device_info->map_op_count++ );

	if (page)
		dma_addr = page_to_dma(dev, page) + offset;
	else
		dma_addr = virt_to_dma(dev, ptr);

	if (dev->dma_mask) {
		unsigned long mask = *dev->dma_mask;
		unsigned long limit;

		limit = (mask - 1) | mask;
		limit = (limit + 1) & ~limit;
		if (limit && size > limit) {
			dev_err(dev, "DMA mapping too big (requested %#x "
				"mask %#Lx)\n", size, *dev->dma_mask);
			return ~0;
		}

		/*
		 * Figure out if we need to bounce from the DMA mask.
		 */
		needs_bounce = (dma_addr & ~mask) ||
			(limit && (dma_addr + size > limit));
	}

	if (device_info && (needs_bounce || dma_needs_bounce(dev, dma_addr, size))) {
		struct safe_buffer *buf;

		buf = alloc_safe_buffer(device_info, ptr, page, offset, size, dir);
		if (buf == 0) {
			dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
				__func__, ptr);
			return 0;
		}

		if (buf->page)
			dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped "
				"to %p (dma=%#x)\n", __func__,
				page_address(buf->page),
				page_to_dma(dev, buf->page),
				buf->safe, buf->safe_dma_addr);
		else
			dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped "
				"to %p (dma=%#x)\n", __func__,
				buf->ptr, virt_to_dma(dev, buf->ptr),
				buf->safe, buf->safe_dma_addr);

		if ((dir == DMA_TO_DEVICE) ||
		    (dir == DMA_BIDIRECTIONAL)) {
			if (page)
				ptr = kmap_atomic(page, KM_BOUNCE_READ) + offset;
			dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
				__func__, ptr, buf->safe, size);
			memcpy(buf->safe, ptr, size);
			wmb();
			if (page)
				kunmap_atomic(ptr - offset, KM_BOUNCE_READ);
		}

		dma_addr = buf->safe_dma_addr;
	} else {
		/*
		 * We don't need to sync the DMA buffer since
		 * it was allocated via the coherent allocators.
		 */
		if (page)
			__dma_page_cpu_to_dev(page, offset, size, dir);
		else
			__dma_single_cpu_to_dev(ptr, size, dir);
	}

	return dma_addr;
}
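The limit computation differs between the variants: the two-step form above first rounds the mask up to the next 2^n - 1 value before deriving the size limit, so a mask that is not already of that form yields a sensible limit rather than a tiny one. A small sketch with a hypothetical mask of 0x00fffff0 illustrates the difference; for a mask that is already 2^n - 1, both forms agree.

#include <stdio.h>

int main(void)
{
	unsigned long mask = 0x00fffff0;	/* hypothetical mask, not of the 2^n - 1 form */
	unsigned long one_step, two_step;

	/* single-step form used by the earlier map_single() variants */
	one_step = (mask + 1) & ~mask;		/* 0x1: almost any mapping looks "too big" */

	/* two-step form used by map_single_or_page(): round up to 2^n - 1 first */
	two_step = (mask - 1) | mask;		/* 0x00ffffff */
	two_step = (two_step + 1) & ~two_step;	/* 0x01000000, i.e. a 16 MB limit */

	printf("one-step limit = %#lx\n", one_step);
	printf("two-step limit = %#lx\n", two_step);
	return 0;
}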
static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
	dma_addr_t dma_addr;
	int needs_bounce = 0;

	if (device_info)
		DO_STATS ( device_info->map_op_count++ );

	dma_addr = virt_to_dma(dev, ptr);

	if (dev->dma_mask) {
		unsigned long mask = *dev->dma_mask;
		unsigned long limit;

		limit = (mask + 1) & ~mask;
		if (limit && size > limit) {
			dev_err(dev, "DMA mapping too big (requested %#x "
				"mask %#Lx)\n", size, *dev->dma_mask);
			return ~0;
		}

		/*
		 * Figure out if we need to bounce from the DMA mask.
		 */
		needs_bounce = (dma_addr | (dma_addr + size - 1)) & ~mask;
	}

	if (device_info && (needs_bounce || dma_needs_bounce(dev, dma_addr, size))) {
		struct safe_buffer *buf;

		buf = alloc_safe_buffer(device_info, ptr, size, dir);
		if (buf == 0) {
			dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
				__func__, ptr);
			return 0;
		}

		dev_dbg(dev,
			"%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
			__func__, buf->ptr, (void *) virt_to_dma(dev, buf->ptr),
			buf->safe, (void *) buf->safe_dma_addr);

		if ((dir == DMA_TO_DEVICE) ||
		    (dir == DMA_BIDIRECTIONAL)) {
			dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
				__func__, ptr, buf->safe, size);
			memcpy(buf->safe, ptr, size);
		}
		ptr = buf->safe;

		dma_addr = buf->safe_dma_addr;
	} else {
		/*
		 * We don't need to sync the DMA buffer since
		 * it was allocated via the coherent allocators.
		 */
		consistent_sync(ptr, size, dir);
	}

	return dma_addr;
}
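For context, a minimal sketch (assumed, not taken from the excerpts above) of how the exported dma_map_single() wrapper in dmabounce.c hands a streaming mapping request to map_single(); the exact debug output and sanity checks vary between kernel versions.

dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	/* reject invalid directions before attempting to map or bounce */
	BUG_ON(!valid_dma_direction(dir));

	return map_single(dev, ptr, size, dir);
}
EXPORT_SYMBOL(dma_map_single);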