static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
                         gfp_t gfp, pgprot_t prot)
{
        struct page *page;
        void *addr;

        /*
         * Following is a work-around (a.k.a. hack) to prevent pages
         * with __GFP_COMP being passed to split_page() which cannot
         * handle them.  The real problem is that this flag probably
         * should be 0 on ARM as it is not supported on this
         * platform--see CONFIG_HUGETLB_PAGE.
         */
        gfp &= ~(__GFP_COMP);

        *handle = ~0;
        size = PAGE_ALIGN(size);

        page = __dma_alloc_buffer(dev, size, gfp);
        if (!page)
                return NULL;

        if (!arch_is_coherent())
                addr = __dma_alloc_remap(page, size, gfp, prot);
        else
                addr = page_address(page);

        if (addr)
                *handle = page_to_dma(dev, page);

        return addr;
}
static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
                         gfp_t gfp, pgprot_t prot)
{
        struct page *page;
        void *addr;

        *handle = ~0;
        size = PAGE_ALIGN(size);

        page = __dma_alloc_buffer(dev, size, gfp);
        if (!page)
                return NULL;

        if (!arch_is_coherent())
                addr = __dma_alloc_remap(page, size, gfp, prot);
        else
                addr = page_address(page);

        if (addr)
                *handle = page_to_dma(dev, page);
        else
                /* Remap failed: release the pages instead of leaking them. */
                __dma_free_buffer(page, size);

        return addr;
}
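The two __dma_alloc() variants above differ only in the error path: the second releases the freshly allocated pages when __dma_alloc_remap() fails rather than leaking them. For context, here is a minimal sketch of how the ARM dma_alloc_coherent() of this vintage drove __dma_alloc(); the pgprot_dmacoherent() argument is an assumption based on mainline kernels of the period and may differ in this tree:

void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle,
                         gfp_t gfp)
{
        void *memory;

        /* A per-device coherent pool, if present, is tried first. */
        if (dma_alloc_from_coherent(dev, size, handle, &memory))
                return memory;

        /* Otherwise allocate and remap with an uncacheable protection. */
        return __dma_alloc(dev, size, handle, gfp,
                           pgprot_dmacoherent(pgprot_kernel));
}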
static inline void unmap_page(struct device *dev, dma_addr_t dma_addr,
                              size_t size, enum dma_data_direction dir)
{
        struct safe_buffer *buf = find_safe_buffer_dev(dev, dma_addr, "unmap");

        if (buf) {
                BUG_ON(buf->size != size);
                BUG_ON(buf->direction != dir);
                BUG_ON(!buf->page);
                BUG_ON(buf->ptr);

                dev_dbg(dev,
                        "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
                        __func__, page_address(buf->page),
                        page_to_dma(dev, buf->page), buf->safe,
                        buf->safe_dma_addr);

                DO_STATS(dev->archdata.dmabounce->bounce_count++);

                if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
                        void *ptr;

                        /* Copy the bounced data back into the original page. */
                        ptr = kmap_atomic(buf->page, KM_BOUNCE_READ) + buf->offset;
                        memcpy(ptr, buf->safe, size);
                        __cpuc_flush_dcache_area(ptr, size);
                        kunmap_atomic(ptr - buf->offset, KM_BOUNCE_READ);
                }
                free_safe_buffer(dev->archdata.dmabounce, buf);
        } else {
                __dma_page_dev_to_cpu(dma_to_page(dev, dma_addr),
                                      dma_addr & ~PAGE_MASK, size, dir);
        }
}
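unmap_page() is the bounce-aware half of the unmap path: if the handle refers to a bounce buffer, the data is copied back and the safe buffer freed; otherwise only cache maintenance is performed. A sketch of the exported wrapper, assuming this tree follows the usual dmabounce pattern (the exact wrapper name and any logging vary between trees):

void dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
                    enum dma_data_direction dir)
{
        /* Hedged sketch: delegate straight to the bounce-aware helper. */
        unmap_page(dev, dma_addr, size, dir);
}
EXPORT_SYMBOL(dma_unmap_page);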
static inline dma_addr_t mshci_s3c_dma_map_page(struct device *dev,
                                                struct page *page,
                                                unsigned long offset,
                                                size_t size,
                                                enum dma_data_direction dir,
                                                int flush_type)
{
        BUG_ON(!valid_dma_direction(dir));

        /* Make the CPU's view of the page visible to the device. */
        mshci_s3c_dma_page_cpu_to_dev(page, offset, size, dir, flush_type);

        return page_to_dma(dev, page) + offset;
}
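A hypothetical call site, mapping one scatterlist element for a device write; sg, dev, and flush_type are assumed to come from the surrounding driver code:

        struct scatterlist *sg = data->sg;      /* assumed driver state */
        dma_addr_t addr;

        addr = mshci_s3c_dma_map_page(dev, sg_page(sg), sg->offset,
                                      sg->length, DMA_TO_DEVICE, flush_type);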
static void __ispstat_buf_sync_magic(struct ispstat *stat,
                                     struct ispstat_buffer *buf,
                                     u32 buf_size, enum dma_data_direction dir,
                                     void (*dma_sync)(struct device *,
                                                      dma_addr_t,
                                                      unsigned long, size_t,
                                                      enum dma_data_direction))
{
        struct device *dev = stat->isp->dev;
        struct page *pg;
        dma_addr_t dma_addr;
        u32 offset;

        /* Initial magic words */
        pg = vmalloc_to_page(buf->virt_addr);
        dma_addr = page_to_dma(dev, pg);
        dma_sync(dev, dma_addr, 0, MAGIC_SIZE, dir);

        /* Final magic words */
        pg = vmalloc_to_page(buf->virt_addr + buf_size);
        dma_addr = page_to_dma(dev, pg);
        offset = ((u32)buf->virt_addr + buf_size) & ~PAGE_MASK;
        dma_sync(dev, dma_addr, offset, MAGIC_SIZE, dir);
}
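Because the dma_sync callback matches the signature of the dma_sync_single_range_for_* helpers, thin wrappers can pass them in directly. A sketch of the for-device direction, modelled on the omap3isp driver (the wrapper name is an assumption):

static void ispstat_buf_sync_magic_for_device(struct ispstat *stat,
                                              struct ispstat_buffer *buf,
                                              u32 buf_size,
                                              enum dma_data_direction dir)
{
        /* Sync only the magic words, not the whole statistics buffer. */
        __ispstat_buf_sync_magic(stat, buf, buf_size, dir,
                                 dma_sync_single_range_for_device);
}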
static inline dma_addr_t map_single_or_page(struct device *dev, void *ptr,
                                            struct page *page,
                                            unsigned long offset, size_t size,
                                            enum dma_data_direction dir)
{
        struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
        dma_addr_t dma_addr;
        int needs_bounce = 0;

        if (device_info)
                DO_STATS(device_info->map_op_count++);

        if (page)
                dma_addr = page_to_dma(dev, page) + offset;
        else
                dma_addr = virt_to_dma(dev, ptr);

        if (dev->dma_mask) {
                unsigned long mask = *dev->dma_mask;
                unsigned long limit;

                limit = (mask - 1) | mask;
                limit = (limit + 1) & ~limit;
                if (limit && size > limit) {
                        dev_err(dev, "DMA mapping too big (requested %#x "
                                "mask %#Lx)\n", size, *dev->dma_mask);
                        return ~0;
                }

                /*
                 * Figure out if we need to bounce from the DMA mask.
                 */
                needs_bounce = (dma_addr & ~mask) ||
                               (limit && (dma_addr + size > limit));
        }

        if (device_info &&
            (needs_bounce || dma_needs_bounce(dev, dma_addr, size))) {
                struct safe_buffer *buf;

                buf = alloc_safe_buffer(device_info, ptr, page, offset,
                                        size, dir);
                if (!buf) {
                        dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
                                __func__, ptr);
                        /* ~0 is the error cookie dma_mapping_error() checks. */
                        return ~0;
                }

                if (buf->page)
                        dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped "
                                "to %p (dma=%#x)\n", __func__,
                                page_address(buf->page),
                                page_to_dma(dev, buf->page),
                                buf->safe, buf->safe_dma_addr);
                else
                        dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped "
                                "to %p (dma=%#x)\n", __func__,
                                buf->ptr, virt_to_dma(dev, buf->ptr),
                                buf->safe, buf->safe_dma_addr);

                if ((dir == DMA_TO_DEVICE) || (dir == DMA_BIDIRECTIONAL)) {
                        if (page)
                                ptr = kmap_atomic(page, KM_BOUNCE_READ) + offset;
                        dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
                                __func__, ptr, buf->safe, size);
                        memcpy(buf->safe, ptr, size);
                        wmb();
                        if (page)
                                kunmap_atomic(ptr - offset, KM_BOUNCE_READ);
                }
                dma_addr = buf->safe_dma_addr;
        } else {
                /*
                 * We don't need to sync the DMA buffer since
                 * it was allocated via the coherent allocators.
                 */
                if (page)
                        __dma_page_cpu_to_dev(page, offset, size, dir);
                else
                        __dma_single_cpu_to_dev(ptr, size, dir);
        }

        return dma_addr;
}
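map_single_or_page() folds the single-buffer and page mapping paths into one helper; exactly one of ptr and page is expected to be non-NULL. A sketch of the two thin wrappers such a helper implies (the names follow the standard ARM DMA API, but these wrappers themselves are an assumption):

dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
                          enum dma_data_direction dir)
{
        return map_single_or_page(dev, ptr, NULL, 0, size, dir);
}

dma_addr_t dma_map_page(struct device *dev, struct page *page,
                        unsigned long offset, size_t size,
                        enum dma_data_direction dir)
{
        return map_single_or_page(dev, NULL, page, offset, size, dir);
}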
static int msmsdcc_config_dma(struct msmsdcc_host *host, struct mmc_data *data)
{
        struct msmsdcc_nc_dmadata *nc;
        dmov_box *box;
        uint32_t rows;
        uint32_t crci;
        unsigned int n;
        int i, rc;
        struct scatterlist *sg = data->sg;

        rc = validate_dma(host, data);
        if (rc)
                return rc;

        host->dma.sg = data->sg;
        host->dma.num_ents = data->sg_len;

        BUG_ON(host->dma.num_ents > NR_SG); /* Prevent memory corruption */

        nc = host->dma.nc;

        if (host->pdev_id == 1)
                crci = MSMSDCC_CRCI_SDC1;
        else if (host->pdev_id == 2)
                crci = MSMSDCC_CRCI_SDC2;
        else if (host->pdev_id == 3)
                crci = MSMSDCC_CRCI_SDC3;
        else if (host->pdev_id == 4)
                crci = MSMSDCC_CRCI_SDC4;
        else {
                host->dma.sg = NULL;
                host->dma.num_ents = 0;
                return -ENOENT;
        }

        if (data->flags & MMC_DATA_READ)
                host->dma.dir = DMA_FROM_DEVICE;
        else
                host->dma.dir = DMA_TO_DEVICE;

        /* host->curr.user_pages = (data->flags & MMC_DATA_USERPAGE); */
        host->curr.user_pages = 0;

        box = &nc->cmd[0];
        for (i = 0; i < host->dma.num_ents; i++) {
                box->cmd = CMD_MODE_BOX;

                /* Initialize sg dma address */
                sg->dma_address = page_to_dma(mmc_dev(host->mmc), sg_page(sg))
                                  + sg->offset;

                if (i == (host->dma.num_ents - 1))
                        box->cmd |= CMD_LC;
                rows = (sg_dma_len(sg) % MCI_FIFOSIZE) ?
                        (sg_dma_len(sg) / MCI_FIFOSIZE) + 1 :
                        (sg_dma_len(sg) / MCI_FIFOSIZE);

                if (data->flags & MMC_DATA_READ) {
                        box->src_row_addr = msmsdcc_fifo_addr(host);
                        box->dst_row_addr = sg_dma_address(sg);

                        box->src_dst_len = (MCI_FIFOSIZE << 16) |
                                           (MCI_FIFOSIZE);
                        box->row_offset = MCI_FIFOSIZE;

                        box->num_rows = rows * ((1 << 16) + 1);
                        box->cmd |= CMD_SRC_CRCI(crci);
                } else {
                        box->src_row_addr = sg_dma_address(sg);
                        box->dst_row_addr = msmsdcc_fifo_addr(host);

                        box->src_dst_len = (MCI_FIFOSIZE << 16) |
                                           (MCI_FIFOSIZE);
                        box->row_offset = (MCI_FIFOSIZE << 16);

                        box->num_rows = rows * ((1 << 16) + 1);
                        box->cmd |= CMD_DST_CRCI(crci);
                }
                box++;
                sg++;
        }

        /* location of command block must be 64 bit aligned */
        BUG_ON(host->dma.cmd_busaddr & 0x07);

        nc->cmdptr = (host->dma.cmd_busaddr >> 3) | CMD_PTR_LP;
        host->dma.hdr.cmdptr = DMOV_CMD_PTR_LIST |
                               DMOV_CMD_ADDR(host->dma.cmdptr_busaddr);
        host->dma.hdr.complete_func = msmsdcc_dma_complete_func;

        n = dma_map_sg(mmc_dev(host->mmc), host->dma.sg, host->dma.num_ents,
                       host->dma.dir);

        /* dsb inside dma_map_sg will write nc out to mem as well */

        if (n != host->dma.num_ents) {
                pr_err("%s: Unable to map in all sg elements\n",
                       mmc_hostname(host->mmc));
                host->dma.sg = NULL;
                host->dma.num_ents = 0;
                return -ENOMEM;
        }

        return 0;
}
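msmsdcc_config_dma() returns 0 only when every scatterlist element was mapped, so the caller gates DMA on its result. A minimal sketch of the call site, modelled on msmsdcc_start_data() from the same driver:

        /* Inside the data-start path: fall back to PIO if DMA setup fails. */
        if (!msmsdcc_config_dma(host, data))
                datactrl |= MCI_DPSM_DMAENABLE;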