// helper function, mmap's the allocated area which is physically contiguous
int mmap_kmem(struct file *filp, struct vm_area_struct *vma)
{
	int ret;
	long length = vma->vm_end - vma->vm_start;

	/* check length - do not allow larger mappings than the number of
	   pages allocated */
	if (length > NPAGES * PAGE_SIZE)
		return -EIO;

	/* #ifdef ARCH_HAS_DMA_MMAP_COHERENT */
	if (vma->vm_pgoff == 0) {
		printk(KERN_INFO "Using dma_mmap_coherent\n");
		ret = dma_mmap_coherent(NULL, vma, alloc_ptr,
					dma_handle, length);
	} else /* #else */ {
		printk(KERN_INFO "Using remap_pfn_range\n");
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		vma->vm_flags |= VM_IO;
		/* vm_pgoff is an unsigned long, so print it with %lu */
		printk(KERN_INFO "off=%lu\n", vma->vm_pgoff);
		ret = remap_pfn_range(vma, vma->vm_start,
				      PFN_DOWN(virt_to_phys(bus_to_virt(dma_handle))) +
				      vma->vm_pgoff,
				      length, vma->vm_page_prot);
	}
	/* #endif */

	/* map the whole physically contiguous area in one piece */
	if (ret < 0) {
		printk(KERN_ERR "mmap_alloc: remap failed (%d)\n", ret);
		return ret;
	}

	return 0;
}
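The helper above assumes a buffer that was allocated earlier with dma_alloc_coherent(), which is what produces the alloc_ptr / dma_handle pair it maps. A minimal sketch of that allocation side, assuming the same NPAGES, alloc_ptr, and dma_handle globals as above (the function name alloc_dma_area and its error handling are illustrative, not from the original module):

/* Sketch: allocate the physically contiguous, cache-coherent area
 * that mmap_kmem() later hands out to userspace. */
static void *alloc_ptr;
static dma_addr_t dma_handle;

static int alloc_dma_area(struct device *dev)
{
	alloc_ptr = dma_alloc_coherent(dev, NPAGES * PAGE_SIZE,
				       &dma_handle, GFP_KERNEL);
	if (!alloc_ptr)
		return -ENOMEM;

	return 0;
}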
static int pxa3xx_gcu_misc_mmap(struct file *filp, struct vm_area_struct *vma)
{
	unsigned int size = vma->vm_end - vma->vm_start;
	struct pxa3xx_gcu_priv *priv =
		container_of(filp->f_op, struct pxa3xx_gcu_priv, misc_fops);

	switch (vma->vm_pgoff) {
	case 0:
		/* hand out the shared data area */
		if (size != SHARED_SIZE)
			return -EINVAL;

		return dma_mmap_coherent(NULL, vma, priv->shared,
					 priv->shared_phys, size);

	case SHARED_SIZE >> PAGE_SHIFT:
		/* hand out the MMIO base for direct register access
		 * from userspace */
		if (size != resource_size(priv->resource_mem))
			return -EINVAL;

		vma->vm_flags |= VM_IO;
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

		return io_remap_pfn_range(vma, vma->vm_start,
					  priv->resource_mem->start >> PAGE_SHIFT,
					  size, vma->vm_page_prot);
	}

	return -EINVAL;
}
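The two vm_pgoff cases above correspond to two different mmap() offsets from userspace: offset 0 for the shared data area and offset SHARED_SIZE for the registers. A hedged userspace sketch of that calling convention (the device path /dev/pxa3xx-gcu and the way the sizes are obtained are assumptions, not taken from the driver):

/* Userspace sketch: map the shared area (offset 0) and the MMIO
 * registers (offset shared_size). Path and sizes are assumed. */
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int map_gcu(size_t shared_size, size_t mmio_size)
{
	int fd = open("/dev/pxa3xx-gcu", O_RDWR);
	if (fd < 0)
		return -1;

	void *shared = mmap(NULL, shared_size, PROT_READ | PROT_WRITE,
			    MAP_SHARED, fd, 0);
	void *mmio = mmap(NULL, mmio_size, PROT_READ | PROT_WRITE,
			  MAP_SHARED, fd, shared_size);

	if (shared == MAP_FAILED || mmio == MAP_FAILED) {
		close(fd);
		return -1;
	}

	/* ... use shared and mmio ... */
	return fd;
}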
static int ipq_pcm_mi2s_mmap(struct snd_pcm_substream *substream,
			     struct vm_area_struct *vma)
{
	struct snd_pcm_runtime *runtime = substream->runtime;

	return dma_mmap_coherent(substream->pcm->card->dev, vma,
				 runtime->dma_area, runtime->dma_addr,
				 runtime->dma_bytes);
}
static int mxs_pcm_mmap(struct snd_pcm_substream *substream,
			struct vm_area_struct *vma)
{
	struct snd_pcm_runtime *runtime = substream->runtime;

	return dma_mmap_coherent(NULL, vma, runtime->dma_area,
				 runtime->dma_addr, runtime->dma_bytes);
}
static int ion_cma_mmap(struct ion_heap *mapper, struct ion_buffer *buffer,
			struct vm_area_struct *vma)
{
	struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
	struct device *dev = cma_heap->dev;
	struct ion_cma_buffer_info *info = buffer->priv_virt;

	return dma_mmap_coherent(dev, vma, info->cpu_addr, info->handle,
				 buffer->size);
}
static int ux500_pcm_mmap(struct snd_pcm_substream *substream,
			  struct vm_area_struct *vma)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_soc_pcm_runtime *rtd = substream->private_data;

	dev_dbg(rtd->platform->dev, "%s: Enter.\n", __func__);

	return dma_mmap_coherent(NULL, vma, runtime->dma_area,
				 runtime->dma_addr, runtime->dma_bytes);
}
uint32_t q6usm_get_virtual_address(int dir, struct us_client *usc,
				   struct vm_area_struct *vms)
{
	uint32_t ret = 0xffffffff;

	if (vms && (usc != NULL) && ((dir == IN) || (dir == OUT))) {
		struct us_port_data *port = &usc->port[dir];

		ret = dma_mmap_coherent(NULL, vms, port->data, port->phys,
					port->buf_size * port->buf_cnt);
	}
	return ret;
}
static int msm_afe_mmap(struct snd_pcm_substream *substream,
			struct vm_area_struct *vma)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct pcm_afe_info *prtd = runtime->private_data;

	pr_debug("%s\n", __func__);
	prtd->mmap_flag = 1;

	/* propagate a failed mapping instead of silently returning 0 */
	return dma_mmap_coherent(substream->pcm->card->dev, vma,
				 runtime->dma_area, runtime->dma_addr,
				 runtime->dma_bytes);
}
int msm_pcm_mmap(struct snd_pcm_substream *substream,
		 struct vm_area_struct *vma)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct msm_audio *prtd = runtime->private_data;

	prtd->out_head = 0; /* point to first buffer on startup */
	prtd->mmap_flag = 1;
	runtime->dma_bytes = snd_pcm_lib_period_bytes(substream) * 2;

	/* propagate a failed mapping instead of silently returning 0 */
	return dma_mmap_coherent(substream->pcm->card->dev, vma,
				 runtime->dma_area, runtime->dma_addr,
				 runtime->dma_bytes);
}
/**
 * Called when an 'mmap' system call is made on the device file.
 */
static int dev_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int i;
	struct output_data *tx = filp->private_data;

	/* reset buffer index */
	tx->tail = 0;

	/* clear the statuses */
	for (i = 0; i < RING_BUFFER_ITEM_COUNT; i++)
		tx->ring[i].status = MMAP_STATUS_AVAILABLE;

	/* ensure the output is flushed and not paused */
	do_flush_transfer(tx);
	do_pause_transfer(tx, 0);

	return dma_mmap_coherent(&ax_driver->spi->dev, vma, tx->mem,
				 tx->dma_handle, RING_BUFFER_SIZE);
}
int devdma_mmap(struct device *dev, struct snd_pcm_substream *substream,
		struct vm_area_struct *vma)
{
	struct snd_pcm_runtime *runtime = substream->runtime;

	return dma_mmap_coherent(dev, vma, runtime->dma_area,
				 runtime->dma_addr, runtime->dma_bytes);
}
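devdma_mmap() is a thin, device-agnostic wrapper, so a driver would typically call it from its snd_pcm_ops .mmap callback with the device that owns the DMA buffer. A minimal sketch of that glue, assuming a hypothetical my_chip private structure (my_pcm_mmap and my_pcm_ops are illustrative names, not from any driver shown above):

/* Hypothetical driver glue: forward the ALSA .mmap callback to
 * devdma_mmap() with the owning device. */
static int my_pcm_mmap(struct snd_pcm_substream *substream,
		       struct vm_area_struct *vma)
{
	struct my_chip *chip = snd_pcm_substream_chip(substream);

	return devdma_mmap(chip->dev, substream, vma);
}

static struct snd_pcm_ops my_pcm_ops = {
	/* ... open/close/hw_params/trigger/pointer ... */
	.mmap = my_pcm_mmap,
};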