int vc4_mmap(struct file *filp, struct vm_area_struct *vma) { struct drm_gem_object *gem_obj; struct vc4_bo *bo; int ret; ret = drm_gem_mmap(filp, vma); if (ret) return ret; gem_obj = vma->vm_private_data; bo = to_vc4_bo(gem_obj); if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) { DRM_ERROR("mmaping of shader BOs for writing not allowed.\n"); return -EINVAL; } /* * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map * the whole buffer. */ vma->vm_flags &= ~VM_PFNMAP; vma->vm_pgoff = 0; ret = dma_mmap_writecombine(bo->base.base.dev->dev, vma, bo->base.vaddr, bo->base.paddr, vma->vm_end - vma->vm_start); if (ret) drm_gem_vm_close(vma); return ret; }
int netx_clcd_mmap(struct clcd_fb *fb, struct vm_area_struct *vma) { return dma_mmap_writecombine(&fb->dev->dev, vma, fb->fb.screen_base, fb->fb.fix.smem_start, fb->fb.fix.smem_len); }
/*
 * ion_cma_mmap - map a CMA-backed ION buffer into a userspace VMA.
 *
 * Cached buffers are mapped with normal (non-consistent) attributes;
 * uncached buffers are mapped write-combined.
 *
 * Returns 0 on success or a negative errno from the DMA mapping helpers.
 */
static int ion_cma_mmap(struct ion_heap *mapper, struct ion_buffer *buffer,
			struct vm_area_struct *vma)
{
	struct device *dev = buffer->heap->priv;
	struct ion_cma_buffer_info *info = buffer->priv_virt;

#ifdef CONFIG_TIMA_RKP
	if (buffer->size) {
		/* iommu optimization- needs to be turned ON from
		 * the tz side.
		 */
		cpu_v7_tima_iommu_opt(vma->vm_start, vma->vm_end,
				      (unsigned long)vma->vm_mm->pgd);
		/*
		 * NOTE(review): the mcr targets cp15 c8 (TLB operations),
		 * presumably a TLB invalidate followed by dsb/isb barriers
		 * — confirm against the TIMA/RKP documentation.
		 */
		__asm__ __volatile__ (
			"mcr p15, 0, r0, c8, c3, 0\n"
			"dsb\n"
			"isb\n");
	}
#endif

	if (info->is_cached)
		return dma_mmap_nonconsistent(dev, vma, info->cpu_addr,
					      info->handle, buffer->size);
	else
		return dma_mmap_writecombine(dev, vma, info->cpu_addr,
					     info->handle, buffer->size);
}
/* Map the substream's preallocated DMA buffer into userspace (write-combine). */
int pxa2xx_pcm_mmap(struct snd_pcm_substream *substream,
		    struct vm_area_struct *vma)
{
	struct snd_pcm_runtime *rt = substream->runtime;
	struct device *dev = substream->pcm->card->dev;

	return dma_mmap_writecombine(dev, vma, rt->dma_area,
				     rt->dma_addr, rt->dma_bytes);
}
/* mmap callback: expose the PCM DMA buffer to userspace, write-combined. */
static int nx_pcm_ops_mmap(struct snd_pcm_substream *substream,
			   struct vm_area_struct *vma)
{
	struct snd_pcm_runtime *rt = substream->runtime;
	struct device *dev = substream->pcm->card->dev;

	DBGOUT("%s\n", __func__);

	return dma_mmap_writecombine(dev, vma, rt->dma_area,
				     rt->dma_addr, rt->dma_bytes);
}
/* mmap callback: map the runtime DMA buffer into userspace, write-combined. */
static int rockchip_pcm_mmap(struct snd_pcm_substream *substream,
			     struct vm_area_struct *vma)
{
	struct snd_pcm_runtime *rt = substream->runtime;
	struct device *dev = substream->pcm->card->dev;

	DBG("Enter::%s----%d\n",__FUNCTION__,__LINE__);

	return dma_mmap_writecombine(dev, vma, rt->dma_area,
				     rt->dma_addr, rt->dma_bytes);
}
/* mmap callback: hand the PCM DMA buffer to userspace with write-combine. */
static int dma_mmap(struct snd_pcm_substream *substream,
		    struct vm_area_struct *vma)
{
	struct snd_pcm_runtime *rt = substream->runtime;
	struct device *dev = substream->pcm->card->dev;

	pr_debug("Entered %s\n", __func__);

	return dma_mmap_writecombine(dev, vma, rt->dma_area,
				     rt->dma_addr, rt->dma_bytes);
}
/* mmap callback: map the PDM PCM DMA buffer into userspace, write-combined. */
static int wmt_pdm_pcm_mmap(struct snd_pcm_substream *substream,
			    struct vm_area_struct *vma)
{
	struct snd_pcm_runtime *rt = substream->runtime;
	struct device *dev = substream->pcm->card->dev;

	DBG_DETAIL();

	return dma_mmap_writecombine(dev, vma, rt->dma_area,
				     rt->dma_addr, rt->dma_bytes);
}
/* mmap callback: map the PCM DMA buffer into userspace, write-combined. */
static int s3c24xx_pcm_mmap(struct snd_pcm_substream *substream,
			    struct vm_area_struct *vma)
{
	struct snd_pcm_runtime *rt = substream->runtime;
	struct device *dev = substream->pcm->card->dev;

	s3cdbg("Entered %s\n", __FUNCTION__);

	return dma_mmap_writecombine(dev, vma, rt->dma_area,
				     rt->dma_addr, rt->dma_bytes);
}
/*
 * ion_cma_mmap - map a CMA-backed ION buffer into a userspace VMA.
 *
 * Uncached buffers get a write-combined mapping; cached buffers get a
 * normal (non-consistent) mapping so CPU caches stay in play.
 */
static int ion_cma_mmap(struct ion_heap *mapper, struct ion_buffer *buffer,
			struct vm_area_struct *vma)
{
	struct device *dev = buffer->heap->priv;
	struct ion_cma_buffer_info *info = buffer->priv_virt;

	if (!info->is_cached)
		return dma_mmap_writecombine(dev, vma, info->cpu_addr,
					     info->handle, buffer->size);

	return dma_mmap_nonconsistent(dev, vma, info->cpu_addr,
				      info->handle, buffer->size);
}
/*
 * mmap callback: map the PCM DMA buffer into userspace, write-combined.
 * Back-end (no_pcm) DAI links have no userspace-visible buffer, so the
 * request succeeds without mapping anything.
 */
static int tegra_pcm_mmap(struct snd_pcm_substream *substream,
			  struct vm_area_struct *vma)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_pcm_runtime *rt = substream->runtime;
	struct device *dev = substream->pcm->card->dev;

	if (rtd->dai_link->no_pcm)
		return 0;

	return dma_mmap_writecombine(dev, vma, rt->dma_area,
				     rt->dma_addr, rt->dma_bytes);
}
static int sunxi_pcm_mmap(struct snd_pcm_substream *substream, struct vm_area_struct *vma) { struct snd_pcm_runtime *play_runtime = NULL; struct snd_pcm_runtime *capture_runtime = NULL; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { play_runtime = substream->runtime; return dma_mmap_writecombine(substream->pcm->card->dev, vma, play_runtime->dma_area, play_runtime->dma_addr, play_runtime->dma_bytes); } else { capture_runtime = substream->runtime; return dma_mmap_writecombine(substream->pcm->card->dev, vma, play_runtime->dma_area, play_runtime->dma_addr, play_runtime->dma_bytes); } }
/*
 * stmp3xxxfb_mmap - map the framebuffer into userspace, write-combined.
 *
 * Rejects offsets at or beyond the end of video memory; otherwise the
 * whole framebuffer is mapped.
 */
static int stmp3xxxfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	/* assumes fb_info is the first member of stmp3xxx_fb_data —
	 * TODO confirm against the struct definition */
	struct stmp3xxx_fb_data *data = (struct stmp3xxx_fb_data *)info;
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;

	if (offset >= info->fix.smem_len)
		return -EINVAL;

	return dma_mmap_writecombine(data->dev, vma, data->virt_start,
				     data->phys_start, info->fix.smem_len);
}
/* mmap callback: map the PCM DMA buffer into userspace, write-combined. */
int nusmart_pcm_mmap(struct snd_pcm_substream *substream,
		     struct vm_area_struct *vma)
{
	struct snd_pcm_runtime *rt = substream->runtime;
	struct device *dev = substream->pcm->card->dev;

	DBG_PRINT("nusmart_pcm_mmap, area = %x, phy_addr = %x, bytes = %d\n",
		  (unsigned int)rt->dma_area,
		  (unsigned int)rt->dma_addr,
		  rt->dma_bytes);

	return dma_mmap_writecombine(dev, vma, rt->dma_area,
				     rt->dma_addr, rt->dma_bytes);
}
static int sunxi_pcm_mmap(struct snd_pcm_substream *substream, struct vm_area_struct *vma) { struct snd_pcm_runtime *runtime = NULL; if (substream->runtime!=NULL) { runtime = substream->runtime; return dma_mmap_writecombine(substream->pcm->card->dev, vma, runtime->dma_area, runtime->dma_addr, runtime->dma_bytes); } else { return -1; } }
/*
 * mmap callback: map the PCM DMA buffer into userspace, write-combined.
 * A missing runtime is treated as success (nothing to map), matching the
 * original behavior.
 */
static int hi3630_srcup_normal_mmap(struct snd_pcm_substream *substream,
				    struct vm_area_struct *vma)
{
	struct snd_pcm_runtime *runtime = substream->runtime;

	if (runtime == NULL)
		return 0;

	return dma_mmap_writecombine(substream->pcm->card->dev, vma,
				     runtime->dma_area, runtime->dma_addr,
				     runtime->dma_bytes);
}
/* mmap callback: map the PCM DMA buffer, logging the result for debug. */
int snd_imx_pcm_mmap(struct snd_pcm_substream *substream,
		     struct vm_area_struct *vma)
{
	struct snd_pcm_runtime *rt = substream->runtime;
	int err;

	err = dma_mmap_writecombine(substream->pcm->card->dev, vma,
				    rt->dma_area, rt->dma_addr,
				    rt->dma_bytes);
	pr_debug("%s: ret: %d %p 0x%08x 0x%08x\n", __func__, err,
		 rt->dma_area, rt->dma_addr, rt->dma_bytes);
	return err;
}
static int cns3xxx_pcm_mmap(struct snd_pcm_substream *substream, struct vm_area_struct *vma) { struct snd_pcm_runtime *runtime = substream->runtime; #ifdef __DEBUG_PATH printk("%s=>%d\n", __FUNCTION__, __LINE__); #endif // printk("runtime->dma_area: 0x%08x\n", (unsigned int)runtime->dma_area); // printk("runtime->dma_addr: 0x%08x\n", runtime->dma_addr); // printk("runtime->dma_bytes: 0x%08x\n", runtime->dma_bytes); return dma_mmap_writecombine(substream->pcm->card->dev, vma, runtime->dma_area, runtime->dma_addr, runtime->dma_bytes); }
/*
 * rockchip_pcm_mmap - map the audio DMA buffer into userspace.
 *
 * With CONFIG_RK_SRAM_DMA the buffer lives in on-chip SRAM, outside the
 * DMA API's reach, so the pages are remapped directly by physical frame
 * number; otherwise the standard write-combined DMA mapping is used.
 *
 * Returns 0 on success or a negative errno.
 */
static int rockchip_pcm_mmap(struct snd_pcm_substream *substream,
			     struct vm_area_struct *vma)
{
	struct snd_pcm_runtime *runtime = substream->runtime;

	DBG("Enter::%s----%d\n",__FUNCTION__,__LINE__);

#ifdef CONFIG_RK_SRAM_DMA
	/* Write-combine the user mapping, then remap the SRAM frames. */
	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	return remap_pfn_range(vma, vma->vm_start,
			       substream->dma_buffer.addr >> PAGE_SHIFT,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
#else
	return dma_mmap_writecombine(substream->pcm->card->dev, vma,
				     runtime->dma_area,
				     runtime->dma_addr,
				     runtime->dma_bytes);
#endif
}
/*
 * drm_gem_cma_mmap_obj - remap a CMA GEM object's backing store into a VMA.
 *
 * Called after drm_gem_mmap() has set up the VMA.  On failure the VMA's
 * close handler is invoked explicitly to drop the object reference
 * drm_gem_mmap() took, since the mmap error path will not call it.
 *
 * Returns 0 on success or a negative errno from dma_mmap_writecombine().
 */
static int drm_gem_cma_mmap_obj(struct drm_gem_cma_object *cma_obj,
				struct vm_area_struct *vma)
{
	int ret;

	/*
	 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
	 * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
	 * the whole buffer.
	 */
	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_pgoff = 0;

	ret = dma_mmap_writecombine(cma_obj->base.dev->dev, vma,
				    cma_obj->vaddr, cma_obj->paddr,
				    vma->vm_end - vma->vm_start);
	if (ret)
		drm_gem_vm_close(vma);

	return ret;
}
static int exynos_drm_fb_mmap(struct fb_info *info, struct vm_area_struct * vma) { int ret; DRM_DEBUG_KMS("pgoff: 0x%lx vma: 0x%lx - 0x%lx (0x%lx)\n", vma->vm_pgoff, vma->vm_start, vma->vm_end, vma->vm_end - vma->vm_start); DRM_DEBUG_KMS("screen: 0x%p (0x%lx) smem: 0x%lx (0x%x)\n", info->screen_base, info->screen_size, info->fix.smem_start, info->fix.smem_len); vma->vm_pgoff = 0; ret = dma_mmap_writecombine(info->device, vma, info->screen_base, info->fix.smem_start, vma->vm_end - vma->vm_start); if (ret) printk(KERN_ERR "Remapping memory failed, error: %d\n", ret); return ret; }
/*
 * tegra_drm_mmap - mmap entry point for Tegra GEM objects.
 *
 * After drm_gem_mmap() sets up the VMA, contiguous (IOVA/CMA-backed)
 * buffers are remapped through the DMA API, while page-backed buffers
 * are left to be faulted in page-by-page through the GEM vm_ops, with
 * a write-combined, VM_MIXEDMAP protection.
 *
 * Returns 0 on success or a negative errno.
 */
int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem;
	struct tegra_bo *bo;
	int ret;

	ret = drm_gem_mmap(file, vma);
	if (ret)
		return ret;

	gem = vma->vm_private_data;
	bo = to_tegra_bo(gem);

	if (!bo->pages) {
		/*
		 * Save the DRM fake offset: dma_mmap_writecombine()
		 * needs vm_pgoff to be 0, but the original value is
		 * restored afterwards for anyone inspecting the VMA.
		 */
		unsigned long vm_pgoff = vma->vm_pgoff;

		vma->vm_flags &= ~VM_PFNMAP;
		vma->vm_pgoff = 0;

		ret = dma_mmap_writecombine(gem->dev->dev, vma, bo->vaddr,
					    bo->paddr, gem->size);
		if (ret) {
			/* Drop the reference drm_gem_mmap() took. */
			drm_gem_vm_close(vma);
			return ret;
		}

		vma->vm_pgoff = vm_pgoff;
	} else {
		/* Page-backed: fault pages in lazily via vm_ops. */
		pgprot_t prot = vm_get_page_prot(vma->vm_flags);

		vma->vm_flags |= VM_MIXEDMAP;
		vma->vm_flags &= ~VM_PFNMAP;

		vma->vm_page_prot = pgprot_writecombine(prot);
	}

	return 0;
}
/*
 * lpc313x_pcm_trigger - start/stop audio DMA for an ALSA trigger command.
 *
 * With CONFIG_SND_USE_DMA_LINKLIST a circular scatter/gather list is
 * built (one descriptor per period, last one linking back to the first)
 * and a kernel timer is armed to poll the DMA position; without it a
 * single flat transfer covering two periods is programmed.
 *
 * Returns 0 on success or -EINVAL for unhandled commands.
 */
static int lpc313x_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct snd_pcm_runtime *rtd = substream->runtime;
	struct lpc313x_dma_data *prtd = rtd->private_data;
	int ret = 0;
#if defined (CONFIG_SND_USE_DMA_LINKLIST)
	int i, tch;
	u32 addr;
	dma_sg_ll_t *p_sg_cpuw, *p_sg_dmaw;
	unsigned long timeout;

	/* Position-timer slot: 0 for playback, 1 for capture. */
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		tch = 0;
	} else {
		tch = 1;
	}

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
		prtd->dma_cur = prtd->dma_buffer;
		p_sg_cpuw = prtd->p_sg_cpu;	/* CPU view of the SG table */
		p_sg_dmaw = prtd->p_sg_dma;	/* bus view of the same table */

		/* Build a linked list that wraps around */
		addr = (u32) prtd->dma_buffer;
		for (i = 0; i < prtd->num_periods; i++) {
			/* Transfer length is in 32-bit words, minus one. */
			p_sg_cpuw->setup.trans_length =
				(prtd->period_size / 4) - 1;
			p_sg_cpuw->setup.cfg = prtd->dma_cfg_base;
			/* Link to the next descriptor (bus address). */
			p_sg_cpuw->next_entry = (u32) (p_sg_dmaw + 1);
			if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
				p_sg_cpuw->setup.src_address = addr;
				p_sg_cpuw->setup.dest_address = TX_FIFO_ADDR;
			} else {
				p_sg_cpuw->setup.dest_address = addr;
				p_sg_cpuw->setup.src_address = RX_FIFO_ADDR;
			}

			/* Wrap end of list back to start? */
			if (i == (prtd->num_periods - 1))
				p_sg_cpuw->next_entry = (u32) prtd->p_sg_dma;

			p_sg_cpuw++;
			p_sg_dmaw++;
			addr += prtd->period_size;
		}

		/* Add and start audio data position timer */
		init_timer(&prtd->timer[tch]);
		prtd->timer[tch].data = (unsigned long) substream;
		prtd->timer[tch].function = lpc313x_check_dmall;
		prtd->timer[tch].expires = jiffies + MINTICKINC;
		add_timer(&prtd->timer[tch]);

		/* Program DMA channel and start it */
		dma_prog_sg_channel(prtd->dmach, (u32) prtd->p_sg_dma);
		dma_set_irq_mask(prtd->dmach, 1, 1);
#else
	dma_setup_t dmasetup;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
		prtd->dma_cur = prtd->dma_buffer;
		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
			dmasetup.src_address = (u32) prtd->dma_buffer;
			dmasetup.dest_address = TX_FIFO_ADDR;
		} else {
			dmasetup.dest_address = (u32) prtd->dma_buffer;
			dmasetup.src_address = RX_FIFO_ADDR;
		}
		dmasetup.cfg = prtd->dma_cfg_base;
		/* Flat transfer: two periods, in 32-bit words minus one. */
		dmasetup.trans_length = (2 * prtd->period_size / 4) - 1;

		/* Program DMA channel and start it */
		dma_prog_channel(prtd->dmach, &dmasetup);
		dma_set_irq_mask(prtd->dmach, 0, 0);
#endif
		dma_start_channel(prtd->dmach);
		break;

	case SNDRV_PCM_TRIGGER_STOP:
#if defined (CONFIG_SND_USE_DMA_LINKLIST)
		del_timer_sync(&prtd->timer[tch]);
#endif
		/* Stop the companion channel and let the current DMA transfer
		   finish */
		dma_stop_channel_sg(prtd->dmach);
		/*
		 * NOTE(review): 'timeout' is declared only inside the
		 * CONFIG_SND_USE_DMA_LINKLIST branch above, so this case
		 * presumably does not compile without that option —
		 * confirm the intended configuration.
		 */
		timeout = jiffies + (HZ / 20);
		while ((dma_channel_enabled(prtd->dmach)) &&
		       (jiffies < timeout)) {
			cpu_relax();
		}
//		dma_stop_channel(prtd->dmach);
		break;

	case SNDRV_PCM_TRIGGER_SUSPEND:
	case SNDRV_PCM_TRIGGER_RESUME:
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		break;

	default:
		ret = -EINVAL;
	}

	return ret;
}

/*
 * lpc313x_pcm_pointer - report the current DMA position in frames.
 *
 * Converts the byte offset of the in-flight DMA pointer into frames and
 * wraps it to 0 at the buffer boundary, as ALSA expects.
 */
static snd_pcm_uframes_t lpc313x_pcm_pointer(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct lpc313x_dma_data *prtd = runtime->private_data;
	snd_pcm_uframes_t x;

	/* Return an offset into the DMA buffer for the next data */
	x = bytes_to_frames(runtime,
			    (prtd->dma_cur - runtime->dma_addr));
	if (x >= runtime->buffer_size)
		x = 0;
	return x;
}

/*
 * lpc313x_pcm_open - per-substream setup.
 *
 * Installs the hardware constraints and allocates the per-stream DMA
 * bookkeeping structure; the DMA channel itself is claimed later
 * (dmach is initialized to -1 meaning "none").
 */
static int lpc313x_pcm_open(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct lpc313x_dma_data *prtd;
	int ret = 0;

	snd_soc_set_runtime_hwparams(substream, &lpc313x_pcm_hardware);

	/* ensure that buffer size is a multiple of period size */
	ret = snd_pcm_hw_constraint_integer(runtime,
					    SNDRV_PCM_HW_PARAM_PERIODS);
	if (ret < 0)
		goto out;

	prtd = kzalloc(sizeof(*prtd), GFP_KERNEL);
	if (prtd == NULL) {
		ret = -ENOMEM;
		goto out;
	}
	runtime->private_data = prtd;
	prtd->dmach = -1;

out:
	return ret;
}

/* lpc313x_pcm_close - free the per-stream bookkeeping allocated in open. */
static int lpc313x_pcm_close(struct snd_pcm_substream *substream)
{
	struct lpc313x_dma_data *prtd = substream->runtime->private_data;

	kfree(prtd);
	return 0;
}

/* lpc313x_pcm_mmap - map the DMA buffer into userspace, write-combined. */
static int lpc313x_pcm_mmap(struct snd_pcm_substream *substream,
			    struct vm_area_struct *vma)
{
	struct snd_pcm_runtime *runtime = substream->runtime;

	return dma_mmap_writecombine(substream->pcm->card->dev, vma,
				     runtime->dma_area, runtime->dma_addr,
				     runtime->dma_bytes);
}

/* ALSA PCM operations for this platform driver. */
static struct snd_pcm_ops lpc313x_pcm_ops = {
	.open = lpc313x_pcm_open,
	.close = lpc313x_pcm_close,
	.ioctl = snd_pcm_lib_ioctl,
	.hw_params = lpc313x_pcm_hw_params,
	.hw_free = lpc313x_pcm_hw_free,
	.prepare = lpc313x_pcm_prepare,
	.trigger = lpc313x_pcm_trigger,
	.pointer = lpc313x_pcm_pointer,
	.mmap = lpc313x_pcm_mmap,
};

/*
 * ASoC platform driver
 */
/*
 * lpc313x_pcm_new - per-PCM construction: set DMA masks and preallocate
 * playback/capture DMA buffers for whichever directions the DAI supports.
 *
 * Returns 0 on success or the error from buffer allocation.
 */
static int lpc313x_pcm_new(struct snd_card *card, struct snd_soc_dai *dai,
			   struct snd_pcm *pcm)
{
	int ret = 0;

	if (!card->dev->dma_mask)
		card->dev->dma_mask = &lpc313x_pcm_dmamask;
	if (!card->dev->coherent_dma_mask)
		card->dev->coherent_dma_mask = 0xffffffff;

	if (dai->playback.channels_min) {
		ret = lpc313x_pcm_allocate_dma_buffer(
				pcm, SNDRV_PCM_STREAM_PLAYBACK);
		if (ret)
			goto out;
	}
	if (dai->capture.channels_min) {
		pr_debug("%s: Allocating PCM capture DMA buffer\n", SND_NAME);
		ret = lpc313x_pcm_allocate_dma_buffer(
				pcm, SNDRV_PCM_STREAM_CAPTURE);
		if (ret)
			goto out;
	}

out:
	return ret;
}