Example #1
int omap_tiler_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
			     struct vm_area_struct *vma)
{
	struct omap_tiler_info *info = buffer->priv_virt;
	unsigned long addr = vma->vm_start;
	u32 vma_pages = (vma->vm_end - vma->vm_start) / PAGE_SIZE;
	int n_pages = min(vma_pages, info->n_tiler_pages);
	int i, ret = 0;

	if (TILER_PIXEL_FMT_PAGE == info->fmt) {
		/* Since 1D buffer is linear, map whole buffer in one shot */
		ret = remap_pfn_range(vma, addr,
				 __phys_to_pfn(info->tiler_addrs[0]),
				(vma->vm_end - vma->vm_start),
				(buffer->cached ?
				(vma->vm_page_prot)
				: pgprot_writecombine(vma->vm_page_prot)));
	} else {
		for (i = vma->vm_pgoff; i < n_pages; i++, addr += PAGE_SIZE) {
			ret = remap_pfn_range(vma, addr,
				 __phys_to_pfn(info->tiler_addrs[i]),
				PAGE_SIZE,
				pgprot_writecombine(vma->vm_page_prot));
			if (ret)
				return ret;
		}
	}
	return ret;
}
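For context, a userspace client reaches a handler like this through a plain mmap() call on the driver's file descriptor. A minimal sketch, assuming a hypothetical device node path and buffer length; note the MAP_SHARED flag, which several of the examples below require explicitly for writable mappings:

#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int map_device_buffer(const char *dev_path, size_t len)
{
	int fd = open(dev_path, O_RDWR);
	if (fd < 0)
		return -1;

	/* This mmap() call lands in the driver's handler, e.g.
	 * omap_tiler_heap_map_user() above. */
	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (buf == MAP_FAILED) {
		close(fd);
		return -1;
	}

	/* ... use buf ... */

	munmap(buf, len);
	close(fd);
	return 0;
}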
Example #2
static int mfc_mmap(struct file *filp, struct vm_area_struct *vma)
{
	unsigned long vir_size = vma->vm_end - vma->vm_start;
	unsigned long phy_size, firmware_size;
	unsigned long page_frame_no = 0;
	struct mfc_inst_ctx *mfc_ctx;

	mfc_debug("vma->vm_start = 0x%08x, vma->vm_end = 0x%08x\n",
			(unsigned int)vma->vm_start,
			(unsigned int)vma->vm_end);
	mfc_debug("vma->vm_end - vma->vm_start = %ld\n", vir_size);

	mfc_ctx = (struct mfc_inst_ctx *)filp->private_data;

	firmware_size = mfc_get_port0_buff_paddr() - mfc_get_fw_buff_paddr();
	phy_size = (unsigned long)(mfc_port0_memsize - firmware_size + mfc_port1_memsize);

	/* if the memory size requested by the application's mmap() is bigger
	 * than the maximum data memory size allocated in the driver */
	if (vir_size > phy_size) {
		mfc_err("virtual requested mem(%ld) is bigger than physical mem(%ld)\n",
				vir_size, phy_size);
		return -EINVAL;
	}
#ifdef CONFIG_MACH_ARIES
	mfc_ctx->port0_mmap_size = mfc_port0_memsize - firmware_size;
#else // CONFIG_MACH_P1
	mfc_ctx->port0_mmap_size = (vir_size / 2);
#endif

	vma->vm_flags |= VM_RESERVED | VM_IO;
	if (mfc_ctx->buf_type != MFC_BUFFER_CACHE)
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	/*
	 * port0 mapping for stream buf & frame buf (chroma + MV)
	 */
	page_frame_no = __phys_to_pfn(mfc_get_port0_buff_paddr());
	if (remap_pfn_range(vma, vma->vm_start, page_frame_no,
		mfc_ctx->port0_mmap_size, vma->vm_page_prot)) {
		mfc_err("mfc remap port0 error\n");
		return -EAGAIN;
	}

	vma->vm_flags |= VM_RESERVED | VM_IO;
	if (mfc_ctx->buf_type != MFC_BUFFER_CACHE)
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	/*
	 * port1 mapping for frame buf (luma)
	 */
	page_frame_no = __phys_to_pfn(mfc_get_port1_buff_paddr());
	if (remap_pfn_range(vma, vma->vm_start + mfc_ctx->port0_mmap_size,
		page_frame_no, vir_size - mfc_ctx->port0_mmap_size, vma->vm_page_prot)) {
		mfc_err("mfc remap port1 error\n");
		return -EAGAIN;
	}

	mfc_debug("virtual requested mem = %ld, physical reserved data mem = %ld\n", vir_size, phy_size);

	return 0;
}
Example #3
static int gsl_kmod_mmap(struct file *fd, struct vm_area_struct *vma)
{
    int status = 0;
    unsigned long start = vma->vm_start;
    unsigned long pfn = vma->vm_pgoff;
    unsigned long size = vma->vm_end - vma->vm_start;
    unsigned long prot = pgprot_writecombine(vma->vm_page_prot);
    unsigned long addr = vma->vm_pgoff << PAGE_SHIFT;
    void *va = NULL;

    if (gsl_driver.enable_mmu && (addr < GSL_LINUX_MAP_RANGE_END) && (addr >= GSL_LINUX_MAP_RANGE_START)) {
        va = gsl_linux_map_find(addr);
        while (size > 0) {
            if (remap_pfn_range(vma, start, vmalloc_to_pfn(va), PAGE_SIZE, prot)) {
                return -EAGAIN;
            }
            start += PAGE_SIZE;
            va += PAGE_SIZE;
            size -= PAGE_SIZE;
        }
    } else {
        if (remap_pfn_range(vma, start, pfn, size, prot)) {
            status = -EAGAIN;
        }
    }

    vma->vm_ops = &gsl_kmod_vmops;

    return status;
}
Example #4
int ion_cp_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
			struct vm_area_struct *vma, unsigned long flags)
{
	int ret_value = -EAGAIN;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	mutex_lock(&cp_heap->lock);
	if (cp_heap->heap_protected == HEAP_NOT_PROTECTED) {
		if (ion_cp_request_region(cp_heap)) {
			mutex_unlock(&cp_heap->lock);
			return -EINVAL;
		}

		if (ION_IS_CACHED(flags))
			ret_value = remap_pfn_range(vma, vma->vm_start,
				__phys_to_pfn(buffer->priv_phys) +
				vma->vm_pgoff,
				vma->vm_end - vma->vm_start,
				vma->vm_page_prot);
		else
			ret_value = remap_pfn_range(vma, vma->vm_start,
				__phys_to_pfn(buffer->priv_phys) +
				vma->vm_pgoff,
				vma->vm_end - vma->vm_start,
				pgprot_noncached(vma->vm_page_prot));

		if (ret_value)
			ion_cp_release_region(cp_heap);
		else
			++cp_heap->umap_count;
	}
	mutex_unlock(&cp_heap->lock);
	return ret_value;
}
Example #5
int jpeg_mmap(struct file *filp, struct vm_area_struct *vma)
{
#if defined(CONFIG_S5P_SYSMMU_JPEG)
#if !defined(CONFIG_S5P_VMEM)
	unsigned long	page_frame_no;
	unsigned long	start;
	unsigned long	size;
	char		*ptr;	/* vmalloc */

	size = vma->vm_end - vma->vm_start;
	ptr = (char *)jpeg_ctrl->mem.base;
	start = 0;

	vma->vm_flags |= VM_RESERVED | VM_IO;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	while (size > 0) {
		page_frame_no = vmalloc_to_pfn(ptr);
		if (remap_pfn_range(vma, vma->vm_start + start, page_frame_no,
			PAGE_SIZE, vma->vm_page_prot)) {
			jpeg_err("failed to remap jpeg pfn range.\n");
			return -ENOMEM;
		}

		start += PAGE_SIZE;
		ptr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
#endif /* CONFIG_S5P_VMEM */
#else
	unsigned long	page_frame_no;
	unsigned long	size;
	int		ret;

	size = vma->vm_end - vma->vm_start;
	
	if (!cma_is_registered_region(jpeg_ctrl->mem.base, size)) {
		pr_err("[%s] handling non-cma region (%#x@%#x) is prohibited\n",
			__func__, (unsigned int)size, jpeg_ctrl->mem.base);
		return -EINVAL;
	}

	vma->vm_flags |= VM_RESERVED | VM_IO;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	page_frame_no = __phys_to_pfn(jpeg_ctrl->mem.base);
	ret = remap_pfn_range(vma, vma->vm_start, page_frame_no,
					size, vma->vm_page_prot);
	if (ret != 0) {
		jpeg_err("failed to remap jpeg pfn range.\n");
		return -ENOMEM;
	}
#endif /* SYSMMU_JPEG_ON */

	return 0;
}
Example #6
static int s3c_fimc_mmap(struct file* filp, struct vm_area_struct *vma)
{
	struct s3c_fimc_control *ctrl = filp->private_data;
	struct s3c_fimc_out_frame *frame = &ctrl->out_frame;

	u32 size = vma->vm_end - vma->vm_start;
	u32 pfn, total_size = frame->buf_size;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_flags |= VM_RESERVED;

	/* page frame number of the address where the source frame will be stored. */
	pfn = __phys_to_pfn(frame->addr[vma->vm_pgoff].phys_y);

	if (size > total_size) {
		err("the size of mapping is too big\n");
		return -EINVAL;
	}

	if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED)) {
		err("writable mapping must be shared\n");
		return -EINVAL;
	}

	if (remap_pfn_range(vma, vma->vm_start, pfn, size, vma->vm_page_prot)) {
		err("mmap fail\n");
		return -EINVAL;
	}

	return 0;
}
Example #7
File: pci.c Project: hugh-smtl/linux-2.6
/**
 * pci_mmap_legacy_page_range - map legacy memory space to userland
 * @bus: bus whose legacy space we're mapping
 * @vma: vma passed in by mmap
 *
 * Map legacy memory space for this device back to userspace using a machine
 * vector to get the base address.
 */
int
pci_mmap_legacy_page_range(struct pci_bus *bus, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;
	pgprot_t prot;
	char *addr;

	/*
	 * Avoid attribute aliasing.  See Documentation/ia64/aliasing.txt
	 * for more details.
	 */
	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
		return -EINVAL;
	prot = phys_mem_access_prot(NULL, vma->vm_pgoff, size,
				    vma->vm_page_prot);

	addr = pci_get_legacy_mem(bus);
	if (IS_ERR(addr))
		return PTR_ERR(addr);

	vma->vm_pgoff += (unsigned long)addr >> PAGE_SHIFT;
	vma->vm_page_prot = prot;

	if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			    size, vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}
Example #8
/*!
 * V4L2 interface - mmap function
 *
 * @param file          structure file *
 *
 * @param vma           structure vm_area_struct *
 *
 * @return status       0 Success, EINTR busy lock error,
 *                      ENOBUFS remap_page error
 */
static int mxc_v4l2out_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct video_device *dev = file->private_data;
	unsigned long size;
	int res = 0;
	vout_data *vout = video_get_drvdata(dev);

	DPRINTK("pgoff=0x%lx, start=0x%lx, end=0x%lx\n",
		vma->vm_pgoff, vma->vm_start, vma->vm_end);

	/* make this _really_ smp-safe */
	if (down_interruptible(&vout->busy_lock))
		return -EINTR;

	size = vma->vm_end - vma->vm_start;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	if (remap_pfn_range(vma, vma->vm_start,
		vma->vm_pgoff, size, vma->vm_page_prot)) {
		printk("mxc_mmap(V4L)i - remap_pfn_range failed\n");
		res = -ENOBUFS;
		goto mxc_mmap_exit;
	}

	vma->vm_flags &= ~VM_IO; /* using shared anonymous pages */

mxc_mmap_exit:
	up(&vout->busy_lock);
	return res;
}
Example #9
File: pci.c Project: ivucica/linux
int
pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma,
		     enum pci_mmap_state mmap_state, int write_combine)
{
	/*
	 * I/O space cannot be accessed via normal processor loads and
	 * stores on this platform.
	 */
	if (mmap_state == pci_mmap_io)
		/*
		 * XXX we could relax this for I/O spaces for which ACPI
		 * indicates that the space is 1-to-1 mapped.  But at the
		 * moment, we don't support multiple PCI address spaces and
		 * the legacy I/O space is not 1-to-1 mapped, so this is moot.
		 */
		return -EINVAL;

	/*
	 * Leave vm_pgoff as-is, the PCI space address is the physical
	 * address on this platform.
	 */
	if (write_combine && efi_range_is_wc(vma->vm_start,
					     vma->vm_end - vma->vm_start))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	else
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			     vma->vm_end - vma->vm_start, vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}
Example #10
static int dlfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	unsigned long start = vma->vm_start;
	unsigned long size = vma->vm_end - vma->vm_start;
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	unsigned long page, pos;

	printk("MMAP: %lu %u\n", offset + size, info->fix.smem_len);

	if (offset + size > info->fix.smem_len)
		return -EINVAL;

	pos = (unsigned long)info->fix.smem_start + offset;

	while (size > 0) {
		page = vmalloc_to_pfn((void *)pos);
		if (remap_pfn_range(vma, start, page, PAGE_SIZE, PAGE_SHARED))
			return -EAGAIN;

		start += PAGE_SIZE;
		pos += PAGE_SIZE;
		if (size > PAGE_SIZE)
			size -= PAGE_SIZE;
		else
			size = 0;
	}

	vma->vm_flags |= VM_RESERVED;	/* avoid swapping this VMA out */
	return 0;

}
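The page-by-page loop above is needed because the framebuffer lives in vmalloc space: it is virtually contiguous but physically scattered, so every page's PFN has to be looked up with vmalloc_to_pfn(). A sketch of the one-call alternative, assuming the buffer was allocated with vmalloc_user(); remap_vmalloc_range() rejects areas without the VM_USERMAP flag, so it is not a drop-in replacement for a plain vmalloc() buffer:

static int dlfb_mmap_vmalloc(struct fb_info *info, struct vm_area_struct *vma)
{
	/* remap_vmalloc_range() performs the same per-page walk internally
	 * and validates the requested size against the vmalloc area. */
	return remap_vmalloc_range(vma, (void *)info->fix.smem_start,
				   vma->vm_pgoff);
}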
Example #11
File: dk_func.c Project: KHATEEBNSIT/AP
static int dk_mmap
(
 	struct file *file,
	struct vm_area_struct *vma
)
{
#if defined(__HAVE_PHYS_MEM_ACCESS_PROT)
        unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;

        vma->vm_page_prot = phys_mem_access_prot(file, offset,
                                                 vma->vm_end - vma->vm_start,
                                                 vma->vm_page_prot);
#elif defined(pgprot_noncached)
        unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
        int uncached;

        uncached = uncached_access(file, offset);
        if (uncached)
                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#endif

        /* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
        if (remap_pfn_range(vma,
                            vma->vm_start,
                            vma->vm_pgoff,
                            vma->vm_end-vma->vm_start,
                            vma->vm_page_prot))
                return -EAGAIN;

        return 0;
}
Example #12
// helper function, mmap's the allocated area which is physically contiguous
int mmap_kmem(struct file *filp, struct vm_area_struct *vma)
{
        int ret;
        long length = vma->vm_end - vma->vm_start;

        /* check length - do not allow larger mappings than the number of
           pages allocated */
        if (length > NPAGES * PAGE_SIZE)
                return -EIO;
/* #ifdef ARCH_HAS_DMA_MMAP_COHERENT */
	if (vma->vm_pgoff == 0) {
		printk(KERN_INFO "Using dma_mmap_coherent\n");
		ret = dma_mmap_coherent(NULL, vma, alloc_ptr,
					dma_handle, length);
	} else
/* #else */
	{
		printk(KERN_INFO "Using remap_pfn_range\n");
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		vma->vm_flags |= VM_IO;
		printk(KERN_INFO "off=%d\n", vma->vm_pgoff);
	        ret = remap_pfn_range(vma, vma->vm_start,
			      PFN_DOWN(virt_to_phys(bus_to_virt(dma_handle))) +
			      vma->vm_pgoff, length, vma->vm_page_prot);
	}
/* #endif */
        /* map the whole physically contiguous area in one piece */
        if (ret < 0) {
		printk(KERN_ERR "mmap_alloc: remap failed (%d)\n", ret);
		return ret;
        }
        
        return 0;
}
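dma_mmap_coherent() pairs with a buffer obtained from dma_alloc_coherent(). A minimal sketch of the allocation side this handler presupposes; alloc_ptr, dma_handle and NPAGES are the module-level names used above, while the device pointer and the function name are assumptions:

#include <linux/dma-mapping.h>

static void *alloc_ptr;		/* kernel virtual address of the buffer */
static dma_addr_t dma_handle;	/* bus address handed to the device */

static int mmap_alloc_init(struct device *dev)
{
	alloc_ptr = dma_alloc_coherent(dev, NPAGES * PAGE_SIZE,
				       &dma_handle, GFP_KERNEL);
	return alloc_ptr ? 0 : -ENOMEM;
}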
Example #13
static int mips_dma_mmap(struct device *dev, struct vm_area_struct *vma,
	void *cpu_addr, dma_addr_t dma_addr, size_t size,
	unsigned long attrs)
{
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long addr = (unsigned long)cpu_addr;
	unsigned long off = vma->vm_pgoff;
	unsigned long pfn;
	int ret = -ENXIO;

	if (!plat_device_is_coherent(dev))
		addr = CAC_ADDR(addr);

	pfn = page_to_pfn(virt_to_page((void *)addr));

	if (attrs & DMA_ATTR_WRITE_COMBINE)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	else
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off < count && user_count <= (count - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      user_count << PAGE_SHIFT,
				      vma->vm_page_prot);
	}

	return ret;
}
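The `off < count && user_count <= (count - off)` condition is the standard overflow-safe bounds check for partial mappings: the window of user_count pages starting at page offset off must fit inside the count pages actually allocated, and writing the comparison subtraction-first avoids a wrapping addition. Factored out as a sketch:

/* True if [off, off + user_count) lies inside a buffer of 'count' pages. */
static bool mmap_window_ok(unsigned long off, unsigned long user_count,
			   unsigned long count)
{
	return off < count && user_count <= (count - off);
}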
Example #14
//only for test
static int vpp_mmap(struct file *file, struct vm_area_struct *vma)
{
    unsigned long start = 0;
    unsigned long size = 0;

    logi();

    if (NULL == vma)
    {
        loge("can not get vm_area_struct!");
        return -EBADF;
    }

    start = vma->vm_start;
    size = vma->vm_end - vma->vm_start;

    /* map the buffers non-cacheable */
    vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); //update to 2.6.32
    if (remap_pfn_range(vma, start, vma->vm_pgoff, size, vma->vm_page_prot))
    {
        loge("remap_pfn_range error!");
        return -ENOBUFS;
    }

    return 0;
}
Example #15
static int sram_mmap(struct file *file, struct vm_area_struct *vma)
{
    unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
    unsigned long size = vma->vm_end - vma->vm_start;
    int reg_num = iminor(file->f_path.dentry->d_inode);

    dev_dbg(dev, "sram_mmap: rdev major = %d, rdev minor = %d\n",
            imajor(file->f_path.dentry->d_inode), reg_num);

    if (offset + size > region_table[reg_num].size)
        return -EINVAL;

    offset += region_table[reg_num].start_phys;

    /* here we have to set the "right" value */
    vma->vm_pgoff = offset >> PAGE_SHIFT;
    pgprot_val( vma->vm_page_prot ) |= ( _PAGE_USER );

    /* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
    if (remap_pfn_range(vma,
                        vma->vm_start,
                        vma->vm_pgoff,
                        size,
                        vma->vm_page_prot))
        return -EAGAIN;

    return 0;
}
Example #16
File: mmap_phys.c Project: wangxiaoq/MEI
int assemble_vma (struct phys_mem_session* session, struct vm_area_struct * vma){
  unsigned long request_iterator;
  int insert_status = 0;

  for (request_iterator = 0; request_iterator < session->num_frame_stati; request_iterator++){
    struct phys_mem_frame_status* frame_status = &session->frame_stati[request_iterator];

    if ( frame_status->page) {
      //split_page(frame_status->page, 0);
      //insert_status  = vm_insert_page(vma,vma->vm_start + frame_status->vma_offset_of_first_byte, frame_status->page);
      vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
      insert_status = remap_pfn_range(vma,
                                      vma->vm_start + frame_status->vma_offset_of_first_byte,
                                      page_to_pfn(frame_status->page),
                                      PAGE_SIZE,
                                      vma->vm_page_prot);

      if  (unlikely(insert_status)){
        /* Oops! We could not insert our page. This should not really happen,
         * so we just log it and mark it in the configuration. */
        printk(KERN_WARNING "Could not insert page %p into VMA! Reason: %d", frame_status->page, insert_status);
        frame_status->actual_source |= SOURCE_ERROR_NOT_MAPPABLE;
        goto out;
      }
    }
  }

 out:
  return  insert_status;
}
Example #17
static int udl_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	unsigned long start = vma->vm_start;
	unsigned long size = vma->vm_end - vma->vm_start;
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	unsigned long page, pos;

	if (offset + size > info->fix.smem_len)
		return -EINVAL;

	pos = (unsigned long)info->fix.smem_start + offset;

	pr_notice("mmap() framebuffer addr:%lu size:%lu\n",
		  pos, size);

	while (size > 0) {
		page = vmalloc_to_pfn((void *)pos);
		if (remap_pfn_range(vma, start, page, PAGE_SIZE, PAGE_SHARED))
			return -EAGAIN;

		start += PAGE_SIZE;
		pos += PAGE_SIZE;
		if (size > PAGE_SIZE)
			size -= PAGE_SIZE;
		else
			size = 0;
	}

	/* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by remap_pfn_range() */
	return 0;
}
Example #18
static int shm_driver_mmap_cache(struct file *filp, struct vm_area_struct *vma)
{
	unsigned long pfn, vsize;
	shm_device_t *pDevice;
	struct shm_device_priv_data *priv_data
			= (struct shm_device_priv_data*)filp->private_data;
	if (NULL == priv_data) {
		shm_error("shm_driver_mmap_cache NULL private data\n");
		return -ENOTTY;
	}

	pDevice = (shm_device_t*)priv_data->m_device;
	if (NULL == pDevice) {
		shm_error("shm_driver_mmap_cache NULL shm device\n");
		return -ENOTTY;
	}

	pfn = pDevice->m_base >> PAGE_SHIFT;
	vsize = vma->vm_end - vma->vm_start;

	shm_debug("shm_driver_mmap_nocache size = 0x%08lX(0x%x, 0x%x), base:0x%x\n",
				vsize, shm_size_cache, pDevice->m_size, pDevice->m_base);

	if (vsize > shm_size_cache)
		return -EINVAL;

	vma->vm_pgoff = 0;	// skip offset

	if (remap_pfn_range(vma, vma->vm_start, pfn, vsize, vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}
Example #19
static int ion_kernel_mapper_map_user(struct ion_mapper *mapper,
				      struct ion_buffer *buffer,
				      struct vm_area_struct *vma,
				      struct ion_mapping *mapping)
{
	int ret;

	switch (buffer->heap->type) {
	case ION_HEAP_KMALLOC:
	{
		unsigned long pfn = __phys_to_pfn(virt_to_phys(buffer->priv));
		ret = remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
				      vma->vm_end - vma->vm_start,
				      vma->vm_page_prot);
		break;
	}
	case ION_HEAP_VMALLOC:
		ret = remap_vmalloc_range(vma, buffer->priv, vma->vm_pgoff);
		break;
	default:
		pr_err("%s: attempting to map unsupported heap to userspace\n",
		       __func__);
		return -EINVAL;
	}

	return ret;
}
Example #20
//------------------------------------------------------------------------------
static int SYSRAM_mmap(
    struct file*            pFile,
    struct vm_area_struct*  pVma)
{
    //LOG_MSG("");
    pVma->vm_page_prot = pgprot_noncached(pVma->vm_page_prot);
    long length = pVma->vm_end - pVma->vm_start;
    MUINT32 pfn=pVma->vm_pgoff<<PAGE_SHIFT;//vm_pgoff carries the page frame number; shifted up it is the physical address of the kernel memory
    LOG_WRN("pVma->vm_pgoff(0x%x),phy(0x%x),pVmapVma->vm_start(0x%x),pVma->vm_end(0x%x),length(0x%x)",\
            pVma->vm_pgoff,pVma->vm_pgoff<<PAGE_SHIFT,pVma->vm_start,pVma->vm_end,length);
    if((length>ISP_VALID_REG_RANGE) || (pfn<IMGSYS_BASE_ADDR) || (pfn>(IMGSYS_BASE_ADDR+ISP_VALID_REG_RANGE)))
    {
        LOG_ERR("mmap range error : vm_start(0x%x),vm_end(0x%x),length(0x%x),pfn(0x%x)!",pVma->vm_start,pVma->vm_end,length,pfn);
        return -EAGAIN;
    }
    if(remap_pfn_range(
                pVma,
                pVma->vm_start,
                pVma->vm_pgoff,
                pVma->vm_end - pVma->vm_start,
                pVma->vm_page_prot))
    {
        LOG_ERR("fail");
        return -EAGAIN;
    }
    return 0;
}
Example #21
static inline int fimc_mmap_cap(struct file *filp, struct vm_area_struct *vma)
{
	struct fimc_control *ctrl = filp->private_data;
	u32 size = vma->vm_end - vma->vm_start;
	u32 pfn, idx = vma->vm_pgoff;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_flags |= VM_RESERVED;

	/*
	 * page frame number of the address where the source frame
	 * will be stored.
	 */
	pfn = __phys_to_pfn(ctrl->cap->bufs[idx].base[0]);

	if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED)) {
		fimc_err("%s: writable mapping must be shared\n", __func__);
		return -EINVAL;
	}

	if (remap_pfn_range(vma, vma->vm_start, pfn, size, vma->vm_page_prot)) {
		fimc_err("%s: mmap fail\n", __func__);
		return -EINVAL;
	}

	return 0;
}
Example #22
static int tzasc_mmap(struct file *filp, struct vm_area_struct *vma)
{
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	return remap_pfn_range(vma, vma->vm_start, __phys_to_pfn(tzasc_buf_phys) + vma->vm_pgoff,
			vma->vm_end - vma->vm_start, vma->vm_page_prot);
}
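Example #22 is the tersest form of the pattern: honoring vm_pgoff by adding it to the base PFN lets userspace map at an offset into the buffer. It does, however, trust the caller's size. A sketch with the usual bounds check added; tzasc_buf_size is a hypothetical companion to tzasc_buf_phys:

static int tzasc_mmap_checked(struct file *filp, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;
	unsigned long off = vma->vm_pgoff << PAGE_SHIFT;

	if (off + size > tzasc_buf_size)	/* hypothetical buffer size */
		return -EINVAL;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	return remap_pfn_range(vma, vma->vm_start,
			__phys_to_pfn(tzasc_buf_phys) + vma->vm_pgoff,
			size, vma->vm_page_prot);
}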
Example #23
static int g2d_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long pfn = 0;
	unsigned long size;

	size = vma->vm_end - vma->vm_start;
	pfn = __phys_to_pfn(g2d->mem->start);

	if (size > G2D_MMAP_SIZE) {
		printk(KERN_ERR "g2d: invalid mmap size\n");
		return -EINVAL;
	}

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED)) {
		printk(KERN_ERR "g2d: mapping must be shared\n");
		return -EINVAL;
	}

	/* remap kernel memory to userspace */
	if (remap_pfn_range(vma, vma->vm_start, pfn, size, vma->vm_page_prot)) {
		printk(KERN_ERR "g2d: failed to mmap\n");
		return -EINVAL;
	}

	return 0;
}
Example #24
int cs598_dev_mmap(struct file *fp, struct vm_area_struct *vma)
{
	int ret,i;
	unsigned long length = vma->vm_end - vma->vm_start;

	if (length > (NPAGES * HASH_SIZE)) {
		return -EIO;
	}

	/* Done for every page */
	for (i=0; i < length; i+=PAGE_SIZE) {
		/* Remap every page into the virtual address space of the user
		   process. This is required so that the process can access the
		   memory with the correct privileges; otherwise the MMU will
		   report a violation. */
		if ((ret = remap_pfn_range(vma,
					   vma->vm_start + i,
					   /* Convert virtual address to page frame number */
					   vmalloc_to_pfn((void*)(((unsigned long)vmalloc_buffer)
								  + i)),
					   PAGE_SIZE,
					   vma->vm_page_prot)) < 0) {
			printk(KERN_INFO "cs598:mmap failed");
			return ret;
		}
	}
	printk(KERN_INFO "cs598:mmap successful");
	return 0;
}
Example #25
static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
                    void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
    int ret = -ENXIO;
#ifdef CONFIG_MMU
    unsigned long user_size, kern_size;
    struct arm_vmregion *c;

    user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;

    c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr);
    if (c) {
        unsigned long off = vma->vm_pgoff;

        kern_size = (c->vm_end - c->vm_start) >> PAGE_SHIFT;

        if (off < kern_size &&
                user_size <= (kern_size - off)) {
            ret = remap_pfn_range(vma, vma->vm_start,
                                  page_to_pfn(c->vm_pages) + off,
                                  user_size << PAGE_SHIFT,
                                  vma->vm_page_prot);
        }
    }
#endif	/* CONFIG_MMU */

    return ret;
}
Example #26
int s3c_g3d_mmap(struct file* file, struct vm_area_struct *vma)
{
	struct g3d_context *ctx = file->private_data;
	struct g3d_drvdata *data = ctx->data;
	unsigned long pfn;
	size_t size = vma->vm_end - vma->vm_start;

	pfn = __phys_to_pfn(data->mem->start);

	if(size > resource_size(data->mem)) {
		dev_err(data->dev, "mmap size bigger than G3D SFR block\n");
		return -EINVAL;
	}

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED)) {
		dev_err(data->dev, "mmap of G3D SFR block must be shared\n");
		return -EINVAL;
	}

	if (remap_pfn_range(vma, vma->vm_start, pfn, size, vma->vm_page_prot)) {
		dev_err(data->dev, "remap_pfn range failed\n");
		return -EINVAL;
	}

	dev_dbg(data->dev, "hardware mapped by %p\n", ctx);

	return 0;
}
Example #27
int s3c_jpeg_mmap(struct file *filp, struct vm_area_struct *vma)
{
	unsigned long size	= vma->vm_end - vma->vm_start;
	unsigned long maxSize;
	unsigned long pageFrameNo;

	pageFrameNo = __phys_to_pfn(jpg_data_base_addr);

	maxSize = JPG_TOTAL_BUF_SIZE + PAGE_SIZE - (JPG_TOTAL_BUF_SIZE % PAGE_SIZE);

	if(size > maxSize) {
		return -EINVAL;
	}

	vma->vm_flags |= VM_RESERVED | VM_IO;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	if( remap_pfn_range(vma, vma->vm_start, pageFrameNo, size,	\
				vma->vm_page_prot) ) {
		log_msg(LOG_ERROR, "s3c_jpeg_mmap", "jpeg remap error");
		return -EAGAIN;
	}

	return 0;
}
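The open-coded rounding of maxSize is almost, but not exactly, the standard PAGE_ALIGN() macro: when JPG_TOTAL_BUF_SIZE is already a multiple of PAGE_SIZE, the expression above adds one whole extra page, whereas the one-line substitution below would leave the value unchanged:

	/* rounds up only when needed, unlike the expression above */
	maxSize = PAGE_ALIGN(JPG_TOTAL_BUF_SIZE);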
Example #28
/************************************************************************************************************
 *  
 *  MODULE TYPE	:	FUNCTION				MODULE ID	: 
 *  Name	:	omap_mmap
 *  Parameter1	:	struct file *file
 *  Parameter2	:	struct vm_area_struct *vma
 *  Returns	:	INT32	- On success returns 0
 *  				- On failure a negative number is returned
 *  Description	: 	perform mmap operation 
 *  Comments	:  	
 ************************************************************************************************************/
INT32 omap_mmap(struct file *file, struct vm_area_struct *vma)
{
	#define VGA_IMAGE_SIZE	(640 *480 *2)

	ULINT32 size;
	INT32 res = DISABLE;

	pr_debug("pgoff=0x%lx, start=0x%lx, end=0x%lx\n",vma->vm_pgoff, vma->vm_start, vma->vm_end);

	size = vma->vm_end - vma->vm_start;

	if (size > VGA_IMAGE_SIZE)
	{
		vma->vm_page_prot = PAGE_SHARED;
	} else
	{
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	}

	if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, size, vma->vm_page_prot))
	{
		printk(KERN_ERR "omap_mmap: remap_pfn_range failed\n");
		res = -ENOBUFS;
		goto omap_mmap_exit;
	}

	vma->vm_flags |= VM_RESERVED;
	omap_mmap_exit:
	{
		return res;
	}
}
Example #29
static int _tee_shm_dma_buf_mmap(struct dma_buf *dmabuf,
				struct vm_area_struct *vma)
{
	struct tee_shm *shm = dmabuf->priv;
	size_t size = vma->vm_end - vma->vm_start;
	struct tee *tee;
	int ret;
	pgprot_t prot;
	unsigned long pfn;

	tee = shm->ctx->tee;

	pfn = shm->paddr >> PAGE_SHIFT;

	INMSG();

	if (shm->flags & TEE_SHM_CACHED)
		prot = vma->vm_page_prot;
	else
		prot = pgprot_noncached(vma->vm_page_prot);

	ret = remap_pfn_range(vma, vma->vm_start, pfn, size, prot);
	if (!ret)
		vma->vm_private_data = (void *)shm;

	dev_dbg(_DEV(shm->ctx->tee), "%s: map the shm (p@=%p,s=%dKiB) => %x\n",
		__func__, (void *)shm->paddr, (int)size / 1024,
		(unsigned int)vma->vm_start);

	OUTMSG(ret);
	return ret;
}
Example #30
static int hwlog_mmap(struct file *filp,
				  struct vm_area_struct *vma)
{
	size_t size = vma->vm_end - vma->vm_start;

	//hwlog_mem_debug();

	if (hwlog_memory_map_perm_check(vma)) {
		pr_err("hwlog map out of memory range\n");
		return -EPERM;
	}

	hwlog_debug("hwlog start=0x%lx, size=%zx, vm_pgoff=0x%lx\n", 
			vma->vm_start, size, vma->vm_pgoff);

	vma->vm_ops = &hwlog_mmap_ops;

	/* Remap-pfn-range will mark the range VM_IO */
	if (remap_pfn_range(vma,
			    vma->vm_start,
			    vma->vm_pgoff,
			    size,
			    vma->vm_page_prot)) {
		pr_err("hwlog remap pfn range failed\n");
		return -EAGAIN;
	}

	hwlog_vma_open(vma);
	hwlog_debug("hwlog mmap sucess\n");
	return 0;
}
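hwlog_mmap() installs hwlog_mmap_ops and then calls hwlog_vma_open() by hand because the ->open() hook only runs automatically when an existing VMA is duplicated or split (fork, mremap), not on the initial mmap(). A sketch of what such an operations table might look like; the close hook and the log messages are assumptions:

static void hwlog_vma_open(struct vm_area_struct *vma)
{
	pr_debug("hwlog vma open: 0x%lx-0x%lx\n", vma->vm_start, vma->vm_end);
}

static void hwlog_vma_close(struct vm_area_struct *vma)
{
	pr_debug("hwlog vma close: 0x%lx-0x%lx\n", vma->vm_start, vma->vm_end);
}

static const struct vm_operations_struct hwlog_mmap_ops = {
	.open	= hwlog_vma_open,
	.close	= hwlog_vma_close,
};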