Example #1
static int allocate_dsa(void)
{
	free_dsa();

	g_dsa = __vmalloc(sizeof(struct px_tp_dsa),
                          GFP_KERNEL,
                          pgprot_noncached(PAGE_KERNEL));

	if (!g_dsa)
		return -ENOMEM;

	memset(g_dsa, 0, sizeof(struct px_tp_dsa));

	return 0;
}
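The free_dsa() counterpart is called above but not shown in the snippet. A minimal sketch consistent with this allocator (an assumption, not the driver's actual code):

/* Hypothetical counterpart to allocate_dsa(): vfree() releases memory
 * obtained from __vmalloc() and accepts NULL safely. */
static void free_dsa(void)
{
	vfree(g_dsa);
	g_dsa = NULL;
}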
Example #2
static int idma_mmap(struct snd_pcm_substream *substream,
	struct vm_area_struct *vma)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	unsigned long size, offset;
	int ret;

	/* From snd_pcm_lib_mmap_iomem */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	size = vma->vm_end - vma->vm_start;
	offset = vma->vm_pgoff << PAGE_SHIFT;
	ret = io_remap_pfn_range(vma, vma->vm_start,
			(runtime->dma_addr + offset) >> PAGE_SHIFT,
			size, vma->vm_page_prot);

	return ret;
}
Example #3
/* This function maps kernel space memory to user space memory. */
static int bridge_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct omap_dsp_platform_data *pdata =
					omap_dspbridge_dev->dev.platform_data;

	/* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by remap_pfn_range() */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	dev_dbg(bridge, "%s: vm filp %p start %lx end %lx page_prot %lx flags %lx\n",
		__func__, filp, vma->vm_start, vma->vm_end,
		pgprot_val(vma->vm_page_prot), vma->vm_flags);

	return vm_iomap_memory(vma,
			       pdata->phys_mempool_base,
			       pdata->phys_mempool_size);
}
Example #4
static int acdb_mmap(struct file *file, struct vm_area_struct *vma)
{
	int result = 0;
	uint32_t size = vma->vm_end - vma->vm_start;

	pr_debug("%s\n", __func__);

	if (atomic64_read(&acdb_data.mem_len)) {
		if (size <= atomic64_read(&acdb_data.mem_len)) {
			vma->vm_page_prot = pgprot_noncached(
						vma->vm_page_prot);
			result = remap_pfn_range(vma,
				vma->vm_start,
				atomic64_read(&acdb_data.paddr) >> PAGE_SHIFT,
				size,
				vma->vm_page_prot);
		} else {
Example #5
static int msm_compr_mmap(struct snd_pcm_substream *substream,
				struct vm_area_struct *vma)
{
	int result = 0;
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct compr_audio *compr = runtime->private_data;
	struct msm_audio *prtd = &compr->prtd;

	pr_debug("[AUD]%s\n", __func__);
	prtd->mmap_flag = 1;
	if (runtime->dma_addr && runtime->dma_bytes) {
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		result = remap_pfn_range(vma, vma->vm_start,
				runtime->dma_addr >> PAGE_SHIFT,
				runtime->dma_bytes,
				vma->vm_page_prot);
	} else {
Example #6
static int uio_mmap_physical(struct vm_area_struct *vma)
{
    struct uio_device *idev = vma->vm_private_data;
    int mi = uio_find_mem_index(vma);
    if (mi < 0)
        return -EINVAL;

    vma->vm_flags |= VM_IO | VM_RESERVED;

    vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

    return remap_pfn_range(vma,
                           vma->vm_start,
                           idev->info->mem[mi].addr >> PAGE_SHIFT,
                           vma->vm_end - vma->vm_start,
                           vma->vm_page_prot);
}
Example #7
static int vpu_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_flags |= VM_IO;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	if (PFN_PHYS(vma->vm_pgoff) < 0x13200000 || PFN_PHYS(vma->vm_pgoff) >= 0x13300000) {
		if (PFN_PHYS(vma->vm_pgoff) != 0x10000000) {
			printk(KERN_ERR "vpu_mmap: physical address out of range 0x13200000 - 0x13300000\n");
			return -EAGAIN;
		}
	}
	}
	if (io_remap_pfn_range(vma, vma->vm_start,
			       vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}
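From user space, a driver like this is exercised by passing the physical address as the mmap offset, which the kernel converts to vma->vm_pgoff. A minimal sketch, assuming a hypothetical /dev/vpu node and the 0x13200000 window checked above:

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/vpu", O_RDWR | O_SYNC); /* node name is an assumption */
	if (fd < 0)
		return 1;

	/* offset 0x13200000 becomes vma->vm_pgoff == 0x13200000 >> PAGE_SHIFT */
	volatile unsigned int *regs = mmap(NULL, 0x1000, PROT_READ | PROT_WRITE,
					   MAP_SHARED, fd, 0x13200000);
	if (regs == MAP_FAILED) {
		close(fd);
		return 1;
	}

	printf("reg0 = %#x\n", regs[0]); /* uncached read of the first register */

	munmap((void *)regs, 0x1000);
	close(fd);
	return 0;
}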
Example #8
static int acedev_mmap(struct file *filp, struct vm_area_struct *vma)
{
    unsigned long temp_pfn;
    temp_pfn = ACE_REGS_pBASE >> PAGE_SHIFT;
    /* Set reserved and I/O flag for the area. */
    vma->vm_flags |= VM_RESERVED | VM_IO;
	
    /* Select uncached access. */
    vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
    if (io_remap_pfn_range(vma, vma->vm_start, temp_pfn,
                    vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
        return -EAGAIN;
    }
    vma->vm_ops = &acedev_remap_vm_ops;
    acedev_vma_open(vma);
    
    return 0; 
} 
Example #9
static int acdb_mmap(struct file *file, struct vm_area_struct *vma)
{
	int result = 0;
	size_t size = vma->vm_end - vma->vm_start;

	pr_debug("%s\n", __func__);

	mutex_lock(&acdb_data.acdb_mutex);
	if (acdb_data.mem_len) {
		if (size <= acdb_data.mem_len) {
			vma->vm_page_prot = pgprot_noncached(
						vma->vm_page_prot);
			result = remap_pfn_range(vma,
				vma->vm_start,
				acdb_data.paddr >> PAGE_SHIFT,
				size,
				vma->vm_page_prot);
		} else {
Example #10
static inline int fimc_mmap_out_dst(struct file *filp, struct vm_area_struct *vma, u32 idx)
{
	struct fimc_control *ctrl = filp->private_data;
	unsigned long pfn = 0, size;
	int ret = 0;

	size = vma->vm_end - vma->vm_start;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_flags |= VM_RESERVED;

	pfn = __phys_to_pfn(ctrl->out->dst[idx].base[0]);
	ret = remap_pfn_range(vma, vma->vm_start, pfn, size, vma->vm_page_prot);
	if (ret != 0)
		fimc_err("remap_pfn_range fail.\n");

	return ret;
}
Example #11
int
knacs_pulse_ctl_mmap(struct file *filp, struct vm_area_struct *vma)
{
    unsigned long requested_size = vma->vm_end - vma->vm_start;
    if (requested_size > resource_size(pulse_ctl_regs)) {
        pr_alert("MMap size too large for pulse controller\n");
        return -EINVAL;
    }

    vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
    vma->vm_flags |= VM_IO;

    pr_debug("mmap pulse controller\n");

    return remap_pfn_range(vma, vma->vm_start,
                           pulse_ctl_regs->start >> PAGE_SHIFT,
                           requested_size, vma->vm_page_prot);
}
Example #12
/**
 * Map memory that can be shared between the Epiphany
 * device and user-space
 */
static int epiphany_map_host_memory(struct vm_area_struct *vma)
{
    int err;
    vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);


    err = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
                          vma->vm_end - vma->vm_start, vma->vm_page_prot);

    if (err) {
        printk(KERN_ERR "Failed mapping host memory to vma 0x%08lx, "
               "size 0x%08lx, page offset 0x%08lx\n",
               vma->vm_start, vma->vm_end - vma->vm_start,
               vma->vm_pgoff);
    }

    return err;
}
Example #13
int kfd_doorbell_mmap(struct kfd_process *process, struct vm_area_struct *vma)
{
	phys_addr_t address;
	struct kfd_dev *dev;

	/*
	 * For simplicity we only allow mapping of the entire doorbell
	 * allocation of a single device & process.
	 */
	if (vma->vm_end - vma->vm_start != doorbell_process_allocation())
		return -EINVAL;

	/* Find kfd device according to gpu id */
	dev = kfd_device_by_id(vma->vm_pgoff);
	if (dev == NULL)
		return -EINVAL;

	/* Find if pdd exists for combination of process and gpu id */
	if (!kfd_get_process_device_data(dev, process, 0))
		return -EINVAL;

	/* Calculate physical address of doorbell */
	address = kfd_get_process_doorbells(dev, process);

	vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE |
				VM_DONTDUMP | VM_PFNMAP;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	pr_debug("kfd: mapping doorbell page in kfd_doorbell_mmap\n"
		 "     target user address == 0x%08llX\n"
		 "     physical address    == 0x%08llX\n"
		 "     vm_flags            == 0x%04lX\n"
		 "     size                == 0x%04lX\n",
		 (unsigned long long) vma->vm_start, address, vma->vm_flags,
		 doorbell_process_allocation());


	return io_remap_pfn_range(vma,
				vma->vm_start,
				address >> PAGE_SHIFT,
				doorbell_process_allocation(),
				vma->vm_page_prot);
}
Example #14
static int iwch_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	int len = vma->vm_end - vma->vm_start;
	u32 key = vma->vm_pgoff << PAGE_SHIFT;
	struct cxio_rdev *rdev_p;
	int ret = 0;
	struct iwch_mm_entry *mm;
	struct iwch_ucontext *ucontext;
	u64 addr;

	PDBG("%s pgoff 0x%lx key 0x%x len %d\n", __FUNCTION__, vma->vm_pgoff,
	     key, len);

	if (vma->vm_start & (PAGE_SIZE - 1))
		return -EINVAL;

	rdev_p = &(to_iwch_dev(context->device)->rdev);
	ucontext = to_iwch_ucontext(context);

	mm = remove_mmap(ucontext, key, len);
	if (!mm)
		return -EINVAL;
	addr = mm->addr;
	kfree(mm);

	if ((addr >= rdev_p->rnic_info.udbell_physbase) &&
	    (addr < (rdev_p->rnic_info.udbell_physbase +
		       rdev_p->rnic_info.udbell_len))) {

		/*
		 * Map T3 DB register.
		 */
		if (vma->vm_flags & VM_READ) {
			return -EPERM;
		}

		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
		vma->vm_flags &= ~VM_MAYREAD;
		ret = io_remap_pfn_range(vma, vma->vm_start,
					 addr >> PAGE_SHIFT,
				         len, vma->vm_page_prot);
	} else {
Example #15
static int s5p_pcm_mmap(struct snd_pcm_substream *substream,
	struct vm_area_struct *vma)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	unsigned long size, offset;
	int ret;

	s3cdbg("Entered %s\n", __FUNCTION__);

	if(s3c_pcm_pdat.lp_mode){
		/* From snd_pcm_lib_mmap_iomem */
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		vma->vm_flags |= VM_IO;
		size = vma->vm_end - vma->vm_start;
		offset = vma->vm_pgoff << PAGE_SHIFT;
		ret = io_remap_pfn_range(vma, vma->vm_start,
				(runtime->dma_addr + offset) >> PAGE_SHIFT,
				size, vma->vm_page_prot);
	}else{
Example #16
/*
 * mspec_mmap
 *
 * Called when mmapping the device.  Initializes the vma with a fault handler
 * and private data structure necessary to allocate, track, and free the
 * underlying pages.
 */
static int
mspec_mmap(struct file *file, struct vm_area_struct *vma,
					enum mspec_page_type type)
{
	struct vma_data *vdata;
	int pages, vdata_size, flags = 0;

	if (vma->vm_pgoff != 0)
		return -EINVAL;

	if ((vma->vm_flags & VM_SHARED) == 0)
		return -EINVAL;

	if ((vma->vm_flags & VM_WRITE) == 0)
		return -EPERM;

	pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	vdata_size = sizeof(struct vma_data) + pages * sizeof(long);
	if (vdata_size <= PAGE_SIZE)
		vdata = kmalloc(vdata_size, GFP_KERNEL);
	else {
		vdata = vmalloc(vdata_size);
		flags = VMD_VMALLOCED;
	}
	if (!vdata)
		return -ENOMEM;
	memset(vdata, 0, vdata_size);

	vdata->vm_start = vma->vm_start;
	vdata->vm_end = vma->vm_end;
	vdata->flags = flags;
	vdata->type = type;
	spin_lock_init(&vdata->lock);
	atomic_set(&vdata->refcnt, 1);
	vma->vm_private_data = vdata;

	vma->vm_flags |= (VM_IO | VM_RESERVED | VM_PFNMAP | VM_DONTEXPAND);
	if (vdata->type == MSPEC_FETCHOP || vdata->type == MSPEC_UNCACHED)
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_ops = &mspec_vm_ops;

	return 0;
}
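mspec_vm_ops is not part of the snippet; since the vma carries a refcounted vma_data, the .open callback presumably bumps the count when the vma is duplicated (e.g. across fork()). A sketch of what that pairing could look like, not the driver's actual code:

/* Sketch only: the real mspec_vm_ops would also provide .close and .fault. */
static void mspec_open(struct vm_area_struct *vma)
{
	struct vma_data *vdata = vma->vm_private_data;

	atomic_inc(&vdata->refcnt);
}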
Example #17
static inline int fimc_mmap_out_src(struct file *filp, struct vm_area_struct *vma)
{
	struct fimc_control *ctrl = filp->private_data;
	u32 start_phy_addr = 0;
	u32 size = vma->vm_end - vma->vm_start;
	u32 pfn, idx = vma->vm_pgoff;
	u32 buf_length = 0;
	int pri_data = 0;

	buf_length = ctrl->out->src[idx].length[FIMC_ADDR_Y] +
			ctrl->out->src[idx].length[FIMC_ADDR_CB] +
			ctrl->out->src[idx].length[FIMC_ADDR_CR];
	if (size > buf_length) {
		fimc_err("Requested mmap size is too big\n");
		return -EINVAL;
	}

	pri_data = (ctrl->id * 0x10) + idx;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_flags |= VM_RESERVED;
	vma->vm_ops = &fimc_mmap_ops;
	vma->vm_private_data = (void *)pri_data;

	if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED)) {
		fimc_err("writable mapping must be shared\n");
		return -EINVAL;
	}

	start_phy_addr = ctrl->out->src[idx].base[FIMC_ADDR_Y];
	pfn = __phys_to_pfn(start_phy_addr);

	if (remap_pfn_range(vma, vma->vm_start, pfn, size,
						vma->vm_page_prot)) {
		fimc_err("mmap fail\n");
		return -EINVAL;
	}

	vma->vm_ops->open(vma);

	ctrl->out->src[idx].flags |= V4L2_BUF_FLAG_MAPPED;

	return 0;
}
Example #18
/**
 * pci_mmap_legacy_page_range - map legacy memory space to userland
 * @bus: bus whose legacy space we're mapping
 * @vma: vma passed in by mmap
 *
 * Map legacy memory space for this device back to userspace using a machine
 * vector to get the base address.
 */
int
pci_mmap_legacy_page_range(struct pci_bus *bus, struct vm_area_struct *vma)
{
	char *addr;

	addr = pci_get_legacy_mem(bus);
	if (IS_ERR(addr))
		return PTR_ERR(addr);

	vma->vm_pgoff += (unsigned long)addr >> PAGE_SHIFT;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_flags |= (VM_SHM | VM_RESERVED | VM_IO);

	if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			    vma->vm_end - vma->vm_start, vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}
Example #19
int exynos_mem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct exynos_mem *mem = (struct exynos_mem *)filp->private_data;
	bool cacheable = mem->cacheable;
	dma_addr_t start = 0;
	u32 pfn = 0;
	u32 size = vma->vm_end - vma->vm_start;

	if (vma->vm_pgoff) {
		start = vma->vm_pgoff << PAGE_SHIFT;
		pfn = vma->vm_pgoff;
	} else {
		start = mem->phybase << PAGE_SHIFT;
		pfn = mem->phybase;
	}

	if (!cma_is_registered_region(start, size)) {
		pr_err("[%s] handling non-cma region (%#x@%#x)is prohibited\n",
						__func__, size, start);
		return -EINVAL;
	}

	if (!cacheable)
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_flags |= VM_RESERVED;
	vma->vm_ops = &exynos_mem_ops;

	if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED)) {
		pr_err("writable mapping must be shared\n");
		return -EINVAL;
	}

	if (remap_pfn_range(vma, vma->vm_start, pfn, size, vma->vm_page_prot)) {
		pr_err("mmap fail\n");
		return -EINVAL;
	}

	vma->vm_ops->open(vma);

	return 0;
}
Example #20
static int bridge_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int status;

	vma->vm_flags |= VM_RESERVED | VM_IO;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	dev_dbg(bridge, "%s: vm filp %p start %lx end %lx page_prot %lx flags %lx\n",
		__func__, filp, vma->vm_start, vma->vm_end,
		pgprot_val(vma->vm_page_prot), vma->vm_flags);

	status = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
				 vma->vm_end - vma->vm_start,
				 vma->vm_page_prot);
	if (status != 0)
		status = -EAGAIN;

	return status;
}
Example #21
static int mmap(struct file *filp, struct vm_area_struct *vma)
{
	unsigned long physp;

	__D("mmap: vma->vm_start     = %#lx\n", vma->vm_start);
	__D("mmap: vma->vm_pgoff     = %#lx\n", vma->vm_pgoff);
	__D("mmap: vma->vm_end       = %#lx\n", vma->vm_end);
	__D("mmap: size              = %#lx\n", vma->vm_end - vma->vm_start);

	physp = vma->vm_pgoff << PAGE_SHIFT;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_flags |= VM_RESERVED | VM_IO;
	if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
					vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
		__E("set_noncached: failed remap_pfn_range\n");
		return -EAGAIN;
	}

	return 0;	
}
Example #22
static int v3d_mmap(struct file *filp, struct vm_area_struct *vma)
{
	unsigned long vma_size = vma->vm_end - vma->vm_start;
	v3d_t *dev = (v3d_t *)(filp->private_data);

	if (vma_size & (~PAGE_MASK)) {
		pr_err("v3d_mmap: mmaps must be aligned to a multiple of the page size\n");
		return -EINVAL;
	}

	if (!vma->vm_pgoff)
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	else
		vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
	if (!vma->vm_pgoff) {
		vma->vm_pgoff = BCM21553_V3D_BASE >> PAGE_SHIFT;
	} else if (vma->vm_pgoff != (dev->mempool.addr >> PAGE_SHIFT)) {
Example #23
static int exm_mmap_physical(struct vm_area_struct *vma)
{
	struct exm_device *idev = vma->vm_private_data;
	int mi = exm_find_mem_index(vma);
	if (mi < 0)
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_RESERVED;

#ifdef CONFIG_ARM64
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#else
	vma->vm_page_prot = __pgprot_modify(vma->vm_page_prot, L_PTE_MT_MASK, L_PTE_MT_WRITEBACK);
#endif
	return remap_pfn_range(vma,
			       vma->vm_start,
			       idev->info->mem[mi].addr >> PAGE_SHIFT,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}
Example #24
static int iq_mem_nocache_mmap(struct file *filp, struct vm_area_struct *vma)
{
    size_t size = vma->vm_end - vma->vm_start;

    printk(KERN_INFO "iq_mem_nocache_mmap\n");

    vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

    if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
                        size, vma->vm_page_prot))
    {
        printk(KERN_ERR "remap_pfn_range failed\n");
        return -EAGAIN;
    }

    printk(KERN_INFO "iq mmap %x,%x,%x\n", (unsigned int)PAGE_SHIFT,
           (unsigned int)vma->vm_start,
           (unsigned int)(vma->vm_end - vma->vm_start));
    return 0;
}
Example #25
static int mmap_mem(struct file * file, struct vm_area_struct * vma)
{
#ifdef pgprot_noncached
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	int uncached;

	uncached = uncached_access(file, offset);
	if (uncached)
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#endif

	/* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
	if (remap_pfn_range(vma,
			    vma->vm_start,
			    vma->vm_pgoff,
			    vma->vm_end-vma->vm_start,
			    vma->vm_page_prot))
		return -EAGAIN;
	return 0;
}
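This is the classic /dev/mem handler; the matching user-space idiom maps a page of physical memory through it. A sketch (requires root, and CONFIG_STRICT_DEVMEM restricts which ranges are accessible on many configurations; PHYS_BASE is an illustrative, page-aligned physical address):

#include <fcntl.h>
#include <stdint.h>
#include <sys/mman.h>
#include <unistd.h>

#define PHYS_BASE 0x10000000UL /* illustrative */

int map_phys_page(void)
{
	int fd = open("/dev/mem", O_RDWR | O_SYNC); /* O_SYNC requests an uncached mapping */
	if (fd < 0)
		return -1;

	volatile uint32_t *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
				    MAP_SHARED, fd, PHYS_BASE);
	close(fd); /* the mapping outlives the descriptor */
	if (p == MAP_FAILED)
		return -1;

	uint32_t v = p[0]; /* read one word through the uncached mapping */
	(void)v;
	munmap((void *)p, 4096);
	return 0;
}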
Example #26
static int fpga_mmap(struct file *filp, struct vm_area_struct *vma)
{
	unsigned long off = vma->vm_pgoff << PAGE_SHIFT; 
	unsigned long physical = FPGA_PHY_START + off; 
	unsigned long vsize = vma->vm_end - vma->vm_start; 
	unsigned long psize = FPGA_PHY_SIZE- off; 

	DPRINTK("mmap offset=0x%x, protect=0x%x. \n", off, vma->vm_page_prot);

	if (vsize > psize)
		return -EINVAL;	/* spans too high */
	
	vma->vm_flags |= VM_IO|VM_RESERVED;
	vma->vm_page_prot=pgprot_noncached(vma->vm_page_prot);		

	if (remap_page_range(vma->vm_start, physical, vsize, vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}
Example #27
static int
igbuio_dom0_mmap_phys(struct uio_info *info, struct vm_area_struct *vma)
{
	int idx;

	idx = (int)vma->vm_pgoff;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#ifdef HAVE_PTE_MASK_PAGE_IOMAP
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)
	vma->vm_page_prot.pgprot |= _PAGE_IOMAP;
#else
	vma->vm_page_prot.pgprot |= _PAGE_SOFTW2;
#endif
#endif

	return remap_pfn_range(vma,
			vma->vm_start,
			info->mem[idx].addr >> PAGE_SHIFT,
			vma->vm_end - vma->vm_start,
			vma->vm_page_prot);
}
Example #28
/** Linux 3.12 adds a size test when mapping UIO_MEM_PHYS ranges
 *  to fix a clear security issue (7314e613d5ff9f0934f7a0f74ed7973b903315d1).
 *
 *  Unfortunately this makes it impossible to map ranges less than a page,
 *  such as the control registers for the PLX bridges (128 bytes).
 *  A further change in b65502879556d041b45104c6a35abbbba28c8f2d
 *  prevents mapping of ranges which don't start on a page boundary,
 *  which is also true of the PLX chips (offset 0xc00 on my test system).
 *
 *  This remains the case through the present (4.1 as of May 2015).
 *
 *  These patches have been applied to the Debian 3.2.0 kernel.
 *
 *  The following is based on uio_mmap_physical() from 3.2.0.
 */
static
int mrf_mmap_physical(struct uio_info *info, struct vm_area_struct *vma)
{
    struct pci_dev *dev = info->priv;
    int mi = vma->vm_pgoff; /* bounds check already done in uio_mmap() */

    if (vma->vm_end - vma->vm_start > PAGE_ALIGN(info->mem[mi].size)) {
        dev_err(&dev->dev, "mmap alignment/size test fails %lx %lx %u\n",
                vma->vm_start, vma->vm_end, (unsigned)PAGE_ALIGN(info->mem[mi].size));
        return -EINVAL;
    }

    vma->vm_flags |= VM_IO | VM_RESERVED;
    vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

    return remap_pfn_range(vma,
                           vma->vm_start,
                           info->mem[mi].addr >> PAGE_SHIFT,
                           vma->vm_end - vma->vm_start,
                           vma->vm_page_prot);
}
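In practice this means user space must always request at least a whole page; a sub-page register block is then addressed at its offset within the mapped page. A sketch under those assumptions (fd is already open on the UIO device; the 0xc00 offset comes from the comment above):

#include <stdint.h>
#include <sys/mman.h>
#include <unistd.h>

/* Sketch: map the mi-th UIO region (by UIO convention, the mmap offset
 * mi * page_size selects the mapping index) and locate the 128-byte
 * PLX register block at 0xc00 within the page. */
volatile uint8_t *map_plx_regs(int fd, int mi)
{
	long pagesz = sysconf(_SC_PAGESIZE);
	uint8_t *base = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
			     MAP_SHARED, fd, (off_t)mi * pagesz);
	if (base == MAP_FAILED)
		return NULL;

	return base + 0xc00; /* register block offset per the comment above */
}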
Example #29
static int mtk_mspace_mmap_physical(struct exm_info *info, struct vm_area_struct *vma)
{
    unsigned long size = vma->vm_end - vma->vm_start;
    void * va = NULL;
    unsigned long pa;
    int ret = -EINVAL;

    if ((vma->vm_flags & VM_SHARED) == 0) {
        return -EINVAL;
    }

    vma->vm_ops = &exm_vm_ops;
    va = extmem_malloc_page_align(size);

    if (!va) {
        printk(KERN_ERR "[EXT_MEM] %s failed...\n", __FUNCTION__);
        return -ENOMEM;
    }

    memset(va, 0, size);	
    vma->vm_flags |= (VM_DONTCOPY | VM_DONTEXPAND);

#ifdef CONFIG_ARM64
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#else
	vma->vm_page_prot = __pgprot_modify(vma->vm_page_prot, L_PTE_MT_MASK, L_PTE_MT_WRITEBACK);
#endif
    pa = get_phys_from_mspace((unsigned long) va);
    ret = remap_pfn_range(vma,
    		       vma->vm_start,
    		       (pa >> PAGE_SHIFT),
    		       size,
    		       vma->vm_page_prot);
	extmem_printk("[EXT_MEM] pa:%p, va:%p, vma->vm_pgoff:0x%lx, vm_start:0x%lx, vm_end:0x%lx\n", (void *)pa, va, vma->vm_pgoff, vma->vm_start, vma->vm_end);
	if (ret) {
		printk(KERN_ERR "[EXT_MEM] %s fail ret:%d\n", __FUNCTION__, ret);
	}

    return ret;
}
Example #30
/*
 * ioremap with access flags
 * Cache-semantics-wise it is the same as ioremap - "forced" uncached.
 * However, unlike vanilla ioremap, which bypasses the ARC MMU for addresses
 * in the ARC hardware uncached region, this one still goes through the MMU,
 * as the caller might need finer access control (R/W/X).
 */
void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
			   unsigned long flags)
{
	void __iomem *vaddr;
	struct vm_struct *area;
	unsigned long off, end;
	pgprot_t prot = __pgprot(flags);

	/* Don't allow wraparound, zero size */
	end = paddr + size - 1;
	if ((!size) || (end < paddr))
		return NULL;

	/* An early platform driver might end up here */
	if (!slab_is_available())
		return NULL;

	/* force uncached */
	prot = pgprot_noncached(prot);

	/* Mappings have to be page-aligned */
	off = paddr & ~PAGE_MASK;
	paddr &= PAGE_MASK;
	size = PAGE_ALIGN(end + 1) - paddr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	area->phys_addr = paddr;
	vaddr = (void __iomem *)area->addr;
	if (ioremap_page_range((unsigned long)vaddr,
			       (unsigned long)vaddr + size, paddr, prot)) {
		vunmap((void __force *)vaddr);
		return NULL;
	}
	return (void __iomem *)(off + (char __iomem *)vaddr);
}