Example #1
static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct mlx4_ib_dev *dev = to_mdev(context->device);

	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;

	if (vma->vm_pgoff == 0) {
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

		if (io_remap_pfn_range(vma, vma->vm_start,
				       to_mucontext(context)->uar.pfn,
				       PAGE_SIZE, vma->vm_page_prot))
			return -EAGAIN;
	} else if (vma->vm_pgoff == 1 && dev->dev->caps.bf_reg_size != 0) {
		/* FIXME want pgprot_writecombine() for BlueFlame pages */
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

		if (io_remap_pfn_range(vma, vma->vm_start,
				       to_mucontext(context)->uar.pfn +
				       dev->dev->caps.num_uars,
				       PAGE_SIZE, vma->vm_page_prot))
			return -EAGAIN;
	} else
		return -EINVAL;

	return 0;
}
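For context, a minimal user-space sketch of the mapping protocol above. Everything here is hypothetical except the offsets: fd is assumed to be an open mlx4 uverbs device file, and per the kernel code page offset 0 selects the UAR doorbell page while page offset 1 selects the BlueFlame page.

#include <sys/mman.h>
#include <unistd.h>

static int map_mlx4_pages(int fd, void **uar, void **bf)
{
	long pg = sysconf(_SC_PAGESIZE);

	/* pgoff 0: UAR doorbell page (the kernel maps it non-cached) */
	*uar = mmap(NULL, pg, PROT_WRITE, MAP_SHARED, fd, 0 * pg);
	if (*uar == MAP_FAILED)
		return -1;

	/* pgoff 1: BlueFlame page, available only when bf_reg_size != 0 */
	*bf = mmap(NULL, pg, PROT_WRITE, MAP_SHARED, fd, 1 * pg);
	if (*bf == MAP_FAILED) {
		munmap(*uar, pg);
		return -1;
	}
	return 0;
}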
Example #2
File: epiphany.c  Project: cpehle/oh
static int epiphany_map_device_memory(struct vm_area_struct *vma)
{
    int err, retval = 0;
    unsigned long pfn = vma->vm_pgoff;
    unsigned long size = vma->vm_end - vma->vm_start;

    vma->vm_flags |= (VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
    vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

#ifdef EHalUsesOffsetsRatherThanAbsoluteAddress
    /* Here vm_pgoff is an offset into Epiphany memory rather than an
     * absolute PFN, so rebase it onto EPIPHANY_MEM_START. */
    pfn = (EPIPHANY_MEM_START + (vma->vm_pgoff << PAGE_SHIFT)) >> PAGE_SHIFT;
#endif

    err = io_remap_pfn_range(vma, vma->vm_start, pfn, size,
                             vma->vm_page_prot);

    if (err) {
        printk(KERN_ERR "Failed mapping device memory to vma 0x%08lx, "
               "size 0x%08lx, page offset 0x%08lx\n",
               vma->vm_start, vma->vm_end - vma->vm_start,
               vma->vm_pgoff);
        retval = -EAGAIN;
    }

    return retval;
}
Example #3
static int tegra_pcm_mmap(struct snd_pcm_substream *substream,
			  struct vm_area_struct *vma)
{
	int err = 0;
	int size = 0;
	char *vmalloc_area_ptr = NULL;
	unsigned long start = 0;
	unsigned long pfn = 0;

	start = vma->vm_start;
	vmalloc_area_ptr = substream->dma_buffer.area;
	size = vma->vm_end - vma->vm_start;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	while (size > 0) {
		pfn = vmalloc_to_pfn(vmalloc_area_ptr);
		err = io_remap_pfn_range(vma, start, pfn,
					 PAGE_SIZE, vma->vm_page_prot);
		if (err < 0) {
			snd_printk(KERN_ERR "io_remap_pfn_range failed\n");
			return err;
		}
		start += PAGE_SIZE;
		vmalloc_area_ptr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	return err;
}
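Each loop iteration maps a single page because a vmalloc'd buffer is only virtually contiguous: vmalloc_to_pfn() must resolve the backing page frame one page at a time. As a point of comparison, for buffers allocated with vmalloc_user() (or vmalloc_32_user()) the kernel provides remap_vmalloc_range(), which performs the same per-page walk internally; a hedged sketch reusing the field names from the example above:

/* Sketch only: assumes substream->dma_buffer.area came from a
 * VM_USERMAP-capable allocator such as vmalloc_user(). */
static int tegra_pcm_mmap_alt(struct snd_pcm_substream *substream,
			      struct vm_area_struct *vma)
{
	return remap_vmalloc_range(vma, substream->dma_buffer.area, 0);
}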
Example #4
static int vpu_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_flags |= VM_IO;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	if (io_remap_pfn_range(vma, vma->vm_start,
			       vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}
Example #5
/*
 * Perform the actual remap of the pages for a PCI device mapping, as
 * appropriate for this architecture.  The region in the process to map
 * is described by vm_start and vm_end members of VMA, the base physical
 * address is found in vm_pgoff.
 * The pci device structure is provided so that architectures may make mapping
 * decisions on a per-device or per-bus basis.
 *
 * Returns a negative error code on failure, zero on success.
 */
int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
			enum pci_mmap_state mmap_state,
			int write_combine)
{
	int ret;

	ret = __pci_mmap_make_offset(dev, vma, mmap_state);
	if (ret < 0)
		return ret;

	__pci_mmap_set_pgprot(dev, vma, mmap_state, write_combine);

	ret = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
				 vma->vm_end - vma->vm_start,
				 vma->vm_page_prot);

	return ret;
}
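From user space, this path is typically reached by mmap()'ing a PCI resource file under sysfs; a hypothetical sketch (the device path is an example, not taken from the source):

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical device: mmap() on resource0 reaches
	 * pci_mmap_page_range() for BAR 0. */
	int fd = open("/sys/bus/pci/devices/0000:01:00.0/resource0", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	volatile unsigned int *bar = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
					  MAP_SHARED, fd, 0);
	if (bar == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return 1;
	}

	/* ... access device registers through 'bar' ... */

	munmap((void *)bar, 4096);
	close(fd);
	return 0;
}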
Example #6
static int vpu_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_flags |= VM_IO;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	if (PFN_PHYS(vma->vm_pgoff) < 0x13200000 || PFN_PHYS(vma->vm_pgoff) >= 0x13300000) {
		if (PFN_PHYS(vma->vm_pgoff) != 0x10000000) {
			printk(KERN_ERR "physical address out of range 0x13200000 - 0x13300000\n");
			return -EAGAIN;
		}
	}
	if (io_remap_pfn_range(vma, vma->vm_start,
			       vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}
Example #7
static int acedev_mmap(struct file *filp, struct vm_area_struct *vma)
{
    unsigned long temp_pfn;
    temp_pfn = ACE_REGS_pBASE >> PAGE_SHIFT; /* register base -> PFN */
    /* Set reserved and I/O flag for the area. */
    vma->vm_flags |= VM_RESERVED | VM_IO;
	
    /* Select uncached access. */
    vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
    if (io_remap_pfn_range(vma, vma->vm_start, temp_pfn,
                    vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
        return -EAGAIN;
    }
    vma->vm_ops = &acedev_remap_vm_ops;
    acedev_vma_open(vma);
    
    return 0; 
} 
Example #8
File: main.c  Project: DenisLug/mptcp
static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
{
	struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
	struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
	struct mlx5_uuar_info *uuari = &context->uuari;
	unsigned long command;
	unsigned long idx;
	phys_addr_t pfn;

	command = get_command(vma->vm_pgoff);
	switch (command) {
	case MLX5_IB_MMAP_REGULAR_PAGE:
		if (vma->vm_end - vma->vm_start != PAGE_SIZE)
			return -EINVAL;

		idx = get_index(vma->vm_pgoff);
		if (idx >= uuari->num_uars)
			return -EINVAL;

		pfn = uar_index2pfn(dev, uuari->uars[idx].index);
		mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn 0x%llx\n", idx,
			    (unsigned long long)pfn);

		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
		if (io_remap_pfn_range(vma, vma->vm_start, pfn,
				       PAGE_SIZE, vma->vm_page_prot))
			return -EAGAIN;

		mlx5_ib_dbg(dev, "mapped WC at 0x%lx, PA 0x%llx\n",
			    vma->vm_start,
			    (unsigned long long)pfn << PAGE_SHIFT);
		break;

	case MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES:
		return -ENOSYS;

	default:
		return -EINVAL;
	}

	return 0;
}
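The switch above works because the driver multiplexes several mapping types through the single vm_pgoff value, packing a command and a UAR index into it. The helpers get_command() and get_index() are not shown in the snippet; a hypothetical sketch of such an encoding (the real bit layout is driver-internal):

/* Hypothetical encoding: command in the high bits of vm_pgoff,
 * index in the low MLX5_IB_MMAP_CMD_SHIFT bits. */
#define MLX5_IB_MMAP_CMD_SHIFT	8

static unsigned long get_command(unsigned long offset)
{
	return offset >> MLX5_IB_MMAP_CMD_SHIFT;
}

static unsigned long get_index(unsigned long offset)
{
	return offset & ((1UL << MLX5_IB_MMAP_CMD_SHIFT) - 1);
}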
Example #9
static int
_gmodule_mmap(struct file *filp, struct vm_area_struct *vma)
{
    if (_gmodule->mmap) {
        return _gmodule->mmap(filp, vma);
    }
#ifdef BCM_PLX9656_LOCAL_BUS
	vma->vm_flags |= VM_RESERVED | VM_IO;
	pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE | _PAGE_GUARDED;

	if (io_remap_pfn_range(	vma,
				vma->vm_start,
				vma->vm_pgoff,
				vma->vm_end - vma->vm_start,
				vma->vm_page_prot)) {
                return (-EAGAIN);
	}
	return (0);
#else  /* BCM_PLX9656_LOCAL_BUS */
    return -EPERM;
#endif /* BCM_PLX9656_LOCAL_BUS */
}
Example #10
/*
 * vma->vm_end, vma->vm_start: the user-space address range assigned when
 * mmap() was called.
 * vma->vm_pgoff: the physical address supplied by the user as the last
 * mmap() argument (off); since mmap() restricts the offset to be
 * page-aligned, it arrives here already shifted right by 12 bits.
 */
static int seh_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;
	unsigned long pa = vma->vm_pgoff;

	/* we do not want to have this area swapped out, lock it */
	vma->vm_flags |= (VM_RESERVED | VM_IO);
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	if (io_remap_pfn_range(vma,
			       vma->vm_start,
			       pa, /* physical page index */
			       size,
			       vma->vm_page_prot))
	{
		ERRMSG("remap page range failed\n");
		return -ENXIO;
	}
	vma->vm_ops = &vm_ops;
	seh_vma_open(vma);
	return 0;
}
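A minimal user-space sketch of the calling convention described in the comment above (device path, length, and physical base are hypothetical):

#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

static void *map_seh_regs(size_t len)
{
	/* The page-aligned physical address goes in as the byte offset;
	 * the kernel receives it as vm_pgoff = phys >> 12. */
	off_t phys = 0x40000000; /* hypothetical physical base */
	int fd = open("/dev/seh", O_RDWR); /* hypothetical device node */
	void *regs;

	if (fd < 0)
		return NULL;
	regs = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, phys);
	close(fd); /* the mapping stays valid after close() */
	return regs == MAP_FAILED ? NULL : regs;
}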
Example #11
static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev;
	drm_i810_private_t *dev_priv;
	struct drm_buf *buf;
	drm_i810_buf_priv_t *buf_priv;

	dev = priv->minor->dev;
	dev_priv = dev->dev_private;
	buf = dev_priv->mmap_buffer;
	buf_priv = buf->dev_private;

	vma->vm_flags |= (VM_IO | VM_DONTCOPY);

	buf_priv->currently_mapped = I810_BUF_MAPPED;

	if (io_remap_pfn_range(vma, vma->vm_start,
			       vma->vm_pgoff,
			       vma->vm_end - vma->vm_start, vma->vm_page_prot))
		return -EAGAIN;
	return 0;
}
Example #12
/**
 * @brief   Chunkmem device mmap function
 */
static int chunkmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	int ret;

	if (!chunkmem->mmap_enable) {
		ret = -EPERM; /* disable calling mmap from user AP */
		goto out;
	}

	vma->vm_pgoff += (chunkmem->pbase >> PAGE_SHIFT);
	/* This is an IO map - tell maydump to skip this VMA */
	vma->vm_flags |= VM_IO | VM_RESERVED;
	ret = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
				 vma->vm_end - vma->vm_start,
				 vma->vm_page_prot);
	if (ret != 0) {
		ret = -EAGAIN;
	}
out:
	return ret;
}
Example #13
/**
 * pvrdma_mmap - create mmap region
 * @ibcontext: the user context
 * @vma: the VMA
 *
 * @return: 0 on success, otherwise errno.
 */
int pvrdma_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
{
	struct pvrdma_ucontext *context = to_vucontext(ibcontext);
	unsigned long start = vma->vm_start;
	unsigned long size = vma->vm_end - vma->vm_start;
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;

	dev_dbg(&context->dev->pdev->dev, "create mmap region\n");

	if ((size != PAGE_SIZE) || (offset & ~PAGE_MASK)) {
		dev_warn(&context->dev->pdev->dev,
			 "invalid params for mmap region\n");
		return -EINVAL;
	}

	/* Map UAR to kernel space, VM_LOCKED? */
	vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	if (io_remap_pfn_range(vma, start, context->uar.pfn, size,
			       vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}