Code example #1
File: ept.c  Project: peimichael/dune
static int ept_set_pfnmap_epte(struct vmx_vcpu *vcpu, int make_write,
				unsigned long gpa, unsigned long hva)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	epte_t *epte, flags;
	unsigned long pfn;
	int ret;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, hva);
	if (!vma) {
		up_read(&mm->mmap_sem);
		return -EFAULT;
	}

	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) {
		up_read(&mm->mmap_sem);
		return -EFAULT;
	}

	ret = follow_pfn(vma, hva, &pfn);
	if (ret) {
		up_read(&mm->mmap_sem);
		return ret;
	}
	up_read(&mm->mmap_sem);

	/* NOTE: PFNs cannot be huge pages, which is quite a relief here */
	spin_lock(&vcpu->ept_lock);
	ret = ept_lookup_gpa(vcpu, (void *) gpa, 0, 1, &epte);
	if (ret) {
		spin_unlock(&vcpu->ept_lock);
		printk(KERN_ERR "ept: failed to lookup EPT entry\n");
		return ret;
	}

	flags = __EPTE_READ | __EPTE_TYPE(EPTE_TYPE_UC) |
		__EPTE_IPAT | __EPTE_PFNMAP;
	if (make_write)
		flags |= __EPTE_WRITE;
	if (vcpu->ept_ad_enabled) {
		/* premark A/D to avoid extra memory references */
		flags |= __EPTE_A;
		if (make_write)
			flags |= __EPTE_D;
	}

	if (epte_present(*epte))
		ept_clear_epte(epte);

	*epte = epte_addr(pfn << PAGE_SHIFT) | flags;
	spin_unlock(&vcpu->ept_lock);

	return 0;
}
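
For orientation, a hypothetical call site (not from the dune sources): an EPT-violation handler that backs VM_IO/VM_PFNMAP mappings with the helper above. The write bit of the exit qualification is the architectural VMX bit 1; the surrounding handler shape is an assumption.

static int example_handle_ept_violation(struct vmx_vcpu *vcpu,
					unsigned long gpa, unsigned long hva,
					unsigned long exit_qual)
{
	/* bit 1 of the EPT-violation exit qualification = write access */
	int make_write = !!(exit_qual & (1 << 1));

	return ept_set_pfnmap_epte(vcpu, make_write, gpa, hva);
}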
Code example #2
File: videobuf2-memops.c  Project: CSCLOG/beaglebone
/**
 * vb2_get_contig_userptr() - lock physically contiguous userspace mapped memory
 * @vaddr:	starting virtual address of the area to be verified
 * @size:	size of the area
 * @res_vma:	will return a locked copy of struct vm_area for the given area
 * @res_pa:	will return the physical address for the given vaddr
 *
 * This function will go through the memory area of size @size mapped at @vaddr
 * and verify that the underlying physical pages are contiguous. If they are
 * contiguous, the virtual memory area is locked, a copy of it is filled into
 * @res_vma, and @res_pa is set to the physical address of the buffer.
 *
 * Returns 0 on success.
 */
int vb2_get_contig_userptr(unsigned long vaddr, unsigned long size,
			   struct vm_area_struct **res_vma, dma_addr_t *res_pa)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long offset, start, end;
	unsigned long this_pfn, prev_pfn;
	dma_addr_t pa = 0;
	int ret = -EFAULT;

	start = vaddr;
	offset = start & ~PAGE_MASK;
	end = start + size;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, start);

	if (vma == NULL || vma->vm_end < end)
		goto done;

	for (prev_pfn = 0; start < end; start += PAGE_SIZE) {
		ret = follow_pfn(vma, start, &this_pfn);
		if (ret)
			goto done;

		if (prev_pfn == 0)
			pa = this_pfn << PAGE_SHIFT;
		else if (this_pfn != prev_pfn + 1) {
			ret = -EFAULT;
			goto done;
		}
		prev_pfn = this_pfn;
	}

	/*
	 * Memory is contiguous, lock vma and return to the caller
	 */
	*res_vma = vb2_get_vma(vma);
	if (*res_vma == NULL) {
		ret = -ENOMEM;
		goto done;
	}
	*res_pa = pa + offset;
	ret = 0;

done:
	up_read(&mm->mmap_sem);
	return ret;
}
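
A minimal caller sketch for the helper above (not from the CSCLOG/beaglebone tree): pin a contiguous user buffer, use the returned bus address, then release the locked vma copy with vb2_put_vma() from the same file. The surrounding driver context is assumed.

static int example_pin_userptr(unsigned long userptr, unsigned long size)
{
	struct vm_area_struct *vma;
	dma_addr_t paddr;
	int ret;

	ret = vb2_get_contig_userptr(userptr, size, &vma, &paddr);
	if (ret)
		return ret;	/* not mapped or not physically contiguous */

	/* ... program the DMA engine with paddr ... */

	vb2_put_vma(vma);	/* drop the locked vma copy when done */
	return 0;
}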
Code example #3
/*
 * Unlocked variant of the function above: it never takes mm->mmap_sem
 * itself, so the caller is expected to hold it across the call.
 */
int vb2_get_contig_userptr(unsigned long vaddr, unsigned long size,
                           struct vm_area_struct **res_vma, dma_addr_t *res_pa)
{
    struct mm_struct *mm = current->mm;
    struct vm_area_struct *vma;
    unsigned long offset, start, end;
    unsigned long this_pfn, prev_pfn;
    dma_addr_t pa = 0;

    start = vaddr;
    offset = start & ~PAGE_MASK;
    end = start + size;

    vma = find_vma(mm, start);

    if (vma == NULL || vma->vm_end < end)
        return -EFAULT;

    for (prev_pfn = 0; start < end; start += PAGE_SIZE) {
        int ret = follow_pfn(vma, start, &this_pfn);
        if (ret)
            return ret;

        if (prev_pfn == 0)
            pa = this_pfn << PAGE_SHIFT;
        else if (this_pfn != prev_pfn + 1)
            return -EFAULT;

        prev_pfn = this_pfn;
    }

    *res_vma = vb2_get_vma(vma);
    if (*res_vma == NULL)
        return -ENOMEM;

    *res_pa = pa + offset;
    return 0;
}
Code example #4
/*
 * Hacked from the kernel function __get_user_pages in mm/memory.c
 *
 * Handles buffers allocated by another kernel-space driver and mmapped into
 * user space; unlike the original, this function ignores the VM_PFNMAP and
 * VM_IO flags in the VMA structure.
 *
 * Gets physical pages for a user-space virtual address range and fills them
 * into the page list.
 */
static int __get_pfnmap_pages(struct task_struct *tsk, struct mm_struct *mm,
			      unsigned long start, int nr_pages,
			      unsigned int gup_flags, struct page **pages,
			      struct vm_area_struct **vmas)
{
	int i, ret;
	unsigned long vm_flags;

	if (nr_pages <= 0)
		return 0;

	VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));

	/*
	 * Require read or write permissions.
	 * If FOLL_FORCE is set, we only require the "MAY" flags.
	 */
	vm_flags  = (gup_flags & FOLL_WRITE) ?
			(VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
	vm_flags &= (gup_flags & FOLL_FORCE) ?
			(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
	i = 0;

	do {
		struct vm_area_struct *vma;

		vma = find_vma(mm, start);
		if (!vma) {
			dev_err(atomisp_dev, "find_vma failed\n");
			return i ? : -EFAULT;
		}

		if (is_vm_hugetlb_page(vma)) {
			/*
			i = follow_hugetlb_page(mm, vma, pages, vmas,
					&start, &nr_pages, i, gup_flags);
			*/
			continue;
		}

		do {
			struct page *page;
			unsigned long pfn;

			/*
			 * If we have a pending SIGKILL, don't keep faulting
			 * pages and potentially allocating memory.
			 */
			if (unlikely(fatal_signal_pending(current))) {
				dev_err(atomisp_dev,
					"fatal_signal_pending in %s\n",
					__func__);
				return i ? i : -ERESTARTSYS;
			}

			ret = follow_pfn(vma, start, &pfn);
			if (ret) {
				dev_err(atomisp_dev, "follow_pfn() failed\n");
				return i ? : -EFAULT;
			}

			page = pfn_to_page(pfn);
			if (IS_ERR(page))
				return i ? i : PTR_ERR(page);
			if (pages) {
				pages[i] = page;
				get_page(page);
				flush_anon_page(vma, page, start);
				flush_dcache_page(page);
			}
			if (vmas)
				vmas[i] = vma;
			i++;
			start += PAGE_SIZE;
			nr_pages--;
		} while (nr_pages && start < vma->vm_end);
	} while (nr_pages);

	return i;
}
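
A hedged usage sketch (not from the atomisp driver): because the helper walks the page tables itself, the caller takes mm->mmap_sem around the call, just as __get_user_pages() callers do, and FOLL_GET is required whenever a page list is passed, per the VM_BUG_ON above.

static int example_pin_pfnmap(unsigned long start, int nr_pages,
			      struct page **pages)
{
	struct mm_struct *mm = current->mm;
	int ret;

	down_read(&mm->mmap_sem);
	ret = __get_pfnmap_pages(current, mm, start, nr_pages,
				 FOLL_GET, pages, NULL);
	up_read(&mm->mmap_sem);

	return ret;	/* number of pages handled, or a negative errno */
}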
Code example #5
static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
				    unsigned long size,
				    enum dma_data_direction dma_dir)
{
	struct vb2_dma_sg_conf *conf = alloc_ctx;
	struct vb2_dma_sg_buf *buf;
	unsigned long first, last;
	int num_pages_from_user;
	struct vm_area_struct *vma;
	struct sg_table *sgt;
	DEFINE_DMA_ATTRS(attrs);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0)
	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
#endif

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->vaddr = NULL;
	buf->dev = conf->dev;
	buf->dma_dir = dma_dir;
	buf->offset = vaddr & ~PAGE_MASK;
	buf->size = size;
	buf->dma_sgt = &buf->sg_table;

	first = (vaddr           & PAGE_MASK) >> PAGE_SHIFT;
	last  = ((vaddr + size - 1) & PAGE_MASK) >> PAGE_SHIFT;
	buf->num_pages = last - first + 1;

	buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
			     GFP_KERNEL);
	if (!buf->pages)
		goto userptr_fail_alloc_pages;

	vma = find_vma(current->mm, vaddr);
	if (!vma) {
		dprintk(1, "no vma for address %lu\n", vaddr);
		goto userptr_fail_find_vma;
	}

	if (vma->vm_end < vaddr + size) {
		dprintk(1, "vma at %lu is too small for %lu bytes\n",
			vaddr, size);
		goto userptr_fail_find_vma;
	}

	buf->vma = vb2_get_vma(vma);
	if (!buf->vma) {
		dprintk(1, "failed to copy vma\n");
		goto userptr_fail_find_vma;
	}

	if (vma_is_io(buf->vma)) {
		for (num_pages_from_user = 0;
		     num_pages_from_user < buf->num_pages;
		     ++num_pages_from_user, vaddr += PAGE_SIZE) {
			unsigned long pfn;

			if (follow_pfn(vma, vaddr, &pfn)) {
				dprintk(1, "no page for address %lu\n", vaddr);
				break;
			}
			buf->pages[num_pages_from_user] = pfn_to_page(pfn);
		}
	} else
		num_pages_from_user = get_user_pages(current, current->mm,
					     vaddr & PAGE_MASK,
					     buf->num_pages,
					     buf->dma_dir == DMA_FROM_DEVICE,
					     1, /* force */
					     buf->pages,
					     NULL);

	if (num_pages_from_user != buf->num_pages)
		goto userptr_fail_get_user_pages;

	if (sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
			buf->num_pages, buf->offset, size, 0))
		goto userptr_fail_alloc_table_from_pages;

	sgt = &buf->sg_table;
	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				      buf->dma_dir, &attrs);
	if (!sgt->nents)
		goto userptr_fail_map;

	return buf;

userptr_fail_map:
	sg_free_table(&buf->sg_table);
userptr_fail_alloc_table_from_pages:
userptr_fail_get_user_pages:
	dprintk(1, "get_user_pages requested/got: %d/%d]\n",
		buf->num_pages, num_pages_from_user);
	if (!vma_is_io(buf->vma))
		while (--num_pages_from_user >= 0)
			put_page(buf->pages[num_pages_from_user]);
	vb2_put_vma(buf->vma);
userptr_fail_find_vma:
	kfree(buf->pages);
userptr_fail_alloc_pages:
	kfree(buf);
	return NULL;
}
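
For context, this memop is not called directly; the videobuf2 core invokes it through a struct vb2_mem_ops table. A sketch of the wiring, with the sibling handlers (vb2_dma_sg_alloc, vb2_dma_sg_put, vb2_dma_sg_put_userptr) assumed from the mainline videobuf2-dma-sg driver:

static const struct vb2_mem_ops example_dma_sg_memops = {
	.alloc		= vb2_dma_sg_alloc,		/* assumed sibling handlers */
	.put		= vb2_dma_sg_put,
	.get_userptr	= vb2_dma_sg_get_userptr,
	.put_userptr	= vb2_dma_sg_put_userptr,
};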
Code example #6
long s3c_mem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
#ifdef USE_DMA_ALLOC
	unsigned long virt_addr;
#else
	unsigned long *virt_addr;
#endif

	struct mm_struct *mm = current->mm;
	struct s3c_mem_alloc param;
	struct vm_area_struct *vma;
	unsigned long start, this_pfn;
#ifdef CONFIG_S3C_DMA_MEM
	struct s3c_mem_dma_param dma_param;
#endif

	switch (cmd) {
	case S3C_MEM_ALLOC:
		mutex_lock(&mem_alloc_lock);
		if (copy_from_user(&param, (struct s3c_mem_alloc *)arg,
					sizeof(struct s3c_mem_alloc))) {
			mutex_unlock(&mem_alloc_lock);
			return -EFAULT;
		}
		flag = MEM_ALLOC;
		param.vir_addr = do_mmap(file, 0, param.size,
				(PROT_READ|PROT_WRITE), MAP_SHARED, 0);
		DEBUG("param.vir_addr = %08x, %d\n",
						param.vir_addr, __LINE__);
		if (param.vir_addr == -EINVAL) {
			printk(KERN_INFO "S3C_MEM_ALLOC FAILED\n");
			flag = 0;
			mutex_unlock(&mem_alloc_lock);
			return -EFAULT;
		}
		param.phy_addr = physical_address;
#ifdef USE_DMA_ALLOC
		param.kvir_addr = virtual_address;
#endif

		DEBUG("KERNEL MALLOC : param.phy_addr = 0x%X \t "
				"size = %d \t param.vir_addr = 0x%X, %d\n",
				param.phy_addr, param.size, param.vir_addr,
				__LINE__);

		if (copy_to_user((struct s3c_mem_alloc *)arg, &param,
					sizeof(struct s3c_mem_alloc))) {
			flag = 0;
			mutex_unlock(&mem_alloc_lock);
			return -EFAULT;
		}
		flag = 0;
		mutex_unlock(&mem_alloc_lock);

		break;

	case S3C_MEM_CACHEABLE_ALLOC:
		mutex_lock(&mem_cacheable_alloc_lock);
		if (copy_from_user(&param, (struct s3c_mem_alloc *)arg,
					sizeof(struct s3c_mem_alloc))) {
			mutex_unlock(&mem_cacheable_alloc_lock);
			return -EFAULT;
		}
		flag = MEM_ALLOC_CACHEABLE;
		param.vir_addr = do_mmap(file, 0, param.size,
				(PROT_READ|PROT_WRITE), MAP_SHARED, 0);
		DEBUG("param.vir_addr = %08x, %d\n",
				param.vir_addr, __LINE__);
		if (param.vir_addr == -EINVAL) {
			printk(KERN_INFO "S3C_MEM_ALLOC FAILED\n");
			flag = 0;
			mutex_unlock(&mem_cacheable_alloc_lock);
			return -EFAULT;
		}
		param.phy_addr = physical_address;
		DEBUG("KERNEL MALLOC : param.phy_addr = 0x%X"
				" \t size = %d \t param.vir_addr = 0x%X, %d\n",
				param.phy_addr, param.size, param.vir_addr,
				__LINE__);

		if (copy_to_user((struct s3c_mem_alloc *)arg, &param,
					sizeof(struct s3c_mem_alloc))) {
			flag = 0;
			mutex_unlock(&mem_cacheable_alloc_lock);
			return -EFAULT;
		}
		flag = 0;
		mutex_unlock(&mem_cacheable_alloc_lock);

		break;

	case S3C_MEM_SHARE_ALLOC:
		mutex_lock(&mem_share_alloc_lock);
		if (copy_from_user(&param, (struct s3c_mem_alloc *)arg,
					sizeof(struct s3c_mem_alloc))) {
			mutex_unlock(&mem_share_alloc_lock);
			return -EFAULT;
		}
		flag = MEM_ALLOC_SHARE;
		physical_address = param.phy_addr;
		DEBUG("param.phy_addr = %08x, %d\n",
				physical_address, __LINE__);
		param.vir_addr = do_mmap(file, 0, param.size,
				(PROT_READ|PROT_WRITE), MAP_SHARED, 0);
		DEBUG("param.vir_addr = %08x, %d\n",
				param.vir_addr, __LINE__);
		if (param.vir_addr == -EINVAL) {
			printk(KERN_INFO "S3C_MEM_SHARE_ALLOC FAILED\n");
			flag = 0;
			mutex_unlock(&mem_share_alloc_lock);
			return -EFAULT;
		}
		DEBUG("MALLOC_SHARE : param.phy_addr = 0x%X \t "
				"size = %d \t param.vir_addr = 0x%X, %d\n",
				param.phy_addr, param.size, param.vir_addr,
				__LINE__);

		if (copy_to_user((struct s3c_mem_alloc *)arg, &param,
					sizeof(struct s3c_mem_alloc))) {
			flag = 0;
			mutex_unlock(&mem_share_alloc_lock);
			return -EFAULT;
		}
		flag = 0;
		mutex_unlock(&mem_share_alloc_lock);

		break;

	case S3C_MEM_CACHEABLE_SHARE_ALLOC:
		mutex_lock(&mem_cacheable_share_alloc_lock);
		if (copy_from_user(&param, (struct s3c_mem_alloc *)arg,
					sizeof(struct s3c_mem_alloc))) {
			mutex_unlock(&mem_cacheable_share_alloc_lock);
			return -EFAULT;
		}
		flag = MEM_ALLOC_CACHEABLE_SHARE;
		physical_address = param.phy_addr;
		DEBUG("param.phy_addr = %08x, %d\n",
				physical_address, __LINE__);
		param.vir_addr = do_mmap(file, 0, param.size,
				(PROT_READ|PROT_WRITE), MAP_SHARED, 0);
		DEBUG("param.vir_addr = %08x, %d\n",
				param.vir_addr, __LINE__);
		if (param.vir_addr == -EINVAL) {
			printk(KERN_INFO "S3C_MEM_SHARE_ALLOC FAILED\n");
			flag = 0;
			mutex_unlock(&mem_cacheable_share_alloc_lock);
			return -EFAULT;
		}
		DEBUG("MALLOC_SHARE : param.phy_addr = 0x%X \t "
				"size = %d \t param.vir_addr = 0x%X, %d\n",
				param.phy_addr, param.size, param.vir_addr,
				__LINE__);

		if (copy_to_user((struct s3c_mem_alloc *)arg, &param,
					sizeof(struct s3c_mem_alloc))) {
			flag = 0;
			mutex_unlock(&mem_cacheable_share_alloc_lock);
			return -EFAULT;
		}
		flag = 0;
		mutex_unlock(&mem_cacheable_share_alloc_lock);

		break;

	case S3C_MEM_FREE:
		mutex_lock(&mem_free_lock);
		if (copy_from_user(&param, (struct s3c_mem_alloc *)arg,
					sizeof(struct s3c_mem_alloc))) {
			mutex_unlock(&mem_free_lock);
			return -EFAULT;
		}

		DEBUG("KERNEL FREE : param.phy_addr = 0x%X \t "
				"size = %d \t param.vir_addr = 0x%X, %d\n",
				param.phy_addr, param.size, param.vir_addr,
				__LINE__);

		if (do_munmap(mm, param.vir_addr, param.size) < 0) {
			printk(KERN_INFO "do_munmap() failed !!\n");
			mutex_unlock(&mem_free_lock);
			return -EINVAL;
		}

#ifdef USE_DMA_ALLOC
		virt_addr = param.kvir_addr;
		dma_free_writecombine(NULL, param.size,
				(unsigned int *) virt_addr, param.phy_addr);
#else
		virt_addr = (unsigned long *)phys_to_virt(param.phy_addr);
		kfree(virt_addr);
#endif
		param.size = 0;
		DEBUG("do_munmap() succeed !!\n");

		if (copy_to_user((struct s3c_mem_alloc *)arg, &param,
					sizeof(struct s3c_mem_alloc))) {
			mutex_unlock(&mem_free_lock);
			return -EFAULT;
		}

		mutex_unlock(&mem_free_lock);

		break;

	case S3C_MEM_SHARE_FREE:
		mutex_lock(&mem_share_free_lock);
		if (copy_from_user(&param, (struct s3c_mem_alloc *)arg,
					sizeof(struct s3c_mem_alloc))) {
			mutex_unlock(&mem_share_free_lock);
			return -EFAULT;
		}

		DEBUG("MEM_SHARE_FREE : param.phy_addr = 0x%X \t "
				"size = %d \t param.vir_addr = 0x%X, %d\n",
				param.phy_addr, param.size, param.vir_addr,
				__LINE__);

		if (do_munmap(mm, param.vir_addr, param.size) < 0) {
			printk(KERN_INFO "do_munmap() failed - MEM_SHARE_FREE!!\n");
			mutex_unlock(&mem_share_free_lock);
			return -EINVAL;
		}

		param.vir_addr = 0;
		DEBUG("do_munmap() succeed !! - MEM_SHARE_FREE\n");

		if (copy_to_user((struct s3c_mem_alloc *)arg, &param,
					sizeof(struct s3c_mem_alloc))) {
			mutex_unlock(&mem_share_free_lock);
			return -EFAULT;
		}

		mutex_unlock(&mem_share_free_lock);

		break;

#ifdef CONFIG_S3C_DMA_MEM
	case S3C_MEM_DMA_COPY:
		if (copy_from_user(&dma_param, (struct s3c_mem_dma_param *)arg,
				sizeof(struct s3c_mem_dma_param))) {
			return -EFAULT;
		}
		if (s3c_dma_mem_start(current->mm, &dma_param,
				S3C_DMA_MEM2MEM)) {
			return -EINVAL;
		}
		if (copy_to_user((struct s3c_mem_dma_param *)arg, &dma_param,
				sizeof(struct s3c_mem_dma_param))) {
			return -EFAULT;
		}
		break;
#endif

	case S3C_MEM_GET_PADDR:
		if (copy_from_user(&param, (struct s3c_mem_alloc *)arg,
					sizeof(struct s3c_mem_alloc))) {
			return -EFAULT;
		}
		start = param.vir_addr;
		down_read(&mm->mmap_sem);
		vma = find_vma(mm, start);

		if (vma == NULL) {
			up_read(&mm->mmap_sem);
			return -EINVAL;
		}

		if (follow_pfn(vma, start, &this_pfn)) {
			up_read(&mm->mmap_sem);
			return -EINVAL;
		}

		param.phy_addr = this_pfn << PAGE_SHIFT;
		up_read(&mm->mmap_sem);

		if (copy_to_user((struct s3c_mem_alloc *)arg, &param,
					sizeof(struct s3c_mem_alloc))) {
			return -EFAULT;
		}
		break;

	default:
		DEBUG("s3c_mem_ioctl() : default !!\n");
		return -EINVAL;
	}

	return 0;
}
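
A hypothetical user-space counterpart for the S3C_MEM_GET_PADDR path above: the caller fills in vir_addr and the driver resolves the backing physical address via find_vma()/follow_pfn(). The header name and the exact layout of struct s3c_mem_alloc are assumptions based on the fields the handler touches.

#include <sys/ioctl.h>
#include "s3c_mem.h"	/* hypothetical header providing the ioctl and struct */

static int example_get_paddr(int fd, unsigned long vaddr,
			     unsigned long *paddr)
{
	struct s3c_mem_alloc param = { 0 };

	param.vir_addr = vaddr;
	if (ioctl(fd, S3C_MEM_GET_PADDR, &param) < 0)
		return -1;	/* errno set by the driver */

	*paddr = param.phy_addr;
	return 0;
}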
Code example #7
File: psb_pvr_glue.c  Project: DanBjorklund/ME302C
int psb_get_vaddr_pages(u32 vaddr, u32 size, u32 **pfn_list, int *page_count)
{
	u32 num_pages;
	struct page **pages = NULL;
	struct task_struct *task = current;
	struct mm_struct *mm = task->mm;
	struct vm_area_struct *vma;
	u32 *pfns = NULL;
	int ret;
	int i;

	if (unlikely(!pfn_list || !page_count || !vaddr || !size))
		return -EINVAL;

	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	pages = kzalloc(num_pages * sizeof(struct page *), GFP_KERNEL);
	if (unlikely(!pages)) {
		DRM_ERROR("Failed to allocate page list\n");
		return -ENOMEM;
	}

	down_read(&mm->mmap_sem);
	ret = get_user_pages(task, mm, vaddr, num_pages, 0, 0, pages, NULL);
	up_read(&mm->mmap_sem);

	if (ret <= 0) {
		DRM_DEBUG("failed to get user pages\n");
		kfree(pages);
		pages = NULL;
	} else {
		DRM_DEBUG("num_pages %d, ret %d\n", num_pages, ret);
		num_pages = ret;
	}

	/*allocate page list*/
	pfns = kzalloc(num_pages * sizeof(u32), GFP_KERNEL);
	if (!pfns) {
		DRM_ERROR("No memory\n");
		goto get_page_err;
	}

	if (!pages) {
		DRM_ERROR("No pages found, trying to follow pfn\n");
		for (i = 0; i < num_pages; i++) {
			vma = find_vma(mm, vaddr + i * PAGE_SIZE);
			if (!vma) {
				DRM_ERROR("failed to find vma\n");
				goto find_vma_err;
			}

			ret = follow_pfn(vma,
				(unsigned long)(vaddr + i * PAGE_SIZE),
				(unsigned long *)&pfns[i]);
			if (ret) {
				DRM_ERROR("failed to follow pfn\n");
				goto follow_pfn_err;
			}
		}
	} else {
		DRM_ERROR("Found pages\n");
		for (i = 0; i < num_pages; i++)
			pfns[i] = page_to_pfn(pages[i]);
	}

	*pfn_list = pfns;
	*page_count = num_pages;

	kfree(pages);

	return 0;
find_vma_err:
follow_pfn_err:
	kfree(pfns);
get_page_err:
	if (pages) {
		for (i = 0; i < num_pages; i++)
			put_page(pages[i]);
		kfree(pages);
	}
	return -EINVAL;
}
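
A minimal caller sketch (assumed, not from the ME302C tree): the helper allocates the pfn list with kzalloc(), so on success the caller owns it and releases it with kfree() when done.

static int example_lookup_pfns(u32 vaddr, u32 size)
{
	u32 *pfns;
	int page_count;
	int ret;

	ret = psb_get_vaddr_pages(vaddr, size, &pfns, &page_count);
	if (ret)
		return ret;

	/* ... hand pfns[0 .. page_count-1] to the GTT/page-table code ... */

	kfree(pfns);	/* the helper allocated the list on our behalf */
	return 0;
}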
Code example #8
File: frame_vector.c  Project: SantoshShilimkar/linux
/**
 * get_vaddr_frames() - map virtual addresses to pfns
 * @start:	starting user address
 * @nr_frames:	number of pages / pfns from start to map
 * @gup_flags:	flags modifying lookup behaviour
 * @vec:	structure which receives pages / pfns of the addresses mapped.
 *		It should have space for at least nr_frames entries.
 *
 * This function maps virtual addresses from @start and fills @vec structure
 * with page frame numbers or page pointers to corresponding pages (choice
 * depends on the type of the vma underlying the virtual address). If @start
 * belongs to a normal vma, the function grabs reference to each of the pages
 * to pin them in memory. If @start belongs to VM_IO | VM_PFNMAP vma, we don't
 * touch page structures and the caller must make sure pfns aren't reused for
 * anything else while he is using them.
 *
 * The function returns number of pages mapped which may be less than
 * @nr_frames. In particular we stop mapping if there are more vmas of
 * different type underlying the specified range of virtual addresses.
 * When the function isn't able to map a single page, it returns error.
 *
 * This function takes care of grabbing mmap_sem as necessary.
 */
int get_vaddr_frames(unsigned long start, unsigned int nr_frames,
		     unsigned int gup_flags, struct frame_vector *vec)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int ret = 0;
	int err;
	int locked;

	if (nr_frames == 0)
		return 0;

	if (WARN_ON_ONCE(nr_frames > vec->nr_allocated))
		nr_frames = vec->nr_allocated;

	down_read(&mm->mmap_sem);
	locked = 1;
	vma = find_vma_intersection(mm, start, start + 1);
	if (!vma) {
		ret = -EFAULT;
		goto out;
	}

	/*
	 * While get_vaddr_frames() could be used for transient (kernel
	 * controlled lifetime) pinning of memory pages all current
	 * users establish long term (userspace controlled lifetime)
	 * page pinning. Treat get_vaddr_frames() like
	 * get_user_pages_longterm() and disallow it for filesystem-dax
	 * mappings.
	 */
	if (vma_is_fsdax(vma)) {
		ret = -EOPNOTSUPP;
		goto out;	/* must not return with mmap_sem still held */
	}

	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) {
		vec->got_ref = true;
		vec->is_pfns = false;
		ret = get_user_pages_locked(start, nr_frames,
			gup_flags, (struct page **)(vec->ptrs), &locked);
		goto out;
	}

	vec->got_ref = false;
	vec->is_pfns = true;
	do {
		unsigned long *nums = frame_vector_pfns(vec);

		while (ret < nr_frames && start + PAGE_SIZE <= vma->vm_end) {
			err = follow_pfn(vma, start, &nums[ret]);
			if (err) {
				if (ret == 0)
					ret = err;
				goto out;
			}
			start += PAGE_SIZE;
			ret++;
		}
		/*
		 * We stop if we have enough pages or if VMA doesn't completely
		 * cover the tail page.
		 */
		if (ret >= nr_frames || start < vma->vm_end)
			break;
		vma = find_vma_intersection(mm, start, start + 1);
	} while (vma && vma->vm_flags & (VM_IO | VM_PFNMAP));
out:
	if (locked)
		up_read(&mm->mmap_sem);
	if (!ret)
		ret = -EFAULT;
	if (ret > 0)
		vec->nr_frames = ret;
	return ret;
}
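
A hedged usage sketch built from the frame_vector helpers that accompany this function in mainline (frame_vector_create(), frame_vector_pages()/frame_vector_pfns(), put_vaddr_frames(), frame_vector_destroy()); the gup_flags value is just an example.

static int example_map_frames(unsigned long start, unsigned int nr)
{
	struct frame_vector *vec;
	int ret;

	vec = frame_vector_create(nr);
	if (!vec)
		return -ENOMEM;

	ret = get_vaddr_frames(start, nr, FOLL_WRITE, vec);
	if (ret < 0)
		goto out_destroy;

	/* ... use frame_vector_pages(vec) or frame_vector_pfns(vec) ... */

	put_vaddr_frames(vec);	/* drops page references if any were taken */
	ret = 0;
out_destroy:
	frame_vector_destroy(vec);
	return ret;
}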
Code example #9
static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
				    unsigned long size, int write)
{
	struct vb2_dma_sg_buf *buf;
	unsigned long first, last;
	int num_pages_from_user;
	struct vm_area_struct *vma;

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->vaddr = NULL;
	buf->write = write;
	buf->offset = vaddr & ~PAGE_MASK;
	buf->size = size;

	first = (vaddr           & PAGE_MASK) >> PAGE_SHIFT;
	last  = ((vaddr + size - 1) & PAGE_MASK) >> PAGE_SHIFT;
	buf->num_pages = last - first + 1;

	buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
			     GFP_KERNEL);
	if (!buf->pages)
		goto userptr_fail_alloc_pages;

	vma = find_vma(current->mm, vaddr);
	if (!vma) {
		dprintk(1, "no vma for address %lu\n", vaddr);
		goto userptr_fail_find_vma;
	}

	if (vma->vm_end < vaddr + size) {
		dprintk(1, "vma at %lu is too small for %lu bytes\n",
			vaddr, size);
		goto userptr_fail_find_vma;
	}

	buf->vma = vb2_get_vma(vma);
	if (!buf->vma) {
		dprintk(1, "failed to copy vma\n");
		goto userptr_fail_find_vma;
	}

	if (vma_is_io(buf->vma)) {
		for (num_pages_from_user = 0;
		     num_pages_from_user < buf->num_pages;
		     ++num_pages_from_user, vaddr += PAGE_SIZE) {
			unsigned long pfn;

			if (follow_pfn(buf->vma, vaddr, &pfn)) {
				dprintk(1, "no page for address %lu\n", vaddr);
				break;
			}
			buf->pages[num_pages_from_user] = pfn_to_page(pfn);
		}
	} else
		num_pages_from_user = get_user_pages(current, current->mm,
					     vaddr & PAGE_MASK,
					     buf->num_pages,
					     write,
					     1, /* force */
					     buf->pages,
					     NULL);

	if (num_pages_from_user != buf->num_pages)
		goto userptr_fail_get_user_pages;

	if (sg_alloc_table_from_pages(&buf->sg_table, buf->pages,
			buf->num_pages, buf->offset, size, 0))
		goto userptr_fail_alloc_table_from_pages;

	return buf;

userptr_fail_alloc_table_from_pages:
userptr_fail_get_user_pages:
	dprintk(1, "get_user_pages requested/got: %d/%d]\n",
		buf->num_pages, num_pages_from_user);
	if (!vma_is_io(buf->vma))
		while (--num_pages_from_user >= 0)
			put_page(buf->pages[num_pages_from_user]);
	vb2_put_vma(buf->vma);
userptr_fail_find_vma:
	kfree(buf->pages);
userptr_fail_alloc_pages:
	kfree(buf);
	return NULL;
}
Code example #10
/**
 * @brief Driver write function
 *
 * @param[in] *file   : file
 * @param[in] *buffer : write parameters
 * @param[in] count   : write size
 * @param[in] *f_pos  : file R/W position
 * @retval write_size     : success, number of bytes written
 * @retval EFAULT         : failure, bad address
 * @retval ENOENT         : failure, no such file or directory
 * @retval ENOSPC         : failure, no space left on device
 * @retval EINVAL         : failure, invalid argument
 * @retval D_SHDMA_RET_NG : failure, generic error
 * @exception none
 * @see       none
 */
static ssize_t shdma_write(
    struct file    *file,
    const char __user    *buffer,
    size_t         count,
    loff_t         *f_pos )
{
	int ret = D_SHDMA_RET_OK;
	unsigned int i,j;
	int err = D_SHDMA_RET_OK;
	int result_chk = D_SHDMA_RET_OK;
	struct vm_area_struct *vma;
	unsigned long pfn = 0;
	ion_phys_addr_t src_phys = 0;
	unsigned long dst_phys = 0;
	size_t src_len;
	unsigned long trans_size = 0;
	unsigned long shdma_trans_num_rows = 0;
	unsigned long dma_trans_num_rows = 0;
	unsigned long dma_trans_num_rows_rem = 0;
	unsigned addr_offset = 0;
	struct ion_handle *shdma_src_handle;
	struct shdma_dmov_exec_cmdptr_cmd cmd[3];
	struct shdma_command_t shdma_cmd[D_SHDMA_CHANNEL_MAX];
	unsigned int id[D_SHDMA_CHANNEL_MAX] = { DMOV_SHDMA_CH1, DMOV_SHDMA_CH2, DMOV_SHDMA_CH3 };
	unsigned long width_yuv = 0;
	unsigned long height_y = 0;
	unsigned long height_uv = 0;
	unsigned long ysize_align = 0;
	unsigned long uvsize_align = 0;
	int ion_ret = 0;


	/** <ol><li>Start of processing */
	SHDMA_DEBUG_MSG_ENTER(0, 0, 0);

	/** <li>Acquire the driver write semaphore */
	down( &write_sem );

	/** <li>Initialization */
	/** <ol><li>NULL-check the arguments */
	if( file == NULL || buffer == NULL || count <= 0 || f_pos == NULL ){
		printk("***ERROR: argument NULL    file = %p  buffer = %p  count = 0x%x  f_pos = %p\n", file, buffer, count, f_pos );
		up( &write_sem );
		return -EINVAL;
	}

	/** <li>Copy the parameters from the caller */
	if (copy_from_user(&tci, buffer, sizeof(tci))){
		printk("***ERROR: failed to copy write data parameters.\n" );
		up( &write_sem );
		return -EFAULT;
	}

	/** <li>NULL-check the source and destination transfer addresses */
	if( tci[0].dst_handle == NULL || tci[0].src_handle == NULL ){
		printk("***ERROR: transfer address is NULL   src = %p  dst = %p\n", tci[0].src_handle, tci[0].dst_handle );
		up( &write_sem );
		return -EINVAL;
	}

	/** <li>Check the transfer width and height */
	if(( tci[0].height < D_SHDMA_CHANNEL_MAX ) || ( tci[0].src_stride == 0  )){
		printk("***ERROR: argument ERROR   height = %d  width = %ld\n", tci[0].height, tci[0].src_stride );
		up( &write_sem );
		return -EINVAL;
	}
	if(( tci[0].src_stride % D_SHDMA_ODD_CHECK ) != 0 ){	/* an odd width is impossible, so return an error */
		printk("***ERROR: argument ERROR, width is odd   width = %ld\n", tci[0].src_stride );
		up( &write_sem );
		return -EINVAL;
	}

	/** <li>Initialize internal variables */
	memset( &cmd, 0, sizeof(struct shdma_dmov_exec_cmdptr_cmd) * 3 );
	for( i = 0; i < D_SHDMA_CHANNEL_MAX; i++ ){
		memset( &shdma_cmd[i], 0, sizeof(struct shdma_command_t));
	}
	/** </ol>*/

	/** <li>Get the physical addresses */
	/** <ol><li>Get the source physical address */
	shdma_src_handle = (struct ion_handle *)tci[0].src_handle;
	ion_ret = ion_phys( shdma_src_handle->client, shdma_src_handle, &src_phys, &src_len);
	if( src_phys == 0 || src_len < 1 || ion_ret < 0 ){
		printk("***ERROR: failed to get src_phys.\n");
		up( &write_sem );
		return -EFAULT;
	}

	/** <li>Get the destination physical address */
	vma = find_vma( current->mm, (unsigned int )tci[0].dst_handle );
	if( vma == NULL ){
		printk("***ERROR: failed to get vma.\n");
		up( &write_sem );
		return -ENOENT;
	}
	if( follow_pfn( vma, (unsigned int)tci[0].dst_handle, &pfn ) != 0 ){	/* check the result instead of using a stale pfn */
		printk("***ERROR: failed to resolve pfn.\n");
		up( &write_sem );
		return -EFAULT;
	}
	dst_phys = __pfn_to_phys( pfn );
	/** </ol> */

	/** <li>Allocate the DMA transfer parameter buffers */
	for( i = 0; i < D_SHDMA_CHANNEL_MAX; i++ ){	/** <ol><li>Allocate one per DMA channel */
		/** <ol><li>Allocate the DMA transfer parameter area */
		shdma_cmd[i].cmd_ptr = kmalloc(sizeof(dmov_box), GFP_KERNEL | __GFP_DMA);
		if( shdma_cmd[i].cmd_ptr == NULL ){
			printk("***ERROR: failed to allocate buffer cmd_ptr  num = 0x%x .\n" , i);
			if( i != 0 ){
				for( j = 0; j < i; j++ ){	/* free every entry allocated so far */
					kfree(shdma_cmd[j].cmd_ptr);
				}
			}
			up( &write_sem );
			return -ENOSPC;
		}
	}
	for( i = 0; i < D_SHDMA_CHANNEL_MAX; i++ ){
		/** <li>Allocate the DMA transfer parameter head-address area */
		shdma_cmd[i].cmd_ptr_ptr = kmalloc(sizeof(u32), GFP_KERNEL | __GFP_DMA);
		if( shdma_cmd[i].cmd_ptr_ptr == NULL ){
			printk("***ERROR: failed to allocate buffer cmd_ptr_ptr  num = 0x%x .\n" , i);
			if( i != 0 ){
				for( j = 0; j < i; j++ ){	/* free every entry allocated so far */
					kfree(shdma_cmd[j].cmd_ptr_ptr);
				}
			}
			for( j = 0; j < D_SHDMA_CHANNEL_MAX; j++ ){
				kfree(shdma_cmd[j].cmd_ptr);
			}
			up( &write_sem );
			return -ENOSPC;
		}
	}
	/** </ol></ol> */

	/** <li>Compute the transfer size */
	/** <li>Adjust alignment */
	if(( tci[0].src_stride % D_SHDMA_ALIGN_128 ) != 0 ){	/* align the Y/UV plane width */
		width_yuv = ((( tci[0].src_stride /
				D_SHDMA_ALIGN_128 ) +
				D_SHDMA_ALIGN_ADJUST ) *
				D_SHDMA_ALIGN_128 );		/* align to 128 bytes */
	} else {
		width_yuv = tci[0].src_stride;
	}

	if(( tci[0].height % D_SHDMA_ALIGN_32 ) != 0 ){		/* align the Y-plane height */
		height_y = ((( tci[0].height /
				D_SHDMA_ALIGN_32 ) +
				D_SHDMA_ALIGN_ADJUST ) *
				D_SHDMA_ALIGN_32 );		/* align to 32 bytes */
	} else {
		height_y = tci[0].height;
	}

	if((( tci[0].height / D_SHDMA_ALIGN_HEIGHT_UV ) %
			D_SHDMA_ALIGN_32 ) != 0 ){		/* align the UV-plane height */
		height_uv = (((( tci[0].height /
				D_SHDMA_ALIGN_HEIGHT_UV ) /
				D_SHDMA_ALIGN_32 ) +
				D_SHDMA_ALIGN_ADJUST ) *
				D_SHDMA_ALIGN_32 );		/* align to 32 bytes */
	} else {
		height_uv = tci[0].height / D_SHDMA_ALIGN_HEIGHT_UV;
	}

	if(( width_yuv * height_y ) % D_SHDMA_ALIGN_8192 ){	/* align the Y-plane size */
		ysize_align = ((( width_yuv * height_y /
				D_SHDMA_ALIGN_8192 ) +
				D_SHDMA_ALIGN_ADJUST ) *
				D_SHDMA_ALIGN_8192 );		/* align to 8 KB */
	} else {
		ysize_align = width_yuv * height_y;
	}

	if(( width_yuv * height_uv ) % D_SHDMA_ALIGN_8192 ){	/* align the UV-plane size */
		uvsize_align = ((( width_yuv * height_uv /
				D_SHDMA_ALIGN_8192 ) +
				D_SHDMA_ALIGN_ADJUST ) *
				D_SHDMA_ALIGN_8192 );		/* align to 8 KB */
	} else {
		uvsize_align = width_yuv * height_uv;
	}

	shdma_trans_num_rows = (( ysize_align + uvsize_align ) /
					D_SHDMA_ALIGN_8192 );		/** <li>DMA box count is the YUV area divided by 8K */
	trans_size = D_SHDMA_ALIGN_8192;				/** <li>each DMA box transfer is 8K in size */
	dma_trans_num_rows = shdma_trans_num_rows / D_SHDMA_CHANNEL_MAX;	/** <li>box transfers per DMA channel */
	dma_trans_num_rows_rem = shdma_trans_num_rows % D_SHDMA_CHANNEL_MAX;	/** <li>remainder of box transfers per DMA channel */
	if( trans_size > D_SHDMA_TRANS_MAX_SIZE ){	/** <li>if one DMA box exceeds 65535 bytes the hardware cannot transfer it, so return an error */
		printk("***ERROR: size too large for DMA transfer.\n");
		for( i = 0; i < D_SHDMA_CHANNEL_MAX; i++ ){
			kfree(shdma_cmd[i].cmd_ptr);
			kfree(shdma_cmd[i].cmd_ptr_ptr);
		}
		up( &write_sem );
		return -EINVAL;
	}
	/** </ol> */

	/** <li>Set up the DMA transfer parameters */
	for( i = 0; i < D_SHDMA_CHANNEL_MAX; i++ ){	/** <ol><li>Set them up for each DMA channel */
		if( i == D_SHDMA_CHANNEL_MAX - 1){	/** <ol><li>the last DMA channel also carries the remainder of the box count */
			dma_trans_num_rows += dma_trans_num_rows_rem;
		}
		shdma_cmd[i].cmd_ptr->cmd = CMD_PTR_LP | CMD_MODE_BOX;	/** <li>box-mode transfer */
		shdma_cmd[i].cmd_ptr->src_row_addr = (unsigned int)src_phys + addr_offset;	/** <li>set the source address */
		shdma_cmd[i].cmd_ptr->dst_row_addr = (unsigned int)dst_phys + addr_offset;	/** <li>set the destination address */
		shdma_cmd[i].cmd_ptr->src_dst_len =			/** <li>set the per-box transfer size */
				(( trans_size & D_SHDMA_PARAM_MASK ) << D_SHDMA_SRC_PARAM_SHIFT ) |
				( trans_size & D_SHDMA_PARAM_MASK );
		shdma_cmd[i].cmd_ptr->num_rows =			/** <li>set the box transfer count */
				(( dma_trans_num_rows & D_SHDMA_PARAM_MASK ) << D_SHDMA_SRC_PARAM_SHIFT ) |
				( dma_trans_num_rows & D_SHDMA_PARAM_MASK );
		shdma_cmd[i].cmd_ptr->row_offset =			/** <li>set the transfer offset */
				(( trans_size & D_SHDMA_PARAM_MASK ) << D_SHDMA_SRC_PARAM_SHIFT ) |
				( trans_size & D_SHDMA_PARAM_MASK );
		/** <li>Advance the transfer address offset */
		addr_offset += trans_size * dma_trans_num_rows;
	}
	/** </ol></ol> */

	/** <li>Map the DMA transfer parameters */
	for( i = 0; i < D_SHDMA_CHANNEL_MAX; i++ ){	/** <ol><li>Map them for each DMA channel */
		/** <ol><li>Get the physical address of the DMA transfer parameter area */
		shdma_cmd[i].map_cmd = dma_map_single( NULL, shdma_cmd[i].cmd_ptr,
					sizeof(*shdma_cmd[i].cmd_ptr), DMA_TO_DEVICE );
		if( shdma_cmd[i].map_cmd == 0 ){
			printk("***ERROR: failed to map cmd_ptr.  num = 0x%x\n", i);
			if( i != 0 ){
				for( j = 0; j < i; j++ ){	/* unmap every entry mapped so far */
					dma_unmap_single( NULL, shdma_cmd[j].map_cmd,
						sizeof(*shdma_cmd[j].cmd_ptr), DMA_TO_DEVICE );
				}
			}
			for( j = 0; j < D_SHDMA_CHANNEL_MAX; j++ ){
				kfree(shdma_cmd[j].cmd_ptr_ptr);
				kfree(shdma_cmd[j].cmd_ptr);
			}
			up( &write_sem );
			return -EFAULT;
		}
		/** <li>Store the physical address of the DMA parameters in the parameter head area */
		*shdma_cmd[i].cmd_ptr_ptr = CMD_PTR_ADDR(shdma_cmd[i].map_cmd) | CMD_PTR_LP;
	}
	for( i = 0; i < D_SHDMA_CHANNEL_MAX; i++ ){
		/** <li>Get the physical address of the DMA parameter head area */
		err = shdma_cmd[i].map_cmd_ptr = dma_map_single( NULL, shdma_cmd[i].cmd_ptr_ptr,
					sizeof(*shdma_cmd[i].cmd_ptr_ptr), DMA_TO_DEVICE );
		if( err == 0 ){
			printk("***ERROR: failed to map cmd_ptr_ptr.  num = 0x%x\n", i);
			if( i != 0 ){
				for( j = 0; j < i; j++ ){	/* unmap every entry mapped so far */
					dma_unmap_single( NULL, shdma_cmd[j].map_cmd_ptr,
						sizeof(*shdma_cmd[j].cmd_ptr_ptr), DMA_TO_DEVICE );
				}
			}
			for( j = 0; j < D_SHDMA_CHANNEL_MAX; j++ ){
				dma_unmap_single( NULL, shdma_cmd[j].map_cmd,
					sizeof(*shdma_cmd[j].cmd_ptr), DMA_TO_DEVICE );
				kfree(shdma_cmd[j].cmd_ptr_ptr);
				kfree(shdma_cmd[j].cmd_ptr);
			}
			up( &write_sem );
			return -EFAULT;
		}
	}
	/** </ol></ol> */

	/** <li>Fill in the DMA transfer structures */
	for( i = 0; i < D_SHDMA_CHANNEL_MAX; i++ ){	/** <ol><li>Fill one per DMA channel */
		cmd[i].dmov_cmd.cmdptr = DMOV_CMD_PTR_LIST | DMOV_CMD_ADDR(shdma_cmd[i].map_cmd_ptr);
		cmd[i].dmov_cmd.complete_func = shdma_complete_func;
		cmd[i].dmov_cmd.exec_func = NULL;
		cmd[i].id = id[i];
		cmd[i].result = 0;
	}
	/** </ol> */

	/** <li>Set the DMA completion counter to the number of channels */
	atomic_set( &atomic_shdma, D_SHDMA_CHANNEL_MAX );

	/** <li>Call the DMA transfer start function */
	for( i = 0; i < D_SHDMA_CHANNEL_MAX; i++ ){
		msm_dmov_enqueue_cmd( cmd[i].id, &cmd[i].dmov_cmd );
	}

	/** <li>Wait until the DMA completion counter drops to 0 or below */
	wait_event( wq, ( atomic_read( &atomic_shdma ) <= 0 ));

	/** <li>Check the DMA transfer results */
	for( i = 0; i < D_SHDMA_CHANNEL_MAX; i++){	/** <ol><li>Check each DMA channel */
		if( cmd[i].result != D_SHDMA_DMOV_RESULT_OK ){	/** <li>if a DMA transfer failed, log it */
			result_chk = D_SHDMA_RET_NG;
			printk("***ERROR: dma id:%d result:0x%08x \n***flush: 0x%08x 0x%08x 0x%08x 0x%08x\n",
					id[i], cmd[i].result, cmd[i].err.flush[0],
					cmd[i].err.flush[1], cmd[i].err.flush[2], cmd[i].err.flush[3]);
		}
	}
	/** </ol>*/

	/** <li>Free the allocated memory */
	for( i = 0; i < D_SHDMA_CHANNEL_MAX; i++ ){
		dma_unmap_single( NULL, (dma_addr_t)shdma_cmd[i].map_cmd_ptr,
					sizeof(*shdma_cmd[i].cmd_ptr_ptr), DMA_TO_DEVICE );
		dma_unmap_single( NULL, shdma_cmd[i].map_cmd,
					sizeof(*shdma_cmd[i].cmd_ptr), DMA_TO_DEVICE );
		kfree(shdma_cmd[i].cmd_ptr_ptr);
		kfree(shdma_cmd[i].cmd_ptr);
	}

	/** <li>Return the DMA transfer result */
	if( result_chk == 0 ){
		ret = count;
	} else {
		ret = result_chk;
	}

	/** <li>Release the driver write semaphore */
	up( &write_sem );

	SHDMA_DEBUG_MSG_EXIT();
	/** <li>End of processing</ol> */

	return ret;
}
Code example #11
/**
 * get_vaddr_frames() - map virtual addresses to pfns
 * @start:	starting user address
 * @nr_frames:	number of pages / pfns from start to map
 * @write:	whether pages will be written to by the caller
 * @force:	whether to force write access even if user mapping is
 *		readonly. See description of the same argument of
 *		get_user_pages().
 * @vec:	structure which receives pages / pfns of the addresses mapped.
 *		It should have space for at least nr_frames entries.
 *
 * This function maps virtual addresses from @start and fills @vec structure
 * with page frame numbers or page pointers to corresponding pages (choice
 * depends on the type of the vma underlying the virtual address). If @start
 * belongs to a normal vma, the function grabs reference to each of the pages
 * to pin them in memory. If @start belongs to VM_IO | VM_PFNMAP vma, we don't
 * touch page structures and the caller must make sure pfns aren't reused for
 * anything else while he is using them.
 *
 * The function returns number of pages mapped which may be less than
 * @nr_frames. In particular we stop mapping if there are more vmas of
 * different type underlying the specified range of virtual addresses.
 * When the function isn't able to map a single page, it returns error.
 *
 * This function takes care of grabbing mmap_sem as necessary.
 */
int get_vaddr_frames(unsigned long start, unsigned int nr_frames,
		     bool write, bool force, struct frame_vector *vec)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int ret = 0;
	int err;
	int locked;

	if (nr_frames == 0)
		return 0;

	if (WARN_ON_ONCE(nr_frames > vec->nr_allocated))
		nr_frames = vec->nr_allocated;

	down_read(&mm->mmap_sem);
	locked = 1;
	vma = find_vma_intersection(mm, start, start + 1);
	if (!vma) {
		ret = -EFAULT;
		goto out;
	}
	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) {
		vec->got_ref = true;
		vec->is_pfns = false;
		ret = get_user_pages_locked(current, mm, start, nr_frames,
			write, force, (struct page **)(vec->ptrs), &locked);
		goto out;
	}

	vec->got_ref = false;
	vec->is_pfns = true;
	do {
		unsigned long *nums = frame_vector_pfns(vec);

		while (ret < nr_frames && start + PAGE_SIZE <= vma->vm_end) {
			err = follow_pfn(vma, start, &nums[ret]);
			if (err) {
				if (ret == 0)
					ret = err;
				goto out;
			}
			start += PAGE_SIZE;
			ret++;
		}
		/*
		 * We stop if we have enough pages or if VMA doesn't completely
		 * cover the tail page.
		 */
		if (ret >= nr_frames || start < vma->vm_end)
			break;
		vma = find_vma_intersection(mm, start, start + 1);
	} while (vma && vma->vm_flags & (VM_IO | VM_PFNMAP));
out:
	if (locked)
		up_read(&mm->mmap_sem);
	if (!ret)
		ret = -EFAULT;
	if (ret > 0)
		vec->nr_frames = ret;
	return ret;
}