/**
 * _vb2_ion_get_vma() - lock userspace mapped memory
 * @vaddr:	starting virtual address of the area to be verified
 * @size:	size of the area
 * @res_vma:	will return locked copy of struct vm_area for the given area
 *
 * This function looks up the memory area of size @size mapped at @vaddr. If
 * the whole area is covered by a single VMA, the virtual memory area is
 * locked and @res_vma is filled with a locked copy of it.
 *
 * Returns 0 on success.
 */
static int _vb2_ion_get_vma(unsigned long vaddr, unsigned long size,
                            struct vm_area_struct **res_vma)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long start, end;
        int ret = -EFAULT;

        start = vaddr;
        end = start + size;

        down_read(&mm->mmap_sem);
        vma = find_vma(mm, start);

        if (vma == NULL || vma->vm_end < end)
                goto done;

        /* Lock vma and return to the caller */
        *res_vma = vb2_get_vma(vma);
        if (*res_vma == NULL) {
                ret = -ENOMEM;
                goto done;
        }

        ret = 0;

done:
        up_read(&mm->mmap_sem);
        return ret;
}
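The locked VMA returned here must eventually be released with vb2_put_vma(). A minimal caller sketch, assuming a hypothetical buffer structure that only tracks the locked VMA (ion_userptr_buf, ion_userptr_acquire and ion_userptr_release are illustrative names, not part of the driver):

/* Hypothetical caller: pin the VMA on acquire, drop it on release. */
struct ion_userptr_buf {
        struct vm_area_struct *vma;     /* locked copy from _vb2_ion_get_vma() */
};

static int ion_userptr_acquire(struct ion_userptr_buf *buf,
                               unsigned long vaddr, unsigned long size)
{
        /* Takes mmap_sem internally and hands back a locked VMA copy. */
        return _vb2_ion_get_vma(vaddr, size, &buf->vma);
}

static void ion_userptr_release(struct ion_userptr_buf *buf)
{
        /* Matching release for the reference taken via vb2_get_vma(). */
        vb2_put_vma(buf->vma);
        buf->vma = NULL;
}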
/**
 * vb2_get_contig_userptr() - lock physically contiguous userspace mapped memory
 * @vaddr:	starting virtual address of the area to be verified
 * @size:	size of the area
 * @res_vma:	will return locked copy of struct vm_area for the given area
 * @res_pa:	will return physical address for the given vaddr
 *
 * This function will go through memory area of size @size mapped at @vaddr and
 * verify that the underlying physical pages are contiguous. If they are
 * contiguous, the virtual memory area is locked, @res_vma is filled with the
 * copy and @res_pa is set to the physical address of the buffer.
 *
 * Returns 0 on success.
 */
int vb2_get_contig_userptr(unsigned long vaddr, unsigned long size,
                           struct vm_area_struct **res_vma, dma_addr_t *res_pa)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long offset, start, end;
        unsigned long this_pfn, prev_pfn;
        dma_addr_t pa = 0;
        int ret = -EFAULT;

        start = vaddr;
        offset = start & ~PAGE_MASK;
        end = start + size;

        down_read(&mm->mmap_sem);
        vma = find_vma(mm, start);

        if (vma == NULL || vma->vm_end < end)
                goto done;

        for (prev_pfn = 0; start < end; start += PAGE_SIZE) {
                ret = follow_pfn(vma, start, &this_pfn);
                if (ret)
                        goto done;

                if (prev_pfn == 0)
                        pa = this_pfn << PAGE_SHIFT;
                else if (this_pfn != prev_pfn + 1) {
                        ret = -EFAULT;
                        goto done;
                }
                prev_pfn = this_pfn;
        }

        /*
         * Memory is contiguous, lock vma and return to the caller
         */
        *res_vma = vb2_get_vma(vma);
        if (*res_vma == NULL) {
                ret = -ENOMEM;
                goto done;
        }

        *res_pa = pa + offset;
        ret = 0;

done:
        up_read(&mm->mmap_sem);
        return ret;
}
int vb2_get_contig_userptr(unsigned long vaddr, unsigned long size,
                           struct vm_area_struct **res_vma, dma_addr_t *res_pa)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long offset, start, end;
        unsigned long this_pfn, prev_pfn;
        dma_addr_t pa = 0;

        start = vaddr;
        offset = start & ~PAGE_MASK;
        end = start + size;

        vma = find_vma(mm, start);
        if (vma == NULL || vma->vm_end < end)
                return -EFAULT;

        for (prev_pfn = 0; start < end; start += PAGE_SIZE) {
                int ret = follow_pfn(vma, start, &this_pfn);

                if (ret)
                        return ret;

                if (prev_pfn == 0)
                        pa = this_pfn << PAGE_SHIFT;
                else if (this_pfn != prev_pfn + 1)
                        return -EFAULT;
                prev_pfn = this_pfn;
        }

        *res_vma = vb2_get_vma(vma);
        if (*res_vma == NULL)
                return -ENOMEM;

        *res_pa = pa + offset;
        return 0;
}
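Both variants are consumed the same way by a USERPTR memop: the allocator records the returned bus address and the locked VMA. A sketch of such a caller, modeled on the videobuf2-dma-contig get_userptr path; the vb2_dc_buf layout shown here is a simplified assumption:

/* Simplified vb2_dc_buf: the real structure carries more state. */
struct vb2_dc_buf {
        unsigned long           size;
        dma_addr_t              dma_addr;
        struct vm_area_struct   *vma;
};

static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
                                unsigned long size, int write)
{
        struct vb2_dc_buf *buf;
        struct vm_area_struct *vma;
        dma_addr_t dma_addr = 0;
        int ret;

        buf = kzalloc(sizeof(*buf), GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        /* Verifies contiguity, locks the VMA and yields the bus address. */
        ret = vb2_get_contig_userptr(vaddr, size, &vma, &dma_addr);
        if (ret) {
                kfree(buf);
                return ERR_PTR(ret);
        }

        buf->size = size;
        buf->dma_addr = dma_addr;
        buf->vma = vma;
        return buf;
}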
static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
                                    unsigned long size,
                                    enum dma_data_direction dma_dir)
{
        struct vb2_dma_sg_conf *conf = alloc_ctx;
        struct vb2_dma_sg_buf *buf;
        unsigned long first, last;
        int num_pages_from_user;
        struct vm_area_struct *vma;
        struct sg_table *sgt;
        DEFINE_DMA_ATTRS(attrs);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)
        dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
#endif

        buf = kzalloc(sizeof *buf, GFP_KERNEL);
        if (!buf)
                return NULL;

        buf->vaddr = NULL;
        buf->dev = conf->dev;
        buf->dma_dir = dma_dir;
        buf->offset = vaddr & ~PAGE_MASK;
        buf->size = size;
        buf->dma_sgt = &buf->sg_table;

        first = (vaddr & PAGE_MASK) >> PAGE_SHIFT;
        last = ((vaddr + size - 1) & PAGE_MASK) >> PAGE_SHIFT;
        buf->num_pages = last - first + 1;

        buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
                             GFP_KERNEL);
        if (!buf->pages)
                goto userptr_fail_alloc_pages;

        vma = find_vma(current->mm, vaddr);
        if (!vma) {
                dprintk(1, "no vma for address %lu\n", vaddr);
                goto userptr_fail_find_vma;
        }

        if (vma->vm_end < vaddr + size) {
                dprintk(1, "vma at %lu is too small for %lu bytes\n",
                        vaddr, size);
                goto userptr_fail_find_vma;
        }

        buf->vma = vb2_get_vma(vma);
        if (!buf->vma) {
                dprintk(1, "failed to copy vma\n");
                goto userptr_fail_find_vma;
        }

        if (vma_is_io(buf->vma)) {
                for (num_pages_from_user = 0;
                     num_pages_from_user < buf->num_pages;
                     ++num_pages_from_user, vaddr += PAGE_SIZE) {
                        unsigned long pfn;

                        if (follow_pfn(vma, vaddr, &pfn)) {
                                dprintk(1, "no page for address %lu\n", vaddr);
                                break;
                        }
                        buf->pages[num_pages_from_user] = pfn_to_page(pfn);
                }
        } else
                num_pages_from_user = get_user_pages(current, current->mm,
                                             vaddr & PAGE_MASK,
                                             buf->num_pages,
                                             buf->dma_dir == DMA_FROM_DEVICE,
                                             1, /* force */
                                             buf->pages,
                                             NULL);

        if (num_pages_from_user != buf->num_pages)
                goto userptr_fail_get_user_pages;

        if (sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
                        buf->num_pages, buf->offset, size, 0))
                goto userptr_fail_alloc_table_from_pages;

        sgt = &buf->sg_table;
        /*
         * No need to sync to the device, this will happen later when the
         * prepare() memop is called.
         */
        sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
                                      buf->dma_dir, &attrs);
        if (!sgt->nents)
                goto userptr_fail_map;

        return buf;

userptr_fail_map:
        sg_free_table(&buf->sg_table);
userptr_fail_alloc_table_from_pages:
userptr_fail_get_user_pages:
        dprintk(1, "get_user_pages requested/got: %d/%d\n",
                buf->num_pages, num_pages_from_user);
        if (!vma_is_io(buf->vma))
                while (--num_pages_from_user >= 0)
                        put_page(buf->pages[num_pages_from_user]);
        vb2_put_vma(buf->vma);
userptr_fail_find_vma:
        kfree(buf->pages);
userptr_fail_alloc_pages:
        kfree(buf);
        return NULL;
}
static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
                                    unsigned long size, int write)
{
        struct vb2_dma_sg_buf *buf;
        unsigned long first, last;
        int num_pages_from_user;
        struct vm_area_struct *vma;

        buf = kzalloc(sizeof *buf, GFP_KERNEL);
        if (!buf)
                return NULL;

        buf->vaddr = NULL;
        buf->write = write;
        buf->offset = vaddr & ~PAGE_MASK;
        buf->size = size;

        first = (vaddr & PAGE_MASK) >> PAGE_SHIFT;
        last = ((vaddr + size - 1) & PAGE_MASK) >> PAGE_SHIFT;
        buf->num_pages = last - first + 1;

        buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
                             GFP_KERNEL);
        if (!buf->pages)
                goto userptr_fail_alloc_pages;

        vma = find_vma(current->mm, vaddr);
        if (!vma) {
                dprintk(1, "no vma for address %lu\n", vaddr);
                goto userptr_fail_find_vma;
        }

        if (vma->vm_end < vaddr + size) {
                dprintk(1, "vma at %lu is too small for %lu bytes\n",
                        vaddr, size);
                goto userptr_fail_find_vma;
        }

        buf->vma = vb2_get_vma(vma);
        if (!buf->vma) {
                dprintk(1, "failed to copy vma\n");
                goto userptr_fail_find_vma;
        }

        if (vma_is_io(buf->vma)) {
                for (num_pages_from_user = 0;
                     num_pages_from_user < buf->num_pages;
                     ++num_pages_from_user, vaddr += PAGE_SIZE) {
                        unsigned long pfn;

                        if (follow_pfn(buf->vma, vaddr, &pfn)) {
                                dprintk(1, "no page for address %lu\n", vaddr);
                                break;
                        }
                        buf->pages[num_pages_from_user] = pfn_to_page(pfn);
                }
        } else
                num_pages_from_user = get_user_pages(current, current->mm,
                                                     vaddr & PAGE_MASK,
                                                     buf->num_pages,
                                                     write,
                                                     1, /* force */
                                                     buf->pages,
                                                     NULL);

        if (num_pages_from_user != buf->num_pages)
                goto userptr_fail_get_user_pages;

        if (sg_alloc_table_from_pages(&buf->sg_table, buf->pages,
                        buf->num_pages, buf->offset, size, 0))
                goto userptr_fail_alloc_table_from_pages;

        return buf;

userptr_fail_alloc_table_from_pages:
userptr_fail_get_user_pages:
        dprintk(1, "get_user_pages requested/got: %d/%d\n",
                buf->num_pages, num_pages_from_user);
        if (!vma_is_io(buf->vma))
                while (--num_pages_from_user >= 0)
                        put_page(buf->pages[num_pages_from_user]);
        vb2_put_vma(buf->vma);
userptr_fail_find_vma:
        kfree(buf->pages);
userptr_fail_alloc_pages:
        kfree(buf);
        return NULL;
}
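In both versions, the put path has to undo exactly what succeeded here: free the sg_table, drop page references only when they came from get_user_pages() (i.e. !vma_is_io), then release the VMA. A sketch of that release path, assuming the buf->write layout of the older version above; the exact body differs between kernel versions:

/* Illustrative release path mirroring the acquire logic above. */
static void vb2_dma_sg_put_userptr(void *buf_priv)
{
        struct vb2_dma_sg_buf *buf = buf_priv;
        int i = buf->num_pages;

        sg_free_table(&buf->sg_table);
        if (!vma_is_io(buf->vma)) {
                /* Only the get_user_pages() path took real page references. */
                while (--i >= 0) {
                        if (buf->write)
                                set_page_dirty_lock(buf->pages[i]);
                        put_page(buf->pages[i]);
                }
        }
        vb2_put_vma(buf->vma);
        kfree(buf->pages);
        kfree(buf);
}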