static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
				    unsigned long size,
				    enum dma_data_direction dma_dir)
{
	struct vb2_dma_sg_conf *conf = alloc_ctx;
	struct vb2_dma_sg_buf *buf;
	struct sg_table *sgt;
	DEFINE_DMA_ATTRS(attrs);
	struct frame_vector *vec;

	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->vaddr = NULL;
	buf->dev = conf->dev;
	buf->dma_dir = dma_dir;
	buf->offset = vaddr & ~PAGE_MASK;
	buf->size = size;
	buf->dma_sgt = &buf->sg_table;
	vec = vb2_create_framevec(vaddr, size, buf->dma_dir == DMA_FROM_DEVICE);
	if (IS_ERR(vec))
		goto userptr_fail_pfnvec;
	buf->vec = vec;

	buf->pages = frame_vector_pages(vec);
	if (IS_ERR(buf->pages))
		goto userptr_fail_sgtable;
	buf->num_pages = frame_vector_count(vec);

	if (sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
				      buf->num_pages, buf->offset, size, 0))
		goto userptr_fail_sgtable;

	sgt = &buf->sg_table;
	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				      buf->dma_dir, &attrs);
	if (!sgt->nents)
		goto userptr_fail_map;

	return buf;

userptr_fail_map:
	sg_free_table(&buf->sg_table);
userptr_fail_sgtable:
	vb2_destroy_framevec(vec);
userptr_fail_pfnvec:
	kfree(buf);
	return NULL;
}
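/*
 * vb2_dma_sg_get_userptr() above and vb2_vmalloc_get_userptr() below both
 * pin the userspace range through vb2_create_framevec(). A minimal sketch
 * of that helper, assuming the same-era get_vaddr_frames(start, nr, write,
 * force, vec) signature (the in-tree version lives in videobuf2-memops.c):
 * it rounds the byte range out to whole pages, allocates a frame vector,
 * and pins the pages behind it.
 */
struct frame_vector *vb2_create_framevec(unsigned long start,
					 unsigned long length,
					 bool write)
{
	int ret;
	unsigned long first, last;
	unsigned long nr;
	struct frame_vector *vec;

	/* Round the byte range [start, start + length) out to whole pages. */
	first = start >> PAGE_SHIFT;
	last = (start + length - 1) >> PAGE_SHIFT;
	nr = last - first + 1;
	vec = frame_vector_create(nr);
	if (!vec)
		return ERR_PTR(-ENOMEM);
	/* Pin the pages (or record pfns for VM_IO/VM_PFNMAP areas). */
	ret = get_vaddr_frames(start & PAGE_MASK, nr, write, true, vec);
	if (ret < 0)
		goto out_destroy;
	/* Only a complete set of pinned frames is acceptable. */
	if (ret != nr) {
		ret = -EFAULT;
		goto out_release;
	}
	return vec;

out_release:
	put_vaddr_frames(vec);
out_destroy:
	frame_vector_destroy(vec);
	return ERR_PTR(ret);
}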
static void *vb2_vmalloc_get_userptr(struct device *dev, unsigned long vaddr,
				     unsigned long size,
				     enum dma_data_direction dma_dir)
{
	struct vb2_vmalloc_buf *buf;
	struct frame_vector *vec;
	int n_pages, offset, i;
	int ret = -ENOMEM;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dma_dir = dma_dir;
	offset = vaddr & ~PAGE_MASK;
	buf->size = size;
	vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE);
	if (IS_ERR(vec)) {
		ret = PTR_ERR(vec);
		goto fail_pfnvec_create;
	}
	buf->vec = vec;
	n_pages = frame_vector_count(vec);
	if (frame_vector_to_pages(vec) < 0) {
		unsigned long *nums = frame_vector_pfns(vec);

		/*
		 * We cannot get page pointers for these pfns. Check memory is
		 * physically contiguous and use direct mapping.
		 */
		for (i = 1; i < n_pages; i++)
			if (nums[i-1] + 1 != nums[i])
				goto fail_map;
		buf->vaddr = (__force void *)
			ioremap_nocache(nums[0] << PAGE_SHIFT, size);
	} else {
		buf->vaddr = vm_map_ram(frame_vector_pages(vec), n_pages, -1,
					PAGE_KERNEL);
	}

	if (!buf->vaddr)
		goto fail_map;
	buf->vaddr += offset;
	return buf;

fail_map:
	vb2_destroy_framevec(vec);
fail_pfnvec_create:
	kfree(buf);
	return ERR_PTR(ret);
}
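/*
 * The fields consulted in the functions below (got_ref, is_pfns,
 * nr_frames) belong to the frame-vector bookkeeping structure. For
 * reference, a sketch of its definition as found in include/linux/mm.h of
 * the same era: a single flexible array holds either pinned struct page
 * pointers or raw pfns, with flags recording which representation is
 * current and whether page references are held.
 */
struct frame_vector {
	unsigned int nr_allocated;	/* Number of frames we have space for */
	unsigned int nr_frames;		/* Number of frames stored in ptrs array */
	bool got_ref;			/* Did we pin pages by getting page refs? */
	bool is_pfns;			/* Does the array contain pages or pfns? */
	void *ptrs[0];			/* Array of pinned pfns / pages. Use
					 * frame_vector_pages() or
					 * frame_vector_pfns() for access. */
};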
/**
 * put_vaddr_frames() - drop references to pages if get_vaddr_frames() acquired
 *			them
 * @vec:	frame vector to put
 *
 * Drop references to pages if get_vaddr_frames() acquired them. We also
 * invalidate the frame vector so that it is prepared for the next call into
 * get_vaddr_frames().
 */
void put_vaddr_frames(struct frame_vector *vec)
{
	int i;
	struct page **pages;

	if (!vec->got_ref)
		goto out;
	pages = frame_vector_pages(vec);
	/*
	 * frame_vector_pages() might have needed to do a conversion when
	 * get_vaddr_frames() got pages but vec was later converted to pfns.
	 * But it shouldn't really fail to convert pfns back...
	 */
	if (WARN_ON(IS_ERR(pages)))
		goto out;
	for (i = 0; i < vec->nr_frames; i++)
		put_page(pages[i]);
	vec->got_ref = false;
out:
	vec->nr_frames = 0;
}
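/*
 * A hypothetical usage sketch (pin_user_range() is illustrative, not part
 * of the original code): the canonical lifecycle pairs
 * frame_vector_create() and get_vaddr_frames() with put_vaddr_frames()
 * and frame_vector_destroy(), exactly the pattern the vb2 helpers wrap.
 */
static int pin_user_range(unsigned long uaddr, unsigned int nr_pages,
			  bool write)
{
	struct frame_vector *vec;
	int ret;

	vec = frame_vector_create(nr_pages);
	if (!vec)
		return -ENOMEM;
	ret = get_vaddr_frames(uaddr & PAGE_MASK, nr_pages, write, true, vec);
	if (ret < 0) {
		frame_vector_destroy(vec);
		return ret;
	}
	/* ... use frame_vector_pages(vec) or frame_vector_pfns(vec) ... */
	put_vaddr_frames(vec);		/* drop page refs, reset nr_frames */
	frame_vector_destroy(vec);	/* free the vector itself */
	return 0;
}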
static void vb2_vmalloc_put_userptr(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	unsigned long vaddr = (unsigned long)buf->vaddr & PAGE_MASK;
	unsigned int i;
	struct page **pages;
	unsigned int n_pages;

	if (!buf->vec->is_pfns) {
		n_pages = frame_vector_count(buf->vec);
		pages = frame_vector_pages(buf->vec);
		if (vaddr)
			vm_unmap_ram((void *)vaddr, n_pages);
		if (buf->dma_dir == DMA_FROM_DEVICE)
			for (i = 0; i < n_pages; i++)
				set_page_dirty_lock(pages[i]);
	} else {
		iounmap((__force void __iomem *)buf->vaddr);
	}
	vb2_destroy_framevec(buf->vec);
	kfree(buf);
}
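/*
 * The vb2_destroy_framevec() helper called above (and on the error paths
 * of both get_userptr() routines) is, in essence, just the two
 * frame-vector release steps in order; a sketch of the videobuf2-memops.c
 * version:
 */
void vb2_destroy_framevec(struct frame_vector *vec)
{
	put_vaddr_frames(vec);		/* release page references, if any */
	frame_vector_destroy(vec);	/* free the frame vector itself */
}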