int virtio_gpu_object_get_sg_table(struct virtio_gpu_device *qdev,
				   struct virtio_gpu_object *bo)
{
	int ret;
	struct page **pages = bo->tbo.ttm->pages;
	int nr_pages = bo->tbo.num_pages;

	/* wtf swapping */
	if (bo->pages)
		return 0;

	if (bo->tbo.ttm->state == tt_unpopulated)
		bo->tbo.ttm->bdev->driver->ttm_tt_populate(bo->tbo.ttm);
	bo->pages = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!bo->pages)
		goto out;

	ret = sg_alloc_table_from_pages(bo->pages, pages, nr_pages, 0,
					nr_pages << PAGE_SHIFT, GFP_KERNEL);
	if (ret)
		goto out;
	return 0;
out:
	kfree(bo->pages);
	bo->pages = NULL;
	return -ENOMEM;
}
static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
				    unsigned long size,
				    enum dma_data_direction dma_dir)
{
	struct vb2_dma_sg_conf *conf = alloc_ctx;
	struct vb2_dma_sg_buf *buf;
	struct sg_table *sgt;
	DEFINE_DMA_ATTRS(attrs);
	struct frame_vector *vec;

	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->vaddr = NULL;
	buf->dev = conf->dev;
	buf->dma_dir = dma_dir;
	buf->offset = vaddr & ~PAGE_MASK;
	buf->size = size;
	buf->dma_sgt = &buf->sg_table;
	vec = vb2_create_framevec(vaddr, size, buf->dma_dir == DMA_FROM_DEVICE);
	if (IS_ERR(vec))
		goto userptr_fail_pfnvec;
	buf->vec = vec;

	buf->pages = frame_vector_pages(vec);
	if (IS_ERR(buf->pages))
		goto userptr_fail_sgtable;
	buf->num_pages = frame_vector_count(vec);

	if (sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
			buf->num_pages, buf->offset, size, 0))
		goto userptr_fail_sgtable;

	sgt = &buf->sg_table;
	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				      buf->dma_dir, &attrs);
	if (!sgt->nents)
		goto userptr_fail_map;

	return buf;

userptr_fail_map:
	sg_free_table(&buf->sg_table);
userptr_fail_sgtable:
	vb2_destroy_framevec(vec);
userptr_fail_pfnvec:
	kfree(buf);
	return NULL;
}
static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size,
			      gfp_t gfp_flags)
{
	struct vb2_dma_sg_buf *buf;
	int ret;
	int num_pages;

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->vaddr = NULL;
	buf->write = 0;
	buf->offset = 0;
	buf->size = size;
	/* size is already page aligned */
	buf->num_pages = size >> PAGE_SHIFT;

	buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
			     GFP_KERNEL);
	if (!buf->pages)
		goto fail_pages_array_alloc;

	ret = vb2_dma_sg_alloc_compacted(buf, gfp_flags);
	if (ret)
		goto fail_pages_alloc;

	ret = sg_alloc_table_from_pages(&buf->sg_table, buf->pages,
			buf->num_pages, 0, size, gfp_flags);
	if (ret)
		goto fail_table_alloc;

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dma_sg_put;
	buf->handler.arg = buf;

	atomic_inc(&buf->refcount);

	dprintk(1, "%s: Allocated buffer of %d pages\n",
		__func__, buf->num_pages);
	return buf;

fail_table_alloc:
	num_pages = buf->num_pages;
	while (num_pages--)
		__free_page(buf->pages[num_pages]);
fail_pages_alloc:
	kfree(buf->pages);
fail_pages_array_alloc:
	kfree(buf);
	return NULL;
}
struct tee_shm *tee_shm_alloc(struct tee *tee, size_t size, uint32_t flags)
{
	struct tee_shm *shm;
	unsigned long pfn;
	unsigned int nr_pages;
	struct page *page;
	int ret;

	INMSG();

	shm = tee->ops->alloc(tee, size, flags);
	if (IS_ERR_OR_NULL(shm)) {
		dev_err(_DEV(tee),
			"%s: allocation failed (s=%d,flags=0x%08x) err=%ld\n",
			__func__, (int)size, flags, PTR_ERR(shm));
		goto exit;
	}

	shm->tee = tee;

	dev_dbg(_DEV(tee), "%s: shm=%p, paddr=%p,s=%d/%d app=\"%s\" pid=%d\n",
		__func__, shm, (void *)shm->paddr, (int)shm->size_req,
		(int)shm->size_alloc, current->comm, current->pid);

	pfn = shm->paddr >> PAGE_SHIFT;
	page = pfn_to_page(pfn);
	if (IS_ERR_OR_NULL(page)) {
		dev_err(_DEV(tee), "%s: pfn_to_page(%lx) failed\n",
			__func__, pfn);
		tee->ops->free(shm);
		return (struct tee_shm *)page;
	}

	/* Only one page of contiguous physical memory */
	nr_pages = 1;
	ret = sg_alloc_table_from_pages(&shm->sgt, &page, nr_pages, 0,
					nr_pages * PAGE_SIZE, GFP_KERNEL);
	if (IS_ERR_VALUE(ret)) {
		dev_err(_DEV(tee), "%s: sg_alloc_table_from_pages() failed\n",
			__func__);
		tee->ops->free(shm);
		shm = ERR_PTR(ret);
	}

exit:
	OUTMSGX(shm);
	return shm;
}
/**
 * vmw_ttm_map_dma - Make sure TTM pages are visible to the device
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Select the correct function for and make sure the TTM pages are
 * visible to the device. Allocate storage for the device mappings.
 * If a mapping has already been performed, indicated by the storage
 * pointer being non NULL, the function returns success.
 */
static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct vmw_private *dev_priv = vmw_tt->dev_priv;
	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
	struct vmw_sg_table *vsgt = &vmw_tt->vsgt;
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false
	};
	struct vmw_piter iter;
	dma_addr_t old;
	int ret = 0;
	static size_t sgl_size;
	static size_t sgt_size;

	if (vmw_tt->mapped)
		return 0;

	vsgt->mode = dev_priv->map_mode;
	vsgt->pages = vmw_tt->dma_ttm.ttm.pages;
	vsgt->num_pages = vmw_tt->dma_ttm.ttm.num_pages;
	vsgt->addrs = vmw_tt->dma_ttm.dma_address;
	vsgt->sgt = &vmw_tt->sgt;

	switch (dev_priv->map_mode) {
	case vmw_dma_map_bind:
	case vmw_dma_map_populate:
		if (unlikely(!sgl_size)) {
			sgl_size = ttm_round_pot(sizeof(struct scatterlist));
			sgt_size = ttm_round_pot(sizeof(struct sg_table));
		}
		vmw_tt->sg_alloc_size = sgt_size + sgl_size * vsgt->num_pages;
		ret = ttm_mem_global_alloc(glob, vmw_tt->sg_alloc_size, &ctx);
		if (unlikely(ret != 0))
			return ret;

		ret = sg_alloc_table_from_pages(&vmw_tt->sgt, vsgt->pages,
						vsgt->num_pages, 0,
						(unsigned long)
						vsgt->num_pages << PAGE_SHIFT,
						GFP_KERNEL);
		if (unlikely(ret != 0))
			goto out_sg_alloc_fail;

		if (vsgt->num_pages > vmw_tt->sgt.nents) {
			uint64_t over_alloc =
				sgl_size * (vsgt->num_pages -
					    vmw_tt->sgt.nents);

			ttm_mem_global_free(glob, over_alloc);
			vmw_tt->sg_alloc_size -= over_alloc;
		}

		ret = vmw_ttm_map_for_dma(vmw_tt);
		if (unlikely(ret != 0))
			goto out_map_fail;

		break;
	default:
		break;
	}

	old = ~((dma_addr_t) 0);
	vmw_tt->vsgt.num_regions = 0;
	for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
		dma_addr_t cur = vmw_piter_dma_addr(&iter);

		if (cur != old + PAGE_SIZE)
			vmw_tt->vsgt.num_regions++;
		old = cur;
	}

	vmw_tt->mapped = true;
	return 0;

out_map_fail:
	sg_free_table(vmw_tt->vsgt.sgt);
	vmw_tt->vsgt.sgt = NULL;
out_sg_alloc_fail:
	ttm_mem_global_free(glob, vmw_tt->sg_alloc_size);
	return ret;
}

/**
 * vmw_ttm_unmap_dma - Tear down any TTM page device mappings
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Tear down any previously set up device DMA mappings and free
 * any storage space allocated for them. If there are no mappings set up,
 * this function is a NOP.
 */
static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct vmw_private *dev_priv = vmw_tt->dev_priv;

	if (!vmw_tt->vsgt.sgt)
		return;

	switch (dev_priv->map_mode) {
	case vmw_dma_map_bind:
	case vmw_dma_map_populate:
		vmw_ttm_unmap_from_dma(vmw_tt);
		sg_free_table(vmw_tt->vsgt.sgt);
		vmw_tt->vsgt.sgt = NULL;
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_tt->sg_alloc_size);
		break;
	default:
		break;
	}
	vmw_tt->mapped = false;
}

/**
 * vmw_bo_map_dma - Make sure buffer object pages are visible to the device
 *
 * @bo: Pointer to a struct ttm_buffer_object
 *
 * Wrapper around vmw_ttm_map_dma, that takes a TTM buffer object pointer
 * instead of a pointer to a struct vmw_ttm_backend as argument.
 * Note that the buffer object must be either pinned or reserved before
 * calling this function.
 */
int vmw_bo_map_dma(struct ttm_buffer_object *bo)
{
	struct vmw_ttm_tt *vmw_tt =
		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	return vmw_ttm_map_dma(vmw_tt);
}

/**
 * vmw_bo_unmap_dma - Make sure buffer object pages are visible to the device
 *
 * @bo: Pointer to a struct ttm_buffer_object
 *
 * Wrapper around vmw_ttm_unmap_dma, that takes a TTM buffer object pointer
 * instead of a pointer to a struct vmw_ttm_backend as argument.
 */
void vmw_bo_unmap_dma(struct ttm_buffer_object *bo)
{
	struct vmw_ttm_tt *vmw_tt =
		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	vmw_ttm_unmap_dma(vmw_tt);
}

/**
 * vmw_bo_sg_table - Return a struct vmw_sg_table object for a
 * TTM buffer object
 *
 * @bo: Pointer to a struct ttm_buffer_object
 *
 * Returns a pointer to a struct vmw_sg_table object. The object should
 * not be freed after use.
 * Note that for the device addresses to be valid, the buffer object must
 * either be reserved or pinned.
 */
const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo)
{
	struct vmw_ttm_tt *vmw_tt =
		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	return &vmw_tt->vsgt;
}

static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
	int ret;

	ret = vmw_ttm_map_dma(vmw_be);
	if (unlikely(ret != 0))
		return ret;

	vmw_be->gmr_id = bo_mem->start;
	vmw_be->mem_type = bo_mem->mem_type;

	switch (bo_mem->mem_type) {
	case VMW_PL_GMR:
		return vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt,
				    ttm->num_pages, vmw_be->gmr_id);
	case VMW_PL_MOB:
		if (unlikely(vmw_be->mob == NULL)) {
			vmw_be->mob = vmw_mob_create(ttm->num_pages);
			if (unlikely(vmw_be->mob == NULL))
				return -ENOMEM;
		}

		return vmw_mob_bind(vmw_be->dev_priv, vmw_be->mob,
				    &vmw_be->vsgt, ttm->num_pages,
				    vmw_be->gmr_id);
	default:
		BUG();
	}
	return 0;
}

static int vmw_ttm_unbind(struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	switch (vmw_be->mem_type) {
	case VMW_PL_GMR:
		vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
		break;
	case VMW_PL_MOB:
		vmw_mob_unbind(vmw_be->dev_priv, vmw_be->mob);
		break;
	default:
		BUG();
	}

	if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind)
		vmw_ttm_unmap_dma(vmw_be);

	return 0;
}

static void vmw_ttm_destroy(struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	vmw_ttm_unmap_dma(vmw_be);
	if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
		ttm_dma_tt_fini(&vmw_be->dma_ttm);
	else
		ttm_tt_fini(ttm);

	if (vmw_be->mob)
		vmw_mob_destroy(vmw_be->mob);

	kfree(vmw_be);
}

static int vmw_ttm_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
	struct vmw_ttm_tt *vmw_tt =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
	struct vmw_private *dev_priv = vmw_tt->dev_priv;
	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
		size_t size =
			ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));
		ret = ttm_mem_global_alloc(glob, size, ctx);
		if (unlikely(ret != 0))
			return ret;

		ret = ttm_dma_populate(&vmw_tt->dma_ttm, dev_priv->dev->dev,
				       ctx);
		if (unlikely(ret != 0))
			ttm_mem_global_free(glob, size);
	} else
		ret = ttm_pool_populate(ttm, ctx);

	return ret;
}

static void vmw_ttm_unpopulate(struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
						 dma_ttm.ttm);
	struct vmw_private *dev_priv = vmw_tt->dev_priv;
	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);

	if (vmw_tt->mob) {
		vmw_mob_destroy(vmw_tt->mob);
		vmw_tt->mob = NULL;
	}

	vmw_ttm_unmap_dma(vmw_tt);
	if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
		size_t size =
			ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));

		ttm_dma_unpopulate(&vmw_tt->dma_ttm, dev_priv->dev->dev);
		ttm_mem_global_free(glob, size);
	} else
		ttm_pool_unpopulate(ttm);
}

static struct ttm_backend_func vmw_ttm_func = {
	.bind = vmw_ttm_bind,
	.unbind = vmw_ttm_unbind,
	.destroy = vmw_ttm_destroy,
};

static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
					uint32_t page_flags)
{
	struct vmw_ttm_tt *vmw_be;
	int ret;

	vmw_be = kzalloc(sizeof(*vmw_be), GFP_KERNEL);
	if (!vmw_be)
		return NULL;

	vmw_be->dma_ttm.ttm.func = &vmw_ttm_func;
	vmw_be->dev_priv = container_of(bo->bdev, struct vmw_private, bdev);
	vmw_be->mob = NULL;

	if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
		ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bo, page_flags);
	else
		ret = ttm_tt_init(&vmw_be->dma_ttm.ttm, bo, page_flags);
	if (unlikely(ret != 0))
		goto out_no_init;

	return &vmw_be->dma_ttm.ttm;
out_no_init:
	kfree(vmw_be);
	return NULL;
}

static int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	return 0;
}

static int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			     struct ttm_mem_type_manager *man)
{
	switch (type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_CACHED;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		/* "On-card" video ram */
		man->func = &ttm_bo_manager_func;
		man->gpu_offset = 0;
		man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_CACHED;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case VMW_PL_GMR:
	case VMW_PL_MOB:
		/*
		 * "Guest Memory Regions" is an aperture like feature with
		 * one slot per bo. There is an upper limit of the number of
		 * slots as well as the bo size.
		 */
		man->func = &vmw_gmrid_manager_func;
		man->gpu_offset = 0;
		man->flags = TTM_MEMTYPE_FLAG_CMA | TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_CACHED;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

static void vmw_evict_flags(struct ttm_buffer_object *bo,
			    struct ttm_placement *placement)
{
	*placement = vmw_sys_placement;
}

static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct ttm_object_file *tfile =
		vmw_fpriv((struct drm_file *)filp->private_data)->tfile;

	return vmw_user_bo_verify_access(bo, tfile);
}

static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
				  struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct vmw_private *dev_priv =
		container_of(bdev, struct vmw_private, bdev);

	mem->bus.addr = NULL;
	mem->bus.is_iomem = false;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
	case VMW_PL_GMR:
	case VMW_PL_MOB:
		return 0;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		mem->bus.base = dev_priv->vram_start;
		mem->bus.is_iomem = true;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void vmw_ttm_io_mem_free(struct ttm_bo_device *bdev,
				struct ttm_mem_reg *mem)
{
}

static int vmw_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	return 0;
}

/**
 * vmw_move_notify - TTM move_notify_callback
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The struct ttm_mem_reg indicating to what memory
 *       region the move is taking place.
 *
 * Calls move_notify for all subsystems needing it.
 * (currently only resources).
 */
static void vmw_move_notify(struct ttm_buffer_object *bo,
			    bool evict,
			    struct ttm_mem_reg *mem)
{
	vmw_bo_move_notify(bo, mem);
	vmw_query_move_notify(bo, mem);
}
static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
				    unsigned long size,
				    enum dma_data_direction dma_dir)
{
	struct vb2_dma_sg_conf *conf = alloc_ctx;
	struct vb2_dma_sg_buf *buf;
	unsigned long first, last;
	int num_pages_from_user;
	struct vm_area_struct *vma;
	struct sg_table *sgt;
	DEFINE_DMA_ATTRS(attrs);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0)
	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
#endif

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->vaddr = NULL;
	buf->dev = conf->dev;
	buf->dma_dir = dma_dir;
	buf->offset = vaddr & ~PAGE_MASK;
	buf->size = size;
	buf->dma_sgt = &buf->sg_table;

	first = (vaddr & PAGE_MASK) >> PAGE_SHIFT;
	last = ((vaddr + size - 1) & PAGE_MASK) >> PAGE_SHIFT;
	buf->num_pages = last - first + 1;

	buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
			     GFP_KERNEL);
	if (!buf->pages)
		goto userptr_fail_alloc_pages;

	vma = find_vma(current->mm, vaddr);
	if (!vma) {
		dprintk(1, "no vma for address %lu\n", vaddr);
		goto userptr_fail_find_vma;
	}

	if (vma->vm_end < vaddr + size) {
		dprintk(1, "vma at %lu is too small for %lu bytes\n",
			vaddr, size);
		goto userptr_fail_find_vma;
	}

	buf->vma = vb2_get_vma(vma);
	if (!buf->vma) {
		dprintk(1, "failed to copy vma\n");
		goto userptr_fail_find_vma;
	}

	if (vma_is_io(buf->vma)) {
		for (num_pages_from_user = 0;
		     num_pages_from_user < buf->num_pages;
		     ++num_pages_from_user, vaddr += PAGE_SIZE) {
			unsigned long pfn;

			if (follow_pfn(vma, vaddr, &pfn)) {
				dprintk(1, "no page for address %lu\n", vaddr);
				break;
			}
			buf->pages[num_pages_from_user] = pfn_to_page(pfn);
		}
	} else
		num_pages_from_user = get_user_pages(current, current->mm,
					     vaddr & PAGE_MASK,
					     buf->num_pages,
					     buf->dma_dir == DMA_FROM_DEVICE,
					     1, /* force */
					     buf->pages,
					     NULL);

	if (num_pages_from_user != buf->num_pages)
		goto userptr_fail_get_user_pages;

	if (sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
			buf->num_pages, buf->offset, size, 0))
		goto userptr_fail_alloc_table_from_pages;

	sgt = &buf->sg_table;
	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				      buf->dma_dir, &attrs);
	if (!sgt->nents)
		goto userptr_fail_map;

	return buf;

userptr_fail_map:
	sg_free_table(&buf->sg_table);
userptr_fail_alloc_table_from_pages:
userptr_fail_get_user_pages:
	dprintk(1, "get_user_pages requested/got: %d/%d]\n",
		buf->num_pages, num_pages_from_user);
	if (!vma_is_io(buf->vma))
		while (--num_pages_from_user >= 0)
			put_page(buf->pages[num_pages_from_user]);
	vb2_put_vma(buf->vma);
userptr_fail_find_vma:
	kfree(buf->pages);
userptr_fail_alloc_pages:
	kfree(buf);
	return NULL;
}
static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size,
			      enum dma_data_direction dma_dir, gfp_t gfp_flags)
{
	struct vb2_dma_sg_conf *conf = alloc_ctx;
	struct vb2_dma_sg_buf *buf;
	struct sg_table *sgt;
	int ret;
	int num_pages;
	DEFINE_DMA_ATTRS(attrs);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0)
	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
#endif

	if (WARN_ON(alloc_ctx == NULL))
		return NULL;
	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->vaddr = NULL;
	buf->dma_dir = dma_dir;
	buf->offset = 0;
	buf->size = size;
	/* size is already page aligned */
	buf->num_pages = size >> PAGE_SHIFT;
	buf->dma_sgt = &buf->sg_table;

	buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
			     GFP_KERNEL);
	if (!buf->pages)
		goto fail_pages_array_alloc;

	ret = vb2_dma_sg_alloc_compacted(buf, gfp_flags);
	if (ret)
		goto fail_pages_alloc;

	ret = sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
			buf->num_pages, 0, size, GFP_KERNEL);
	if (ret)
		goto fail_table_alloc;

	/* Prevent the device from being released while the buffer is used */
	buf->dev = get_device(conf->dev);

	sgt = &buf->sg_table;
	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				      buf->dma_dir, &attrs);
	if (!sgt->nents)
		goto fail_map;

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dma_sg_put;
	buf->handler.arg = buf;

	atomic_inc(&buf->refcount);

	dprintk(1, "%s: Allocated buffer of %d pages\n",
		__func__, buf->num_pages);
	return buf;

fail_map:
	put_device(buf->dev);
	sg_free_table(buf->dma_sgt);
fail_table_alloc:
	num_pages = buf->num_pages;
	while (num_pages--)
		__free_page(buf->pages[num_pages]);
fail_pages_alloc:
	kfree(buf->pages);
fail_pages_array_alloc:
	kfree(buf);
	return NULL;
}
/**
 * vmw_ttm_map_dma - Make sure TTM pages are visible to the device
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Select the correct function for and make sure the TTM pages are
 * visible to the device. Allocate storage for the device mappings.
 * If a mapping has already been performed, indicated by the storage
 * pointer being non NULL, the function returns success.
 */
static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct vmw_private *dev_priv = vmw_tt->dev_priv;
	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
	struct vmw_sg_table *vsgt = &vmw_tt->vsgt;
	struct vmw_piter iter;
	dma_addr_t old;
	int ret = 0;
	static size_t sgl_size;
	static size_t sgt_size;

	if (vmw_tt->mapped)
		return 0;

	vsgt->mode = dev_priv->map_mode;
	vsgt->pages = vmw_tt->dma_ttm.ttm.pages;
	vsgt->num_pages = vmw_tt->dma_ttm.ttm.num_pages;
	vsgt->addrs = vmw_tt->dma_ttm.dma_address;
	vsgt->sgt = &vmw_tt->sgt;

	switch (dev_priv->map_mode) {
	case vmw_dma_map_bind:
	case vmw_dma_map_populate:
		if (unlikely(!sgl_size)) {
			sgl_size = ttm_round_pot(sizeof(struct scatterlist));
			sgt_size = ttm_round_pot(sizeof(struct sg_table));
		}
		vmw_tt->sg_alloc_size = sgt_size + sgl_size * vsgt->num_pages;
		ret = ttm_mem_global_alloc(glob, vmw_tt->sg_alloc_size, false,
					   true);
		if (unlikely(ret != 0))
			return ret;

		ret = sg_alloc_table_from_pages(&vmw_tt->sgt, vsgt->pages,
						vsgt->num_pages, 0,
						(unsigned long)
						vsgt->num_pages << PAGE_SHIFT,
						GFP_KERNEL);
		if (unlikely(ret != 0))
			goto out_sg_alloc_fail;

		if (vsgt->num_pages > vmw_tt->sgt.nents) {
			uint64_t over_alloc =
				sgl_size * (vsgt->num_pages -
					    vmw_tt->sgt.nents);

			ttm_mem_global_free(glob, over_alloc);
			vmw_tt->sg_alloc_size -= over_alloc;
		}

		ret = vmw_ttm_map_for_dma(vmw_tt);
		if (unlikely(ret != 0))
			goto out_map_fail;

		break;
	default:
		break;
	}

	old = ~((dma_addr_t) 0);
	vmw_tt->vsgt.num_regions = 0;
	for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
		dma_addr_t cur = vmw_piter_dma_addr(&iter);

		if (cur != old + PAGE_SIZE)
			vmw_tt->vsgt.num_regions++;
		old = cur;
	}

	vmw_tt->mapped = true;
	return 0;

out_map_fail:
	sg_free_table(vmw_tt->vsgt.sgt);
	vmw_tt->vsgt.sgt = NULL;
out_sg_alloc_fail:
	ttm_mem_global_free(glob, vmw_tt->sg_alloc_size);
	return ret;
}
static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
				    unsigned long size, int write)
{
	struct vb2_dma_sg_buf *buf;
	unsigned long first, last;
	int num_pages_from_user;
	struct vm_area_struct *vma;

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->vaddr = NULL;
	buf->write = write;
	buf->offset = vaddr & ~PAGE_MASK;
	buf->size = size;

	first = (vaddr & PAGE_MASK) >> PAGE_SHIFT;
	last = ((vaddr + size - 1) & PAGE_MASK) >> PAGE_SHIFT;
	buf->num_pages = last - first + 1;

	buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
			     GFP_KERNEL);
	if (!buf->pages)
		goto userptr_fail_alloc_pages;

	vma = find_vma(current->mm, vaddr);
	if (!vma) {
		dprintk(1, "no vma for address %lu\n", vaddr);
		goto userptr_fail_find_vma;
	}

	if (vma->vm_end < vaddr + size) {
		dprintk(1, "vma at %lu is too small for %lu bytes\n",
			vaddr, size);
		goto userptr_fail_find_vma;
	}

	buf->vma = vb2_get_vma(vma);
	if (!buf->vma) {
		dprintk(1, "failed to copy vma\n");
		goto userptr_fail_find_vma;
	}

	if (vma_is_io(buf->vma)) {
		for (num_pages_from_user = 0;
		     num_pages_from_user < buf->num_pages;
		     ++num_pages_from_user, vaddr += PAGE_SIZE) {
			unsigned long pfn;

			if (follow_pfn(buf->vma, vaddr, &pfn)) {
				dprintk(1, "no page for address %lu\n", vaddr);
				break;
			}
			buf->pages[num_pages_from_user] = pfn_to_page(pfn);
		}
	} else
		num_pages_from_user = get_user_pages(current, current->mm,
					     vaddr & PAGE_MASK,
					     buf->num_pages,
					     write,
					     1, /* force */
					     buf->pages,
					     NULL);

	if (num_pages_from_user != buf->num_pages)
		goto userptr_fail_get_user_pages;

	if (sg_alloc_table_from_pages(&buf->sg_table, buf->pages,
			buf->num_pages, buf->offset, size, 0))
		goto userptr_fail_alloc_table_from_pages;

	return buf;

userptr_fail_alloc_table_from_pages:
userptr_fail_get_user_pages:
	dprintk(1, "get_user_pages requested/got: %d/%d]\n",
		buf->num_pages, num_pages_from_user);
	if (!vma_is_io(buf->vma))
		while (--num_pages_from_user >= 0)
			put_page(buf->pages[num_pages_from_user]);
	vb2_put_vma(buf->vma);
userptr_fail_find_vma:
	kfree(buf->pages);
userptr_fail_alloc_pages:
	kfree(buf);
	return NULL;
}
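The call sites above all follow the same basic sequence: build a struct sg_table over an existing page array with sg_alloc_table_from_pages(), hand the table to the DMA API, and unwind in reverse order on failure. Below is a minimal sketch of that sequence, not taken from any of the drivers above; the device, page array, page count, and the DMA_BIDIRECTIONAL direction are illustrative assumptions supplied by a hypothetical caller.

#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

/*
 * Illustrative sketch only (not from the drivers above): wrap an existing
 * page array in an sg_table and map it for streaming DMA. The caller owns
 * 'pages' and must call example_unmap_pages() before freeing them.
 */
static int example_map_pages(struct device *dev, struct page **pages,
			     unsigned int n_pages, struct sg_table *sgt)
{
	int ret;

	/* One sg entry per page initially; physically adjacent pages are merged. */
	ret = sg_alloc_table_from_pages(sgt, pages, n_pages, 0,
					(unsigned long)n_pages << PAGE_SHIFT,
					GFP_KERNEL);
	if (ret)
		return ret;

	/* dma_map_sg() returns the number of mapped segments, 0 on failure. */
	if (!dma_map_sg(dev, sgt->sgl, sgt->orig_nents, DMA_BIDIRECTIONAL)) {
		sg_free_table(sgt);
		return -EIO;
	}
	return 0;
}

static void example_unmap_pages(struct device *dev, struct sg_table *sgt)
{
	/* Unmap with the same nents that was passed to dma_map_sg(). */
	dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, DMA_BIDIRECTIONAL);
	sg_free_table(sgt);
}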