static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
{
        drm_free_large(ttm->pages);
        ttm->pages = NULL;
        drm_free_large(ttm->dma_address);
        ttm->dma_address = NULL;
}
void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
{
        struct ttm_tt *ttm = &ttm_dma->ttm;

        drm_free_large(ttm->pages);
        ttm->pages = NULL;
        drm_free_large(ttm_dma->dma_address);
        ttm_dma->dma_address = NULL;
}
static int udl_gem_get_pages(struct udl_gem_object *obj, gfp_t gfpmask)
{
        int page_count, i;
        struct page *page;
        struct inode *inode;
        struct address_space *mapping;

        if (obj->pages)
                return 0;

        page_count = obj->base.size / PAGE_SIZE;
        BUG_ON(obj->pages != NULL);
        obj->pages = drm_malloc_ab(page_count, sizeof(struct page *));
        if (obj->pages == NULL)
                return -ENOMEM;

        inode = obj->base.filp->f_path.dentry->d_inode;
        mapping = inode->i_mapping;
        gfpmask |= mapping_gfp_mask(mapping);

        for (i = 0; i < page_count; i++) {
                page = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
                if (IS_ERR(page))
                        goto err_pages;
                obj->pages[i] = page;
        }

        return 0;
err_pages:
        while (i--)
                page_cache_release(obj->pages[i]);
        drm_free_large(obj->pages);
        obj->pages = NULL;
        return PTR_ERR(page);
}
/**
 * drm_gem_put_pages - helper to free backing pages for a GEM object
 * @obj: obj in question
 * @pages: pages to free
 * @dirty: if true, pages will be marked as dirty
 * @accessed: if true, the pages will be marked as accessed
 */
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
                bool dirty, bool accessed)
{
        int i, npages;

        /* We already BUG_ON() for non-page-aligned sizes in
         * drm_gem_object_init(), so we should never hit this unless
         * driver author is doing something really wrong:
         */
        WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

        npages = obj->size >> PAGE_SHIFT;

        for (i = 0; i < npages; i++) {
                if (dirty)
                        set_page_dirty(pages[i]);

                if (accessed)
                        mark_page_accessed(pages[i]);

                /* Undo the reference we took when populating the table */
                page_cache_release(pages[i]);
        }

        drm_free_large(pages);
}
static void udl_gem_put_pages(struct udl_gem_object *obj)
{
        int page_count = obj->base.size / PAGE_SIZE;
        int i;

        if (obj->base.import_attach) {
                drm_free_large(obj->pages);
                obj->pages = NULL;
                return;
        }

        for (i = 0; i < page_count; i++)
                page_cache_release(obj->pages[i]);

        drm_free_large(obj->pages);
        obj->pages = NULL;
}
/**
 * radeon_cs_parser_fini() - clean parser states
 * @parser:     parser structure holding parsing context.
 * @error:      error number
 * @backoff:    indicator to backoff the reservation
 *
 * If error is set, then unvalidate buffer, otherwise just free memory
 * used by parsing context.
 **/
static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bool backoff)
{
        unsigned i;

        if (!error) {
                /* Sort the buffer list from the smallest to largest buffer,
                 * which affects the order of buffers in the LRU list.
                 * This assures that the smallest buffers are added first
                 * to the LRU list, so they are likely to be later evicted
                 * first, instead of large buffers whose eviction is more
                 * expensive.
                 *
                 * This slightly lowers the number of bytes moved by TTM
                 * per frame under memory pressure.
                 */
                list_sort(NULL, &parser->validated, cmp_size_smaller_first);

                ttm_eu_fence_buffer_objects(&parser->ticket,
                                            &parser->validated,
                                            parser->ib.fence);
        } else if (backoff) {
                ttm_eu_backoff_reservation(&parser->ticket,
                                           &parser->validated);
        }

        if (parser->relocs != NULL) {
                for (i = 0; i < parser->nrelocs; i++) {
                        if (parser->relocs[i].gobj)
                                drm_gem_object_unreference_unlocked(parser->relocs[i].gobj);
                }
        }
        kfree(parser->track);
        kfree(parser->relocs);
        kfree(parser->relocs_ptr);
        drm_free_large(parser->vm_bos);
        for (i = 0; i < parser->nchunks; i++)
                drm_free_large(parser->chunks[i].kdata);
        kfree(parser->chunks);
        kfree(parser->chunks_array);
        radeon_ib_free(parser->rdev, &parser->ib);
        radeon_ib_free(parser->rdev, &parser->const_ib);
}
void amdgpu_bo_list_free(struct amdgpu_bo_list *list)
{
        unsigned i;

        for (i = 0; i < list->num_entries; ++i)
                amdgpu_bo_unref(&list->array[i].robj);

        mutex_destroy(&list->lock);
        drm_free_large(list->array);
        kfree(list);
}
void udl_gem_put_pages(struct udl_gem_object *obj)
{
        if (obj->base.import_attach) {
                drm_free_large(obj->pages);
                obj->pages = NULL;
                return;
        }

        drm_gem_put_pages(&obj->base, obj->pages, false, false);
        obj->pages = NULL;
}
static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
        struct drm_device *dev = obj->base.dev;
        struct sg_page_iter sg_iter;
        struct page **pages;
        int ret, i;

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ERR_PTR(ret);

        if (obj->dma_buf_vmapping) {
                obj->vmapping_count++;
                goto out_unlock;
        }

        ret = i915_gem_object_get_pages(obj);
        if (ret)
                goto err;

        i915_gem_object_pin_pages(obj);

        ret = -ENOMEM;

        pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
        if (pages == NULL)
                goto err_unpin;

        i = 0;
        for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
                pages[i++] = sg_page_iter_page(&sg_iter);

        obj->dma_buf_vmapping = vmap(pages, i, 0, PAGE_KERNEL);
        drm_free_large(pages);

        if (!obj->dma_buf_vmapping)
                goto err_unpin;

        obj->vmapping_count = 1;
out_unlock:
        mutex_unlock(&dev->struct_mutex);
        return obj->dma_buf_vmapping;

err_unpin:
        i915_gem_object_unpin_pages(obj);
err:
        mutex_unlock(&dev->struct_mutex);
        return ERR_PTR(ret);
}
static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
        struct drm_i915_gem_object *obj = dma_buf->priv;
        struct drm_device *dev = obj->base.dev;
        struct scatterlist *sg;
        struct page **pages;
        int ret, i;

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ERR_PTR(ret);

        if (obj->dma_buf_vmapping) {
                obj->vmapping_count++;
                goto out_unlock;
        }

        ret = i915_gem_object_get_pages(obj);
        if (ret)
                goto error;

        ret = -ENOMEM;

        pages = drm_malloc_ab(obj->pages->nents, sizeof(struct page *));
        if (pages == NULL)
                goto error;

        for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i)
                pages[i] = sg_page(sg);

        obj->dma_buf_vmapping = vmap(pages, obj->pages->nents, 0, PAGE_KERNEL);
        drm_free_large(pages);

        if (!obj->dma_buf_vmapping)
                goto error;

        obj->vmapping_count = 1;
        i915_gem_object_pin_pages(obj);

out_unlock:
        mutex_unlock(&dev->struct_mutex);
        return obj->dma_buf_vmapping;

error:
        mutex_unlock(&dev->struct_mutex);
        return ERR_PTR(ret);
}
void msm_gem_free_object(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        struct msm_drm_private *priv = obj->dev->dev_private;
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        int id;

        WARN_ON(!mutex_is_locked(&dev->struct_mutex));

        /* object should not be on active list: */
        WARN_ON(is_active(msm_obj));

        list_del(&msm_obj->mm_list);

        for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
                struct msm_mmu *mmu = priv->mmus[id];
                if (mmu && msm_obj->domain[id].iova) {
                        uint32_t offset = msm_obj->domain[id].iova;
                        mmu->funcs->unmap(mmu, offset, msm_obj->sgt, obj->size);
                }
        }

        if (obj->import_attach) {
                if (msm_obj->vaddr)
                        dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);

                /* Don't drop the pages for imported dmabuf, as they are not
                 * ours, just free the array we allocated:
                 */
                if (msm_obj->pages)
                        drm_free_large(msm_obj->pages);

                drm_prime_gem_destroy(obj, msm_obj->sgt);
        } else {
                vunmap(msm_obj->vaddr);
                put_pages(obj);
        }

        if (msm_obj->resv == &msm_obj->_resv)
                reservation_object_fini(msm_obj->resv);

        drm_gem_object_release(obj);

        kfree(msm_obj);
}
/**
 * _drm_gem_put_pages - helper to free backing pages for a GEM object
 * @obj: obj in question
 * @pages: pages to free
 * @dirty: if true, pages will be marked as dirty
 * @accessed: if true, the pages will be marked as accessed
 */
void _drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
                bool dirty, bool accessed)
{
        int i, npages;

        npages = obj->size >> PAGE_SHIFT;

        for (i = 0; i < npages; i++) {
                if (dirty)
                        set_page_dirty(pages[i]);

                if (accessed)
                        mark_page_accessed(pages[i]);

                /* Undo the reference we took when populating the table */
                page_cache_release(pages[i]);
        }

        drm_free_large(pages);
}
void msm_gem_free_object(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        struct msm_gem_object *msm_obj = to_msm_bo(obj);

        WARN_ON(!mutex_is_locked(&dev->struct_mutex));

        /* object should not be on active list: */
        WARN_ON(is_active(msm_obj));

        list_del(&msm_obj->mm_list);

        put_iova(obj);

        if (obj->import_attach) {
                if (msm_obj->vaddr)
                        dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);

                /* Don't drop the pages for imported dmabuf, as they are not
                 * ours, just free the array we allocated:
                 */
                if (msm_obj->pages)
                        drm_free_large(msm_obj->pages);

                drm_prime_gem_destroy(obj, msm_obj->sgt);
        } else {
                msm_gem_vunmap(obj);
                put_pages(obj);
        }

        if (msm_obj->resv == &msm_obj->_resv)
                reservation_object_fini(msm_obj->resv);

        drm_gem_object_release(obj);

        kfree(msm_obj);
}
static void put_pages(struct drm_gem_object *obj)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);

        if (msm_obj->pages) {
                /* For non-cached buffers, ensure the new pages are clean
                 * because display controller, GPU, etc. are not coherent:
                 */
                if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
                        dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
                                        msm_obj->sgt->nents, DMA_BIDIRECTIONAL);

                sg_free_table(msm_obj->sgt);
                kfree(msm_obj->sgt);

                if (use_pages(obj))
                        drm_gem_put_pages(obj, msm_obj->pages, true, false);
                else {
                        drm_mm_remove_node(msm_obj->vram_node);
                        drm_free_large(msm_obj->pages);
                }

                msm_obj->pages = NULL;
        }
}
void ttm_tt_fini(struct ttm_tt *ttm)
{
        drm_free_large(ttm->pages);
        ttm->pages = NULL;
}
int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *filp)
{
        const uint32_t info_size = sizeof(struct drm_amdgpu_bo_list_entry);

        struct amdgpu_device *adev = dev->dev_private;
        struct amdgpu_fpriv *fpriv = filp->driver_priv;
        union drm_amdgpu_bo_list *args = data;
        uint32_t handle = args->in.list_handle;
        const void __user *uptr = (const void*)(long)args->in.bo_info_ptr;

        struct drm_amdgpu_bo_list_entry *info;
        struct amdgpu_bo_list *list;

        int r;

        info = drm_malloc_ab(args->in.bo_number,
                             sizeof(struct drm_amdgpu_bo_list_entry));
        if (!info)
                return -ENOMEM;

        /* copy the handle array from userspace to a kernel buffer */
        r = -EFAULT;
        if (likely(info_size == args->in.bo_info_size)) {
                unsigned long bytes = args->in.bo_number *
                        args->in.bo_info_size;

                if (copy_from_user(info, uptr, bytes))
                        goto error_free;

        } else {
                unsigned long bytes = min(args->in.bo_info_size, info_size);
                unsigned i;

                memset(info, 0, args->in.bo_number * info_size);
                for (i = 0; i < args->in.bo_number; ++i) {
                        if (copy_from_user(&info[i], uptr, bytes))
                                goto error_free;

                        uptr += args->in.bo_info_size;
                }
        }

        switch (args->in.operation) {
        case AMDGPU_BO_LIST_OP_CREATE:
                r = amdgpu_bo_list_create(fpriv, &list, &handle);
                if (r)
                        goto error_free;

                r = amdgpu_bo_list_set(adev, filp, list, info,
                                       args->in.bo_number);
                amdgpu_bo_list_put(list);
                if (r)
                        goto error_free;

                break;

        case AMDGPU_BO_LIST_OP_DESTROY:
                amdgpu_bo_list_destroy(fpriv, handle);
                handle = 0;
                break;

        case AMDGPU_BO_LIST_OP_UPDATE:
                r = -ENOENT;
                list = amdgpu_bo_list_get(fpriv, handle);
                if (!list)
                        goto error_free;

                r = amdgpu_bo_list_set(adev, filp, list, info,
                                       args->in.bo_number);
                amdgpu_bo_list_put(list);
                if (r)
                        goto error_free;

                break;

        default:
                r = -EINVAL;
                goto error_free;
        }

        memset(args, 0, sizeof(*args));
        args->out.list_handle = handle;
        drm_free_large(info);

        return 0;

error_free:
        drm_free_large(info);
        return r;
}
static int amdgpu_bo_list_set(struct amdgpu_device *adev,
                                     struct drm_file *filp,
                                     struct amdgpu_bo_list *list,
                                     struct drm_amdgpu_bo_list_entry *info,
                                     unsigned num_entries)
{
        struct amdgpu_bo_list_entry *array;
        struct amdgpu_bo *gds_obj = adev->gds.gds_gfx_bo;
        struct amdgpu_bo *gws_obj = adev->gds.gws_gfx_bo;
        struct amdgpu_bo *oa_obj = adev->gds.oa_gfx_bo;

        unsigned last_entry = 0, first_userptr = num_entries;
        unsigned i;
        int r;

        array = drm_malloc_ab(num_entries, sizeof(struct amdgpu_bo_list_entry));
        if (!array)
                return -ENOMEM;
        memset(array, 0, num_entries * sizeof(struct amdgpu_bo_list_entry));

        for (i = 0; i < num_entries; ++i) {
                struct amdgpu_bo_list_entry *entry;
                struct drm_gem_object *gobj;
                struct amdgpu_bo *bo;
                struct mm_struct *usermm;

                gobj = drm_gem_object_lookup(adev->ddev, filp, info[i].bo_handle);
                if (!gobj) {
                        r = -ENOENT;
                        goto error_free;
                }

                bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
                drm_gem_object_unreference_unlocked(gobj);

                /* Regular BOs fill the array from the front, userptr BOs
                 * are grouped at the tail starting at first_userptr:
                 */
                usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
                if (usermm) {
                        if (usermm != current->mm) {
                                amdgpu_bo_unref(&bo);
                                r = -EPERM;
                                goto error_free;
                        }
                        entry = &array[--first_userptr];
                } else {
                        entry = &array[last_entry++];
                }

                entry->robj = bo;
                entry->priority = min(info[i].bo_priority,
                                      AMDGPU_BO_LIST_MAX_PRIORITY);
                entry->tv.bo = &entry->robj->tbo;
                entry->tv.shared = true;

                if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_GDS)
                        gds_obj = entry->robj;
                if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_GWS)
                        gws_obj = entry->robj;
                if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_OA)
                        oa_obj = entry->robj;

                trace_amdgpu_bo_list_set(list, entry->robj);
        }

        for (i = 0; i < list->num_entries; ++i)
                amdgpu_bo_unref(&list->array[i].robj);

        drm_free_large(list->array);

        list->gds_obj = gds_obj;
        list->gws_obj = gws_obj;
        list->oa_obj = oa_obj;
        list->first_userptr = first_userptr;
        list->array = array;
        list->num_entries = num_entries;

        return 0;

error_free:
        while (i--)
                amdgpu_bo_unref(&array[i].robj);
        drm_free_large(array);
        return r;
}
static int amdgpu_bo_list_set(struct amdgpu_device *adev,
                                     struct drm_file *filp,
                                     struct amdgpu_bo_list *list,
                                     struct drm_amdgpu_bo_list_entry *info,
                                     unsigned num_entries)
{
        struct amdgpu_bo_list_entry *array;
        struct amdgpu_bo *gds_obj = adev->gds.gds_gfx_bo;
        struct amdgpu_bo *gws_obj = adev->gds.gws_gfx_bo;
        struct amdgpu_bo *oa_obj = adev->gds.oa_gfx_bo;
        bool has_userptr = false;

        unsigned i;

        array = drm_malloc_ab(num_entries, sizeof(struct amdgpu_bo_list_entry));
        if (!array)
                return -ENOMEM;
        memset(array, 0, num_entries * sizeof(struct amdgpu_bo_list_entry));

        for (i = 0; i < num_entries; ++i) {
                struct amdgpu_bo_list_entry *entry = &array[i];
                struct drm_gem_object *gobj;

                gobj = drm_gem_object_lookup(adev->ddev, filp, info[i].bo_handle);
                if (!gobj)
                        goto error_free;

                entry->robj = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
                drm_gem_object_unreference_unlocked(gobj);

                entry->priority = info[i].bo_priority;
                entry->prefered_domains = entry->robj->initial_domain;
                entry->allowed_domains = entry->prefered_domains;
                if (entry->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
                        entry->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
                if (amdgpu_ttm_tt_has_userptr(entry->robj->tbo.ttm)) {
                        has_userptr = true;
                        entry->prefered_domains = AMDGPU_GEM_DOMAIN_GTT;
                        entry->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
                }
                entry->tv.bo = &entry->robj->tbo;
                entry->tv.shared = !entry->robj->prime_shared_count;

                if (entry->prefered_domains == AMDGPU_GEM_DOMAIN_GDS)
                        gds_obj = entry->robj;
                if (entry->prefered_domains == AMDGPU_GEM_DOMAIN_GWS)
                        gws_obj = entry->robj;
                if (entry->prefered_domains == AMDGPU_GEM_DOMAIN_OA)
                        oa_obj = entry->robj;

                trace_amdgpu_bo_list_set(list, entry->robj);
        }

        for (i = 0; i < list->num_entries; ++i)
                amdgpu_bo_unref(&list->array[i].robj);

        drm_free_large(list->array);

        list->gds_obj = gds_obj;
        list->gws_obj = gws_obj;
        list->oa_obj = oa_obj;
        list->has_userptr = has_userptr;
        list->array = array;
        list->num_entries = num_entries;

        return 0;

error_free:
        drm_free_large(array);
        return -ENOENT;
}