static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
	struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);

	if (bo->pin_count > 0)
		amdgpu_bo_subtract_pin_size(bo);

	if (bo->kfd_bo)
		amdgpu_amdkfd_unreserve_memory_limit(bo);

	amdgpu_bo_kunmap(bo);

	if (bo->gem_base.import_attach)
		drm_prime_gem_destroy(&bo->gem_base, bo->tbo.sg);
	drm_gem_object_release(&bo->gem_base);
	amdgpu_bo_unref(&bo->parent);
	if (!list_empty(&bo->shadow_list)) {
		mutex_lock(&adev->shadow_list_lock);
		list_del_init(&bo->shadow_list);
		mutex_unlock(&adev->shadow_list_lock);
	}
	kfree(bo->metadata);
	kfree(bo);
}
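/*
 * Every example on this page ultimately drops its buffer-object reference
 * through amdgpu_bo_unref().  For orientation, a minimal sketch of that
 * helper as it looked in kernels of this vintage -- not verbatim, and the
 * underlying TTM call was later renamed (ttm_bo_unref() -> ttm_bo_put()):
 */
void amdgpu_bo_unref(struct amdgpu_bo **bo)
{
	struct ttm_buffer_object *tbo;

	if ((*bo) == NULL)
		return;

	tbo = &((*bo)->tbo);
	ttm_bo_unref(&tbo);	/* drops one TTM reference and clears tbo */
	if (tbo == NULL)
		*bo = NULL;	/* poison the caller's pointer after the drop */
}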
int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
		  void **mem_obj, uint64_t *gpu_addr, void **cpu_ptr)
{
	struct amdgpu_device *rdev = (struct amdgpu_device *)kgd;
	struct kgd_mem **mem = (struct kgd_mem **) mem_obj;
	int r;

	BUG_ON(kgd == NULL);
	BUG_ON(gpu_addr == NULL);
	BUG_ON(cpu_ptr == NULL);

	*mem = kmalloc(sizeof(struct kgd_mem), GFP_KERNEL);
	if ((*mem) == NULL)
		return -ENOMEM;

	r = amdgpu_bo_create(rdev, size, PAGE_SIZE, true,
			     AMDGPU_GEM_DOMAIN_GTT,
			     AMDGPU_GEM_CREATE_CPU_GTT_USWC,
			     NULL, NULL, &(*mem)->bo);
	if (r) {
		dev_err(rdev->dev,
			"failed to allocate BO for amdkfd (%d)\n", r);
		goto allocate_mem_create_bo_failed;
	}

	/* map the buffer */
	r = amdgpu_bo_reserve((*mem)->bo, true);
	if (r) {
		dev_err(rdev->dev, "(%d) failed to reserve bo for amdkfd\n", r);
		goto allocate_mem_reserve_bo_failed;
	}

	r = amdgpu_bo_pin((*mem)->bo, AMDGPU_GEM_DOMAIN_GTT,
			  &(*mem)->gpu_addr);
	if (r) {
		dev_err(rdev->dev, "(%d) failed to pin bo for amdkfd\n", r);
		goto allocate_mem_pin_bo_failed;
	}
	*gpu_addr = (*mem)->gpu_addr;

	r = amdgpu_bo_kmap((*mem)->bo, &(*mem)->cpu_ptr);
	if (r) {
		dev_err(rdev->dev,
			"(%d) failed to map bo to kernel for amdkfd\n", r);
		goto allocate_mem_kmap_bo_failed;
	}
	*cpu_ptr = (*mem)->cpu_ptr;

	amdgpu_bo_unreserve((*mem)->bo);

	return 0;

allocate_mem_kmap_bo_failed:
	amdgpu_bo_unpin((*mem)->bo);
allocate_mem_pin_bo_failed:
	amdgpu_bo_unreserve((*mem)->bo);
allocate_mem_reserve_bo_failed:
	amdgpu_bo_unref(&(*mem)->bo);
allocate_mem_create_bo_failed:
	kfree(*mem);	/* don't leak the kgd_mem wrapper on any error path */
	return r;
}
/**
 * amdgpu_ring_fini - tear down the driver ring struct.
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Tear down the driver information for the selected ring (all asics).
 */
void amdgpu_ring_fini(struct amdgpu_ring *ring)
{
	int r;
	struct amdgpu_bo *ring_obj;

	ring_obj = ring->ring_obj;
	ring->ready = false;
	ring->ring = NULL;
	ring->ring_obj = NULL;

	amdgpu_wb_free(ring->adev, ring->fence_offs);
	amdgpu_wb_free(ring->adev, ring->rptr_offs);
	amdgpu_wb_free(ring->adev, ring->wptr_offs);
	amdgpu_wb_free(ring->adev, ring->next_rptr_offs);

	if (ring_obj) {
		r = amdgpu_bo_reserve(ring_obj, false);
		if (likely(r == 0)) {
			amdgpu_bo_kunmap(ring_obj);
			amdgpu_bo_unpin(ring_obj);
			amdgpu_bo_unreserve(ring_obj);
		}
		amdgpu_bo_unref(&ring_obj);
	}
}
/**
 * amdgpu_gart_table_vram_free - free gart page table vram
 *
 * @adev: amdgpu_device pointer
 *
 * Free the video memory used for the GART page table
 * (pcie r4xx, r5xx+).  These asics require the gart table to
 * be in video memory.
 */
void amdgpu_gart_table_vram_free(struct amdgpu_device *adev)
{
	if (adev->gart.robj == NULL)
		return;

	amdgpu_bo_unref(&adev->gart.robj);
}
/**
 * amdgpu_ih_ring_fini - tear down the IH state
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the IH state and frees buffer
 * used for the IH ring buffer.
 */
void amdgpu_ih_ring_fini(struct amdgpu_device *adev)
{
	int r;

	if (adev->irq.ih.use_bus_addr) {
		if (adev->irq.ih.ring) {
			/* add 8 bytes for the rptr/wptr shadows and
			 * add them to the end of the ring allocation.
			 */
			pci_free_consistent(adev->pdev,
					    adev->irq.ih.ring_size + 8,
					    (void *)adev->irq.ih.ring,
					    adev->irq.ih.rb_dma_addr);
			adev->irq.ih.ring = NULL;
		}
	} else {
		if (adev->irq.ih.ring_obj) {
			r = amdgpu_bo_reserve(adev->irq.ih.ring_obj, false);
			if (likely(r == 0)) {
				amdgpu_bo_kunmap(adev->irq.ih.ring_obj);
				amdgpu_bo_unpin(adev->irq.ih.ring_obj);
				amdgpu_bo_unreserve(adev->irq.ih.ring_obj);
			}
			amdgpu_bo_unref(&adev->irq.ih.ring_obj);
			adev->irq.ih.ring = NULL;
			adev->irq.ih.ring_obj = NULL;
		}
		amdgpu_wb_free(adev, adev->irq.ih.wptr_offs);
		amdgpu_wb_free(adev, adev->irq.ih.rptr_offs);
	}
}
void amdgpu_amdkfd_free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
{
	struct amdgpu_bo *bo = (struct amdgpu_bo *) mem_obj;

	amdgpu_bo_reserve(bo, true);
	amdgpu_bo_kunmap(bo);
	amdgpu_bo_unpin(bo);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);
}
void amdgpu_bo_list_free(struct amdgpu_bo_list *list)
{
	unsigned i;

	for (i = 0; i < list->num_entries; ++i)
		amdgpu_bo_unref(&list->array[i].robj);

	mutex_destroy(&list->lock);
	kvfree(list->array);
	kfree(list);
}
void amdgpu_gem_object_free(struct drm_gem_object *gobj)
{
	struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);

	if (robj) {
		if (robj->gem_base.import_attach)
			drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
		amdgpu_mn_unregister(robj);
		amdgpu_bo_unref(&robj);
	}
}
/**
 * amdgpu_driver_postclose_kms - drm callback for post close
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device post close, tear down vm on cayman+ (all asics).
 */
void amdgpu_driver_postclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_bo_list *list;
	struct amdgpu_bo *pd;
	unsigned int pasid;
	int handle;

	if (!fpriv)
		return;

	pm_runtime_get_sync(dev->dev);

	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_UVD) != NULL)
		amdgpu_uvd_free_handles(adev, file_priv);
	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCE) != NULL)
		amdgpu_vce_free_handles(adev, file_priv);

	amdgpu_vm_bo_rmv(adev, fpriv->prt_va);

	if (amdgpu_sriov_vf(adev)) {
		/* TODO: how to handle reserve failure */
		BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, true));
		amdgpu_vm_bo_rmv(adev, fpriv->csa_va);
		fpriv->csa_va = NULL;
		amdgpu_bo_unreserve(adev->virt.csa_obj);
	}

	pasid = fpriv->vm.pasid;
	pd = amdgpu_bo_ref(fpriv->vm.root.base.bo);

	amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);
	amdgpu_vm_fini(adev, &fpriv->vm);

	if (pasid)
		amdgpu_pasid_free_delayed(pd->tbo.resv, pasid);
	amdgpu_bo_unref(&pd);

	idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
		amdgpu_bo_list_put(list);

	idr_destroy(&fpriv->bo_list_handles);
	mutex_destroy(&fpriv->bo_list_lock);

	kfree(fpriv);
	file_priv->driver_priv = NULL;

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
}
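/*
 * Note how amdgpu_driver_postclose_kms() brackets the whole teardown in a
 * runtime-PM reference so the GPU stays powered while objects are being
 * released.  A minimal sketch of that bracketing idiom on its own -- the
 * function name is hypothetical, the pm_runtime_* calls are the real API:
 */
static void example_runtime_pm_bracket(struct drm_device *dev)
{
	pm_runtime_get_sync(dev->dev);		/* keep the device awake */

	/* ... work that touches the hardware ... */

	pm_runtime_mark_last_busy(dev->dev);	/* restart the autosuspend timer */
	pm_runtime_put_autosuspend(dev->dev);	/* drop ref; suspend after the delay */
}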
void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
{
	struct kgd_mem *mem = (struct kgd_mem *) mem_obj;

	BUG_ON(mem == NULL);

	amdgpu_bo_reserve(mem->bo, true);
	amdgpu_bo_kunmap(mem->bo);
	amdgpu_bo_unpin(mem->bo);
	amdgpu_bo_unreserve(mem->bo);
	amdgpu_bo_unref(&(mem->bo));
	kfree(mem);
}
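/*
 * free_gtt_mem() and amdgpu_amdkfd_free_gtt_mem() above repeat the same
 * teardown order for a pinned, kernel-mapped BO: reserve, kunmap, unpin,
 * unreserve, then drop the reference.  A minimal sketch of that pattern as
 * a standalone helper -- the helper name is hypothetical, not part of the
 * amdgpu API:
 */
static void example_teardown_pinned_bo(struct amdgpu_bo **bo)
{
	if (amdgpu_bo_reserve(*bo, true) == 0) {	/* same flag as the free paths above */
		amdgpu_bo_kunmap(*bo);			/* undo amdgpu_bo_kmap() */
		amdgpu_bo_unpin(*bo);			/* undo amdgpu_bo_pin() */
		amdgpu_bo_unreserve(*bo);
	}
	amdgpu_bo_unref(bo);				/* drop the creation reference; NULLs *bo */
}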
static void amdgpu_bo_list_release_rcu(struct kref *ref)
{
	unsigned i;
	struct amdgpu_bo_list *list = container_of(ref, struct amdgpu_bo_list,
						   refcount);

	for (i = 0; i < list->num_entries; ++i)
		amdgpu_bo_unref(&list->array[i].robj);

	mutex_destroy(&list->lock);
	kvfree(list->array);
	kfree_rcu(list, rhead);
}
static int amdgpu_cgs_gunmap_kmem(void *cgs_device, cgs_handle_t kmem_handle)
{
	struct amdgpu_bo *obj = (struct amdgpu_bo *)kmem_handle;

	if (obj) {
		int r = amdgpu_bo_reserve(obj, false);
		if (likely(r == 0)) {
			amdgpu_bo_unpin(obj);
			amdgpu_bo_unreserve(obj);
		}
		amdgpu_bo_unref(&obj);
	}
	return 0;
}
void amdgpu_job_free(struct amdgpu_job *job)
{
	unsigned i;
	struct fence *f;

	/* use sched fence if available */
	f = (job->base.s_fence) ? &job->base.s_fence->base : job->fence;

	for (i = 0; i < job->num_ibs; ++i)
		amdgpu_sa_bo_free(job->adev, &job->ibs[i].sa_bo, f);
	fence_put(job->fence);

	amdgpu_bo_unref(&job->uf_bo);
	amdgpu_sync_free(&job->sync);

	if (!job->base.use_sched)
		kfree(job);
}
static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
	struct amdgpu_bo *bo;

	bo = container_of(tbo, struct amdgpu_bo, tbo);

	amdgpu_update_memory_usage(adev, &bo->tbo.mem, NULL);

	drm_gem_object_release(&bo->gem_base);
	amdgpu_bo_unref(&bo->parent);
	if (!list_empty(&bo->shadow_list)) {
		mutex_lock(&adev->shadow_list_lock);
		list_del_init(&bo->shadow_list);
		mutex_unlock(&adev->shadow_list_lock);
	}
	kfree(bo->metadata);
	kfree(bo);
}
Bool amdgpu_set_shared_pixmap_backing(PixmapPtr ppix, void *fd_handle)
{
	ScrnInfoPtr pScrn = xf86ScreenToScrn(ppix->drawable.pScreen);
	AMDGPUEntPtr pAMDGPUEnt = AMDGPUEntPriv(pScrn);
	struct amdgpu_buffer *pixmap_buffer = NULL;
	int ihandle = (int)(long)fd_handle;
	uint32_t size = ppix->devKind * ppix->drawable.height;

	pixmap_buffer = amdgpu_gem_bo_open_prime(pAMDGPUEnt->pDev, ihandle,
						 size);
	if (!pixmap_buffer)
		return FALSE;

	amdgpu_set_pixmap_bo(ppix, pixmap_buffer);

	close(ihandle);
	/* we have a reference from the alloc and one from set pixmap bo,
	 * drop one
	 */
	amdgpu_bo_unref(&pixmap_buffer);

	return TRUE;
}
static int amdgpu_bo_list_set(struct amdgpu_device *adev,
			      struct drm_file *filp,
			      struct amdgpu_bo_list *list,
			      struct drm_amdgpu_bo_list_entry *info,
			      unsigned num_entries)
{
	struct amdgpu_bo_list_entry *array;
	struct amdgpu_bo *gds_obj = adev->gds.gds_gfx_bo;
	struct amdgpu_bo *gws_obj = adev->gds.gws_gfx_bo;
	struct amdgpu_bo *oa_obj = adev->gds.oa_gfx_bo;
	bool has_userptr = false;
	unsigned i;

	array = drm_malloc_ab(num_entries, sizeof(struct amdgpu_bo_list_entry));
	if (!array)
		return -ENOMEM;
	memset(array, 0, num_entries * sizeof(struct amdgpu_bo_list_entry));

	for (i = 0; i < num_entries; ++i) {
		struct amdgpu_bo_list_entry *entry = &array[i];
		struct drm_gem_object *gobj;

		gobj = drm_gem_object_lookup(adev->ddev, filp,
					     info[i].bo_handle);
		if (!gobj)
			goto error_free;

		entry->robj = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
		drm_gem_object_unreference_unlocked(gobj);
		entry->priority = info[i].bo_priority;
		entry->prefered_domains = entry->robj->initial_domain;
		entry->allowed_domains = entry->prefered_domains;
		if (entry->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
			entry->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
		if (amdgpu_ttm_tt_has_userptr(entry->robj->tbo.ttm)) {
			has_userptr = true;
			entry->prefered_domains = AMDGPU_GEM_DOMAIN_GTT;
			entry->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
		}
		entry->tv.bo = &entry->robj->tbo;
		entry->tv.shared = !entry->robj->prime_shared_count;

		if (entry->prefered_domains == AMDGPU_GEM_DOMAIN_GDS)
			gds_obj = entry->robj;
		if (entry->prefered_domains == AMDGPU_GEM_DOMAIN_GWS)
			gws_obj = entry->robj;
		if (entry->prefered_domains == AMDGPU_GEM_DOMAIN_OA)
			oa_obj = entry->robj;

		trace_amdgpu_bo_list_set(list, entry->robj);
	}

	for (i = 0; i < list->num_entries; ++i)
		amdgpu_bo_unref(&list->array[i].robj);

	drm_free_large(list->array);

	list->gds_obj = gds_obj;
	list->gws_obj = gws_obj;
	list->oa_obj = oa_obj;
	list->has_userptr = has_userptr;
	list->array = array;
	list->num_entries = num_entries;

	return 0;

error_free:
	/* drop the references already taken on earlier entries */
	while (i--)
		amdgpu_bo_unref(&array[i].robj);
	drm_free_large(array);
	return -ENOENT;
}
/* Test BO GTT->VRAM and VRAM->GTT GPU copies across the whole GTT aperture */
static void amdgpu_do_test_moves(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	struct amdgpu_bo *vram_obj = NULL;
	struct amdgpu_bo **gtt_obj = NULL;
	uint64_t gtt_addr, vram_addr;
	unsigned n, size;
	int i, r;

	size = 1024 * 1024;

	/* Number of tests =
	 * (Total GTT - IB pool - writeback page - ring buffers) / test size
	 */
	n = adev->mc.gtt_size - AMDGPU_IB_POOL_SIZE*64*1024;
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		if (adev->rings[i])
			n -= adev->rings[i]->ring_size;
	if (adev->wb.wb_obj)
		n -= AMDGPU_GPU_PAGE_SIZE;
	if (adev->irq.ih.ring_obj)
		n -= adev->irq.ih.ring_size;
	n /= size;

	gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL);
	if (!gtt_obj) {
		DRM_ERROR("Failed to allocate %d pointers\n", n);
		r = 1;
		goto out_cleanup;
	}

	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
			     AMDGPU_GEM_DOMAIN_VRAM, 0,
			     NULL, &vram_obj);
	if (r) {
		DRM_ERROR("Failed to create VRAM object\n");
		goto out_cleanup;
	}
	r = amdgpu_bo_reserve(vram_obj, false);
	if (unlikely(r != 0))
		goto out_unref;
	r = amdgpu_bo_pin(vram_obj, AMDGPU_GEM_DOMAIN_VRAM, &vram_addr);
	if (r) {
		DRM_ERROR("Failed to pin VRAM object\n");
		goto out_unres;
	}
	for (i = 0; i < n; i++) {
		void *gtt_map, *vram_map;
		void **gtt_start, **gtt_end;
		void **vram_start, **vram_end;
		struct fence *fence = NULL;

		r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
				     AMDGPU_GEM_DOMAIN_GTT, 0, NULL,
				     gtt_obj + i);
		if (r) {
			DRM_ERROR("Failed to create GTT object %d\n", i);
			goto out_lclean;
		}

		r = amdgpu_bo_reserve(gtt_obj[i], false);
		if (unlikely(r != 0))
			goto out_lclean_unref;
		r = amdgpu_bo_pin(gtt_obj[i], AMDGPU_GEM_DOMAIN_GTT, &gtt_addr);
		if (r) {
			DRM_ERROR("Failed to pin GTT object %d\n", i);
			goto out_lclean_unres;
		}

		r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map);
		if (r) {
			DRM_ERROR("Failed to map GTT object %d\n", i);
			goto out_lclean_unpin;
		}

		for (gtt_start = gtt_map, gtt_end = gtt_map + size;
		     gtt_start < gtt_end;
		     gtt_start++)
			*gtt_start = gtt_start;

		amdgpu_bo_kunmap(gtt_obj[i]);

		r = amdgpu_copy_buffer(ring, gtt_addr, vram_addr,
				       size, NULL, &fence);
		if (r) {
			DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
			goto out_lclean_unpin;
		}

		r = fence_wait(fence, false);
		if (r) {
			DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i);
			goto out_lclean_unpin;
		}

		fence_put(fence);

		r = amdgpu_bo_kmap(vram_obj, &vram_map);
		if (r) {
			DRM_ERROR("Failed to map VRAM object after copy %d\n", i);
			goto out_lclean_unpin;
		}

		for (gtt_start = gtt_map, gtt_end = gtt_map + size,
		     vram_start = vram_map, vram_end = vram_map + size;
		     vram_start < vram_end;
		     gtt_start++, vram_start++) {
			if (*vram_start != gtt_start) {
				DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, "
					  "expected 0x%p (GTT/VRAM offset "
					  "0x%16llx/0x%16llx)\n",
					  i, *vram_start, gtt_start,
					  (unsigned long long)
					  (gtt_addr - adev->mc.gtt_start +
					   (void*)gtt_start - gtt_map),
					  (unsigned long long)
					  (vram_addr - adev->mc.vram_start +
					   (void*)gtt_start - gtt_map));
				amdgpu_bo_kunmap(vram_obj);
				goto out_lclean_unpin;
			}
			*vram_start = vram_start;
		}

		amdgpu_bo_kunmap(vram_obj);

		r = amdgpu_copy_buffer(ring, vram_addr, gtt_addr,
				       size, NULL, &fence);
		if (r) {
			DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
			goto out_lclean_unpin;
		}

		r = fence_wait(fence, false);
		if (r) {
			DRM_ERROR("Failed to wait for VRAM->GTT fence %d\n", i);
			goto out_lclean_unpin;
		}

		fence_put(fence);

		r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map);
		if (r) {
			DRM_ERROR("Failed to map GTT object after copy %d\n", i);
			goto out_lclean_unpin;
		}

		for (gtt_start = gtt_map, gtt_end = gtt_map + size,
		     vram_start = vram_map, vram_end = vram_map + size;
		     gtt_start < gtt_end;
		     gtt_start++, vram_start++) {
			if (*gtt_start != vram_start) {
				DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 0x%p, "
					  "expected 0x%p (VRAM/GTT offset "
					  "0x%16llx/0x%16llx)\n",
					  i, *gtt_start, vram_start,
					  (unsigned long long)
					  (vram_addr - adev->mc.vram_start +
					   (void*)vram_start - vram_map),
					  (unsigned long long)
					  (gtt_addr - adev->mc.gtt_start +
					   (void*)vram_start - vram_map));
				amdgpu_bo_kunmap(gtt_obj[i]);
				goto out_lclean_unpin;
			}
		}

		amdgpu_bo_kunmap(gtt_obj[i]);

		DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n",
			 gtt_addr - adev->mc.gtt_start);
		continue;

out_lclean_unpin:
		amdgpu_bo_unpin(gtt_obj[i]);
out_lclean_unres:
		amdgpu_bo_unreserve(gtt_obj[i]);
out_lclean_unref:
		amdgpu_bo_unref(&gtt_obj[i]);
out_lclean:
		for (--i; i >= 0; --i) {
			amdgpu_bo_unpin(gtt_obj[i]);
			amdgpu_bo_unreserve(gtt_obj[i]);
			amdgpu_bo_unref(&gtt_obj[i]);
		}
		if (fence)
			fence_put(fence);
		break;
	}

	amdgpu_bo_unpin(vram_obj);
out_unres:
	amdgpu_bo_unreserve(vram_obj);
out_unref:
	amdgpu_bo_unref(&vram_obj);
out_cleanup:
	kfree(gtt_obj);
	if (r) {
		printk(KERN_WARNING "Error while testing BO move.\n");
	}
}
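/*
 * amdgpu_do_test_moves() above pairs every amdgpu_copy_buffer() with a
 * blocking fence_wait() and a fence_put().  A minimal sketch of that
 * pairing for a single copy, using the same pre-rename fence API as the
 * example (the fence_* helpers later became dma_fence_*); the function
 * name is hypothetical:
 */
static int example_copy_and_wait(struct amdgpu_ring *ring,
				 uint64_t src_addr, uint64_t dst_addr,
				 unsigned size)
{
	struct fence *fence = NULL;
	int r;

	r = amdgpu_copy_buffer(ring, src_addr, dst_addr, size, NULL, &fence);
	if (r)
		return r;		/* no fence was installed on failure */

	r = fence_wait(fence, false);	/* false: uninterruptible wait */
	fence_put(fence);		/* drop our fence reference either way */
	return r;
}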
int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
				void **mem_obj, uint64_t *gpu_addr,
				void **cpu_ptr, bool mqd_gfx9)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct amdgpu_bo *bo = NULL;
	struct amdgpu_bo_param bp;
	int r;
	void *cpu_ptr_tmp = NULL;

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = PAGE_SIZE;
	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
	bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
	bp.type = ttm_bo_type_kernel;
	bp.resv = NULL;

	if (mqd_gfx9)
		bp.flags |= AMDGPU_GEM_CREATE_MQD_GFX9;

	r = amdgpu_bo_create(adev, &bp, &bo);
	if (r) {
		dev_err(adev->dev,
			"failed to allocate BO for amdkfd (%d)\n", r);
		return r;
	}

	/* map the buffer */
	r = amdgpu_bo_reserve(bo, true);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve bo for amdkfd\n", r);
		goto allocate_mem_reserve_bo_failed;
	}

	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
	if (r) {
		dev_err(adev->dev, "(%d) failed to pin bo for amdkfd\n", r);
		goto allocate_mem_pin_bo_failed;
	}

	r = amdgpu_ttm_alloc_gart(&bo->tbo);
	if (r) {
		dev_err(adev->dev, "%p bind failed\n", bo);
		goto allocate_mem_kmap_bo_failed;
	}

	r = amdgpu_bo_kmap(bo, &cpu_ptr_tmp);
	if (r) {
		dev_err(adev->dev,
			"(%d) failed to map bo to kernel for amdkfd\n", r);
		goto allocate_mem_kmap_bo_failed;
	}

	*mem_obj = bo;
	*gpu_addr = amdgpu_bo_gpu_offset(bo);
	*cpu_ptr = cpu_ptr_tmp;

	amdgpu_bo_unreserve(bo);

	return 0;

allocate_mem_kmap_bo_failed:
	amdgpu_bo_unpin(bo);
allocate_mem_pin_bo_failed:
	amdgpu_bo_unreserve(bo);
allocate_mem_reserve_bo_failed:
	amdgpu_bo_unref(&bo);

	return r;
}
static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
				  unsigned sdomain, unsigned ddomain)
{
	struct amdgpu_bo *dobj = NULL;
	struct amdgpu_bo *sobj = NULL;
	uint64_t saddr, daddr;
	int r, n;
	int time;

	n = AMDGPU_BENCHMARK_ITERATIONS;
	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, sdomain, 0, NULL,
			     NULL, &sobj);
	if (r)
		goto out_cleanup;
	r = amdgpu_bo_reserve(sobj, false);
	if (unlikely(r != 0))
		goto out_cleanup;
	r = amdgpu_bo_pin(sobj, sdomain, &saddr);
	amdgpu_bo_unreserve(sobj);
	if (r)
		goto out_cleanup;
	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, ddomain, 0, NULL,
			     NULL, &dobj);
	if (r)
		goto out_cleanup;
	r = amdgpu_bo_reserve(dobj, false);
	if (unlikely(r != 0))
		goto out_cleanup;
	r = amdgpu_bo_pin(dobj, ddomain, &daddr);
	amdgpu_bo_unreserve(dobj);
	if (r)
		goto out_cleanup;

	if (adev->mman.buffer_funcs) {
		time = amdgpu_benchmark_do_move(adev, size, saddr, daddr, n);
		if (time < 0)
			goto out_cleanup;
		if (time > 0)
			amdgpu_benchmark_log_results(n, size, time,
						     sdomain, ddomain, "dma");
	}

out_cleanup:
	if (sobj) {
		r = amdgpu_bo_reserve(sobj, false);
		if (likely(r == 0)) {
			amdgpu_bo_unpin(sobj);
			amdgpu_bo_unreserve(sobj);
		}
		amdgpu_bo_unref(&sobj);
	}
	if (dobj) {
		r = amdgpu_bo_reserve(dobj, false);
		if (likely(r == 0)) {
			amdgpu_bo_unpin(dobj);
			amdgpu_bo_unreserve(dobj);
		}
		amdgpu_bo_unref(&dobj);
	}

	if (r)
		DRM_ERROR("Error while benchmarking BO move.\n");
}
static int amdgpu_bo_list_set(struct amdgpu_device *adev,
			      struct drm_file *filp,
			      struct amdgpu_bo_list *list,
			      struct drm_amdgpu_bo_list_entry *info,
			      unsigned num_entries)
{
	struct amdgpu_bo_list_entry *array;
	struct amdgpu_bo *gds_obj = adev->gds.gds_gfx_bo;
	struct amdgpu_bo *gws_obj = adev->gds.gws_gfx_bo;
	struct amdgpu_bo *oa_obj = adev->gds.oa_gfx_bo;
	unsigned last_entry = 0, first_userptr = num_entries;
	unsigned i;
	int r;
	unsigned long total_size = 0;

	array = kvmalloc_array(num_entries, sizeof(struct amdgpu_bo_list_entry),
			       GFP_KERNEL);
	if (!array)
		return -ENOMEM;
	memset(array, 0, num_entries * sizeof(struct amdgpu_bo_list_entry));

	for (i = 0; i < num_entries; ++i) {
		struct amdgpu_bo_list_entry *entry;
		struct drm_gem_object *gobj;
		struct amdgpu_bo *bo;
		struct mm_struct *usermm;

		gobj = drm_gem_object_lookup(filp, info[i].bo_handle);
		if (!gobj) {
			r = -ENOENT;
			goto error_free;
		}

		bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
		drm_gem_object_put_unlocked(gobj);

		usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
		if (usermm) {
			if (usermm != current->mm) {
				amdgpu_bo_unref(&bo);
				r = -EPERM;
				goto error_free;
			}
			entry = &array[--first_userptr];
		} else {
			entry = &array[last_entry++];
		}

		entry->robj = bo;
		entry->priority = min(info[i].bo_priority,
				      AMDGPU_BO_LIST_MAX_PRIORITY);
		entry->tv.bo = &entry->robj->tbo;
		entry->tv.shared = !entry->robj->prime_shared_count;

		if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_GDS)
			gds_obj = entry->robj;
		if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_GWS)
			gws_obj = entry->robj;
		if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_OA)
			oa_obj = entry->robj;

		total_size += amdgpu_bo_size(entry->robj);
		trace_amdgpu_bo_list_set(list, entry->robj);
	}

	for (i = 0; i < list->num_entries; ++i)
		amdgpu_bo_unref(&list->array[i].robj);

	kvfree(list->array);

	list->gds_obj = gds_obj;
	list->gws_obj = gws_obj;
	list->oa_obj = oa_obj;
	list->first_userptr = first_userptr;
	list->array = array;
	list->num_entries = num_entries;

	trace_amdgpu_cs_bo_status(list->num_entries, total_size);
	return 0;

error_free:
	/* entries sit at both ends of the array: GEM BOs fill it from the
	 * front, userptr BOs from the tail -- unwind both ranges
	 */
	for (i = 0; i < last_entry; ++i)
		amdgpu_bo_unref(&array[i].robj);
	for (i = first_userptr; i < num_entries; ++i)
		amdgpu_bo_unref(&array[i].robj);
	kvfree(array);
	return r;
}