/*
 * Map the static Context Save Area (CSA) buffer object into a VM at the
 * fixed virtual address AMDGPU_CSA_VADDR.
 */
int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	int r;
	struct amdgpu_bo_va *bo_va;
	struct ww_acquire_ctx ticket;
	struct list_head list;
	struct amdgpu_bo_list_entry pd;
	struct ttm_validate_buffer csa_tv;

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&csa_tv.head);
	csa_tv.bo = &adev->virt.csa_obj->tbo;
	csa_tv.shared = true;

	list_add(&csa_tv.head, &list);
	amdgpu_vm_get_pd_bo(vm, &list, &pd);

	/* Reserve the CSA BO and the page directory BO together. */
	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
	if (r) {
		DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
		return r;
	}

	bo_va = amdgpu_vm_bo_add(adev, vm, adev->virt.csa_obj);
	if (!bo_va) {
		ttm_eu_backoff_reservation(&ticket, &list);
		DRM_ERROR("failed to create bo_va for static CSA\n");
		return -ENOMEM;
	}

	r = amdgpu_vm_alloc_pts(adev, bo_va->vm, AMDGPU_CSA_VADDR,
				AMDGPU_CSA_SIZE);
	if (r) {
		DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r);
		amdgpu_vm_bo_rmv(adev, bo_va);
		ttm_eu_backoff_reservation(&ticket, &list);
		return r;
	}

	r = amdgpu_vm_bo_map(adev, bo_va, AMDGPU_CSA_VADDR, 0, AMDGPU_CSA_SIZE,
			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
			     AMDGPU_PTE_EXECUTABLE);
	if (r) {
		DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);
		amdgpu_vm_bo_rmv(adev, bo_va);
		ttm_eu_backoff_reservation(&ticket, &list);
		return r;
	}

	vm->csa_bo_va = bo_va;
	/* Mapping succeeded: release the reservation without fencing. */
	ttm_eu_backoff_reservation(&ticket, &list);
	return 0;
}
/**
 * cs_parser_fini() - clean parser states
 * @parser:	parser structure holding parsing context.
 * @error:	error number
 *
 * If error is set, then unvalidate the buffers, otherwise just free memory
 * used by the parsing context.
 **/
static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
{
	unsigned i;

	if (!error && parser->ib)
		ttm_eu_fence_buffer_objects(&parser->validated,
					    parser->ib->fence);
	else
		ttm_eu_backoff_reservation(&parser->validated);

	if (parser->relocs != NULL) {
		for (i = 0; i < parser->nrelocs; i++) {
			if (parser->relocs[i].gobj)
				drm_gem_object_unreference_unlocked(parser->relocs[i].gobj);
		}
	}
	kfree(parser->track);
	kfree(parser->relocs);
	kfree(parser->relocs_ptr);
	for (i = 0; i < parser->nchunks; i++) {
		kfree(parser->chunks[i].kdata);
		kfree(parser->chunks[i].kpage[0]);
		kfree(parser->chunks[i].kpage[1]);
	}
	kfree(parser->chunks);
	kfree(parser->chunks_array);
	radeon_ib_free(parser->rdev, &parser->ib);
}
/**
 * cs_parser_fini() - clean parser states
 * @parser:	parser structure holding parsing context.
 * @error:	error number
 * @backoff:	indicator to backoff the reservation
 *
 * If error is set, then unvalidate the buffers, otherwise just free memory
 * used by the parsing context.
 **/
static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bool backoff)
{
	unsigned i;

	if (!error) {
		/* Sort the buffer list from the smallest to largest buffer,
		 * which affects the order of buffers in the LRU list.
		 * This assures that the smallest buffers are added first
		 * to the LRU list, so they are likely to be later evicted
		 * first, instead of large buffers whose eviction is more
		 * expensive.
		 *
		 * This slightly lowers the number of bytes moved by TTM
		 * per frame under memory pressure.
		 */
		list_sort(NULL, &parser->validated, cmp_size_smaller_first);

		ttm_eu_fence_buffer_objects(&parser->ticket,
					    &parser->validated,
					    parser->ib.fence);
	} else if (backoff) {
		ttm_eu_backoff_reservation(&parser->ticket,
					   &parser->validated);
	}

	if (parser->relocs != NULL) {
		for (i = 0; i < parser->nrelocs; i++) {
			if (parser->relocs[i].gobj)
				drm_gem_object_unreference_unlocked(parser->relocs[i].gobj);
		}
	}
	kfree(parser->track);
	kfree(parser->relocs);
	kfree(parser->relocs_ptr);
	kfree(parser->vm_bos);
	for (i = 0; i < parser->nchunks; i++)
		drm_free_large(parser->chunks[i].kdata);
	kfree(parser->chunks);
	kfree(parser->chunks_array);
	radeon_ib_free(parser->rdev, &parser->ib);
	radeon_ib_free(parser->rdev, &parser->const_ib);
}
static int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
					   struct list_head *head)
{
	struct ttm_validate_buffer *buf;
	struct ttm_buffer_object *bo;
	struct virtio_gpu_object *qobj;
	int ret;

	ret = ttm_eu_reserve_buffers(ticket, head, true, NULL);
	if (ret != 0)
		return ret;

	list_for_each_entry(buf, head, head) {
		bo = buf->bo;
		qobj = container_of(bo, struct virtio_gpu_object, tbo);
		ret = ttm_bo_validate(bo, &qobj->placement, false, false);
		if (ret) {
			/* Validation failed: drop the reservations taken above. */
			ttm_eu_backoff_reservation(ticket, head);
			return ret;
		}
	}
	return 0;
}
void psb_fence_or_sync(struct drm_file *file_priv,
		       uint32_t engine,
		       uint32_t fence_types,
		       uint32_t fence_flags,
		       struct list_head *list,
		       struct psb_ttm_fence_rep *fence_arg,
		       struct ttm_fence_object **fence_p)
{
	struct drm_device *dev = file_priv->minor->dev;
	struct drm_psb_private *dev_priv = psb_priv(dev);
	struct ttm_fence_device *fdev = &dev_priv->fdev;
	int ret;
	struct ttm_fence_object *fence;
	struct ttm_object_file *tfile = psb_fpriv(file_priv)->tfile;
	uint32_t handle;

	ret = ttm_fence_user_create(fdev, tfile,
				    engine, fence_types,
				    TTM_FENCE_FLAG_EMIT, &fence, &handle);
	if (ret) {
		/*
		 * Fence creation failed.
		 * Fall back to synchronous operation and idle the engine.
		 */
		if (!(fence_flags & DRM_PSB_FENCE_NO_USER)) {
			/*
			 * Communicate to user-space that
			 * fence creation has failed and that
			 * the engine is idle.
			 */
			fence_arg->handle = ~0;
			fence_arg->error = ret;
		}

		ttm_eu_backoff_reservation(list);

		if (fence_p)
			*fence_p = NULL;
		return;
	}

	ttm_eu_fence_buffer_objects(list, fence);

	if (!(fence_flags & DRM_PSB_FENCE_NO_USER)) {
		struct ttm_fence_info info = ttm_fence_get_info(fence);

		fence_arg->handle = handle;
		fence_arg->fence_class = ttm_fence_class(fence);
		fence_arg->fence_type = ttm_fence_types(fence);
		fence_arg->signaled_types = info.signaled_types;
		fence_arg->error = 0;
	} else {
		ret = ttm_ref_object_base_unref(tfile, handle,
						ttm_fence_type);
		BUG_ON(ret);
	}

	if (fence_p)
		*fence_p = fence;
	else if (fence)
		ttm_fence_object_unref(&fence);
}