static void amdgpu_bo_export_import_do_type(enum amdgpu_bo_handle_type type) { struct amdgpu_bo_import_result res = {0}; uint32_t shared_handle; int r; r = amdgpu_bo_export(buffer_handle, type, &shared_handle); CU_ASSERT_EQUAL(r, 0); r = amdgpu_bo_import(device_handle, type, shared_handle, &res); CU_ASSERT_EQUAL(r, 0); CU_ASSERT_EQUAL(res.buf_handle, buffer_handle); CU_ASSERT_EQUAL(res.alloc_size, BUFFER_SIZE); r = amdgpu_bo_free(res.buf_handle); CU_ASSERT_EQUAL(r, 0); }
/*
 * Wrap a dma-buf fd in a new amdgpu_buffer with an initial refcount of 1.
 *
 * Returns the new buffer on success, or NULL on allocation/import failure.
 * Ownership of the returned buffer passes to the caller (release via the
 * matching unref path, which drops bo->ref_count).
 *
 * NOTE(review): `size` is accepted but never used — presumably callers pass
 * the known BO size; confirm whether it should be recorded on the buffer
 * (the import result carries alloc_size if needed).
 */
struct amdgpu_buffer *amdgpu_gem_bo_open_prime(amdgpu_device_handle pDev,
					       int fd_handle, uint32_t size)
{
	struct amdgpu_buffer *bo;
	struct amdgpu_bo_import_result buffer = {0};

	bo = calloc(1, sizeof(*bo));
	if (bo == NULL) {
		return NULL;
	}

	if (amdgpu_bo_import(pDev, amdgpu_bo_handle_type_dma_buf_fd,
			     (uint32_t)fd_handle, &buffer)) {
		free(bo);
		/* Bug fix: was `return FALSE;` — wrong constant for a
		 * pointer-returning function; use NULL like the path above. */
		return NULL;
	}

	bo->bo.amdgpu = buffer.buf_handle;
	bo->ref_count = 1;
	return bo;
}
static struct pb_buffer *amdgpu_bo_from_handle(struct radeon_winsys *rws, struct winsys_handle *whandle, unsigned *stride) { struct amdgpu_winsys *ws = amdgpu_winsys(rws); struct amdgpu_winsys_bo *bo; enum amdgpu_bo_handle_type type; struct amdgpu_bo_import_result result = {0}; uint64_t va; amdgpu_va_handle va_handle; struct amdgpu_bo_info info = {0}; enum radeon_bo_domain initial = 0; int r; /* Initialize the structure. */ bo = CALLOC_STRUCT(amdgpu_winsys_bo); if (!bo) { return NULL; } switch (whandle->type) { case DRM_API_HANDLE_TYPE_SHARED: type = amdgpu_bo_handle_type_gem_flink_name; break; case DRM_API_HANDLE_TYPE_FD: type = amdgpu_bo_handle_type_dma_buf_fd; break; default: return NULL; } r = amdgpu_bo_import(ws->dev, type, whandle->handle, &result); if (r) goto error; /* Get initial domains. */ r = amdgpu_bo_query_info(result.buf_handle, &info); if (r) goto error_query; r = amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general, result.alloc_size, 1 << 20, 0, &va, &va_handle, 0); if (r) goto error_query; r = amdgpu_bo_va_op(result.buf_handle, 0, result.alloc_size, va, 0, AMDGPU_VA_OP_MAP); if (r) goto error_va_map; if (info.preferred_heap & AMDGPU_GEM_DOMAIN_VRAM) initial |= RADEON_DOMAIN_VRAM; if (info.preferred_heap & AMDGPU_GEM_DOMAIN_GTT) initial |= RADEON_DOMAIN_GTT; pipe_reference_init(&bo->base.reference, 1); bo->base.alignment = info.phys_alignment; bo->base.usage = PB_USAGE_GPU_WRITE | PB_USAGE_GPU_READ; bo->bo = result.buf_handle; bo->base.size = result.alloc_size; bo->base.vtbl = &amdgpu_winsys_bo_vtbl; bo->rws = ws; bo->va = va; bo->va_handle = va_handle; bo->initial_domain = initial; bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1); bo->is_shared = true; if (stride) *stride = whandle->stride; if (bo->initial_domain & RADEON_DOMAIN_VRAM) ws->allocated_vram += align(bo->base.size, ws->gart_page_size); else if (bo->initial_domain & RADEON_DOMAIN_GTT) ws->allocated_gtt += align(bo->base.size, ws->gart_page_size); return &bo->base; 
error_va_map: amdgpu_va_range_free(va_handle); error_query: amdgpu_bo_free(result.buf_handle); error: FREE(bo); return NULL; }