static struct pb_buffer *
pb_cache_manager_create_buffer(struct pb_manager *_mgr,
                               pb_size size,
                               const struct pb_desc *desc)
{
   struct pb_cache_manager *mgr = pb_cache_manager(_mgr);
   struct pb_cache_buffer *buf;

   /* get a buffer from the cache */
   buf = (struct pb_cache_buffer *)
         pb_cache_reclaim_buffer(&mgr->cache, size, desc->alignment,
                                 desc->usage, 0);
   if (buf)
      return &buf->base;

   /* create a new one */
   buf = CALLOC_STRUCT(pb_cache_buffer);
   if (!buf)
      return NULL;

   buf->buffer = mgr->provider->create_buffer(mgr->provider, size, desc);

   /* Empty the cache and try again. */
   if (!buf->buffer) {
      pb_cache_release_all_buffers(&mgr->cache);
      buf->buffer = mgr->provider->create_buffer(mgr->provider, size, desc);
   }

   if (!buf->buffer) {
      FREE(buf);
      return NULL;
   }

   assert(pipe_is_referenced(&buf->buffer->reference));
   assert(pb_check_alignment(desc->alignment, buf->buffer->alignment));
   assert(buf->buffer->size >= size);

   pipe_reference_init(&buf->base.reference, 1);
   buf->base.alignment = buf->buffer->alignment;
   buf->base.usage = buf->buffer->usage;
   buf->base.size = buf->buffer->size;
   buf->base.vtbl = &pb_cache_buffer_vtbl;
   buf->mgr = mgr;
   pb_cache_init_entry(&mgr->cache, &buf->cache_entry, &buf->base, 0);

   return &buf->base;
}
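/*
 * A minimal caller sketch for the pb_manager interface above. It assumes a
 * cache manager has already been created around some provider; the helper
 * name, the 4096 alignment, and the PB_USAGE_GPU_WRITE flag are illustrative
 * assumptions, not taken from this file. The call goes through the
 * create_buffer vtbl entry, which dispatches to
 * pb_cache_manager_create_buffer.
 */
static struct pb_buffer *
example_alloc_64k(struct pb_manager *cache_mgr)
{
   struct pb_desc desc = {0};

   desc.alignment = 4096;            /* hypothetical alignment requirement */
   desc.usage = PB_USAGE_GPU_WRITE;  /* hypothetical usage flag */

   /* Hits the cache when a compatible released buffer exists; otherwise
    * falls through to the provider, emptying the cache and retrying once
    * if the provider allocation fails. */
   return cache_mgr->create_buffer(cache_mgr, 64 * 1024, &desc);
}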
static struct amdgpu_winsys_bo *amdgpu_create_bo(struct amdgpu_winsys *ws,
                                                 unsigned size,
                                                 unsigned alignment,
                                                 unsigned usage,
                                                 enum radeon_bo_domain initial_domain,
                                                 unsigned flags)
{
   struct amdgpu_bo_alloc_request request = {0};
   amdgpu_bo_handle buf_handle;
   uint64_t va = 0;
   struct amdgpu_winsys_bo *bo;
   amdgpu_va_handle va_handle;
   int r;

   assert(initial_domain & RADEON_DOMAIN_VRAM_GTT);

   bo = CALLOC_STRUCT(amdgpu_winsys_bo);
   if (!bo) {
      return NULL;
   }

   pb_cache_init_entry(&ws->bo_cache, &bo->cache_entry, &bo->base, 0);
   request.alloc_size = size;
   request.phys_alignment = alignment;

   if (initial_domain & RADEON_DOMAIN_VRAM) {
      request.preferred_heap |= AMDGPU_GEM_DOMAIN_VRAM;
      if (flags & RADEON_FLAG_CPU_ACCESS)
         request.flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
   }
   if (initial_domain & RADEON_DOMAIN_GTT) {
      request.preferred_heap |= AMDGPU_GEM_DOMAIN_GTT;
      if (flags & RADEON_FLAG_GTT_WC)
         request.flags |= AMDGPU_GEM_CREATE_CPU_GTT_USWC;
   }

   r = amdgpu_bo_alloc(ws->dev, &request, &buf_handle);
   if (r) {
      fprintf(stderr, "amdgpu: Failed to allocate a buffer:\n");
      fprintf(stderr, "amdgpu:    size      : %u bytes\n", size);
      fprintf(stderr, "amdgpu:    alignment : %u bytes\n", alignment);
      fprintf(stderr, "amdgpu:    domains   : %u\n", initial_domain);
      goto error_bo_alloc;
   }

   r = amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
                             size, alignment, 0, &va, &va_handle, 0);
   if (r)
      goto error_va_alloc;

   r = amdgpu_bo_va_op(buf_handle, 0, size, va, 0, AMDGPU_VA_OP_MAP);
   if (r)
      goto error_va_map;

   pipe_reference_init(&bo->base.reference, 1);
   bo->base.alignment = alignment;
   bo->base.usage = usage;
   bo->base.size = size;
   bo->base.vtbl = &amdgpu_winsys_bo_vtbl;
   bo->ws = ws;
   bo->bo = buf_handle;
   bo->va = va;
   bo->va_handle = va_handle;
   bo->initial_domain = initial_domain;
   bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);

   if (initial_domain & RADEON_DOMAIN_VRAM)
      ws->allocated_vram += align(size, ws->gart_page_size);
   else if (initial_domain & RADEON_DOMAIN_GTT)
      ws->allocated_gtt += align(size, ws->gart_page_size);

   return bo;

error_va_map:
   amdgpu_va_range_free(va_handle);

error_va_alloc:
   amdgpu_bo_free(buf_handle);

error_bo_alloc:
   FREE(bo);
   return NULL;
}
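/*
 * A minimal sketch of how amdgpu_create_bo might be invoked, e.g. from a
 * winsys buffer-creation entry point. The caller name, the 4096 alignment,
 * and the usage value of 0 are illustrative assumptions; only the
 * amdgpu_create_bo signature and the RADEON_* constants come from the code
 * above.
 */
static struct amdgpu_winsys_bo *
example_create_vram_bo(struct amdgpu_winsys *ws, unsigned size)
{
   /* Request VRAM placement with CPU access required, which the code above
    * translates into AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED. Returns NULL on
    * allocation or VA-mapping failure. */
   return amdgpu_create_bo(ws, size, 4096, 0,
                           RADEON_DOMAIN_VRAM, RADEON_FLAG_CPU_ACCESS);
}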