static int radeon_lookup_or_add_slab_buffer(struct radeon_drm_cs *cs,
                                            struct radeon_bo *bo)
{
    struct radeon_cs_context *csc = cs->csc;
    unsigned hash;
    struct radeon_bo_item *item;
    int idx;
    int real_idx;

    /* Return the existing index if this BO is already in the list. */
    idx = radeon_lookup_buffer(csc, bo);
    if (idx >= 0)
        return idx;

    /* A slab entry is backed by a real buffer; make sure the backing
     * buffer is in the relocation list first.
     */
    real_idx = radeon_lookup_or_add_real_buffer(cs, bo->u.slab.real);

    /* Check if the backing array is large enough. */
    if (csc->num_slab_buffers >= csc->max_slab_buffers) {
        unsigned new_max = MAX2(csc->max_slab_buffers + 16,
                                (unsigned)(csc->max_slab_buffers * 1.3));
        struct radeon_bo_item *new_buffers =
            REALLOC(csc->slab_buffers,
                    csc->max_slab_buffers * sizeof(*new_buffers),
                    new_max * sizeof(*new_buffers));
        if (!new_buffers) {
            fprintf(stderr, "radeon_lookup_or_add_slab_buffer: allocation failure\n");
            return -1;
        }

        csc->max_slab_buffers = new_max;
        csc->slab_buffers = new_buffers;
    }

    /* Initialize the new relocation. */
    idx = csc->num_slab_buffers++;
    item = &csc->slab_buffers[idx];

    item->bo = NULL;
    item->u.slab.real_idx = real_idx;
    radeon_bo_reference(&item->bo, bo);
    p_atomic_inc(&bo->num_cs_references);

    /* Remember the index so the next lookup of this BO is O(1). */
    hash = bo->hash & (ARRAY_SIZE(csc->reloc_indices_hashlist)-1);
    csc->reloc_indices_hashlist[hash] = idx;

    return idx;
}
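/* Aside: the MAX2(n + 16, n * 1.3) growth policy above amortizes
 * reallocation cost: small arrays grow by a fixed step of 16 entries,
 * larger ones geometrically by ~30%. The non-compiled sketch below
 * restates the policy with plain stdlib calls; sketch_grow_array() and
 * its parameters are hypothetical illustrations, not winsys API.
 */
#if 0
#include <stdlib.h>

/* Grow *buf so it can hold at least one more element. Returns 0 on
 * success; on failure the old array stays valid, mirroring the REALLOC
 * error path above.
 */
static int sketch_grow_array(void **buf, unsigned *max, size_t elem_size)
{
    unsigned grown = (unsigned)(*max * 1.3);
    unsigned new_max = *max + 16 > grown ? *max + 16 : grown;
    void *tmp = realloc(*buf, new_max * elem_size);

    if (!tmp)
        return -1;

    *buf = tmp;
    *max = new_max;
    return 0;
}
#endif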
static unsigned radeon_drm_cs_add_buffer(struct radeon_cmdbuf *rcs,
                                         struct pb_buffer *buf,
                                         enum radeon_bo_usage usage,
                                         enum radeon_bo_domain domains,
                                         enum radeon_bo_priority priority)
{
    struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
    struct radeon_bo *bo = (struct radeon_bo*)buf;
    enum radeon_bo_domain added_domains;

    /* If VRAM is just stolen system memory, allow both VRAM and
     * GTT, whichever has free space. If a buffer is evicted from
     * VRAM to GTT, it will stay there.
     */
    if (!cs->ws->info.has_dedicated_vram)
        domains |= RADEON_DOMAIN_GTT;

    enum radeon_bo_domain rd = usage & RADEON_USAGE_READ ? domains : 0;
    enum radeon_bo_domain wd = usage & RADEON_USAGE_WRITE ? domains : 0;
    struct drm_radeon_cs_reloc *reloc;
    int index;

    if (!bo->handle) {
        /* Suballocated buffers have no kernel handle; add the slab entry,
         * then translate it to the reloc index of the backing real buffer.
         */
        index = radeon_lookup_or_add_slab_buffer(cs, bo);
        if (index < 0)
            return 0;

        index = cs->csc->slab_buffers[index].u.slab.real_idx;
    } else {
        index = radeon_lookup_or_add_real_buffer(cs, bo);
    }

    reloc = &cs->csc->relocs[index];
    /* Only domains not already requested for this reloc count toward the
     * memory budget below.
     */
    added_domains = (rd | wd) & ~(reloc->read_domains | reloc->write_domain);
    reloc->read_domains |= rd;
    reloc->write_domain |= wd;
    reloc->flags = MAX2(reloc->flags, priority);
    cs->csc->relocs_bo[index].u.real.priority_usage |= 1u << priority;

    if (added_domains & RADEON_DOMAIN_VRAM)
        cs->base.used_vram += bo->base.size;
    else if (added_domains & RADEON_DOMAIN_GTT)
        cs->base.used_gart += bo->base.size;

    return index;
}
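/* Aside: the added_domains computation above ensures a buffer's size is
 * charged to used_vram/used_gart only once per domain, no matter how many
 * times it is added to the CS. The non-compiled sketch below isolates
 * that accounting; the names (sketch_reloc, sketch_track_domains) are
 * hypothetical illustrations, not winsys API.
 */
#if 0
struct sketch_reloc {
    unsigned read_domains;
    unsigned write_domain;
};

/* Merge the requested read/write domains into the reloc and return only
 * the domains that were not requested before.
 */
static unsigned sketch_track_domains(struct sketch_reloc *reloc,
                                     unsigned rd, unsigned wd)
{
    unsigned added = (rd | wd) & ~(reloc->read_domains | reloc->write_domain);

    reloc->read_domains |= rd;
    reloc->write_domain |= wd;
    return added;
}

/* First call with RADEON_DOMAIN_VRAM returns VRAM (count the size once);
 * a repeated call with the same domains returns 0 (count nothing).
 */
#endif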