static void
add_surface(struct anv_image *image, struct anv_surface *surf)
{
   assert(surf->isl.size > 0); /* isl surface must be initialized */

   surf->offset = align_u32(image->size, surf->isl.alignment);
   image->size = surf->offset + surf->isl.size;
   image->alignment = MAX2(image->alignment, surf->isl.alignment);
}
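/* Illustrative sketch, not driver code: the same packing arithmetic that
 * add_surface() applies, shown with plain integers. example_align_u32() and
 * pack_range() are hypothetical names invented for this example; the driver's
 * align_u32() behaves like the helper below for power-of-two alignments.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t
example_align_u32(uint32_t v, uint32_t a)
{
   assert(a != 0 && (a & (a - 1)) == 0); /* power-of-two alignment only */
   return (v + a - 1) & ~(a - 1);
}

/* Append a range of `size` bytes with the given alignment to a container whose
 * current size is *total, mirroring what add_surface() does to an anv_image. */
static uint32_t
pack_range(uint32_t *total, uint32_t size, uint32_t alignment)
{
   uint32_t offset = example_align_u32(*total, alignment);
   *total = offset + size;
   return offset;
}

int
main(void)
{
   uint32_t image_size = 0;

   /* A 4096-byte surface with 4 KiB alignment, then a 512-byte aux surface
    * with 256-byte alignment. */
   printf("main surface at %u\n", pack_range(&image_size, 4096, 4096)); /* 0 */
   printf("aux surface at %u\n", pack_range(&image_size, 512, 256));    /* 4096 */
   printf("total image size %u\n", image_size);                         /* 4608 */
   return 0;
}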
static void
radv_descriptor_set_destroy(struct radv_device *device,
			    struct radv_descriptor_pool *pool,
			    struct radv_descriptor_set *set,
			    bool free_bo)
{
	if (free_bo && set->size) {
		/* Return the set's range in the pool BO: pop a spare node off
		 * the full list, record the freed offset/size in it, and push
		 * it onto the free list. */
		assert(pool->full_list >= 0);
		int next = pool->free_nodes[pool->full_list].next;
		pool->free_nodes[pool->full_list].next = pool->free_list;
		pool->free_nodes[pool->full_list].offset = (uint8_t*)set->mapped_ptr - pool->mapped_ptr;
		pool->free_nodes[pool->full_list].size = align_u32(set->size, 32);
		pool->free_list = pool->full_list;
		pool->full_list = next;
	}

	if (set->dynamic_descriptors)
		vk_free2(&device->alloc, NULL, set->dynamic_descriptors);

	if (!list_empty(&set->descriptor_pool))
		list_del(&set->descriptor_pool);

	vk_free2(&device->alloc, NULL, set);
}
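/* Illustrative sketch, not driver code: the index-linked node recycling that
 * radv_descriptor_set_destroy() performs above. A "full" list holds spare
 * bookkeeping nodes; freeing a set pops one spare node, records the freed
 * range, and pushes the node onto the "free" list. struct example_node,
 * struct example_pool and example_recycle_range() are hypothetical,
 * simplified stand-ins for the pool's free_nodes machinery.
 */
#include <stdint.h>

struct example_node {
   int      next;   /* index of the next node in its list, or -1 */
   uint32_t offset; /* start of the free range in the pool buffer */
   uint32_t size;   /* size of the free range */
};

struct example_pool {
   struct example_node nodes[64];
   int free_list; /* ranges available for reuse, or -1 */
   int full_list; /* spare nodes not describing any range, or -1 */
};

/* Return the range [offset, offset + size) to the pool. */
static void
example_recycle_range(struct example_pool *pool, uint32_t offset, uint32_t size)
{
   int node = pool->full_list;               /* pop a spare node */
   pool->full_list = pool->nodes[node].next;

   pool->nodes[node].offset = offset;        /* describe the freed range */
   pool->nodes[node].size = size;

   pool->nodes[node].next = pool->free_list; /* push it onto the free list */
   pool->free_list = node;
}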
static VkResult
radv_descriptor_set_create(struct radv_device *device,
			   struct radv_descriptor_pool *pool,
			   const struct radv_descriptor_set_layout *layout,
			   struct radv_descriptor_set **out_set)
{
	struct radv_descriptor_set *set;
	unsigned range_offset = sizeof(struct radv_descriptor_set) +
		sizeof(struct radeon_winsys_bo *) * layout->buffer_count;
	unsigned mem_size = range_offset +
		sizeof(struct radv_descriptor_range) * layout->dynamic_offset_count;

	if (pool->host_memory_base) {
		/* Host memory was preallocated along with the pool; carve the
		 * set out of it instead of going through the allocator. */
		if (pool->host_memory_end - pool->host_memory_ptr < mem_size)
			return vk_error(VK_ERROR_OUT_OF_POOL_MEMORY_KHR);

		set = (struct radv_descriptor_set*)pool->host_memory_ptr;
		pool->host_memory_ptr += mem_size;
	} else {
		set = vk_alloc2(&device->alloc, NULL, mem_size, 8,
				VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
		if (!set)
			return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
	}

	memset(set, 0, mem_size);

	if (layout->dynamic_offset_count) {
		/* The dynamic descriptor ranges live in the same allocation,
		 * right after the buffer pointers. */
		set->dynamic_descriptors =
			(struct radv_descriptor_range*)((uint8_t*)set + range_offset);
	}

	set->layout = layout;
	if (layout->size) {
		uint32_t layout_size = align_u32(layout->size, 32);
		set->size = layout->size;

		if (!pool->host_memory_base && pool->entry_count == pool->max_entry_count) {
			vk_free2(&device->alloc, NULL, set);
			return vk_error(VK_ERROR_OUT_OF_POOL_MEMORY_KHR);
		}

		/* try to allocate linearly first, so that we don't spend
		 * time looking for gaps if the app only allocates &
		 * resets via the pool. */
		if (pool->current_offset + layout_size <= pool->size) {
			set->bo = pool->bo;
			set->mapped_ptr = (uint32_t*)(pool->mapped_ptr + pool->current_offset);
			set->va = radv_buffer_get_va(set->bo) + pool->current_offset;
			if (!pool->host_memory_base) {
				pool->entries[pool->entry_count].offset = pool->current_offset;
				pool->entries[pool->entry_count].size = layout_size;
				pool->entries[pool->entry_count].set = set;
				pool->entry_count++;
			}
			pool->current_offset += layout_size;
		} else if (!pool->host_memory_base) {
			/* Linear allocation failed: scan the entries, which are
			 * kept sorted by offset, for the first gap that is large
			 * enough to hold this set. */
			uint64_t offset = 0;
			int index;

			for (index = 0; index < pool->entry_count; ++index) {
				if (pool->entries[index].offset - offset >= layout_size)
					break;
				offset = pool->entries[index].offset + pool->entries[index].size;
			}

			if (pool->size - offset < layout_size) {
				vk_free2(&device->alloc, NULL, set);
				return vk_error(VK_ERROR_OUT_OF_POOL_MEMORY_KHR);
			}

			set->bo = pool->bo;
			set->mapped_ptr = (uint32_t*)(pool->mapped_ptr + offset);
			set->va = radv_buffer_get_va(set->bo) + offset;

			/* Insert the new entry at `index` so the array stays
			 * sorted by offset. */
			memmove(&pool->entries[index + 1], &pool->entries[index],
				sizeof(pool->entries[0]) * (pool->entry_count - index));
			pool->entries[index].offset = offset;
			pool->entries[index].size = layout_size;
			pool->entries[index].set = set;
			pool->entry_count++;
		} else
			return vk_error(VK_ERROR_OUT_OF_POOL_MEMORY_KHR);
	}

	if (layout->has_immutable_samplers) {
		/* Copy immutable sampler state directly into the descriptor
		 * memory. */
		for (unsigned i = 0; i < layout->binding_count; ++i) {
			if (!layout->binding[i].immutable_samplers_offset ||
			    layout->binding[i].immutable_samplers_equal)
				continue;

			unsigned offset = layout->binding[i].offset / 4;
			if (layout->binding[i].type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
				offset += 16;

			const uint32_t *samplers =
				(const uint32_t*)((const char*)layout +
						  layout->binding[i].immutable_samplers_offset);
			for (unsigned j = 0; j < layout->binding[i].array_size; ++j) {
				memcpy(set->mapped_ptr + offset, samplers + 4 * j, 16);
				offset += layout->binding[i].size / 4;
			}
		}
	}

	*out_set = set;
	return VK_SUCCESS;
}
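/* Illustrative sketch, not driver code: the sorted-entry gap scan that the
 * newer radv_descriptor_set_create() above falls back to when the linear
 * (bump) allocation no longer fits. Live sets are tracked in an array sorted
 * by offset, so the first gap between consecutive entries that is large
 * enough can hold the new set. struct example_entry and example_find_gap()
 * are hypothetical, simplified stand-ins for pool->entries.
 */
#include <stdbool.h>
#include <stdint.h>

struct example_entry {
   uint64_t offset; /* start of a live set's range in the pool buffer */
   uint32_t size;   /* aligned size of that range */
};

/* Find the lowest offset where `needed` bytes fit, given `count` entries
 * sorted by offset inside a pool of `pool_size` bytes. On success the caller
 * inserts a new entry at *out_index (memmove'ing the tail) so the array stays
 * sorted, just as the driver code does. */
static bool
example_find_gap(const struct example_entry *entries, int count,
                 uint64_t pool_size, uint32_t needed,
                 uint64_t *out_offset, int *out_index)
{
   uint64_t offset = 0;
   int index;

   for (index = 0; index < count; ++index) {
      /* Gap between the end of the previous range and the start of this one. */
      if (entries[index].offset - offset >= needed)
         break;
      offset = entries[index].offset + entries[index].size;
   }

   if (pool_size - offset < needed)
      return false;

   *out_offset = offset;
   *out_index = index;
   return true;
}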
static VkResult
radv_descriptor_set_create(struct radv_device *device,
			   struct radv_descriptor_pool *pool,
			   struct radv_cmd_buffer *cmd_buffer,
			   const struct radv_descriptor_set_layout *layout,
			   struct radv_descriptor_set **out_set)
{
	struct radv_descriptor_set *set;
	unsigned mem_size = sizeof(struct radv_descriptor_set) +
		sizeof(struct radeon_winsys_bo *) * layout->buffer_count;

	set = vk_alloc2(&device->alloc, NULL, mem_size, 8,
			VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (!set)
		return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

	memset(set, 0, mem_size);

	if (layout->dynamic_offset_count) {
		unsigned size = sizeof(struct radv_descriptor_range) *
			layout->dynamic_offset_count;
		set->dynamic_descriptors = vk_alloc2(&device->alloc, NULL, size, 8,
						     VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

		if (!set->dynamic_descriptors) {
			vk_free2(&device->alloc, NULL, set);
			return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
		}
	}

	set->layout = layout;
	if (layout->size) {
		uint32_t layout_size = align_u32(layout->size, 32);
		set->size = layout->size;

		if (!cmd_buffer) {
			/* No command buffer: allocate from the pool BO, trying
			 * a simple bump allocation first. */
			if (pool->current_offset + layout_size <= pool->size &&
			    pool->allocated_sets < pool->max_sets) {
				set->bo = pool->bo;
				set->mapped_ptr = (uint32_t*)(pool->mapped_ptr + pool->current_offset);
				set->va = device->ws->buffer_get_va(set->bo) + pool->current_offset;
				pool->current_offset += layout_size;
				++pool->allocated_sets;
			} else {
				/* Bump allocation failed: first-fit search of the
				 * free list for a recycled range that is large
				 * enough. */
				int entry = pool->free_list, prev_entry = -1;
				uint32_t offset;

				while (entry >= 0) {
					if (pool->free_nodes[entry].size >= layout_size) {
						if (prev_entry >= 0)
							pool->free_nodes[prev_entry].next = pool->free_nodes[entry].next;
						else
							pool->free_list = pool->free_nodes[entry].next;
						break;
					}
					prev_entry = entry;
					entry = pool->free_nodes[entry].next;
				}

				if (entry < 0) {
					/* Also release the dynamic descriptor array
					 * so it doesn't leak on this error path. */
					vk_free2(&device->alloc, NULL, set->dynamic_descriptors);
					vk_free2(&device->alloc, NULL, set);
					return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
				}

				offset = pool->free_nodes[entry].offset;
				/* Park the node on the full list so a later
				 * destroy can reuse it. */
				pool->free_nodes[entry].next = pool->full_list;
				pool->full_list = entry;

				set->bo = pool->bo;
				set->mapped_ptr = (uint32_t*)(pool->mapped_ptr + offset);
				set->va = device->ws->buffer_get_va(set->bo) + offset;
			}
		} else {
			/* Transient set tied to the command buffer: allocate
			 * space from its upload BO instead of the pool. */
			unsigned bo_offset;

			if (!radv_cmd_buffer_upload_alloc(cmd_buffer, set->size, 32,
							  &bo_offset,
							  (void**)&set->mapped_ptr)) {
				vk_free2(&device->alloc, NULL, set->dynamic_descriptors);
				vk_free2(&device->alloc, NULL, set);
				return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
			}

			set->va = device->ws->buffer_get_va(cmd_buffer->upload.upload_bo);
			set->va += bo_offset;
		}
	}

	if (pool)
		list_add(&set->descriptor_pool, &pool->descriptor_sets);
	else
		list_inithead(&set->descriptor_pool);

	/* Write immutable sampler state directly into the descriptor memory. */
	for (unsigned i = 0; i < layout->binding_count; ++i) {
		if (!layout->binding[i].immutable_samplers)
			continue;

		unsigned offset = layout->binding[i].offset / 4;
		if (layout->binding[i].type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
			offset += 16;

		for (unsigned j = 0; j < layout->binding[i].array_size; ++j) {
			struct radv_sampler* sampler = layout->binding[i].immutable_samplers[j];

			memcpy(set->mapped_ptr + offset, &sampler->state, 16);
			offset += layout->binding[i].size / 4;
		}
	}

	*out_set = set;
	return VK_SUCCESS;
}
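/* Illustrative sketch, not driver code: the first-fit search over the free
 * list that the older radv_descriptor_set_create() above uses once bump
 * allocation from the pool BO is exhausted. This builds on the hypothetical
 * struct example_pool / struct example_node types from the sketch after
 * radv_descriptor_set_destroy(); example_first_fit() is likewise an invented
 * name.
 */
static int
example_first_fit(struct example_pool *pool, uint32_t needed, uint32_t *out_offset)
{
   int entry = pool->free_list, prev = -1;

   /* Walk the free list and take the first range that is large enough. */
   while (entry >= 0) {
      if (pool->nodes[entry].size >= needed) {
         /* Unlink the node from the free list... */
         if (prev >= 0)
            pool->nodes[prev].next = pool->nodes[entry].next;
         else
            pool->free_list = pool->nodes[entry].next;

         *out_offset = pool->nodes[entry].offset;

         /* ...and park it on the full list so a later free can reuse it. */
         pool->nodes[entry].next = pool->full_list;
         pool->full_list = entry;
         return entry;
      }
      prev = entry;
      entry = pool->nodes[entry].next;
   }

   return -1; /* no recycled range is big enough */
}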