/**
 * Record an item for later decoding.
 */
void cmd_writer_record(struct intel_cmd *cmd,
                       enum intel_cmd_writer_type which,
                       enum intel_cmd_item_type type,
                       size_t offset, size_t size)
{
    struct intel_cmd_writer *writer = &cmd->writers[which];
    struct intel_cmd_item *item;

    if (writer->item_used == writer->item_alloc) {
        const unsigned new_alloc = (writer->item_alloc) ?
            writer->item_alloc << 1 : 256;
        struct intel_cmd_item *items;

        items = intel_alloc(cmd, sizeof(writer->items[0]) * new_alloc,
                sizeof(int), VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
        if (!items) {
            writer->item_used = 0;
            cmd_fail(cmd, VK_ERROR_OUT_OF_HOST_MEMORY);
            return;
        }

        memcpy(items, writer->items,
                sizeof(writer->items[0]) * writer->item_alloc);

        intel_free(cmd, writer->items);

        writer->items = items;
        writer->item_alloc = new_alloc;
    }

    item = &writer->items[writer->item_used++];
    item->type = type;
    item->offset = offset;
    item->size = size;
}
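/*
 * A minimal usage sketch (not part of the driver): an emit path can tag the
 * range it just wrote into the batch writer so the decoder can annotate it
 * later.  INTEL_CMD_WRITER_BATCH and INTEL_CMD_ITEM_BLOB are assumed to be
 * members of the enums used above; offsets and sizes are taken to be in
 * bytes, hence the dword-to-byte shifts.
 */
#if 0
static void example_record_blob(struct intel_cmd *cmd,
                                size_t dw_offset, size_t dw_count)
{
    /* record [offset, offset + size) of the batch writer as an opaque blob */
    cmd_writer_record(cmd, INTEL_CMD_WRITER_BATCH, INTEL_CMD_ITEM_BLOB,
            dw_offset << 2, dw_count << 2);
}
#endif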
/**
 * Create an intel_base.  obj_size and dbg_size specify the real sizes of the
 * object and the debug metadata.  Both allocations are zeroed.
 */
struct intel_base *intel_base_create(const struct intel_handle *handle,
                                     size_t obj_size, bool debug,
                                     VkDebugReportObjectTypeEXT type,
                                     const void *create_info,
                                     size_t dbg_size)
{
    struct intel_base *base;

    if (!obj_size)
        obj_size = sizeof(*base);
    assert(obj_size >= sizeof(*base));

    base = intel_alloc(handle, obj_size, sizeof(int),
            VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    if (!base)
        return NULL;

    memset(base, 0, obj_size);
    intel_handle_init(&base->handle, type, handle->instance);

    if (debug) {
        base->dbg = intel_base_dbg_create(&base->handle, type,
                create_info, dbg_size);
        if (!base->dbg) {
            intel_free(handle, base);
            return NULL;
        }
    }

    base->get_memory_requirements = intel_base_get_memory_requirements;

    return base;
}
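/*
 * A minimal sketch (not part of the driver) of the embedding pattern
 * intel_base_create supports: a derived object places struct intel_base as
 * its first member and passes its own size as obj_size, so a single zeroed
 * allocation covers both base and payload.  example_obj and
 * example_obj_create are hypothetical names.
 */
#if 0
struct example_obj {
    struct intel_base base;     /* must be the first member */
    uint32_t payload;
};

static struct example_obj *example_obj_create(struct intel_dev *dev)
{
    return (struct example_obj *) intel_base_create(&dev->base.handle,
            sizeof(struct example_obj), dev->base.dbg,
            VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, NULL, 0);
}
#endif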
/**
 * Create an intel_base_dbg.  When dbg_size is non-zero, a buffer of that
 * size is allocated and zeroed.
 */
struct intel_base_dbg *intel_base_dbg_create(const struct intel_handle *handle,
                                             VkDebugReportObjectTypeEXT type,
                                             const void *create_info,
                                             size_t dbg_size)
{
    struct intel_base_dbg *dbg;

    if (!dbg_size)
        dbg_size = sizeof(*dbg);
    assert(dbg_size >= sizeof(*dbg));

    dbg = intel_alloc(handle, dbg_size, sizeof(int),
            VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    if (!dbg)
        return NULL;

    memset(dbg, 0, dbg_size);

    dbg->type = type;

    if (!base_dbg_copy_create_info(handle, dbg, create_info)) {
        intel_free(handle, dbg);
        return NULL;
    }

    return dbg;
}
VkResult intel_cmd_create(struct intel_dev *dev,
                          const VkCommandBufferAllocateInfo *info,
                          struct intel_cmd **cmd_ret)
{
    int pipeline_select;
    struct intel_cmd *cmd;
    struct intel_cmd_pool *pool = intel_cmd_pool(info->commandPool);

    switch (pool->queue_family_index) {
    case INTEL_GPU_ENGINE_3D:
        pipeline_select = GEN6_PIPELINE_SELECT_DW0_SELECT_3D;
        break;
    default:
        /* TODOVV: Add validation check for this */
        assert(0 && "icd: Invalid queue_family_index");
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    cmd = (struct intel_cmd *) intel_base_create(&dev->base.handle,
            sizeof(*cmd), dev->base.dbg,
            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, info, 0);
    if (!cmd)
        return VK_ERROR_OUT_OF_HOST_MEMORY;

    cmd->obj.destroy = cmd_destroy;

    cmd->dev = dev;
    cmd->scratch_bo = dev->cmd_scratch_bo;
    cmd->primary = (info->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
    cmd->pipeline_select = pipeline_select;

    /*
     * XXX This is not quite right.  intel_gpu sets maxMemReferences to
     * batch_buffer_reloc_count, but we may emit up to two relocs, for start
     * and end offsets, for each referenced memory.
     */
    cmd->reloc_count = dev->gpu->batch_buffer_reloc_count;
    cmd->relocs = intel_alloc(cmd, sizeof(cmd->relocs[0]) * cmd->reloc_count,
            4096, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    if (!cmd->relocs) {
        intel_cmd_destroy(cmd);
        return VK_ERROR_OUT_OF_HOST_MEMORY;
    }

    *cmd_ret = cmd;

    return VK_SUCCESS;
}
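/*
 * A minimal allocation sketch (illustrative only; dev and pool_handle are
 * assumed valid): the dispatchable vkAllocateCommandBuffers path would fill
 * a VkCommandBufferAllocateInfo like this before calling intel_cmd_create.
 */
#if 0
static VkResult example_alloc_cmd(struct intel_dev *dev,
                                  VkCommandPool pool_handle,
                                  struct intel_cmd **cmd_ret)
{
    const VkCommandBufferAllocateInfo info = {
        .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
        .commandPool = pool_handle,
        .level = VK_COMMAND_BUFFER_LEVEL_PRIMARY,
        .commandBufferCount = 1,
    };

    return intel_cmd_create(dev, &info, cmd_ret);
}
#endif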
VkResult intel_img_create(struct intel_dev *dev,
                          const VkImageCreateInfo *info,
                          const VkAllocationCallbacks *allocator,
                          bool scanout,
                          struct intel_img **img_ret)
{
    struct intel_img *img;
    struct intel_layout *layout;

    img = (struct intel_img *) intel_base_create(&dev->base.handle,
            sizeof(*img), dev->base.dbg,
            VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, info, 0);
    if (!img)
        return VK_ERROR_OUT_OF_HOST_MEMORY;

    layout = &img->layout;

    img->type = info->imageType;
    img->depth = info->extent.depth;
    img->mip_levels = info->mipLevels;
    img->array_size = info->arrayLayers;
    img->usage = info->usage;
    img->sample_count = (uint32_t) info->samples;

    intel_layout_init(layout, dev, info, scanout);

    img->total_size = img->layout.bo_stride * img->layout.bo_height;

    if (layout->aux != INTEL_LAYOUT_AUX_NONE) {
        img->aux_offset = u_align(img->total_size, 4096);
        img->total_size = img->aux_offset +
            layout->aux_stride * layout->aux_height;
    }

    if (layout->separate_stencil) {
        VkImageCreateInfo s8_info;

        img->s8_layout = intel_alloc(img, sizeof(*img->s8_layout),
                sizeof(int), VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
        if (!img->s8_layout) {
            intel_img_destroy(img);
            return VK_ERROR_OUT_OF_HOST_MEMORY;
        }

        s8_info = *info;
        s8_info.format = VK_FORMAT_S8_UINT;
        /* no stencil texturing */
        s8_info.usage &= ~VK_IMAGE_USAGE_SAMPLED_BIT;
        assert(icd_format_is_ds(info->format));

        intel_layout_init(img->s8_layout, dev, &s8_info, scanout);

        img->s8_offset = u_align(img->total_size, 4096);
        img->total_size = img->s8_offset +
            img->s8_layout->bo_stride * img->s8_layout->bo_height;
    }

    if (scanout) {
        VkResult ret = intel_wsi_img_init(img);
        if (ret != VK_SUCCESS) {
            intel_img_destroy(img);
            return ret;
        }
    }

    img->obj.destroy = img_destroy;
    img->obj.base.get_memory_requirements = img_get_memory_requirements;

    *img_ret = img;

    return VK_SUCCESS;
}
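/*
 * A worked sizing example for the 4 KB sub-allocation scheme above
 * (illustrative numbers): if the main surface occupies
 * bo_stride * bo_height = 100000 bytes and the image carries a separate
 * stencil surface, then
 *
 *   s8_offset  = u_align(100000, 4096) = 102400
 *   total_size = 102400 + s8_stride * s8_height
 *
 * An aux (HiZ/MCS) surface, when present, is appended the same way, before
 * the stencil surface.
 */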
static bool base_dbg_copy_create_info(const struct intel_handle *handle,
                                      struct intel_base_dbg *dbg,
                                      const void *create_info)
{
    const union {
        const void *ptr;
        const struct {
            VkStructureType struct_type;
            void *next;
        } *header;
    } info = { .ptr = create_info };
    size_t shallow_copy = 0;

    if (!create_info)
        return true;

    switch (dbg->type) {
    case VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT:
        assert(info.header->struct_type == VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO);
        break;
    case VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT:
        assert(info.header->struct_type == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);
        break;
    case VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT:
        assert(info.header->struct_type == VK_STRUCTURE_TYPE_EVENT_CREATE_INFO);
        shallow_copy = sizeof(VkEventCreateInfo);
        break;
    case VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT:
        assert(info.header->struct_type == VK_STRUCTURE_TYPE_FENCE_CREATE_INFO);
        shallow_copy = sizeof(VkFenceCreateInfo);
        break;
    case VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT:
        assert(info.header->struct_type == VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO);
        shallow_copy = sizeof(VkQueryPoolCreateInfo);
        break;
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT:
        assert(info.header->struct_type == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);
        shallow_copy = sizeof(VkBufferCreateInfo);
        break;
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT:
        assert(info.header->struct_type == VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO);
        shallow_copy = sizeof(VkBufferViewCreateInfo);
        break;
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT:
        assert(info.header->struct_type == VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO);
        shallow_copy = sizeof(VkImageCreateInfo);
        break;
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT:
        assert(info.header->struct_type == VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO);
        shallow_copy = sizeof(VkImageViewCreateInfo);
        break;
    case VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT:
        assert(info.header->struct_type == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);
        shallow_copy = sizeof(VkSamplerCreateInfo);
        break;
    case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT:
        /* no create info */
        break;
    case VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT:
        assert(info.header->struct_type == VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO);
        shallow_copy = sizeof(VkCommandPoolCreateInfo);
        break;
    case VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT:
        assert(info.header->struct_type == VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO);
        shallow_copy = sizeof(VkCommandBufferAllocateInfo);
        break;
    case VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT:
        assert(info.header->struct_type == VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO);
        break;
    case VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT:
        assert(info.header->struct_type == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);
        shallow_copy = sizeof(VkFramebufferCreateInfo);
        break;
    case VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT:
        assert(info.header->struct_type == VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO);
        shallow_copy = sizeof(VkRenderPassCreateInfo);
        break;
    case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT:
        assert(info.header->struct_type == VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO);
        /* TODO */
        shallow_copy = sizeof(VkDescriptorSetLayoutCreateInfo) * 0;
        break;
    case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT:
        assert(info.header->struct_type == VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO);
        shallow_copy = sizeof(VkDescriptorPoolCreateInfo);
        break;
    default:
        assert(!"unknown dbg object type");
        return false;
    }

    if (shallow_copy) {
        dbg->create_info = intel_alloc(handle, shallow_copy, sizeof(int),
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
        if (!dbg->create_info)
            return false;

        memcpy(dbg->create_info, create_info, shallow_copy);
        dbg->create_info_size = shallow_copy;
    } else if (info.header->struct_type ==
            VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO) {
        const VkMemoryAllocateInfo *src = info.ptr;
        VkMemoryAllocateInfo *dst;
        size_t size = sizeof(*src);

        dbg->create_info_size = size;
        dst = intel_alloc(handle, size, sizeof(int),
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
        if (!dst)
            return false;
        memcpy(dst, src, sizeof(*src));

        dbg->create_info = dst;
    } else if (info.header->struct_type ==
            VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO) {
        const VkDeviceCreateInfo *src = info.ptr;
        VkDeviceCreateInfo *dst;
        uint8_t *d;
        size_t size;

        size = sizeof(*src);
        dbg->create_info_size = size;

        size += sizeof(src->pQueueCreateInfos[0]) * src->queueCreateInfoCount;
        for (uint32_t i = 0; i < src->queueCreateInfoCount; i++) {
            size += src->pQueueCreateInfos[i].queueCount * sizeof(float);
        }
        size += sizeof(src->ppEnabledExtensionNames[0]) *
            src->enabledExtensionCount;
        for (uint32_t i = 0; i < src->enabledExtensionCount; i++) {
            size += strlen(src->ppEnabledExtensionNames[i]) + 1;
        }

        dst = intel_alloc(handle, size, sizeof(int),
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
        if (!dst)
            return false;
        memcpy(dst, src, sizeof(*src));

        d = (uint8_t *) dst;
        d += sizeof(*src);

        size = sizeof(src->pQueueCreateInfos[0]) * src->queueCreateInfoCount;
        memcpy(d, src->pQueueCreateInfos, size);
        dst->pQueueCreateInfos = (const VkDeviceQueueCreateInfo *) d;
        d += size;

        for (uint32_t i = 0; i < src->queueCreateInfoCount; i++) {
            size = sizeof(float) * dst->pQueueCreateInfos[i].queueCount;
            memcpy(d, src->pQueueCreateInfos[i].pQueuePriorities, size);
            *((float **) &dst->pQueueCreateInfos[i].pQueuePriorities) =
                (float *) d;
            d += size;
        }

        size = sizeof(src->ppEnabledExtensionNames[0]) *
            src->enabledExtensionCount;
        dst->ppEnabledExtensionNames = (const char **) d;
        memcpy(d, src->ppEnabledExtensionNames, size);
        d += size;

        for (uint32_t i = 0; i < src->enabledExtensionCount; i++) {
            char **ptr = (char **) &dst->ppEnabledExtensionNames[i];

            strcpy((char *) d, src->ppEnabledExtensionNames[i]);
            *ptr = (char *) d;
            d += strlen(src->ppEnabledExtensionNames[i]) + 1;
        }

        dbg->create_info = dst;
    } else if (info.header->struct_type ==
            VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO) {
        /* TODO: What do we want to copy here? */
    }

    return true;
}
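/*
 * Layout of the single deep-copy allocation made above for a
 * VkDeviceCreateInfo (illustrative):
 *
 *   [VkDeviceCreateInfo]
 *   [VkDeviceQueueCreateInfo * queueCreateInfoCount]
 *   [float priorities for queue info 0][priorities for queue info 1]...
 *   [const char * pointer table, enabledExtensionCount entries]
 *   [NUL-terminated extension name strings, back to back]
 *
 * Internal pointers in the copy are fixed up to point back into this same
 * block, so the whole snapshot can be released with a single intel_free.
 */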
VkResult intel_gpu_create(const struct intel_instance *instance, int devid,
                          const char *primary_node, const char *render_node,
                          struct intel_gpu **gpu_ret)
{
    const int gen = devid_to_gen(devid);
    size_t primary_len, render_len;
    struct intel_gpu *gpu;

    if (gen < 0) {
        intel_log(instance, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
                VK_NULL_HANDLE, 0, 0,
                "unsupported device id 0x%04x", devid);
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    gpu = intel_alloc(instance, sizeof(*gpu), sizeof(int),
            VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
    if (!gpu)
        return VK_ERROR_OUT_OF_HOST_MEMORY;
    memset(gpu, 0, sizeof(*gpu));

    /* there is no VK_DBG_OBJECT_GPU */
    intel_handle_init(&gpu->handle,
            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, instance);

    gpu->devid = devid;

    primary_len = strlen(primary_node);
    render_len = (render_node) ? strlen(render_node) : 0;

    gpu->primary_node = intel_alloc(gpu, primary_len + 1 +
            ((render_len) ? (render_len + 1) : 0), sizeof(int),
            VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
    if (!gpu->primary_node) {
        intel_free(instance, gpu);
        return VK_ERROR_OUT_OF_HOST_MEMORY;
    }

    memcpy(gpu->primary_node, primary_node, primary_len + 1);

    if (render_node) {
        gpu->render_node = gpu->primary_node + primary_len + 1;
        memcpy(gpu->render_node, render_node, render_len + 1);
    } else {
        gpu->render_node = gpu->primary_node;
    }

    gpu->gen_opaque = gen;

    switch (intel_gpu_gen(gpu)) {
    case INTEL_GEN(7.5):
        gpu->gt = gen_get_hsw_gt(devid);
        break;
    case INTEL_GEN(7):
        gpu->gt = gen_get_ivb_gt(devid);
        break;
    case INTEL_GEN(6):
        gpu->gt = gen_get_snb_gt(devid);
        break;
    }

    /* 150K dwords */
    gpu->max_batch_buffer_size = sizeof(uint32_t) * 150 * 1024;

    /* the winsys is prepared for one reloc every two dwords, then minus 2 */
    gpu->batch_buffer_reloc_count =
        gpu->max_batch_buffer_size / sizeof(uint32_t) / 2 - 2;

    gpu->primary_fd_internal = -1;
    gpu->render_fd_internal = -1;

    *gpu_ret = gpu;

    return VK_SUCCESS;
}
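/*
 * Worked out, the sizing above gives, for every gen:
 *
 *   max_batch_buffer_size    = 4 bytes * 150 * 1024 dwords = 600 KiB
 *   batch_buffer_reloc_count = 153600 / 2 - 2              = 76798
 *
 * i.e. room for one relocation per two batch dwords, minus two entries of
 * headroom, matching the winsys comment in intel_gpu_create.
 */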