/* Destroy a GPU object: release winsys/WSI state, then the node-path
 * storage and the object itself.  primary_node is a single allocation
 * that also backs render_node (see intel_gpu_create()), so only the
 * primary pointer is freed. */
void intel_gpu_destroy(struct intel_gpu *gpu)
{
    intel_wsi_gpu_cleanup(gpu);
    intel_gpu_cleanup_winsys(gpu);

    intel_free(gpu, gpu->primary_node);
    intel_free(gpu, gpu);
}
/* Free a debug metadata object together with its optional tag and the
 * copied create-info buffer. */
void intel_base_dbg_destroy(const struct intel_handle *handle,
                            struct intel_base_dbg *dbg)
{
    if (dbg->tag)
        intel_free(handle, dbg->tag);

    if (dbg->create_info)
        intel_free(handle, dbg->create_info);

    intel_free(handle, dbg);
}
/* Destroy a command buffer: release per-writer and bind state via
 * cmd_reset(), then the relocation array, and finally the base object. */
void intel_cmd_destroy(struct intel_cmd *cmd)
{
    cmd_reset(cmd);

    intel_free(cmd, cmd->relocs);

    intel_base_destroy(&cmd->obj.base);
}
/** * Record an item for later decoding. */ void cmd_writer_record(struct intel_cmd *cmd, enum intel_cmd_writer_type which, enum intel_cmd_item_type type, size_t offset, size_t size) { struct intel_cmd_writer *writer = &cmd->writers[which]; struct intel_cmd_item *item; if (writer->item_used == writer->item_alloc) { const unsigned new_alloc = (writer->item_alloc) ? writer->item_alloc << 1 : 256; struct intel_cmd_item *items; items = intel_alloc(cmd, sizeof(writer->items[0]) * new_alloc, sizeof(int), VK_SYSTEM_ALLOCATION_SCOPE_OBJECT); if (!items) { writer->item_used = 0; cmd_fail(cmd, VK_ERROR_OUT_OF_HOST_MEMORY); return; } memcpy(items, writer->items, sizeof(writer->items[0]) * writer->item_alloc); intel_free(cmd, writer->items); writer->items = items; writer->item_alloc = new_alloc; } item = &writer->items[writer->item_used++]; item->type = type; item->offset = offset; item->size = size; }
/**
 * Create an intel_base.  obj_size and dbg_size give the real sizes of the
 * object and of the debug metadata; zero means "just the base struct".
 * The returned memory is zeroed.  Returns NULL on allocation failure.
 */
struct intel_base *intel_base_create(const struct intel_handle *handle,
                                     size_t obj_size, bool debug,
                                     VkDebugReportObjectTypeEXT type,
                                     const void *create_info,
                                     size_t dbg_size)
{
    struct intel_base *obj;

    if (obj_size == 0)
        obj_size = sizeof(*obj);
    assert(obj_size >= sizeof(*obj));

    obj = intel_alloc(handle, obj_size, sizeof(int),
            VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    if (obj == NULL)
        return NULL;

    memset(obj, 0, obj_size);
    intel_handle_init(&obj->handle, type, handle->instance);

    if (debug) {
        obj->dbg = intel_base_dbg_create(&obj->handle, type,
                create_info, dbg_size);
        if (obj->dbg == NULL) {
            intel_free(handle, obj);
            return NULL;
        }
    }

    obj->get_memory_requirements = intel_base_get_memory_requirements;

    return obj;
}
/**
 * Create an intel_base_dbg of at least sizeof(struct intel_base_dbg)
 * bytes (dbg_size of zero selects exactly that size).  The buffer is
 * zeroed and the create info is copied into it.  Returns NULL on
 * allocation failure.
 */
struct intel_base_dbg *intel_base_dbg_create(const struct intel_handle *handle,
                                             VkDebugReportObjectTypeEXT type,
                                             const void *create_info,
                                             size_t dbg_size)
{
    struct intel_base_dbg *d;

    if (dbg_size == 0)
        dbg_size = sizeof(*d);
    assert(dbg_size >= sizeof(*d));

    d = intel_alloc(handle, dbg_size, sizeof(int),
            VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    if (d == NULL)
        return NULL;

    memset(d, 0, dbg_size);
    d->type = type;

    if (!base_dbg_copy_create_info(handle, d, create_info)) {
        intel_free(handle, d);
        return NULL;
    }

    return d;
}
/* Destroy an image: release the WSI backing if present, free the optional
 * s8_layout allocation, then destroy the base object. */
void intel_img_destroy(struct intel_img *img)
{
    if (img->wsi_data)
        intel_wsi_img_cleanup(img);

    if (img->s8_layout)
        intel_free(img, img->s8_layout);

    intel_base_destroy(&img->obj.base);
}
static void cmd_reset(struct intel_cmd *cmd) { uint32_t i; for (i = 0; i < INTEL_CMD_WRITER_COUNT; i++) cmd_writer_reset(cmd, i); if (cmd->bind.shader_cache.entries) intel_free(cmd, cmd->bind.shader_cache.entries); if (cmd->bind.dset.graphics_data.set_offsets) intel_free(cmd, cmd->bind.dset.graphics_data.set_offsets); if (cmd->bind.dset.graphics_data.dynamic_offsets) intel_free(cmd, cmd->bind.dset.graphics_data.dynamic_offsets); if (cmd->bind.dset.compute_data.set_offsets) intel_free(cmd, cmd->bind.dset.compute_data.set_offsets); if (cmd->bind.dset.compute_data.dynamic_offsets) intel_free(cmd, cmd->bind.dset.compute_data.dynamic_offsets); memset(&cmd->bind, 0, sizeof(cmd->bind)); cmd->reloc_used = 0; cmd->result = VK_SUCCESS; }
/**
 * Free all resources used by a writer.  Note that the initial size is not
 * reset.
 */
static void cmd_writer_reset(struct intel_cmd *cmd,
                             enum intel_cmd_writer_type which)
{
    struct intel_cmd_writer *w = &cmd->writers[which];

    /* drop the CPU mapping before releasing the bo */
    if (w->ptr) {
        intel_bo_unmap(w->bo);
        w->ptr = NULL;
    }

    intel_bo_unref(w->bo);
    w->bo = NULL;

    w->used = 0;
    w->sba_offset = 0;

    if (w->items) {
        intel_free(cmd, w->items);
        w->items = NULL;
        w->item_alloc = 0;
        w->item_used = 0;
    }
}
/* Destroy an intel_base, including its debug metadata when present. */
void intel_base_destroy(struct intel_base *base)
{
    if (base->dbg)
        intel_base_dbg_destroy(&base->handle, base->dbg);
    intel_free(base, base);
}
/**
 * Create an intel_gpu for the device identified by devid.  primary_node
 * and render_node are DRM node path strings; render_node may be NULL, in
 * which case the primary node is used for rendering as well.
 *
 * Returns VK_ERROR_INITIALIZATION_FAILED for an unrecognized device id
 * (devid_to_gen() < 0) and VK_ERROR_OUT_OF_HOST_MEMORY on allocation
 * failure; on success *gpu_ret receives the new object.
 */
VkResult intel_gpu_create(const struct intel_instance *instance, int devid,
                          const char *primary_node, const char *render_node,
                          struct intel_gpu **gpu_ret)
{
    const int gen = devid_to_gen(devid);
    size_t primary_len, render_len;
    struct intel_gpu *gpu;

    if (gen < 0) {
        intel_log(instance, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
                VK_NULL_HANDLE, 0, 0,
                "unsupported device id 0x%04x", devid);
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    gpu = intel_alloc(instance, sizeof(*gpu), sizeof(int),
            VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
    if (!gpu)
        return VK_ERROR_OUT_OF_HOST_MEMORY;
    memset(gpu, 0, sizeof(*gpu));

    /* there is no VK_DBG_OBJECT_GPU */
    intel_handle_init(&gpu->handle,
            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, instance);

    gpu->devid = devid;

    /* both node path strings are packed into one allocation; only
     * primary_node is freed in intel_gpu_destroy() */
    primary_len = strlen(primary_node);
    render_len = (render_node) ? strlen(render_node) : 0;

    gpu->primary_node = intel_alloc(gpu,
            primary_len + 1 + ((render_len) ? (render_len + 1) : 0),
            sizeof(int), VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
    if (!gpu->primary_node) {
        intel_free(instance, gpu);
        return VK_ERROR_OUT_OF_HOST_MEMORY;
    }

    memcpy(gpu->primary_node, primary_node, primary_len + 1);

    if (render_node) {
        /* render path lives right after the primary path's terminator */
        gpu->render_node = gpu->primary_node + primary_len + 1;
        memcpy(gpu->render_node, render_node, render_len + 1);
    } else {
        gpu->render_node = gpu->primary_node;
    }

    gpu->gen_opaque = gen;

    /* per-generation GT level; unknown gens leave gt at 0 from memset */
    switch (intel_gpu_gen(gpu)) {
    case INTEL_GEN(7.5):
        gpu->gt = gen_get_hsw_gt(devid);
        break;
    case INTEL_GEN(7):
        gpu->gt = gen_get_ivb_gt(devid);
        break;
    case INTEL_GEN(6):
        gpu->gt = gen_get_snb_gt(devid);
        break;
    }

    /* 150K dwords */
    gpu->max_batch_buffer_size = sizeof(uint32_t) * 150*1024;

    /* the winsys is prepared for one reloc every two dwords, then minus 2 */
    gpu->batch_buffer_reloc_count =
        gpu->max_batch_buffer_size / sizeof(uint32_t) / 2 - 2;

    /* file descriptors not opened yet */
    gpu->primary_fd_internal = -1;
    gpu->render_fd_internal = -1;

    *gpu_ret = gpu;

    return VK_SUCCESS;
}