Example #1
/**
 * Create an intel_base.  obj_size and dbg_size specify the real sizes of the
 * object and the debug metadata.  Both allocations are zeroed.
 */
struct intel_base *intel_base_create(const struct intel_handle *handle,
                                     size_t obj_size, bool debug,
                                     VkDebugReportObjectTypeEXT type,
                                     const void *create_info,
                                     size_t dbg_size)
{
    struct intel_base *base;

    if (!obj_size)
        obj_size = sizeof(*base);

    assert(obj_size >= sizeof(*base));

    base = intel_alloc(handle, obj_size, sizeof(int), VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    if (!base)
        return NULL;

    memset(base, 0, obj_size);
    intel_handle_init(&base->handle, type, handle->instance);

    if (debug) {
        base->dbg = intel_base_dbg_create(&base->handle,
                type, create_info, dbg_size);
        if (!base->dbg) {
            intel_free(handle, base);
            return NULL;
        }
    }

    base->get_memory_requirements = intel_base_get_memory_requirements;

    return base;
}
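
For reference, a minimal usage sketch of intel_base_create: the struct intel_thing type, its fields, and intel_thing_create are hypothetical names invented for this illustration, and passing dbg_size = 0 to mean "no extra debug payload" is an assumption; only the intel_base_create signature comes from the example above.

/* Hypothetical derived object: intel_base is the first member, so the
 * pointer returned by intel_base_create can be cast to the derived type. */
struct intel_thing {
    struct intel_base base;     /* must stay first */
    uint32_t payload;           /* illustrative extra state */
};

static struct intel_thing *intel_thing_create(const struct intel_handle *handle,
                                              const void *create_info)
{
    struct intel_thing *thing;

    /* obj_size covers the whole derived struct; debug metadata is requested
     * with an assumed dbg_size of 0 */
    thing = (struct intel_thing *) intel_base_create(handle, sizeof(*thing),
            true, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, create_info, 0);
    if (!thing)
        return NULL;

    thing->payload = 0;

    return thing;
}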
Example #2
static VkResult intel_instance_create(
        const VkInstanceCreateInfo* info,
        const VkAllocationCallbacks* allocator,
        struct intel_instance **pInstance)
{
    struct intel_instance *instance;
    struct icd_instance *icd;
    uint32_t i;

    intel_debug_init();

    icd = icd_instance_create(info->pApplicationInfo, allocator);
    if (!icd)
        return VK_ERROR_OUT_OF_HOST_MEMORY;

    instance = icd_instance_alloc(icd, sizeof(*instance), sizeof(int),
            VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
    if (!instance) {
        icd_instance_destroy(icd);
        return VK_ERROR_OUT_OF_HOST_MEMORY;
    }

    memset(instance, 0, sizeof(*instance));
    intel_handle_init(&instance->handle, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, instance);

    instance->icd = icd;

    for (i = 0; i < info->enabledExtensionCount; i++) {
        const enum intel_global_ext_type ext =
            intel_gpu_lookup_global_extension(
                    info->ppEnabledExtensionNames[i]);

        if (ext != INTEL_GLOBAL_EXT_INVALID) {
            instance->global_exts[ext] = true;
        } else {
            /* Fail creation if an extension is specified that the ICD
             * cannot satisfy; the loader filters out extensions and layers
             * not meant for the ICD.
             */
            icd_instance_destroy(icd);
            intel_instance_destroy(instance);
            return VK_ERROR_EXTENSION_NOT_PRESENT;
        }
    }

    /*
     * This ICD does not support any layers.
     */
    if (info->enabledLayerCount > 0) {
        icd_instance_destroy(icd);
        intel_instance_destroy(instance);
        return VK_ERROR_LAYER_NOT_PRESENT;
    }

    *pInstance = instance;

    return VK_SUCCESS;
}
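
As a companion to Example #2, a hedged sketch of how the public vkCreateInstance entry point might forward to this static helper; the wrapper name intel_CreateInstance and the cast between VkInstance and struct intel_instance * are assumptions of the sketch, not taken from the example.

/* Assumed public entry point that forwards to the static helper above.
 * The real driver's dispatch mechanism may differ. */
VKAPI_ATTR VkResult VKAPI_CALL intel_CreateInstance(
        const VkInstanceCreateInfo*  pCreateInfo,
        const VkAllocationCallbacks* pAllocator,
        VkInstance*                  pInstance)
{
    return intel_instance_create(pCreateInfo, pAllocator,
            (struct intel_instance **) pInstance);
}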
Example #3
VkResult intel_gpu_create(const struct intel_instance *instance, int devid,
                            const char *primary_node, const char *render_node,
                            struct intel_gpu **gpu_ret)
{
    const int gen = devid_to_gen(devid);
    size_t primary_len, render_len;
    struct intel_gpu *gpu;

    if (gen < 0) {
        intel_log(instance, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
                VK_NULL_HANDLE, 0, 0, "unsupported device id 0x%04x", devid);
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    gpu = intel_alloc(instance, sizeof(*gpu), sizeof(int), VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
    if (!gpu)
        return VK_ERROR_OUT_OF_HOST_MEMORY;

    memset(gpu, 0, sizeof(*gpu));
    /* there is no VK_DBG_OBJECT_GPU */
    intel_handle_init(&gpu->handle, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, instance);

    gpu->devid = devid;

    primary_len = strlen(primary_node);
    render_len = (render_node) ? strlen(render_node) : 0;

    /* pack both node paths into a single allocation: the primary node string
     * first, followed by the render node string when one is given */
    gpu->primary_node = intel_alloc(gpu, primary_len + 1 +
            ((render_len) ? (render_len + 1) : 0), sizeof(int), VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
    if (!gpu->primary_node) {
        intel_free(instance, gpu);
        return VK_ERROR_OUT_OF_HOST_MEMORY;
    }

    memcpy(gpu->primary_node, primary_node, primary_len + 1);

    if (render_node) {
        gpu->render_node = gpu->primary_node + primary_len + 1;
        memcpy(gpu->render_node, render_node, render_len + 1);
    } else {
        gpu->render_node = gpu->primary_node;
    }

    gpu->gen_opaque = gen;

    switch (intel_gpu_gen(gpu)) {
    case INTEL_GEN(7.5):
        gpu->gt = gen_get_hsw_gt(devid);
        break;
    case INTEL_GEN(7):
        gpu->gt = gen_get_ivb_gt(devid);
        break;
    case INTEL_GEN(6):
        gpu->gt = gen_get_snb_gt(devid);
        break;
    }

    /* 150K dwords */
    gpu->max_batch_buffer_size = sizeof(uint32_t) * 150*1024;

    /* the winsys is prepared for one reloc per two dwords, minus 2 */
    gpu->batch_buffer_reloc_count =
        gpu->max_batch_buffer_size / sizeof(uint32_t) / 2 - 2;

    gpu->primary_fd_internal = -1;
    gpu->render_fd_internal = -1;

    *gpu_ret = gpu;

    return VK_SUCCESS;
}
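
To make the batch-buffer limits in Example #3 concrete and to show a possible call site, here is a hedged sketch: the helper name probe_one_gpu, the device id 0x0412 (a Haswell GT2 part), and the DRM node paths are illustrative assumptions rather than values from the example.

/* Hypothetical caller: the device id and node paths are assumptions.
 * With the limits above, max_batch_buffer_size = 4 * 150 * 1024 = 614400
 * bytes and batch_buffer_reloc_count = 614400 / 4 / 2 - 2 = 76798. */
static VkResult probe_one_gpu(const struct intel_instance *instance,
                              struct intel_gpu **gpu)
{
    return intel_gpu_create(instance, 0x0412,
            "/dev/dri/card0", "/dev/dri/renderD128", gpu);
}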