Exemple #1
0
VkResult intel_mem_alloc(struct intel_dev *dev,
                           const VkMemoryAllocateInfo *info,
                           struct intel_mem **mem_ret)
{
    /*
     * Allocate a device memory object backed by a winsys buffer object.
     * Any IMAGE_INFO/BUFFER_INFO structs chained to info are ignored:
     * they do not alter how the allocation is made.
     */
    struct intel_mem *mem = (struct intel_mem *)
        intel_base_create(&dev->base.handle, sizeof(*mem), dev->base.dbg,
                VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, info, 0);
    if (!mem)
        return VK_ERROR_OUT_OF_HOST_MEMORY;

    /* back the memory object with a GPU buffer object */
    mem->bo = intel_winsys_alloc_bo(dev->winsys, "vk-gpu-memory",
            info->allocationSize, 0);
    if (!mem->bo) {
        intel_mem_free(mem);
        return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    }

    mem->size = info->allocationSize;
    *mem_ret = mem;

    return VK_SUCCESS;
}
Exemple #2
0
/*
 * Create a command buffer from the pool named in \p info.
 *
 * Returns VK_SUCCESS and stores the new command buffer in *cmd_ret, or an
 * error code (in which case *cmd_ret is untouched).  On failure all
 * partially-constructed state is released via intel_cmd_destroy().
 */
VkResult intel_cmd_create(struct intel_dev *dev,
                            const VkCommandBufferAllocateInfo *info,
                            struct intel_cmd **cmd_ret)
{
    int pipeline_select;
    struct intel_cmd *cmd;
    struct intel_cmd_pool *pool = intel_cmd_pool(info->commandPool);

    /* map the pool's queue family to a PIPELINE_SELECT value */
    switch (pool->queue_family_index) {
    case INTEL_GPU_ENGINE_3D:
        pipeline_select = GEN6_PIPELINE_SELECT_DW0_SELECT_3D;
        break;
    default:
        /* TODOVV: Add validation check for this */
        assert(0 && "icd: Invalid queue_family_index");
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    cmd = (struct intel_cmd *) intel_base_create(&dev->base.handle,
            sizeof(*cmd), dev->base.dbg, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, info, 0);
    if (!cmd)
        return VK_ERROR_OUT_OF_HOST_MEMORY;

    cmd->obj.destroy = cmd_destroy;

    cmd->dev = dev;
    cmd->scratch_bo = dev->cmd_scratch_bo;
    cmd->primary = (info->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
    cmd->pipeline_select = pipeline_select;

    /*
     * XXX This is not quite right.  intel_gpu sets maxMemReferences to
     * batch_buffer_reloc_count, but we may emit up to two relocs, for start
     * and end offsets, for each referenced memories.
     */
    cmd->reloc_count = dev->gpu->batch_buffer_reloc_count;
    cmd->relocs = intel_alloc(cmd, sizeof(cmd->relocs[0]) * cmd->reloc_count,
            4096, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    if (!cmd->relocs) {
        intel_cmd_destroy(cmd);
        return VK_ERROR_OUT_OF_HOST_MEMORY;
    }

    *cmd_ret = cmd;

    return VK_SUCCESS;
}
Exemple #3
0
VkResult intel_query_create(struct intel_dev *dev,
                            const VkQueryPoolCreateInfo *info,
                            struct intel_query **query_ret)
{
    struct intel_query *query;

    query = (struct intel_query *) intel_base_create(&dev->base.handle,
            sizeof(*query), dev->base.dbg, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
            info, 0);
    if (!query)
        return VK_ERROR_OUT_OF_HOST_MEMORY;

    query->type = info->queryType;
    query->slot_count = info->queryCount;

    /*
     * For each query type, the GPU will be asked to write the values of some
     * registers to a buffer before and after a sequence of commands.  We will
     * compare the differences to get the query results.
     */
    switch (info->queryType) {
    case VK_QUERY_TYPE_OCCLUSION:
        query->slot_stride = u_align(sizeof(uint64_t) * 2, 64);
        break;
    case VK_QUERY_TYPE_PIPELINE_STATISTICS:
        query_init_pipeline_statistics(dev, info, query);
        break;
    case VK_QUERY_TYPE_TIMESTAMP:
        query->slot_stride = u_align(sizeof(uint64_t), 64);
        break;
    default:
        assert(!"unknown query type");
        break;
    }

    VkMemoryAllocateInfo mem_reqs;
    mem_reqs.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
    mem_reqs.allocationSize = query->slot_stride * query->slot_count;
    mem_reqs.pNext = NULL;
    mem_reqs.memoryTypeIndex = 0;
    intel_mem_alloc(dev, &mem_reqs, &query->obj.mem);

    query->obj.destroy = query_destroy;

    *query_ret = query;

    return VK_SUCCESS;
}
Exemple #4
0
VkResult intel_semaphore_create(struct intel_dev *dev,
                                const VkSemaphoreCreateInfo *info,
                                struct intel_semaphore **semaphore_ret)
{
    /* create the semaphore as a debug-trackable base object */
    struct intel_semaphore *sem = (struct intel_semaphore *)
        intel_base_create(&dev->base.handle, sizeof(*sem), dev->base.dbg,
                VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT, info, 0);
    if (!sem)
        return VK_ERROR_OUT_OF_HOST_MEMORY;

    sem->obj.destroy = semaphore_destroy;
    sem->references = 0;

    *semaphore_ret = sem;

    return VK_SUCCESS;
}
Exemple #5
0
/*
 * Create a queue for the given hardware engine.
 *
 * Initializes the hardware context, the atomic counter BO, and an internal
 * fence used for queue synchronization.  Returns VK_SUCCESS and stores the
 * queue in *queue_ret, or an error code after releasing partial state.
 */
VkResult intel_queue_create(struct intel_dev *dev,
                            enum intel_gpu_engine_type engine,
                            struct intel_queue **queue_ret)
{
    struct intel_queue *queue;
    enum intel_ring_type ring;
    VkFenceCreateInfo fence_info;
    VkResult ret;

    /* map the engine type to a kernel ring */
    switch (engine) {
    case INTEL_GPU_ENGINE_3D:
        ring = INTEL_RING_RENDER;
        break;
    default:
        intel_dev_log(dev, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                      &dev->base, 0, 0,
                      "invalid engine type");
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    queue = (struct intel_queue *) intel_base_create(&dev->base.handle,
            sizeof(*queue), dev->base.dbg, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT, NULL, 0);
    if (!queue)
        return VK_ERROR_OUT_OF_HOST_MEMORY;

    queue->dev = dev;
    queue->ring = ring;

    if (queue_init_hw_and_atomic_bo(queue) != VK_SUCCESS) {
        intel_queue_destroy(queue);
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    /* internal fence used to wait for queue idle */
    memset(&fence_info, 0, sizeof(fence_info));
    fence_info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
    ret = intel_fence_create(dev, &fence_info, &queue->fence);
    if (ret != VK_SUCCESS) {
        intel_queue_destroy(queue);
        return ret;
    }

    *queue_ret = queue;

    return VK_SUCCESS;
}
Exemple #6
0
VkResult intel_buf_view_create(struct intel_dev *dev,
                               const VkBufferViewCreateInfo *info,
                               struct intel_buf_view **view_ret)
{
    /* allocate the view object itself */
    struct intel_buf_view *v = (struct intel_buf_view *)
        intel_base_create(&dev->base.handle, sizeof(*v), dev->base.dbg,
                VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT, info, 0);
    if (!v)
        return VK_ERROR_OUT_OF_HOST_MEMORY;

    /* fill in the surface state (not for raw access) */
    intel_buf_view_init(dev, info, v, false);

    *view_ret = v;

    return VK_SUCCESS;
}
Exemple #7
0
VkResult intel_img_view_create(struct intel_dev *dev,
                               const VkImageViewCreateInfo *info,
                               struct intel_img_view **view_ret)
{
    /* allocate the view object itself */
    struct intel_img_view *v = (struct intel_img_view *)
        intel_base_create(&dev->base.handle, sizeof(*v), dev->base.dbg,
                VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT, info, 0);
    if (!v)
        return VK_ERROR_OUT_OF_HOST_MEMORY;

    intel_img_view_init(dev, info, v);

    /* Initialize attachment view info in case it's needed */
    intel_att_view_init(dev, info, &v->att_view);

    *view_ret = v;

    return VK_SUCCESS;
}
Exemple #8
0
VkResult intel_cmd_pool_create(struct intel_dev *dev,
                            const VkCommandPoolCreateInfo *info,
                            struct intel_cmd_pool **cmd_pool_ret)
{
    /* allocate the pool as a debug-trackable base object */
    struct intel_cmd_pool *pool = (struct intel_cmd_pool *)
        intel_base_create(&dev->base.handle, sizeof(*pool), dev->base.dbg,
                VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT, info, 0);
    if (!pool)
        return VK_ERROR_OUT_OF_HOST_MEMORY;

    pool->obj.destroy = pool_destroy;
    pool->dev = dev;

    /* remember the creation parameters for later command-buffer setup */
    pool->queue_family_index = info->queueFamilyIndex;
    pool->create_flags = info->flags;

    *cmd_pool_ret = pool;

    return VK_SUCCESS;
}
Exemple #9
0
VkResult intel_buf_create(struct intel_dev *dev,
                            const VkBufferCreateInfo *info,
                            struct intel_buf **buf_ret)
{
    /* allocate the buffer object shell; memory is bound separately */
    struct intel_buf *b = (struct intel_buf *)
        intel_base_create(&dev->base.handle, sizeof(*b), dev->base.dbg,
                VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, info, 0);
    if (!b)
        return VK_ERROR_OUT_OF_HOST_MEMORY;

    b->obj.destroy = buf_destroy;
    b->obj.base.get_memory_requirements = buf_get_memory_requirements;

    /* record the creation parameters */
    b->size = info->size;
    b->usage = info->usage;

    *buf_ret = b;

    return VK_SUCCESS;
}
Exemple #10
0
/*
 * Create an event object.
 *
 * The event state lives in a small dedicated device memory allocation.
 * Returns VK_SUCCESS and stores the event in *event_ret, or an error code
 * after releasing partial state.
 */
VkResult intel_event_create(struct intel_dev *dev,
                              const VkEventCreateInfo *info,
                              struct intel_event **event_ret)
{
    struct intel_event *event;
    VkMemoryAllocateInfo mem_reqs;
    VkResult ret;

    event = (struct intel_event *) intel_base_create(&dev->base.handle,
            sizeof(*event), dev->base.dbg, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT, info, 0);
    if (!event)
        return VK_ERROR_OUT_OF_HOST_MEMORY;

    /* set the destroy hook early so the error path can clean up fully */
    event->obj.destroy = event_destroy;

    mem_reqs.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
    mem_reqs.allocationSize = 4; // We know allocation is page aligned
    mem_reqs.pNext = NULL;
    mem_reqs.memoryTypeIndex = 0;
    /* Fix: propagate allocation failure instead of returning VK_SUCCESS
     * with an invalid backing store. */
    ret = intel_mem_alloc(dev, &mem_reqs, &event->obj.mem);
    if (ret != VK_SUCCESS) {
        intel_event_destroy(event);
        return ret;
    }

    *event_ret = event;

    return VK_SUCCESS;
}
Exemple #11
0
VkResult intel_dev_create(struct intel_gpu *gpu,
                          const VkDeviceCreateInfo *info,
                          struct intel_dev **dev_ret)
{
    struct intel_dev *dev;
    uint32_t i;
    VkResult ret;

    // ICD limited to a single virtual device
    if (gpu->winsys)
        return VK_ERROR_INITIALIZATION_FAILED;

    dev = (struct intel_dev *) intel_base_create(&gpu->handle,
            sizeof(*dev), false,
            VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, info, sizeof(struct intel_dev_dbg));
    if (!dev)
        return VK_ERROR_OUT_OF_HOST_MEMORY;

    for (i = 0; i < info->enabledExtensionCount; i++) {
        const enum intel_phy_dev_ext_type ext =
            intel_gpu_lookup_phy_dev_extension(gpu,
                    info->ppEnabledExtensionNames[i]);

        if (ext != INTEL_PHY_DEV_EXT_INVALID)
            dev->phy_dev_exts[ext] = true;
    }

    dev->gpu = gpu;

    ret = intel_gpu_init_winsys(gpu);
    if (ret != VK_SUCCESS) {
        intel_dev_destroy(dev);
        return ret;
    }

    dev->winsys = gpu->winsys;

    dev->cmd_scratch_bo = intel_winsys_alloc_bo(dev->winsys,
            "command buffer scratch", 4096, false);
    if (!dev->cmd_scratch_bo) {
        intel_dev_destroy(dev);
        return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    }

    if (!dev_create_meta_shaders(dev)) {
        intel_dev_destroy(dev);
        return VK_ERROR_OUT_OF_HOST_MEMORY;
    }

    ret = intel_desc_region_create(dev, &dev->desc_region);
    if (ret != VK_SUCCESS) {
        intel_dev_destroy(dev);
        return ret;
    }

    intel_pipeline_init_default_sample_patterns(dev,
            (uint8_t *) &dev->sample_pattern_1x,
            (uint8_t *) &dev->sample_pattern_2x,
            (uint8_t *) &dev->sample_pattern_4x,
            (uint8_t *) dev->sample_pattern_8x,
            (uint8_t *) dev->sample_pattern_16x);

    ret = dev_create_queues(dev, info->pQueueCreateInfos,
            info->queueCreateInfoCount);
    if (ret != VK_SUCCESS) {
        intel_dev_destroy(dev);
        return ret;
    }

    *dev_ret = dev;

    return VK_SUCCESS;
}
Exemple #12
0
/*
 * Create an image object and compute its memory layout.
 *
 * Computes the total BO size from the main surface layout, then appends
 * (each 4096-aligned) an optional auxiliary surface and, for packed
 * depth/stencil formats on this hardware, a separate S8 stencil surface.
 * When scanout is set, the image is additionally initialized for WSI
 * presentation.
 *
 * NOTE(review): the allocator parameter is unused here — presumably
 * consumed by callers/intel_base_create elsewhere; confirm.
 *
 * Returns VK_SUCCESS and stores the image in *img_ret, or an error code
 * after releasing partial state via intel_img_destroy().
 */
VkResult intel_img_create(struct intel_dev *dev,
                          const VkImageCreateInfo *info,
                          const VkAllocationCallbacks *allocator,
                          bool scanout,
                          struct intel_img **img_ret)
{
    struct intel_img *img;
    struct intel_layout *layout;

    img = (struct intel_img *) intel_base_create(&dev->base.handle,
            sizeof(*img), dev->base.dbg, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, info, 0);
    if (!img)
        return VK_ERROR_OUT_OF_HOST_MEMORY;

    layout = &img->layout;

    /* record creation parameters */
    img->type = info->imageType;
    img->depth = info->extent.depth;
    img->mip_levels = info->mipLevels;
    img->array_size = info->arrayLayers;
    img->usage = info->usage;
    img->sample_count = (uint32_t) info->samples;
    intel_layout_init(layout, dev, info, scanout);

    /* main surface size */
    img->total_size = img->layout.bo_stride * img->layout.bo_height;

    /* auxiliary surface (e.g. HiZ/MCS) follows, page-aligned */
    if (layout->aux != INTEL_LAYOUT_AUX_NONE) {
        img->aux_offset = u_align(img->total_size, 4096);
        img->total_size = img->aux_offset +
            layout->aux_stride * layout->aux_height;
    }

    /* hardware wants stencil in its own W-tiled surface: build a second
     * layout for an S8_UINT sibling image and append it, page-aligned */
    if (layout->separate_stencil) {
        VkImageCreateInfo s8_info;

        img->s8_layout = intel_alloc(img, sizeof(*img->s8_layout), sizeof(int),
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
        if (!img->s8_layout) {
            intel_img_destroy(img);
            return VK_ERROR_OUT_OF_HOST_MEMORY;
        }

        s8_info = *info;
        s8_info.format = VK_FORMAT_S8_UINT;
        /* no stencil texturing */
        s8_info.usage &= ~VK_IMAGE_USAGE_SAMPLED_BIT;
        assert(icd_format_is_ds(info->format));

        intel_layout_init(img->s8_layout, dev, &s8_info, scanout);

        img->s8_offset = u_align(img->total_size, 4096);
        img->total_size = img->s8_offset +
            img->s8_layout->bo_stride * img->s8_layout->bo_height;
    }

    if (scanout) {
        VkResult ret = intel_wsi_img_init(img);
        if (ret != VK_SUCCESS) {
            intel_img_destroy(img);
            return ret;
        }
    }

    img->obj.destroy = img_destroy;
    img->obj.base.get_memory_requirements = img_get_memory_requirements;

    *img_ret = img;

    return VK_SUCCESS;
}