Example #1
bool GrVkBuffer::vkUpdateData(const GrVkGpu* gpu, const void* src, size_t srcSizeInBytes,
                              bool* createdNewBuffer) {
    SkASSERT(!this->vkIsMapped());
    VALIDATE();
    if (srcSizeInBytes > fDesc.fSizeInBytes) {
        return false;
    }

    if (!fResource->unique()) {
        // in use by the command buffer, so we need to create a new one
        fResource->unref(gpu);
        fResource = Create(gpu, fDesc);
        if (!fResource) {
            return false;
        }
        if (createdNewBuffer) {
            *createdNewBuffer = true;
        }
    }

    void* mapPtr;
    VkResult err = VK_CALL(gpu, MapMemory(gpu->device(), alloc(), 0, srcSizeInBytes, 0, &mapPtr));

    if (VK_SUCCESS != err) {
        return false;
    }

    memcpy(mapPtr, src, srcSizeInBytes);

    VK_CALL(gpu, UnmapMemory(gpu->device(), alloc()));

    return true;
}
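A hedged caller sketch for the update path above; the function name and the surrounding setup are assumptions, not taken from the source:

// Hypothetical caller (illustrative only): upload new contents and react if
// the backing VkBuffer handle was replaced.
bool uploadData(const GrVkGpu* gpu, GrVkBuffer* buffer,
                const void* data, size_t byteCount) {
    bool createdNewBuffer = false;
    if (!buffer->vkUpdateData(gpu, data, byteCount, &createdNewBuffer)) {
        return false;  // data too large for the buffer, or MapMemory failed
    }
    if (createdNewBuffer) {
        // Anything caching the old VkBuffer handle (e.g. descriptor sets)
        // is now stale and must be rebuilt against the new resource.
    }
    return true;
}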
Example #2
const GrVkBuffer::Resource* GrVkBuffer::Create(const GrVkGpu* gpu, const Desc& desc) {
    VkBuffer       buffer;
    GrVkAlloc      alloc;

    // create the buffer object
    VkBufferCreateInfo bufInfo;
    memset(&bufInfo, 0, sizeof(VkBufferCreateInfo));
    bufInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    bufInfo.flags = 0;
    bufInfo.size = desc.fSizeInBytes;
    switch (desc.fType) {
        case kVertex_Type:
            bufInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
            break;
        case kIndex_Type:
            bufInfo.usage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
            break;
        case kUniform_Type:
            bufInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
            break;
        case kCopyRead_Type:
            bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
            break;
        case kCopyWrite_Type:
            bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;
            break;
        case kTexel_Type:
            bufInfo.usage = VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT;
            break;
    }
    if (!desc.fDynamic) {
        bufInfo.usage |= VK_BUFFER_USAGE_TRANSFER_DST_BIT;
    }

    bufInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
    bufInfo.queueFamilyIndexCount = 0;
    bufInfo.pQueueFamilyIndices = nullptr;

    VkResult err;
    err = VK_CALL(gpu, CreateBuffer(gpu->device(), &bufInfo, nullptr, &buffer));
    if (err) {
        return nullptr;
    }

    if (!GrVkMemory::AllocAndBindBufferMemory(gpu,
                                              buffer,
                                              desc.fType,
                                              desc.fDynamic,
                                              &alloc)) {
        return nullptr;
    }

    const GrVkBuffer::Resource* resource = new GrVkBuffer::Resource(buffer, alloc, desc.fType);
    if (!resource) {
        VK_CALL(gpu, DestroyBuffer(gpu->device(), buffer, nullptr));
        GrVkMemory::FreeBufferMemory(gpu, desc.fType, alloc);
        return nullptr;
    }

    return resource;
}
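The Desc fields and Type values consumed above can be read off the snippets; a sketch of the declarations follows, with member order and everything not referenced in these examples being assumptions (the real declaration lives in GrVkBuffer.h):

// Inferred from usage in these examples; may differ from the real header.
class GrVkBuffer {
public:
    enum Type {
        kVertex_Type,
        kIndex_Type,
        kUniform_Type,
        kCopyRead_Type,
        kCopyWrite_Type,
        kTexel_Type,
    };

    struct Desc {
        size_t fSizeInBytes;   // requested buffer size
        Type   fType;          // selects the VkBufferUsageFlags
        bool   fDynamic;       // host-visible if true, else a transfer-dst target
    };
    // ...
};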
Example #3
void GrVkBuffer::vkUnmap(const GrVkGpu* gpu) {
    VALIDATE();
    SkASSERT(this->vkIsMapped());

    VK_CALL(gpu, UnmapMemory(gpu->device(), alloc()));

    fMapPtr = nullptr;
}
Example #4
bool GrVkImage::InitImageInfo(const GrVkGpu* gpu, const ImageDesc& imageDesc, GrVkImageInfo* info) {
    VkImage image = 0;
    GrVkAlloc alloc;

    bool isLinear = VK_IMAGE_TILING_LINEAR == imageDesc.fImageTiling;
    VkImageLayout initialLayout = isLinear ? VK_IMAGE_LAYOUT_PREINITIALIZED
                                           : VK_IMAGE_LAYOUT_UNDEFINED;

    // Create Image
    VkSampleCountFlagBits vkSamples;
    if (!GrSampleCountToVkSampleCount(imageDesc.fSamples, &vkSamples)) {
        return false;
    }

    SkASSERT(VK_IMAGE_TILING_OPTIMAL == imageDesc.fImageTiling ||
             VK_SAMPLE_COUNT_1_BIT == vkSamples);

    // sRGB format images may need to be aliased to linear for various reasons (legacy mode):
    VkImageCreateFlags createFlags = GrVkFormatIsSRGB(imageDesc.fFormat, nullptr)
        ? VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT : 0;

    const VkImageCreateInfo imageCreateInfo = {
        VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,         // sType
        NULL,                                        // pNext
        createFlags,                                 // VkImageCreateFlags
        imageDesc.fImageType,                        // VkImageType
        imageDesc.fFormat,                           // VkFormat
        { imageDesc.fWidth, imageDesc.fHeight, 1 },  // VkExtent3D
        imageDesc.fLevels,                           // mipLevels
        1,                                           // arrayLayers
        vkSamples,                                   // samples
        imageDesc.fImageTiling,                      // VkImageTiling
        imageDesc.fUsageFlags,                       // VkImageUsageFlags
        VK_SHARING_MODE_EXCLUSIVE,                   // VkSharingMode
        0,                                           // queueFamilyIndexCount
        0,                                           // pQueueFamilyIndices
        initialLayout                                // initialLayout
    };

    GR_VK_CALL_ERRCHECK(gpu->vkInterface(), CreateImage(gpu->device(), &imageCreateInfo, nullptr,
                                                        &image));

    if (!GrVkMemory::AllocAndBindImageMemory(gpu, image, isLinear, &alloc)) {
        VK_CALL(gpu, DestroyImage(gpu->device(), image, nullptr));
        return false;
    }

    info->fImage = image;
    info->fAlloc = alloc;
    info->fImageTiling = imageDesc.fImageTiling;
    info->fImageLayout = initialLayout;
    info->fFormat = imageDesc.fFormat;
    info->fLevelCount = imageDesc.fLevels;
    return true;
}
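A hedged usage sketch pairing InitImageInfo with DestroyImageInfo from Example #11, assuming both are static helpers on GrVkImage as the call style suggests; the concrete field values are illustrative:

// Sketch: create a 2D sampled image, then tear it down again.
bool createAndDestroyImage(GrVkGpu* gpu, uint32_t w, uint32_t h, VkFormat format) {
    GrVkImage::ImageDesc desc;
    desc.fImageType   = VK_IMAGE_TYPE_2D;
    desc.fFormat      = format;
    desc.fWidth       = w;
    desc.fHeight      = h;
    desc.fLevels      = 1;                        // no mipmaps
    desc.fSamples     = 1;                        // single-sampled
    desc.fImageTiling = VK_IMAGE_TILING_OPTIMAL;  // initialLayout becomes UNDEFINED
    desc.fUsageFlags  = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;

    GrVkImageInfo info;
    if (!GrVkImage::InitImageInfo(gpu, desc, &info)) {
        return false;
    }
    // ... use info.fImage / info.fAlloc ...
    GrVkImage::DestroyImageInfo(gpu, &info);
    return true;
}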
Example #5
void GrVkBuffer::vkUnmap(GrVkGpu* gpu) {
    VALIDATE();
    SkASSERT(this->vkIsMapped());

    if (fDesc.fDynamic) {
        VK_CALL(gpu, UnmapMemory(gpu->device(), this->alloc().fMemory));
    } else {
        gpu->updateBuffer(this, fMapPtr, this->offset(), this->size());
        delete [] (unsigned char*)fMapPtr;
    }

    fMapPtr = nullptr;
}
Example #6
void* GrVkBuffer::vkMap(const GrVkGpu* gpu) {
    VALIDATE();
    SkASSERT(!this->vkIsMapped());

    if (!fResource->unique()) {
        // in use by the command buffer, so we need to create a new one
        fResource->unref(gpu);
        fResource = Create(gpu, fDesc);
        if (!fResource) {
            return nullptr;
        }
    }

    VkResult err = VK_CALL(gpu, MapMemory(gpu->device(), alloc(), 0, VK_WHOLE_SIZE, 0, &fMapPtr));
    if (err) {
        fMapPtr = nullptr;
    }

    VALIDATE();
    return fMapPtr;
}
Example #7
void GrVkBuffer::internalUnmap(GrVkGpu* gpu, size_t size) {
    VALIDATE();
    SkASSERT(this->vkIsMapped());

    if (fDesc.fDynamic) {
        GrVkMemory::FlushMappedAlloc(gpu, this->alloc());
        VK_CALL(gpu, UnmapMemory(gpu->device(), this->alloc().fMemory));
        fMapPtr = nullptr;
    } else {
        gpu->updateBuffer(this, fMapPtr, this->offset(), size);
        this->addMemoryBarrier(gpu,
                               VK_ACCESS_TRANSFER_WRITE_BIT,
                               buffer_type_to_access_flags(fDesc.fType),
                               VK_PIPELINE_STAGE_TRANSFER_BIT,
                               VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
                               false);
    }
}
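Examples #7 and #8 rely on a buffer_type_to_access_flags helper that is not shown; a plausible sketch, assuming only static vertex and index buffers reach this barrier path:

// Sketch of the missing helper: map a buffer type to the access mask with
// which a later stage will read it. The barriers above pair it with the
// VERTEX_INPUT stage, so only vertex/index types are expected here.
static VkAccessFlags buffer_type_to_access_flags(GrVkBuffer::Type type) {
    switch (type) {
        case GrVkBuffer::kIndex_Type:
            return VK_ACCESS_INDEX_READ_BIT;
        case GrVkBuffer::kVertex_Type:
            return VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
        default:
            // Uniform/copy/texel buffers are not expected on this path.
            SkASSERT(false);
            return 0;
    }
}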
Example #8
void GrVkBuffer::internalMap(GrVkGpu* gpu, size_t size, bool* createdNewBuffer) {
    VALIDATE();
    SkASSERT(!this->vkIsMapped());

    if (!fResource->unique()) {
        if (fDesc.fDynamic) {
            // in use by the command buffer, so we need to create a new one
            fResource->recycle(gpu);
            fResource = this->createResource(gpu, fDesc);
            if (!fResource) {
                return;  // leaves fMapPtr null; callers treat that as failure
            }
            if (createdNewBuffer) {
                *createdNewBuffer = true;
            }
        } else {
            SkASSERT(fMapPtr);
            this->addMemoryBarrier(gpu,
                                   buffer_type_to_access_flags(fDesc.fType),
                                   VK_ACCESS_TRANSFER_WRITE_BIT,
                                   VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
                                   VK_PIPELINE_STAGE_TRANSFER_BIT,
                                   false);
        }
    }

    if (fDesc.fDynamic) {
        const GrVkAlloc& alloc = this->alloc();
        VkResult err = VK_CALL(gpu, MapMemory(gpu->device(), alloc.fMemory,
                                              alloc.fOffset + fOffset,
                                              size, 0, &fMapPtr));
        if (err) {
            fMapPtr = nullptr;
        }
    } else {
        if (!fMapPtr) {
            fMapPtr = new unsigned char[this->size()];
        }
    }

    VALIDATE();
}
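Together, internalMap and internalUnmap suggest how an updateData entry point could be layered on top of them; a sketch under that assumption (the real method may differ):

// Sketch: write srcSizeInBytes bytes through the map/unmap pair above. For
// dynamic buffers this maps device memory directly; for static buffers it
// stages through the heap copy and the transfer issued by internalUnmap.
bool GrVkBuffer::vkUpdateData(GrVkGpu* gpu, const void* src, size_t srcSizeInBytes,
                              bool* createdNewBuffer) {
    if (srcSizeInBytes > fDesc.fSizeInBytes) {
        return false;
    }
    this->internalMap(gpu, srcSizeInBytes, createdNewBuffer);
    if (!fMapPtr) {
        return false;
    }
    memcpy(fMapPtr, src, srcSizeInBytes);
    this->internalUnmap(gpu, srcSizeInBytes);
    return true;
}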
Example #9
void* GrVkBuffer::vkMap(const GrVkGpu* gpu) {
    VALIDATE();
    SkASSERT(!this->vkIsMapped());
    if (!fResource->unique()) {
        // in use by the command buffer, so we need to create a new one
        fResource->unref(gpu);
        fResource = Create(gpu, fDesc);
        if (!fResource) {
            return nullptr;
        }
    }

    if (fDesc.fDynamic) {
        const GrVkAlloc& alloc = this->alloc();
        VkResult err = VK_CALL(gpu, MapMemory(gpu->device(), alloc.fMemory,
                                              alloc.fOffset + fOffset,
                                              fDesc.fSizeInBytes, 0, &fMapPtr));
        if (err) {
            fMapPtr = nullptr;
        }
    } else {
        fMapPtr = new unsigned char[this->size()];
    }

    VALIDATE();
    return fMapPtr;
}
Example #10
void GrVkImage::Resource::freeGPUData(const GrVkGpu* gpu) const {
    VK_CALL(gpu, DestroyImage(gpu->device(), fImage, nullptr));
    bool isLinear = (VK_IMAGE_TILING_LINEAR == fImageTiling);
    GrVkMemory::FreeImageMemory(gpu, isLinear, fAlloc);
}
Example #11
void GrVkImage::DestroyImageInfo(const GrVkGpu* gpu, GrVkImageInfo* info) {
    VK_CALL(gpu, DestroyImage(gpu->device(), info->fImage, nullptr));
    bool isLinear = VK_IMAGE_TILING_LINEAR == info->fImageTiling;
    GrVkMemory::FreeImageMemory(gpu, isLinear, info->fAlloc);
}
Example #12
const GrVkBuffer::Resource* GrVkBuffer::Create(const GrVkGpu* gpu, const Desc& desc) {
    VkBuffer       buffer;
    VkDeviceMemory alloc;

    // create the buffer object
    VkBufferCreateInfo bufInfo;
    memset(&bufInfo, 0, sizeof(VkBufferCreateInfo));
    bufInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    bufInfo.flags = 0;
    bufInfo.size = desc.fSizeInBytes;
    switch (desc.fType) {
        case kVertex_Type:
            bufInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
            break;
        case kIndex_Type:
            bufInfo.usage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
            break;
        case kUniform_Type:
            bufInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
            break;
        case kCopyRead_Type:
            bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
            break;
        case kCopyWrite_Type:
            bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;
            break;
    }
    bufInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
    bufInfo.queueFamilyIndexCount = 0;
    bufInfo.pQueueFamilyIndices = nullptr;

    VkResult err;
    err = VK_CALL(gpu, CreateBuffer(gpu->device(), &bufInfo, nullptr, &buffer));
    if (err) {
        return nullptr;
    }

    VkMemoryPropertyFlags requiredMemProps = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                                             VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
                                             VK_MEMORY_PROPERTY_HOST_CACHED_BIT;

    if (!GrVkMemory::AllocAndBindBufferMemory(gpu,
                                              buffer,
                                              requiredMemProps,
                                              &alloc)) {
        // Try again without requiring host cached memory
        requiredMemProps = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                           VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
        if (!GrVkMemory::AllocAndBindBufferMemory(gpu,
                                                  buffer,
                                                  requiredMemProps,
                                                  &alloc)) {
            VK_CALL(gpu, DestroyBuffer(gpu->device(), buffer, nullptr));
            return nullptr;
        }
    }

    const GrVkBuffer::Resource* resource = new GrVkBuffer::Resource(buffer, alloc);
    if (!resource) {
        VK_CALL(gpu, DestroyBuffer(gpu->device(), buffer, nullptr));
        VK_CALL(gpu, FreeMemory(gpu->device(), alloc, nullptr));
        return nullptr;
    }

    return resource;
}
Example #13
void GrVkBuffer::Resource::freeGPUData(const GrVkGpu* gpu) const {
    SkASSERT(fBuffer);
    SkASSERT(fAlloc);
    VK_CALL(gpu, DestroyBuffer(gpu->device(), fBuffer, nullptr));
    VK_CALL(gpu, FreeMemory(gpu->device(), fAlloc, nullptr));
}
static GrBackendTexture make_vk_backend_texture(
        GrContext* context, AHardwareBuffer* hardwareBuffer,
        int width, int height, GrPixelConfig config,
        GrAHardwareBufferImageGenerator::DeleteImageProc* deleteProc,
        GrAHardwareBufferImageGenerator::DeleteImageCtx* deleteCtx,
        bool isProtectedContent,
        const GrBackendFormat& backendFormat) {
    SkASSERT(context->contextPriv().getBackend() == kVulkan_GrBackend);
    GrVkGpu* gpu = static_cast<GrVkGpu*>(context->contextPriv().getGpu());
    SkASSERT(gpu);

    VkPhysicalDevice physicalDevice = gpu->physicalDevice();
    VkDevice device = gpu->device();

    if (!gpu->vkCaps().supportsAndroidHWBExternalMemory()) {
        return GrBackendTexture();
    }

    SkASSERT(backendFormat.getVkFormat());
    VkFormat format = *backendFormat.getVkFormat();

    VkResult err;

    VkAndroidHardwareBufferFormatPropertiesANDROID hwbFormatProps;
    hwbFormatProps.sType = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_FORMAT_PROPERTIES_ANDROID;
    hwbFormatProps.pNext = nullptr;

    VkAndroidHardwareBufferPropertiesANDROID hwbProps;
    hwbProps.sType = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_PROPERTIES_ANDROID;
    hwbProps.pNext = &hwbFormatProps;

    err = VK_CALL(GetAndroidHardwareBufferProperties(device, hardwareBuffer, &hwbProps));
    if (VK_SUCCESS != err) {
        return GrBackendTexture();
    }

    SkASSERT(format == hwbFormatProps.format);
    SkASSERT(SkToBool(VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT & hwbFormatProps.formatFeatures) &&
             SkToBool(VK_FORMAT_FEATURE_TRANSFER_SRC_BIT & hwbFormatProps.formatFeatures) &&
             SkToBool(VK_FORMAT_FEATURE_TRANSFER_DST_BIT & hwbFormatProps.formatFeatures));

    const VkExternalMemoryImageCreateInfo externalMemoryImageInfo {
        VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO, // sType
        nullptr, // pNext
        VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID, // handleTypes
    };
    VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_SAMPLED_BIT |
                                   VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
                                   VK_IMAGE_USAGE_TRANSFER_DST_BIT;

    // TODO: Check the supported tilings vkGetPhysicalDeviceImageFormatProperties2 to see if we have
    // to use linear. Add better linear support throughout Ganesh.
    VkImageTiling tiling = VK_IMAGE_TILING_OPTIMAL;

    const VkImageCreateInfo imageCreateInfo = {
        VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,         // sType
        &externalMemoryImageInfo,                    // pNext
        0,                                           // VkImageCreateFlags
        VK_IMAGE_TYPE_2D,                            // VkImageType
        format,                                      // VkFormat
        { (uint32_t)width, (uint32_t)height, 1 },    // VkExtent3D
        1,                                           // mipLevels
        1,                                           // arrayLayers
        VK_SAMPLE_COUNT_1_BIT,                       // samples
        tiling,                                      // VkImageTiling
        usageFlags,                                  // VkImageUsageFlags
        VK_SHARING_MODE_EXCLUSIVE,                   // VkSharingMode
        0,                                           // queueFamilyIndexCount
        0,                                           // pQueueFamilyIndices
        VK_IMAGE_LAYOUT_UNDEFINED,                   // initialLayout
    };

    VkImage image;
    err = VK_CALL(CreateImage(device, &imageCreateInfo, nullptr, &image));
    if (VK_SUCCESS != err) {
        return GrBackendTexture();
    }

    VkImageMemoryRequirementsInfo2 memReqsInfo;
    memReqsInfo.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2;
    memReqsInfo.pNext = nullptr;
    memReqsInfo.image = image;

    VkMemoryDedicatedRequirements dedicatedMemReqs;
    dedicatedMemReqs.sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS;
    dedicatedMemReqs.pNext = nullptr;

    VkMemoryRequirements2 memReqs;
    memReqs.sType = VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2;
    memReqs.pNext = &dedicatedMemReqs;

    VK_CALL(GetImageMemoryRequirements2(device, &memReqsInfo, &memReqs));
    SkASSERT(VK_TRUE == dedicatedMemReqs.requiresDedicatedAllocation);

    VkPhysicalDeviceMemoryProperties2 phyDevMemProps;
    phyDevMemProps.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2;
    phyDevMemProps.pNext = nullptr;

    uint32_t typeIndex = 0;
    uint32_t heapIndex = 0;
    bool foundHeap = false;
    VK_CALL(GetPhysicalDeviceMemoryProperties2(physicalDevice, &phyDevMemProps));
    uint32_t memTypeCnt = phyDevMemProps.memoryProperties.memoryTypeCount;
    for (uint32_t i = 0; i < memTypeCnt && !foundHeap; ++i) {
        if (hwbProps.memoryTypeBits & (1 << i)) {
            const VkPhysicalDeviceMemoryProperties& pdmp = phyDevMemProps.memoryProperties;
            uint32_t supportedFlags = pdmp.memoryTypes[i].propertyFlags &
                    VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
            if (supportedFlags == VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) {
                typeIndex = i;
                heapIndex = pdmp.memoryTypes[i].heapIndex;
                foundHeap = true;
            }
        }
    }
    if (!foundHeap) {
        VK_CALL(DestroyImage(device, image, nullptr));
        return GrBackendTexture();
    }

    VkImportAndroidHardwareBufferInfoANDROID hwbImportInfo;
    hwbImportInfo.sType = VK_STRUCTURE_TYPE_IMPORT_ANDROID_HARDWARE_BUFFER_INFO_ANDROID;
    hwbImportInfo.pNext = nullptr;
    hwbImportInfo.buffer = hardwareBuffer;

    VkMemoryDedicatedAllocateInfo dedicatedAllocInfo;
    dedicatedAllocInfo.sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO;
    dedicatedAllocInfo.pNext = &hwbImportInfo;
    dedicatedAllocInfo.image = image;
    dedicatedAllocInfo.buffer = VK_NULL_HANDLE;

    VkMemoryAllocateInfo allocInfo = {
        VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,      // sType
        &dedicatedAllocInfo,                         // pNext
        hwbProps.allocationSize,                     // allocationSize
        typeIndex,                                   // memoryTypeIndex
    };

    VkDeviceMemory memory;

    err = VK_CALL(AllocateMemory(device, &allocInfo, nullptr, &memory));
    if (VK_SUCCESS != err) {
        VK_CALL(DestroyImage(device, image, nullptr));
        return GrBackendTexture();
    }

    VkBindImageMemoryInfo bindImageInfo;
    bindImageInfo.sType = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO;
    bindImageInfo.pNext = nullptr;
    bindImageInfo.image = image;
    bindImageInfo.memory = memory;
    bindImageInfo.memoryOffset = 0;

    err = VK_CALL(BindImageMemory2(device, 1, &bindImageInfo));
    if (VK_SUCCESS != err) {
        VK_CALL(DestroyImage(device, image, nullptr));
        VK_CALL(FreeMemory(device, memory, nullptr));
        return GrBackendTexture();
    }

    GrVkImageInfo imageInfo;

    imageInfo.fImage = image;
    imageInfo.fAlloc = GrVkAlloc(memory, 0, hwbProps.allocationSize, 0);
    imageInfo.fImageTiling = tiling;
    imageInfo.fImageLayout = VK_IMAGE_LAYOUT_UNDEFINED;
    imageInfo.fFormat = format;
    imageInfo.fLevelCount = 1;
    // TODO: This should possibly be VK_QUEUE_FAMILY_FOREIGN_EXT but current Adreno devices do not
    // support that extension. Or if we know the source of the AHardwareBuffer is not from a
    // "foreign" device we can leave them as external.
    imageInfo.fCurrentQueueFamily = VK_QUEUE_FAMILY_EXTERNAL;

    *deleteProc = GrAHardwareBufferImageGenerator::DeleteVkImage;
    *deleteCtx = new VulkanCleanupHelper(gpu, image, memory);

    return GrBackendTexture(width, height, imageInfo);
}
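The VulkanCleanupHelper handed to deleteCtx above is not shown; a minimal sketch of what it plausibly holds and does, where everything beyond the constructor arguments used above is an assumption:

// Sketch: the cleanup context owns the imported image and its dedicated
// allocation, and releases both when the texture is deleted.
class VulkanCleanupHelper {
public:
    VulkanCleanupHelper(GrVkGpu* gpu, VkImage image, VkDeviceMemory memory)
        : fGpu(gpu), fImage(image), fMemory(memory) {}
    ~VulkanCleanupHelper() {
        VkDevice device = fGpu->device();
        GR_VK_CALL(fGpu->vkInterface(), DestroyImage(device, fImage, nullptr));
        GR_VK_CALL(fGpu->vkInterface(), FreeMemory(device, fMemory, nullptr));
    }
private:
    GrVkGpu*       fGpu;
    VkImage        fImage;
    VkDeviceMemory fMemory;
};

// The matching delete proc simply destroys the helper.
void GrAHardwareBufferImageGenerator::DeleteVkImage(void* context) {
    delete static_cast<VulkanCleanupHelper*>(context);
}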