void GrVkBuffer::internalUnmap(GrVkGpu* gpu, size_t size) {
    VALIDATE();
    SkASSERT(this->vkIsMapped());

    if (fDesc.fDynamic) {
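        // Dynamic buffers are written directly through host-visible memory:
        // flush the CPU writes, then unmap.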
        GrVkMemory::FlushMappedAlloc(gpu, this->alloc());
        VK_CALL(gpu, UnmapMemory(gpu->device(), this->alloc().fMemory));
        fMapPtr = nullptr;
    } else {
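        // Static buffers stage their data in a CPU-side array (fMapPtr); push it
        // to the GPU now and make the write visible to vertex/index reads.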
        gpu->updateBuffer(this, fMapPtr, this->offset(), size);
        this->addMemoryBarrier(gpu,
                               VK_ACCESS_TRANSFER_WRITE_BIT,
                               buffer_type_to_access_flags(fDesc.fType),
                               VK_PIPELINE_STAGE_TRANSFER_BIT,
                               VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
                               false);
    }
}
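
The buffer_type_to_access_flags helper used by the barrier calls isn't included in this listing. A minimal sketch of what it plausibly looks like, assuming kVertex_Type and kIndex_Type enumerators next to the kCopyRead_Type that appears in Example #2; only vertex and index buffers are expected to reach this barrier path:

static VkAccessFlags buffer_type_to_access_flags(GrVkBuffer::Type type) {
    switch (type) {
        case GrVkBuffer::kVertex_Type:
            return VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
        case GrVkBuffer::kIndex_Type:
            return VK_ACCESS_INDEX_READ_BIT;
        default:
            // Copy-read/copy-write staging buffers should never need a barrier here.
            SkASSERT(false);
            return 0;
    }
}
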
Example #2
void GrVkBuffer::internalUnmap(GrVkGpu* gpu, size_t size) {
    VALIDATE();
    SkASSERT(this->vkIsMapped());

    if (fDesc.fDynamic) {
        const GrVkAlloc& alloc = this->alloc();
        SkASSERT(alloc.fSize > 0);
        SkASSERT(alloc.fSize >= size);
        // We currently don't use fOffset
        SkASSERT(0 == fOffset);

        GrVkMemory::FlushMappedAlloc(gpu, alloc, 0, size);
        GrVkMemory::UnmapAlloc(gpu, alloc);
        fMapPtr = nullptr;
    } else {
        // vkCmdUpdateBuffer requires dataSize <= 64k (65536 bytes) and 4-byte alignment.
        // https://bugs.chromium.org/p/skia/issues/detail?id=7488
        if (size <= 65536 && 0 == (size & 0x3)) {
            gpu->updateBuffer(this, fMapPtr, this->offset(), size);
        } else {
            GrVkTransferBuffer* transferBuffer =
                    GrVkTransferBuffer::Create(gpu, size, GrVkBuffer::kCopyRead_Type);
            if (!transferBuffer) {
                return;
            }

            char* buffer = (char*)transferBuffer->map();
            memcpy(buffer, fMapPtr, size);
            transferBuffer->unmap();

            gpu->copyBuffer(transferBuffer, this, 0, this->offset(), size);
            transferBuffer->unref();
        }
        this->addMemoryBarrier(gpu,
                               VK_ACCESS_TRANSFER_WRITE_BIT,
                               buffer_type_to_access_flags(fDesc.fType),
                               VK_PIPELINE_STAGE_TRANSFER_BIT,
                               VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
                               false);
    }
}
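
GrVkMemory::FlushMappedAlloc only has work to do when the mapped allocation lives in non-coherent host memory. A rough sketch of the idea, assuming a hypothetical kNoncoherent_Flag bit on GrVkAlloc and borrowing this file's VK_CALL macro; the real helper also has to align the flushed range to the device's nonCoherentAtomSize limit:

void GrVkMemory::FlushMappedAlloc(GrVkGpu* gpu, const GrVkAlloc& alloc,
                                  VkDeviceSize offset, VkDeviceSize size) {
    // Assumed flag: set when the allocation lacks HOST_COHERENT memory.
    if (alloc.fFlags & GrVkAlloc::kNoncoherent_Flag) {
        VkMappedMemoryRange mappedMemoryRange;
        memset(&mappedMemoryRange, 0, sizeof(VkMappedMemoryRange));
        mappedMemoryRange.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
        mappedMemoryRange.memory = alloc.fMemory;
        mappedMemoryRange.offset = alloc.fOffset + offset;
        mappedMemoryRange.size = size;
        VK_CALL(gpu, FlushMappedMemoryRanges(gpu->device(), 1, &mappedMemoryRange));
    }
    // Host-coherent memory is visible to the device without an explicit flush.
}
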
void GrVkBuffer::internalMap(GrVkGpu* gpu, size_t size, bool* createdNewBuffer) {
    VALIDATE();
    SkASSERT(!this->vkIsMapped());

    if (!fResource->unique()) {
        if (fDesc.fDynamic) {
            // The backing resource is still referenced by a command buffer, so
            // recycle it and create a fresh one to write into.
            fResource->recycle(gpu);
            fResource = this->createResource(gpu, fDesc);
            if (createdNewBuffer) {
                *createdNewBuffer = true;
            }
        } else {
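            // Still in use for draws: barrier so prior vertex/index reads finish
            // before the transfer write that internalUnmap will record.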
            SkASSERT(fMapPtr);
            this->addMemoryBarrier(gpu,
                                   buffer_type_to_access_flags(fDesc.fType),
                                   VK_ACCESS_TRANSFER_WRITE_BIT,
                                   VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
                                   VK_PIPELINE_STAGE_TRANSFER_BIT,
                                   false);
        }
    }

    if (fDesc.fDynamic) {
        const GrVkAlloc& alloc = this->alloc();
        VkResult err = VK_CALL(gpu, MapMemory(gpu->device(), alloc.fMemory,
                                              alloc.fOffset + fOffset,
                                              size, 0, &fMapPtr));
        if (VK_SUCCESS != err) {
            fMapPtr = nullptr;
        }
    } else {
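        // Static buffers are "mapped" to a lazily allocated CPU staging array;
        // the data is uploaded to the GPU at unmap time.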
        if (!fMapPtr) {
            fMapPtr = new unsigned char[this->size()];
        }
    }

    VALIDATE();
}
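
Callers normally drive this map/unmap pair through a single update entry point. A sketch of that pattern modeled on GrVkBuffer::vkUpdateData (the fDesc.fSizeInBytes field and the exact signature are assumptions, not shown in this listing):

bool GrVkBuffer::vkUpdateData(GrVkGpu* gpu, const void* src, size_t srcSizeInBytes,
                              bool* createdNewBuffer) {
    // Assumed field holding the buffer's full size.
    if (srcSizeInBytes > fDesc.fSizeInBytes) {
        return false;
    }

    this->internalMap(gpu, srcSizeInBytes, createdNewBuffer);
    if (!fMapPtr) {
        return false;
    }

    // For dynamic buffers this writes straight into mapped device memory; for
    // static buffers it fills the CPU staging array allocated in internalMap.
    memcpy(fMapPtr, src, srcSizeInBytes);

    // internalUnmap flushes/unmaps (dynamic) or records the GPU upload (static).
    this->internalUnmap(gpu, srcSizeInBytes);
    return true;
}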