Example #1
void Buffer11::markTransformFeedbackUsage()
{
    BufferStorage *transformFeedbackStorage = getBufferStorage(BUFFER_USAGE_VERTEX_OR_TRANSFORM_FEEDBACK);

    if (transformFeedbackStorage)
    {
        transformFeedbackStorage->setDataRevision(transformFeedbackStorage->getDataRevision() + 1);
    }

    invalidateStaticData();
}
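Example #1 only bumps the data revision of the transform feedback storage; Examples #3 and #4 below use that revision counter to decide which internal storage holds the newest copy of the buffer data. getLatestBufferStorage itself is not shown in these examples, so the following is a minimal sketch assuming a revision-ordered scan of the mBufferStorages map that appears in Example #4, not ANGLE's actual implementation.

// Assumed sketch: pick the storage with the highest data revision.
Buffer11::BufferStorage *Buffer11::getLatestBufferStorage() const
{
    BufferStorage *latestStorage = nullptr;
    for (const auto &pair : mBufferStorages)
    {
        BufferStorage *storage = pair.second;
        if (!latestStorage || storage->getDataRevision() > latestStorage->getDataRevision())
        {
            latestStorage = storage;
        }
    }
    return latestStorage;
}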
Example #2
gl::Error Buffer11::setSubData(const void *data, size_t size, size_t offset)
{
    size_t requiredSize = size + offset;

    if (data && size > 0)
    {
        // Write through the staging storage when direct binding is supported;
        // otherwise fall back to system memory storage for dynamic buffers.

        BufferStorage *writeBuffer = nullptr;
        if (supportsDirectBinding())
        {
            writeBuffer = getStagingStorage();

            if (!writeBuffer)
            {
                return gl::Error(GL_OUT_OF_MEMORY, "Failed to allocate internal buffer.");
            }
        }
        else
        {
            SystemMemoryStorage *systemMemoryStorage = nullptr;
            gl::Error error = getSystemMemoryStorage(&systemMemoryStorage);
            if (error.isError())
            {
                return error;
            }

            writeBuffer = systemMemoryStorage;
        }

        ASSERT(writeBuffer);

        // Explicitly resize the write buffer, preserving its existing contents when the
        // new data will not completely fill it (i.e. when writing at a non-zero offset).
        if (writeBuffer->getSize() < requiredSize)
        {
            bool preserveData = (offset > 0);
            gl::Error error = writeBuffer->resize(requiredSize, preserveData);
            if (error.isError())
            {
                return error;
            }
        }

        writeBuffer->setData(static_cast<const uint8_t *>(data), offset, size);
        writeBuffer->setDataRevision(writeBuffer->getDataRevision() + 1);
    }

    mSize = std::max(mSize, requiredSize);
    invalidateStaticData();

    return gl::Error(GL_NO_ERROR);
}
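A minimal caller sketch for setSubData; the helper name uploadVertices and the buffer parameter are hypothetical, and the error propagation simply mirrors the pattern used inside the method itself.

// Hypothetical caller: upload a small vertex array at offset 0 and propagate
// any failure, as the examples above do.
gl::Error uploadVertices(Buffer11 *buffer)
{
    float vertices[16] = {0.0f};
    gl::Error error = buffer->setSubData(vertices, sizeof(vertices), 0);
    if (error.isError())
    {
        return error;
    }
    return gl::Error(GL_NO_ERROR);
}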
Example #3
gl::Error Buffer11::copySubData(BufferImpl *source, GLintptr sourceOffset, GLintptr destOffset, GLsizeiptr size)
{
    Buffer11 *sourceBuffer = makeBuffer11(source);
    ASSERT(sourceBuffer != nullptr);

    BufferStorage *copyDest = getLatestBufferStorage();
    if (!copyDest)
    {
        copyDest = getStagingStorage();
    }

    BufferStorage *copySource = sourceBuffer->getLatestBufferStorage();

    if (!copySource || !copyDest)
    {
        return gl::Error(GL_OUT_OF_MEMORY, "Failed to allocate internal staging buffer.");
    }

    // If copying to/from a pixel pack buffer, we must have a staging or
    // pack buffer partner, because other native buffers can't be mapped
    if (copyDest->getUsage() == BUFFER_USAGE_PIXEL_PACK && !copySource->isMappable())
    {
        copySource = sourceBuffer->getStagingStorage();
    }
    else if (copySource->getUsage() == BUFFER_USAGE_PIXEL_PACK && !copyDest->isMappable())
    {
        copyDest = getStagingStorage();
    }

    // D3D11 does not allow overlapped copies until 11.1, and even then only if the
    // device supports D3D11_FEATURE_DATA_D3D11_OPTIONS::CopyWithOverlap.
    // Work around this by copying from a different source storage.
    if (copySource == copyDest)
    {
        if (copySource->getUsage() == BUFFER_USAGE_STAGING)
        {
            copySource = getBufferStorage(BUFFER_USAGE_VERTEX_OR_TRANSFORM_FEEDBACK);
        }
        else
        {
            copySource = getStagingStorage();
        }
    }

    copyDest->copyFromStorage(copySource, sourceOffset, size, destOffset);
    copyDest->setDataRevision(copyDest->getDataRevision() + 1);

    mSize = std::max<size_t>(mSize, destOffset + size);
    invalidateStaticData();

    return gl::Error(GL_NO_ERROR);
}
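A minimal caller sketch for the self-copy case the workaround above handles, assuming a hypothetical helper duplicateRegion and that Buffer11 derives from BufferImpl (as the makeBuffer11 call implies).

// Hypothetical caller: copy 128 bytes from offset 0 to offset 256 within the
// same buffer. In that case copySource == copyDest inside copySubData, so the
// copy is routed through a second internal storage to satisfy the pre-11.1
// D3D11 overlapped-copy restriction described above.
gl::Error duplicateRegion(Buffer11 *buffer)
{
    gl::Error error = buffer->copySubData(buffer, 0, 256, 128);
    if (error.isError())
    {
        return error;
    }
    return gl::Error(GL_NO_ERROR);
}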
Example #4
Buffer11::BufferStorage *Buffer11::getBufferStorage(BufferUsage usage)
{
    BufferStorage *newStorage = nullptr;
    auto directBufferIt = mBufferStorages.find(usage);
    if (directBufferIt != mBufferStorages.end())
    {
        newStorage = directBufferIt->second;
    }

    if (!newStorage)
    {
        if (usage == BUFFER_USAGE_PIXEL_PACK)
        {
            newStorage = new PackStorage(mRenderer);
        }
        else if (usage == BUFFER_USAGE_SYSTEM_MEMORY)
        {
            newStorage = new SystemMemoryStorage(mRenderer);
            mHasSystemMemoryStorage = true;
        }
        else
        {
            // Storage for this usage has not been allocated yet, so create it.
            newStorage = new NativeStorage(mRenderer, usage);
        }

        mBufferStorages.insert(std::make_pair(usage, newStorage));
    }

    // Resize the storage if it is smaller than the buffer's current size.
    if (newStorage->getSize() < mSize)
    {
        if (newStorage->resize(mSize, true).isError())
        {
            // Out of memory; signal failure by returning a null storage.
            return nullptr;
        }
    }

    BufferStorage *latestBuffer = getLatestBufferStorage();
    if (latestBuffer && latestBuffer->getDataRevision() > newStorage->getDataRevision())
    {
        // Copy through a staging buffer when neither storage is already a staging
        // buffer and at least one of them is not mappable. We can't map a GPU-only
        // buffer to copy CPU data into it directly, so the staging buffer acts as
        // the intermediary.
        if (latestBuffer->getUsage() != BUFFER_USAGE_STAGING &&
            newStorage->getUsage() != BUFFER_USAGE_STAGING &&
            (!latestBuffer->isMappable() || !newStorage->isMappable()))
        {
            NativeStorage *stagingBuffer = getStagingStorage();

            stagingBuffer->copyFromStorage(latestBuffer, 0, latestBuffer->getSize(), 0);
            stagingBuffer->setDataRevision(latestBuffer->getDataRevision());

            latestBuffer = stagingBuffer;
        }

        // If copyFromStorage returns true, the D3D buffer has been recreated
        // and we should update our serial.
        if (newStorage->copyFromStorage(latestBuffer, 0, latestBuffer->getSize(), 0))
        {
            updateSerial();
        }
        newStorage->setDataRevision(latestBuffer->getDataRevision());
    }

    return newStorage;
}
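These examples exercise a BufferStorage interface whose declaration is not included here. The sketch below is reconstructed from the calls made above (getUsage, getSize, getDataRevision/setDataRevision, isMappable, copyFromStorage, resize, setData); the exact signatures, members, and constructor are assumptions, not the actual ANGLE declaration.

// Assumed sketch of the storage interface used by the examples above. In the
// real code this class is nested inside Buffer11.
class BufferStorage
{
  public:
    explicit BufferStorage(BufferUsage usage) : mUsage(usage), mRevision(0), mSize(0) {}
    virtual ~BufferStorage() {}

    BufferUsage getUsage() const { return mUsage; }
    size_t getSize() const { return mSize; }

    // Revision counter used to decide which storage holds the newest data.
    unsigned int getDataRevision() const { return mRevision; }
    void setDataRevision(unsigned int revision) { mRevision = revision; }

    virtual bool isMappable() const = 0;

    // Returns true if the underlying D3D buffer had to be recreated.
    virtual bool copyFromStorage(BufferStorage *source, size_t sourceOffset,
                                 size_t size, size_t destOffset) = 0;
    virtual gl::Error resize(size_t size, bool preserveData) = 0;
    virtual void setData(const uint8_t *data, size_t offset, size_t size) = 0;

  protected:
    BufferUsage mUsage;
    unsigned int mRevision;
    size_t mSize;
};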