Example #1
void GrGLBuffer::onMap() {
    if (this->wasDestroyed()) {
        return;
    }

    VALIDATE();
    SkASSERT(!this->isMapped());

    if (0 == fBufferID) {
        fMapPtr = fCPUData;
        VALIDATE();
        return;
    }

    // TODO: Make this a function parameter.
    bool readOnly = (kXferGpuToCpu_GrBufferType == fIntendedType);

    // Handling dirty context is done in the bindBuffer call
    switch (this->glCaps().mapBufferType()) {
        case GrGLCaps::kNone_MapBufferType:
            break;
        case GrGLCaps::kMapBuffer_MapBufferType: {
            GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);
            // Let driver know it can discard the old data
            if (GR_GL_USE_BUFFER_DATA_NULL_HINT || fGLSizeInBytes != fSizeInBytes) {
                GL_CALL(BufferData(target, fSizeInBytes, nullptr, fUsage));
            }
            GL_CALL_RET(fMapPtr, MapBuffer(target, readOnly ? GR_GL_READ_ONLY : GR_GL_WRITE_ONLY));
            break;
        }
        case GrGLCaps::kMapBufferRange_MapBufferType: {
            GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);
            // Make sure the GL buffer size agrees with fDesc before mapping.
            if (fGLSizeInBytes != fSizeInBytes) {
                GL_CALL(BufferData(target, fSizeInBytes, nullptr, fUsage));
            }
            GrGLbitfield writeAccess = GR_GL_MAP_WRITE_BIT;
            if (kXferCpuToGpu_GrBufferType != fIntendedType) {
                // TODO: Make this a function parameter.
                writeAccess |= GR_GL_MAP_INVALIDATE_BUFFER_BIT;
            }
            GL_CALL_RET(fMapPtr, MapBufferRange(target, 0, fSizeInBytes,
                                                readOnly ? GR_GL_MAP_READ_BIT : writeAccess));
            break;
        }
        case GrGLCaps::kChromium_MapBufferType: {
            GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);
            // Make sure the GL buffer size agrees with fDesc before mapping.
            if (fGLSizeInBytes != fSizeInBytes) {
                GL_CALL(BufferData(target, fSizeInBytes, nullptr, fUsage));
            }
            GL_CALL_RET(fMapPtr, MapBufferSubData(target, 0, fSizeInBytes,
                                                  readOnly ? GR_GL_READ_ONLY : GR_GL_WRITE_ONLY));
            break;
        }
    }
    fGLSizeInBytes = fSizeInBytes;
    VALIDATE();
}
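The BufferData(target, fSizeInBytes, nullptr, fUsage) calls issued before mapping are the classic OpenGL "buffer orphaning" idiom: handing the driver a null pointer of the same size lets it allocate fresh storage instead of stalling until the GPU finishes with the old contents. A minimal sketch of the same idiom in raw GL calls (bufferId, sizeInBytes, and srcData are placeholder names, not from the source):

glBindBuffer(GL_ARRAY_BUFFER, bufferId);
// Orphan the old data store, then map fresh memory for writing.
glBufferData(GL_ARRAY_BUFFER, sizeInBytes, nullptr, GL_DYNAMIC_DRAW);
void* ptr = glMapBuffer(GL_ARRAY_BUFFER, GL_WRITE_ONLY);
if (ptr) {
    memcpy(ptr, srcData, sizeInBytes);
    glUnmapBuffer(GL_ARRAY_BUFFER);  // GL_FALSE here means the data store was lost mid-write
}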
Example #2
	void UGraphicsDevice::UpdateBuffer(BufferPtr inBuffer, const void* inData, size_t inDataSize)
	{
		assert(inData);

		void* buf = MapBuffer(inBuffer);
		memcpy(buf, inData, inDataSize);
		UnmapBuffer(inBuffer);
	}
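UpdateBuffer above copies through the mapped pointer without checking it. A defensive variant (UpdateBufferChecked is a hypothetical name, under the assumption that MapBuffer returns null on failure) would guard the copy the way the D3D11 examples below do:

	void UGraphicsDevice::UpdateBufferChecked(BufferPtr inBuffer, const void* inData, size_t inDataSize)
	{
		assert(inData);

		void* buf = MapBuffer(inBuffer);
		if (buf)  // unlike UpdateBuffer above, tolerate a failed map instead of crashing
		{
			memcpy(buf, inData, inDataSize);
			UnmapBuffer(inBuffer);
		}
	}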
Example #3
bool IndexBuffer::SetDataRange(const void* data, unsigned start, unsigned count, bool discard)
{
    if (start == 0 && count == indexCount_)
        return SetData(data);

    if (!data)
    {
        LOGERROR("Null pointer for index buffer data");
        return false;
    }

    if (!indexSize_)
    {
        LOGERROR("Index size not defined, can not set index buffer data");
        return false;
    }

    if (start + count > indexCount_)
    {
        LOGERROR("Illegal range for setting new index buffer data");
        return false;
    }

    if (!count)
        return true;

    if (shadowData_ && shadowData_.Get() + start * indexSize_ != data)
        memcpy(shadowData_.Get() + start * indexSize_, data, count * indexSize_);

    if (object_)
    {
        if (dynamic_)
        {
            void* hwData = MapBuffer(start, count, discard);
            if (hwData)
            {
                memcpy(hwData, data, count * indexSize_);
                UnmapBuffer();
            }
            else
                return false;
        }
        else
        {
            D3D11_BOX destBox;
            destBox.left = start * indexSize_;
            destBox.right = destBox.left + count * indexSize_;
            destBox.top = 0;
            destBox.bottom = 1;
            destBox.front = 0;
            destBox.back = 1;

            graphics_->GetImpl()->GetDeviceContext()->UpdateSubresource((ID3D11Buffer*)object_, 0, &destBox, data, 0, 0);
        }
    }

    return true;
}
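In D3D11, only buffers created with D3D11_USAGE_DYNAMIC can be mapped from the CPU; default-usage buffers must go through UpdateSubresource, which is exactly the split on dynamic_ above. The MapBuffer helper itself is not shown in these examples; a plausible sketch of what it wraps (an assumption about its shape, not the verbatim helper):

void* IndexBuffer::MapBuffer(unsigned start, unsigned count, bool discard)
{
    D3D11_MAPPED_SUBRESOURCE mapped;
    // WRITE_DISCARD orphans the whole buffer; NO_OVERWRITE promises not to
    // touch data the GPU is still reading.
    D3D11_MAP mode = discard ? D3D11_MAP_WRITE_DISCARD : D3D11_MAP_WRITE_NO_OVERWRITE;
    HRESULT hr = graphics_->GetImpl()->GetDeviceContext()->Map(
        (ID3D11Buffer*)object_, 0, mode, 0, &mapped);
    if (FAILED(hr) || !mapped.pData)
        return nullptr;
    return (unsigned char*)mapped.pData + start * indexSize_;
}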
Example #4
void GrGLBuffer::onMap() {
    SkASSERT(fBufferID);
    SkASSERT(!this->wasDestroyed());
    VALIDATE();
    SkASSERT(!this->isMapped());

    // TODO: Make this a function parameter.
    bool readOnly = (GrGpuBufferType::kXferGpuToCpu == fIntendedType);

    // Handling dirty context is done in the bindBuffer call
    switch (this->glCaps().mapBufferType()) {
        case GrGLCaps::kNone_MapBufferType:
            return;
        case GrGLCaps::kMapBuffer_MapBufferType: {
            GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);
            if (!readOnly) {
                // Let driver know it can discard the old data
                if (this->glCaps().useBufferDataNullHint() || fGLSizeInBytes != this->size()) {
                    GL_CALL(BufferData(target, this->size(), nullptr, fUsage));
                }
            }
            GL_CALL_RET(fMapPtr, MapBuffer(target, readOnly ? GR_GL_READ_ONLY : GR_GL_WRITE_ONLY));
            break;
        }
        case GrGLCaps::kMapBufferRange_MapBufferType: {
            GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);
            // Make sure the GL buffer size agrees with fDesc before mapping.
            if (fGLSizeInBytes != this->size()) {
                GL_CALL(BufferData(target, this->size(), nullptr, fUsage));
            }
            GrGLbitfield access;
            if (readOnly) {
                access = GR_GL_MAP_READ_BIT;
            } else {
                access = GR_GL_MAP_WRITE_BIT;
                if (GrGpuBufferType::kXferCpuToGpu != fIntendedType) {
                    // TODO: Make this a function parameter.
                    access |= GR_GL_MAP_INVALIDATE_BUFFER_BIT;
                }
            }
            GL_CALL_RET(fMapPtr, MapBufferRange(target, 0, this->size(), access));
            break;
        }
        case GrGLCaps::kChromium_MapBufferType: {
            GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);
            // Make sure the GL buffer size agrees with fDesc before mapping.
            if (fGLSizeInBytes != this->size()) {
                GL_CALL(BufferData(target, this->size(), nullptr, fUsage));
            }
            GL_CALL_RET(fMapPtr, MapBufferSubData(target, 0, this->size(),
                                                  readOnly ? GR_GL_READ_ONLY : GR_GL_WRITE_ONLY));
            break;
        }
    }
    fGLSizeInBytes = this->size();
    VALIDATE();
}
Example #5
void* GrGLBufferImpl::map(GrGpuGL* gpu) {
    VALIDATE();
    SkASSERT(!this->isMapped());
    if (0 == fDesc.fID) {
        fMapPtr = fCPUData;
    } else {
        switch (gpu->glCaps().mapBufferType()) {
            case GrGLCaps::kNone_MapBufferType:
                VALIDATE();
                return NULL;
            case GrGLCaps::kMapBuffer_MapBufferType:
                this->bind(gpu);
                // Let driver know it can discard the old data
                if (GR_GL_USE_BUFFER_DATA_NULL_HINT || fDesc.fSizeInBytes != fGLSizeInBytes) {
                    fGLSizeInBytes = fDesc.fSizeInBytes;
                    GL_CALL(gpu,
                            BufferData(fBufferType, fGLSizeInBytes, NULL,
                                       fDesc.fDynamic ? DYNAMIC_USAGE_PARAM : GR_GL_STATIC_DRAW));
                }
                GR_GL_CALL_RET(gpu->glInterface(), fMapPtr,
                               MapBuffer(fBufferType, GR_GL_WRITE_ONLY));
                break;
            case GrGLCaps::kMapBufferRange_MapBufferType: {
                this->bind(gpu);
                // Make sure the GL buffer size agrees with fDesc before mapping.
                if (fDesc.fSizeInBytes != fGLSizeInBytes) {
                    fGLSizeInBytes = fDesc.fSizeInBytes;
                    GL_CALL(gpu,
                            BufferData(fBufferType, fGLSizeInBytes, NULL,
                                       fDesc.fDynamic ? DYNAMIC_USAGE_PARAM : GR_GL_STATIC_DRAW));
                }
                static const GrGLbitfield kAccess = GR_GL_MAP_INVALIDATE_BUFFER_BIT |
                                                    GR_GL_MAP_WRITE_BIT;
                GR_GL_CALL_RET(gpu->glInterface(),
                               fMapPtr,
                               MapBufferRange(fBufferType, 0, fGLSizeInBytes, kAccess));
                break;
            }
            case GrGLCaps::kChromium_MapBufferType:
                this->bind(gpu);
                // Make sure the GL buffer size agrees with fDesc before mapping.
                if (fDesc.fSizeInBytes != fGLSizeInBytes) {
                    fGLSizeInBytes = fDesc.fSizeInBytes;
                    GL_CALL(gpu,
                            BufferData(fBufferType, fGLSizeInBytes, NULL,
                                       fDesc.fDynamic ? DYNAMIC_USAGE_PARAM : GR_GL_STATIC_DRAW));
                }
                GR_GL_CALL_RET(gpu->glInterface(),
                               fMapPtr,
                               MapBufferSubData(fBufferType, 0, fGLSizeInBytes, GR_GL_WRITE_ONLY));
                break;
        }
    }
    VALIDATE();
    return fMapPtr;
}
Example #6
void* GrGLVertexBuffer::lock() {
    GrAssert(fBufferID);
    GrAssert(!isLocked());
    if (GPUGL->supportsBufferLocking()) {
        this->bind();
        // Let driver know it can discard the old data
        GR_GL(BufferData(GR_GL_ARRAY_BUFFER, size(), NULL,
                         dynamic() ? GR_GL_DYNAMIC_DRAW : GR_GL_STATIC_DRAW));
        fLockPtr = GR_GL(MapBuffer(GR_GL_ARRAY_BUFFER, GR_GL_WRITE_ONLY));
        return fLockPtr;
    }
    return NULL;
}
Example #7
bool VertexBuffer::SetDataRange(const void* data, unsigned start, unsigned count, bool discard)
{
    if (start == 0 && count == vertexCount_)
        return SetData(data);

    if (!data)
    {
        URHO3D_LOGERROR("Null pointer for vertex buffer data");
        return false;
    }

    if (!vertexSize_)
    {
        URHO3D_LOGERROR("Vertex elements not defined, can not set vertex buffer data");
        return false;
    }

    if (start + count > vertexCount_)
    {
        URHO3D_LOGERROR("Illegal range for setting new vertex buffer data");
        return false;
    }

    if (!count)
        return true;

    if (shadowData_ && shadowData_.Get() + start * vertexSize_ != data)
        memcpy(shadowData_.Get() + start * vertexSize_, data, count * vertexSize_);

    if (object_)
    {
        if (graphics_->IsDeviceLost())
        {
            URHO3D_LOGWARNING("Vertex buffer data assignment while device is lost");
            dataPending_ = true;
            return true;
        }

        void* hwData = MapBuffer(start, count, discard);
        if (hwData)
        {
            memcpy(hwData, data, count * vertexSize_);
            UnmapBuffer();
        }
        else
            return false;
    }

    return true;
}
Example #8
void* GrGLVertexBuffer::lock() {
    GrAssert(fBufferID);
    GrAssert(!isLocked());
    if (this->getGpu()->getCaps().fBufferLockSupport) {
        this->bind();
        // Let driver know it can discard the old data
        GL_CALL(BufferData(GR_GL_ARRAY_BUFFER, this->sizeInBytes(), NULL,
                           this->dynamic() ? GR_GL_DYNAMIC_DRAW :
                                             GR_GL_STATIC_DRAW));
        GR_GL_CALL_RET(GPUGL->glInterface(),
                       fLockPtr,
                       MapBuffer(GR_GL_ARRAY_BUFFER, GR_GL_WRITE_ONLY));
        return fLockPtr;
    }
    return NULL;
}
Example #9
GLubyte* PBO::MapBuffer(GLbitfield access)
{
	assert(!mapped);

	//! we don't use glMapBuffer, because glMapBufferRange is slightly
	//! faster thanks to GL_MAP_UNSYNCHRONIZED_BIT
	/*mapped = true;
	if (PBOused) {
		return (GLubyte*)glMapBuffer(GL_PIXEL_UNPACK_BUFFER, access);
	} else {
		assert(data);
		return data;
	}*/

	return MapBuffer(0, size, access);
}
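The ranged overload the comment above prefers boils down to the following raw GL call. GL_MAP_UNSYNCHRONIZED_BIT tells the driver not to wait for in-flight GPU work that still touches the buffer, which is what makes it faster; the caller then has to manage synchronization itself (pbo and bufferSize are placeholders, not names from the source):

glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pbo);
GLubyte* ptr = (GLubyte*)glMapBufferRange(GL_PIXEL_UNPACK_BUFFER, 0, bufferSize,
                                          GL_MAP_WRITE_BIT | GL_MAP_UNSYNCHRONIZED_BIT);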
Example #10
bool IndexBuffer::SetData(const void* data)
{
    if (!data)
    {
        LOGERROR("Null pointer for index buffer data");
        return false;
    }

    if (!indexSize_)
    {
        LOGERROR("Index size not defined, can not set index buffer data");
        return false;
    }

    if (shadowData_ && data != shadowData_.Get())
        memcpy(shadowData_.Get(), data, indexCount_ * indexSize_);

    if (object_)
    {
        if (dynamic_)
        {
            void* hwData = MapBuffer(0, indexCount_, true);
            if (hwData)
            {
                memcpy(hwData, data, indexCount_ * indexSize_);
                UnmapBuffer();
            }
            else
                return false;
        }
        else
        {
            D3D11_BOX destBox;
            destBox.left = 0;
            destBox.right = indexCount_ * indexSize_;
            destBox.top = 0;
            destBox.bottom = 1;
            destBox.front = 0;
            destBox.back = 1;

            graphics_->GetImpl()->GetDeviceContext()->UpdateSubresource((ID3D11Buffer*)object_, 0, &destBox, data, 0, 0);
        }
    }

    return true;
}
Example #11
void* GrGLBufferImpl::lock(GrGpuGL* gpu) {
    VALIDATE();
    SkASSERT(!this->isLocked());
    if (0 == fDesc.fID) {
        fLockPtr = fCPUData;
    } else if (gpu->caps()->bufferLockSupport()) {
        this->bind(gpu);
        // Let driver know it can discard the old data
        GL_CALL(gpu, BufferData(fBufferType,
                                fDesc.fSizeInBytes,
                                NULL,
                                fDesc.fDynamic ? DYNAMIC_USAGE_PARAM : GR_GL_STATIC_DRAW));
        GR_GL_CALL_RET(gpu->glInterface(),
                       fLockPtr,
                       MapBuffer(fBufferType, GR_GL_WRITE_ONLY));
    }
    return fLockPtr;
}
Example #12
void* VertexBuffer::Lock(unsigned start, unsigned count, bool discard)
{
    if (lockState_ != LOCK_NONE)
    {
        ATOMIC_LOGERROR("Vertex buffer already locked");
        return 0;
    }

    if (!vertexSize_)
    {
        ATOMIC_LOGERROR("Vertex elements not defined, can not lock vertex buffer");
        return 0;
    }

    if (start + count > vertexCount_)
    {
        ATOMIC_LOGERROR("Illegal range for locking vertex buffer");
        return 0;
    }

    if (!count)
        return 0;

    lockStart_ = start;
    lockCount_ = count;

    // Because shadow data must be kept in sync, can only lock hardware buffer if not shadowed
    if (object_.ptr_ && !shadowData_ && !graphics_->IsDeviceLost())
        return MapBuffer(start, count, discard);
    else if (shadowData_)
    {
        lockState_ = LOCK_SHADOW;
        return shadowData_.Get() + start * vertexSize_;
    }
    else if (graphics_)
    {
        lockState_ = LOCK_SCRATCH;
        lockScratchData_ = graphics_->ReserveScratchBuffer(count * vertexSize_);
        return lockScratchData_;
    }
    else
        return 0;
}
Example #13
gralloc1_error_t BufferManager::LockBuffer(const private_handle_t *hnd,
                                           gralloc1_producer_usage_t prod_usage,
                                           gralloc1_consumer_usage_t cons_usage) {
  std::lock_guard<std::mutex> lock(buffer_lock_);
  gralloc1_error_t err = GRALLOC1_ERROR_NONE;
  ALOGD_IF(DEBUG, "LockBuffer buffer handle:%p id: %" PRIu64, hnd, hnd->id);

  // If buffer is not meant for CPU return err
  if (!CpuCanAccess(prod_usage, cons_usage)) {
    return GRALLOC1_ERROR_BAD_VALUE;
  }

  if (hnd->base == 0) {
    // we need to map for real
    err = MapBuffer(hnd);
  }

  auto buf = GetBufferFromHandleLocked(hnd);
  if (buf == nullptr) {
    return GRALLOC1_ERROR_BAD_HANDLE;
  }

  // Invalidate if CPU reads in software and there are non-CPU
  // writers. No need to do this for the metadata buffer as it is
  // only read/written in software.

  // todo use handle here
  if (!err && (hnd->flags & private_handle_t::PRIV_FLAGS_USES_ION) &&
      (hnd->flags & private_handle_t::PRIV_FLAGS_CACHED)) {
    if (allocator_->CleanBuffer(reinterpret_cast<void *>(hnd->base), hnd->size, hnd->offset,
                                buf->ion_handle_main, CACHE_INVALIDATE)) {
      return GRALLOC1_ERROR_BAD_HANDLE;
    }
  }

  // Mark the buffer to be flushed after CPU write.
  if (!err && CpuCanWrite(prod_usage)) {
    private_handle_t *handle = const_cast<private_handle_t *>(hnd);
    handle->flags |= private_handle_t::PRIV_FLAGS_NEEDS_FLUSH;
  }

  return err;
}
Example #14
void* IndexBuffer::Lock(unsigned start, unsigned count, bool discard)
{
    if (lockState_ != LOCK_NONE)
    {
        LOGERROR("Index buffer already locked");
        return 0;
    }

    if (!indexSize_)
    {
        LOGERROR("Index size not defined, can not lock index buffer");
        return 0;
    }

    if (start + count > indexCount_)
    {
        LOGERROR("Illegal range for locking index buffer");
        return 0;
    }

    if (!count)
        return 0;

    lockStart_ = start;
    lockCount_ = count;

    // Because shadow data must be kept in sync, can only lock hardware buffer if not shadowed
    if (object_ && !shadowData_ && dynamic_)
        return MapBuffer(start, count, discard);
    else if (shadowData_)
    {
        lockState_ = LOCK_SHADOW;
        return shadowData_.Get() + start * indexSize_;
    }
    else if (graphics_)
    {
        lockState_ = LOCK_SCRATCH;
        lockScratchData_ = graphics_->ReserveScratchBuffer(count * indexSize_);
        return lockScratchData_;
    }
    else
        return 0;
}
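None of the examples show the Unlock counterpart that these lock states feed into. A sketch of how each state would route the written data back, assuming SetDataRange behaves as in the examples above and that FreeScratchBuffer is the counterpart of the ReserveScratchBuffer call used here:

void IndexBuffer::Unlock()
{
    switch (lockState_)
    {
    case LOCK_HARDWARE:
        // Assumed state, set inside MapBuffer when the GPU buffer was mapped directly.
        UnmapBuffer();
        lockState_ = LOCK_NONE;
        break;

    case LOCK_SHADOW:
        // The caller wrote into the shadow copy; push that range to the GPU buffer.
        SetDataRange(shadowData_.Get() + lockStart_ * indexSize_, lockStart_, lockCount_);
        lockState_ = LOCK_NONE;
        break;

    case LOCK_SCRATCH:
        // The caller wrote into a temporary CPU block; upload it, then return it to the pool.
        SetDataRange(lockScratchData_, lockStart_, lockCount_);
        if (graphics_)
            graphics_->FreeScratchBuffer(lockScratchData_);
        lockScratchData_ = nullptr;
        lockState_ = LOCK_NONE;
        break;

    default:
        break;
    }
}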
Example #15
void OgreSimBuffer::SetOgreVertexBuffer(Ogre::HardwareVertexBufferSharedPtr ogreVertexBuffer)
{
    bool wasMapped = false;
    if (mOgreVertexBuffer != ogreVertexBuffer)
    {
        if (mMapped)
        {
            wasMapped = true;
            UnmapBuffer();
        }
    }

    mOgreVertexBuffer = ogreVertexBuffer;
    mAllocedSize = mOgreVertexBuffer->getSizeInBytes();
    mSize = mAllocedSize;

    if (wasMapped)
        MapBuffer();
}
Example #16
bool IndexBuffer::SetData(const void* data)
{
    if (!data)
    {
        ATOMIC_LOGERROR("Null pointer for index buffer data");
        return false;
    }

    if (!indexSize_)
    {
        ATOMIC_LOGERROR("Index size not defined, can not set index buffer data");
        return false;
    }

    if (shadowData_ && data != shadowData_.Get())
        memcpy(shadowData_.Get(), data, indexCount_ * indexSize_);

    if (object_.ptr_)
    {
        if (graphics_->IsDeviceLost())
        {
            ATOMIC_LOGWARNING("Index buffer data assignment while device is lost");
            dataPending_ = true;
            return true;
        }

        void* hwData = MapBuffer(0, indexCount_, true);
        if (hwData)
        {
            memcpy(hwData, data, indexCount_ * indexSize_);
            UnmapBuffer();
        }
        else
            return false;
    }

    dataLost_ = false;
    return true;
}
Example #17
bool VertexBuffer::SetData(const void* data)
{
    if (!data)
    {
        URHO3D_LOGERROR("Null pointer for vertex buffer data");
        return false;
    }

    if (!vertexSize_)
    {
        URHO3D_LOGERROR("Vertex elements not defined, can not set vertex buffer data");
        return false;
    }

    if (shadowData_ && data != shadowData_.Get())
        memcpy(shadowData_.Get(), data, vertexCount_ * vertexSize_);

    if (object_)
    {
        if (graphics_->IsDeviceLost())
        {
            URHO3D_LOGWARNING("Vertex buffer data assignment while device is lost");
            dataPending_ = true;
            return true;
        }

        void* hwData = MapBuffer(0, vertexCount_, true);
        if (hwData)
        {
            memcpy(hwData, data, vertexCount_ * vertexSize_);
            UnmapBuffer();
        }
        else
            return false;
    }

    dataLost_ = false;
    return true;
}
Example #18
/*******************************************************************************
 *
 * Buffer functions.
 *
 ******************************************************************************/
int
rb_create_buffer
  (struct rb_context* ctxt,
   const struct rb_buffer_desc* public_desc,
   const void* init_data,
   struct rb_buffer** out_buffer)
{
  const struct rb_ogl3_buffer_desc private_desc = {
    .size = public_desc->size,
    .target = public_to_private_rb_target(public_desc->target),
    .usage = public_desc->usage
  };
  return rb_ogl3_create_buffer(ctxt, &private_desc, init_data, out_buffer);
}

int
rb_buffer_ref_get(struct rb_buffer* buffer)
{
  if(!buffer)
    return -1;
  ref_get(&buffer->ref);
  return 0;
}

int
rb_buffer_ref_put(struct rb_buffer* buffer)
{
  if(!buffer)
    return -1;
  ref_put(&buffer->ref, release_buffer);
  return 0;
}

int
rb_bind_buffer
  (struct rb_context* ctxt,
   struct rb_buffer* buffer, 
   enum rb_buffer_target target)
{
  return rb_ogl3_bind_buffer(ctxt, buffer, public_to_private_rb_target(target));
}

int
rb_buffer_data
  (struct rb_buffer* buffer,
   int offset,
   int size,
   const void* data)
{
  void* mapped_mem = NULL;
  GLboolean unmap = GL_FALSE;

  if(!buffer
  || (offset < 0)
  || (size < 0)
  || (size != 0 && !data)
  || (buffer->size < offset + size))
    return -1;

  if(size == 0)
    return 0;

  OGL(BindBuffer(buffer->target, buffer->name));

  if(offset == 0 && size == buffer->size) {
    mapped_mem = OGL(MapBuffer(buffer->target, GL_WRITE_ONLY));
  } else {
    const GLbitfield access = GL_MAP_WRITE_BIT;
    mapped_mem = OGL(MapBufferRange(buffer->target, offset, size, access));
  }
  ASSERT(mapped_mem != NULL);
  memcpy(mapped_mem, data, (size_t)size);
  unmap = OGL(UnmapBuffer(buffer->target));
  OGL(BindBuffer
    (buffer->target, 
     buffer->ctxt->state_cache.buffer_binding[buffer->binding]));

  /* unmap == GL_FALSE must be handled by the application. TODO: return a
   * distinct error code to differentiate this case from the other failures. */
  return unmap == GL_TRUE ? 0 : -1;
}
Example #19
GLubyte* VBO::MapBuffer(GLbitfield access)
{
	assert(!mapped);
	return MapBuffer(0, size, access);
}
Example #20
	void* D3D11GeometryBuffer::MapIndexBuffer(MAP_HINT hint)
	{
		return MapBuffer(m_pIndexBuffer, hint);
	}
Example #21
	void* D3D11RenderData::MapVertexBuffer(MAP_HINT hint)
	{
		return MapBuffer(m_pVertexBuffer, hint);
	}