void GrGLBuffer::onMap() { if (this->wasDestroyed()) { return; } VALIDATE(); SkASSERT(!this->isMapped()); if (0 == fBufferID) { fMapPtr = fCPUData; VALIDATE(); return; } // TODO: Make this a function parameter. bool readOnly = (kXferGpuToCpu_GrBufferType == fIntendedType); // Handling dirty context is done in the bindBuffer call switch (this->glCaps().mapBufferType()) { case GrGLCaps::kNone_MapBufferType: break; case GrGLCaps::kMapBuffer_MapBufferType: { GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this); // Let driver know it can discard the old data if (GR_GL_USE_BUFFER_DATA_NULL_HINT || fGLSizeInBytes != fSizeInBytes) { GL_CALL(BufferData(target, fSizeInBytes, nullptr, fUsage)); } GL_CALL_RET(fMapPtr, MapBuffer(target, readOnly ? GR_GL_READ_ONLY : GR_GL_WRITE_ONLY)); break; } case GrGLCaps::kMapBufferRange_MapBufferType: { GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this); // Make sure the GL buffer size agrees with fDesc before mapping. if (fGLSizeInBytes != fSizeInBytes) { GL_CALL(BufferData(target, fSizeInBytes, nullptr, fUsage)); } GrGLbitfield writeAccess = GR_GL_MAP_WRITE_BIT; if (kXferCpuToGpu_GrBufferType != fIntendedType) { // TODO: Make this a function parameter. writeAccess |= GR_GL_MAP_INVALIDATE_BUFFER_BIT; } GL_CALL_RET(fMapPtr, MapBufferRange(target, 0, fSizeInBytes, readOnly ? GR_GL_MAP_READ_BIT : writeAccess)); break; } case GrGLCaps::kChromium_MapBufferType: { GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this); // Make sure the GL buffer size agrees with fDesc before mapping. if (fGLSizeInBytes != fSizeInBytes) { GL_CALL(BufferData(target, fSizeInBytes, nullptr, fUsage)); } GL_CALL_RET(fMapPtr, MapBufferSubData(target, 0, fSizeInBytes, readOnly ? GR_GL_READ_ONLY : GR_GL_WRITE_ONLY)); break; } } fGLSizeInBytes = fSizeInBytes; VALIDATE(); }
void GrGLBuffer::onMap() { SkASSERT(fBufferID); SkASSERT(!this->wasDestroyed()); VALIDATE(); SkASSERT(!this->isMapped()); // TODO: Make this a function parameter. bool readOnly = (GrGpuBufferType::kXferGpuToCpu == fIntendedType); // Handling dirty context is done in the bindBuffer call switch (this->glCaps().mapBufferType()) { case GrGLCaps::kNone_MapBufferType: return; case GrGLCaps::kMapBuffer_MapBufferType: { GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this); if (!readOnly) { // Let driver know it can discard the old data if (this->glCaps().useBufferDataNullHint() || fGLSizeInBytes != this->size()) { GL_CALL(BufferData(target, this->size(), nullptr, fUsage)); } } GL_CALL_RET(fMapPtr, MapBuffer(target, readOnly ? GR_GL_READ_ONLY : GR_GL_WRITE_ONLY)); break; } case GrGLCaps::kMapBufferRange_MapBufferType: { GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this); // Make sure the GL buffer size agrees with fDesc before mapping. if (fGLSizeInBytes != this->size()) { GL_CALL(BufferData(target, this->size(), nullptr, fUsage)); } GrGLbitfield access; if (readOnly) { access = GR_GL_MAP_READ_BIT; } else { access = GR_GL_MAP_WRITE_BIT; if (GrGpuBufferType::kXferCpuToGpu != fIntendedType) { // TODO: Make this a function parameter. access |= GR_GL_MAP_INVALIDATE_BUFFER_BIT; } } GL_CALL_RET(fMapPtr, MapBufferRange(target, 0, this->size(), access)); break; } case GrGLCaps::kChromium_MapBufferType: { GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this); // Make sure the GL buffer size agrees with fDesc before mapping. if (fGLSizeInBytes != this->size()) { GL_CALL(BufferData(target, this->size(), nullptr, fUsage)); } GL_CALL_RET(fMapPtr, MapBufferSubData(target, 0, this->size(), readOnly ? GR_GL_READ_ONLY : GR_GL_WRITE_ONLY)); break; } } fGLSizeInBytes = this->size(); VALIDATE(); }
// Maps this buffer for writing and returns a CPU pointer to its storage
// (also cached in fMapPtr). CPU-backed buffers (fDesc.fID == 0) return the
// heap allocation directly; otherwise the GL buffer is bound and mapped via
// whichever mechanism the caps report. Returns NULL when mapping is
// unsupported (kNone). All mappings here are write-only; this path has no
// read-back support.
void* GrGLBufferImpl::map(GrGpuGL* gpu) {
    VALIDATE();
    SkASSERT(!this->isMapped());
    if (0 == fDesc.fID) {
        // CPU-backed buffer: no GL object, just expose the heap storage.
        fMapPtr = fCPUData;
    } else {
        switch (gpu->glCaps().mapBufferType()) {
            case GrGLCaps::kNone_MapBufferType:
                // No mapping mechanism available on this GL.
                VALIDATE();
                return NULL;
            case GrGLCaps::kMapBuffer_MapBufferType:
                this->bind(gpu);
                // Let driver know it can discard the old data (orphaning);
                // also (re)allocates when the GL size disagrees with fDesc.
                if (GR_GL_USE_BUFFER_DATA_NULL_HINT || fDesc.fSizeInBytes != fGLSizeInBytes) {
                    fGLSizeInBytes = fDesc.fSizeInBytes;
                    GL_CALL(gpu, BufferData(fBufferType, fGLSizeInBytes, NULL,
                                            fDesc.fDynamic ? DYNAMIC_USAGE_PARAM : GR_GL_STATIC_DRAW));
                }
                GR_GL_CALL_RET(gpu->glInterface(), fMapPtr, MapBuffer(fBufferType, GR_GL_WRITE_ONLY));
                break;
            case GrGLCaps::kMapBufferRange_MapBufferType: {
                this->bind(gpu);
                // Make sure the GL buffer size agrees with fDesc before mapping.
                if (fDesc.fSizeInBytes != fGLSizeInBytes) {
                    fGLSizeInBytes = fDesc.fSizeInBytes;
                    GL_CALL(gpu, BufferData(fBufferType, fGLSizeInBytes, NULL,
                                            fDesc.fDynamic ? DYNAMIC_USAGE_PARAM : GR_GL_STATIC_DRAW));
                }
                // INVALIDATE_BUFFER tells the driver the whole old contents
                // may be discarded, mirroring the BufferData(NULL) hint above.
                static const GrGLbitfield kAccess = GR_GL_MAP_INVALIDATE_BUFFER_BIT | GR_GL_MAP_WRITE_BIT;
                GR_GL_CALL_RET(gpu->glInterface(), fMapPtr,
                               MapBufferRange(fBufferType, 0, fGLSizeInBytes, kAccess));
                break;
            }
            case GrGLCaps::kChromium_MapBufferType:
                this->bind(gpu);
                // Make sure the GL buffer size agrees with fDesc before mapping.
                if (fDesc.fSizeInBytes != fGLSizeInBytes) {
                    fGLSizeInBytes = fDesc.fSizeInBytes;
                    GL_CALL(gpu, BufferData(fBufferType, fGLSizeInBytes, NULL,
                                            fDesc.fDynamic ? DYNAMIC_USAGE_PARAM : GR_GL_STATIC_DRAW));
                }
                // Chromium extension: CPU-side shadow mapped via MapBufferSubData.
                GR_GL_CALL_RET(gpu->glInterface(), fMapPtr,
                               MapBufferSubData(fBufferType, 0, fGLSizeInBytes, GR_GL_WRITE_ONLY));
                break;
        }
    }
    VALIDATE();
    return fMapPtr;
}
/******************************************************************************* * * Buffer functions. * ******************************************************************************/ int rb_create_buffer (struct rb_context* ctxt, const struct rb_buffer_desc* public_desc, const void* init_data, struct rb_buffer** out_buffer) { const struct rb_ogl3_buffer_desc private_desc = { .size = public_desc->size, .target = public_to_private_rb_target(public_desc->target), .usage = public_desc->usage }; return rb_ogl3_create_buffer(ctxt, &private_desc,init_data, out_buffer); } int rb_buffer_ref_get(struct rb_buffer* buffer) { if(!buffer) return -1; ref_get(&buffer->ref); return 0; } int rb_buffer_ref_put(struct rb_buffer* buffer) { if(!buffer) return -1; ref_put(&buffer->ref, release_buffer); return 0; } int rb_bind_buffer (struct rb_context* ctxt, struct rb_buffer* buffer, enum rb_buffer_target target) { return rb_ogl3_bind_buffer(ctxt, buffer, public_to_private_rb_target(target)); } int rb_buffer_data (struct rb_buffer* buffer, int offset, int size, const void* data) { void* mapped_mem = NULL; GLboolean unmap = GL_FALSE; if(!buffer || (offset < 0) || (size < 0) || (size != 0 && !data) || (buffer->size < offset + size)) return -1; if(size == 0) return 0; OGL(BindBuffer(buffer->target, buffer->name)); if(offset == 0 && size == buffer->size) { mapped_mem = OGL(MapBuffer(buffer->target, GL_WRITE_ONLY)); } else { const GLbitfield access = GL_MAP_WRITE_BIT; mapped_mem = OGL(MapBufferRange(buffer->target, offset, size, access)); } ASSERT(mapped_mem != NULL); memcpy(mapped_mem, data, (size_t)size); unmap = OGL(UnmapBuffer(buffer->target)); OGL(BindBuffer (buffer->target, buffer->ctxt->state_cache.buffer_binding[buffer->binding])); /* unmap == GL_FALSE must be handled by the application. TODO return a real * error code to differentiate this case from the error. */ return unmap == GL_TRUE ? 0 : -1; }