bool addSubImage(int width, int height, const void* image, SkIPoint16* loc, size_t rowBytes) { if (!fRects->addRect(width, height, loc)) { return false; } if (!fData) { fData = reinterpret_cast<unsigned char*>(sk_calloc_throw(fBytesPerPixel * fWidth * fHeight)); } const unsigned char* imagePtr = (const unsigned char*)image; // point ourselves at the right starting spot unsigned char* dataPtr = fData; dataPtr += fBytesPerPixel * fWidth * loc->fY; dataPtr += fBytesPerPixel * loc->fX; // copy into the data buffer for (int i = 0; i < height; ++i) { memcpy(dataPtr, imagePtr, rowBytes); dataPtr += fBytesPerPixel * fWidth; imagePtr += rowBytes; } fDirtyRect.join(loc->fX, loc->fY, loc->fX + width, loc->fY + height); adjust_for_offset(loc, fOffset); SkDEBUGCODE(fDirty = true;) return true;
// Builds a buffer wrapper that is backed either by CPU memory (when the
// descriptor carries no GL buffer ID) or by an already-created GL buffer
// object (when desc.fID is non-zero).
GrGLBufferImpl::GrGLBufferImpl(GrGLGpu* gpu, const Desc& desc, GrGLenum bufferType)
    : fDesc(desc)
    , fBufferType(bufferType)
    , fMapPtr(nullptr) {
    if (desc.fID == 0) {
        // CPU-backed: allocate client-side storage. Zero it when the caps
        // require uploaded buffer data to be cleared.
        const bool mustClear = gpu->caps()->mustClearUploadedBufferData();
        fCPUData = mustClear ? sk_calloc_throw(desc.fSizeInBytes)
                             : sk_malloc_flags(desc.fSizeInBytes, SK_MALLOC_THROW);
        fGLSizeInBytes = 0;
    } else {
        fCPUData = nullptr;
        // We assume that the GL buffer was created at the desc's size initially.
        fGLSizeInBytes = fDesc.fSizeInBytes;
    }
    VALIDATE();
}
// Constructs a GL-backend buffer. CPU-backed buffers get client-side storage;
// otherwise a GL buffer object is generated and sized via BufferData, with the
// allocation failure detected through the GL error-check macro protocol
// (CLEAR_ERROR_BEFORE_ALLOC / GL_ALLOC_CALL / CHECK_ALLOC_ERROR — the order of
// these three calls is significant and must not be rearranged).
GrGLBuffer::GrGLBuffer(GrGLGpu* gpu, size_t size, GrBufferType intendedType,
                       GrAccessPattern accessPattern, bool cpuBacked, const void* data)
    : INHERITED(gpu, size, intendedType, accessPattern, cpuBacked),
      fCPUData(nullptr),
      fIntendedType(intendedType),
      fBufferID(0),
      fSizeInBytes(size),
      fUsage(gr_to_gl_access_pattern(intendedType, accessPattern)),
      fGLSizeInBytes(0),
      fHasAttachedToTexture(false) {
    if (this->isCPUBacked()) {
        // Core profile uses vertex array objects, which disallow client side arrays.
        SkASSERT(!gpu->glCaps().isCoreProfile());
        // Zero the storage when the caps require uploaded data to be cleared.
        if (gpu->caps()->mustClearUploadedBufferData()) {
            fCPUData = sk_calloc_throw(fSizeInBytes);
        } else {
            fCPUData = sk_malloc_flags(fSizeInBytes, SK_MALLOC_THROW);
        }
        // Optional initial contents.
        if (data) {
            memcpy(fCPUData, data, fSizeInBytes);
        }
    } else {
        GL_CALL(GenBuffers(1, &fBufferID));
        if (fBufferID) {
            GrGLenum target = gpu->bindBuffer(fIntendedType, this);
            CLEAR_ERROR_BEFORE_ALLOC(gpu->glInterface());
            // make sure driver can allocate memory for this buffer
            GL_ALLOC_CALL(gpu->glInterface(),
                          BufferData(target, (GrGLsizeiptr) fSizeInBytes, data, fUsage));
            if (CHECK_ALLOC_ERROR(gpu->glInterface()) != GR_GL_NO_ERROR) {
                // Allocation failed: release the GL object and fall back to an
                // ID of 0 so fGLSizeInBytes stays 0.
                GL_CALL(DeleteBuffers(1, &fBufferID));
                fBufferID = 0;
            } else {
                fGLSizeInBytes = fSizeInBytes;
            }
        }
    }
    VALIDATE();
    // Register with the resource cache as a budgeted resource.
    this->registerWithCache(SkBudgeted::kYes);
}
bool GrBatchAtlas::BatchPlot::addSubImage(int width, int height, const void* image, SkIPoint16* loc) { SkASSERT(width <= fWidth && height <= fHeight); if (!fRects) { fRects = GrRectanizer::Factory(fWidth, fHeight); } if (!fRects->addRect(width, height, loc)) { return false; } if (!fData) { fData = reinterpret_cast<unsigned char*>(sk_calloc_throw(fBytesPerPixel * fWidth * fHeight)); } size_t rowBytes = width * fBytesPerPixel; const unsigned char* imagePtr = (const unsigned char*)image; // point ourselves at the right starting spot unsigned char* dataPtr = fData; dataPtr += fBytesPerPixel * fWidth * loc->fY; dataPtr += fBytesPerPixel * loc->fX; // copy into the data buffer for (int i = 0; i < height; ++i) { memcpy(dataPtr, imagePtr, rowBytes); dataPtr += fBytesPerPixel * fWidth; imagePtr += rowBytes; } fDirtyRect.join(loc->fX, loc->fY, loc->fX + width, loc->fY + height); loc->fX += fOffset.fX; loc->fY += fOffset.fY; SkDEBUGCODE(fDirty = true;) return true;
void* sk_calloc_throw(size_t count, size_t elemSize) { return sk_calloc_throw(SkSafeMath::Mul(count, elemSize)); }