void GrBufferAllocPool::reset() { VALIDATE(); fBytesInUse = 0; if (fBlocks.count()) { GrGeometryBuffer* buffer = fBlocks.back().fBuffer; if (buffer->isMapped()) { UNMAP_BUFFER(fBlocks.back()); } } // fPreallocBuffersInUse will be decremented down to zero in the while loop int preallocBuffersInUse = fPreallocBuffersInUse; while (!fBlocks.empty()) { this->destroyBlock(); } if (fPreallocBuffers.count()) { // must set this after above loop. fPreallocBufferStartIdx = (fPreallocBufferStartIdx + preallocBuffersInUse) % fPreallocBuffers.count(); } // we may have created a large cpu mirror of a large VB. Reset the size // to match our pre-allocated VBs. fCpuData.reset(fMinBlockSize); SkASSERT(0 == fPreallocBuffersInUse); VALIDATE(); }
// Tears the pool down: unmaps the active block if needed, destroys all
// blocks, and releases the pre-allocated buffers and the GPU reference.
GrBufferAllocPool::~GrBufferAllocPool() {
    VALIDATE();
    if (fBlocks.count()) {
        // Only the back (current) block can be mapped; unmap before destroy.
        GrGeometryBuffer* active = fBlocks.back().fBuffer;
        if (active->isMapped()) {
            UNMAP_BUFFER(fBlocks.back());
        }
    }
    while (!fBlocks.empty()) {
        this->destroyBlock();
    }
    fPreallocBuffers.unrefAll();
    fGpu->unref();
}
void GrBufferAllocPool::flushCpuData(const BufferBlock& block, size_t flushSize) { GrGeometryBuffer* buffer = block.fBuffer; SkASSERT(buffer); SkASSERT(!buffer->isMapped()); SkASSERT(fCpuData.get() == fBufferPtr); SkASSERT(flushSize <= buffer->gpuMemorySize()); VALIDATE(true); if (GrCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags() && flushSize > fGeometryBufferMapThreshold) { void* data = buffer->map(); if (data) { memcpy(data, fBufferPtr, flushSize); UNMAP_BUFFER(block); return; } } buffer->updateData(fBufferPtr, flushSize); VALIDATE(true); }