// CREATORS Blob::Blob(bslma::Allocator *basicAllocator) : d_buffers(basicAllocator) , d_totalSize(0) , d_dataLength(0) , d_dataIndex(0) , d_preDataIndexLength(0) , d_bufferFactory_p(InvalidBlobBufferFactory::factory(0)) { BSLS_ASSERT_SAFE(0 == assertInvariants()); }
/// Create a blob holding a copy of the buffers and bookkeeping state of
/// the specified 'original', allocating from the specified
/// 'basicAllocator'.  The factory is deliberately NOT copied: the copy
/// is bound to the invalid placeholder factory.
Blob::Blob(const Blob& original, bslma::Allocator *basicAllocator)
: d_buffers(original.d_buffers, basicAllocator)
, d_totalSize(original.d_totalSize)
, d_dataLength(original.d_dataLength)
, d_dataIndex(original.d_dataIndex)
, d_preDataIndexLength(original.d_preDataIndexLength)
, d_bufferFactory_p(InvalidBlobBufferFactory::factory(0))
{
    // Invariants must hold for the copy exactly as they did for
    // 'original'.
    BSLS_ASSERT_SAFE(0 == assertInvariants());
}
/// Create a blob seeded with the specified 'numBuffers' buffers from the
/// specified 'buffers' array, using the specified 'factory' for future
/// buffer growth and the specified 'basicAllocator' for memory.  The
/// total capacity is derived by summing the size of every seeded buffer;
/// no data is considered present yet.
Blob::Blob(const BlobBuffer  *buffers,
           int                numBuffers,
           BlobBufferFactory *factory,
           bslma::Allocator  *basicAllocator)
: d_buffers(buffers, buffers + numBuffers, basicAllocator)
, d_totalSize(0)
, d_dataLength(0)
, d_dataIndex(0)
, d_preDataIndexLength(0)
, d_bufferFactory_p(InvalidBlobBufferFactory::factory(factory))
{
    // Accumulate capacity locally, then publish once; each buffer must
    // report a non-negative size.
    int capacity = 0;
    for (BlobBufferConstIterator cursor = d_buffers.begin();
         cursor != d_buffers.end();
         ++cursor) {
        BSLS_ASSERT(0 <= cursor->size());
        capacity += cursor->size();
    }
    d_totalSize = capacity;

    BSLS_ASSERT_SAFE(0 == assertInvariants());
}
// Recompute which textures fall above the priority cutoff for the current
// memory budget, and reorder backings into eviction/recycling order.
// Updates m_priorityCutoff, m_memoryAvailableBytes and
// m_memoryAboveCutoffBytes as side effects.
void CCPrioritizedTextureManager::prioritizeTextures()
{
    TRACE_EVENT0("cc", "CCPrioritizedTextureManager::prioritizeTextures");
#if !ASSERT_DISABLED
    assertInvariants();
#endif

    // Sorting textures in this function could be replaced by a slightly
    // modified O(n) quick-select to partition textures rather than
    // sort them (if performance of the sort becomes an issue).

    // Scratch vectors are members so their capacity is reused across calls;
    // both are cleared again before returning.
    TextureVector& sortedTextures = m_tempTextureVector;
    BackingVector& sortedBackings = m_tempBackingVector;
    sortedTextures.clear();
    sortedBackings.clear();

    // Copy all textures into a vector and sort them.
    for (TextureSet::iterator it = m_textures.begin(); it != m_textures.end(); ++it)
        sortedTextures.append(*it);
    std::sort(sortedTextures.begin(), sortedTextures.end(), compareTextures);

    // Walk textures in priority order, accepting them until the budget is
    // exhausted; the first texture that does not fit sets the cutoff.
    m_memoryAvailableBytes = m_maxMemoryLimitBytes;
    m_priorityCutoff = CCPriorityCalculator::lowestPriority();
    size_t memoryBytes = 0;
    for (TextureVector::iterator it = sortedTextures.begin(); it != sortedTextures.end(); ++it) {
        // lowestPriority() marks the start of the don't-care tail (the
        // vector is sorted), so stop scanning.
        if ((*it)->requestPriority() == CCPriorityCalculator::lowestPriority())
            break;

        if ((*it)->isSelfManaged()) {
            // Account for self-managed memory immediately by reducing the memory
            // available (since it never gets acquired).
            size_t newMemoryBytes = memoryBytes + (*it)->bytes();
            if (newMemoryBytes > m_memoryAvailableBytes) {
                // Over budget: record the cutoff and clamp the remaining
                // budget to the bytes already accepted.
                m_priorityCutoff = (*it)->requestPriority();
                m_memoryAvailableBytes = memoryBytes;
                break;
            }
            // Note: memoryBytes is intentionally NOT advanced here; the
            // self-managed bytes are subtracted from the available budget
            // instead of counting toward acquired memory.
            m_memoryAvailableBytes -= (*it)->bytes();
        } else {
            size_t newMemoryBytes = memoryBytes + (*it)->bytes();
            if (newMemoryBytes > m_memoryAvailableBytes) {
                m_priorityCutoff = (*it)->requestPriority();
                break;
            }
            memoryBytes = newMemoryBytes;
        }
    }

    // Only allow textures if they are higher than the cutoff. All textures
    // of the same priority are accepted or rejected together, rather than
    // being partially allowed randomly.
    m_memoryAboveCutoffBytes = 0;
    for (TextureVector::iterator it = sortedTextures.begin(); it != sortedTextures.end(); ++it) {
        bool isAbovePriorityCutoff = CCPriorityCalculator::priorityIsHigher((*it)->requestPriority(), m_priorityCutoff);
        (*it)->setAbovePriorityCutoff(isAbovePriorityCutoff);
        // Self-managed textures were already charged against
        // m_memoryAvailableBytes above, so they are excluded here.
        if (isAbovePriorityCutoff && !(*it)->isSelfManaged())
            m_memoryAboveCutoffBytes += (*it)->bytes();
    }
    ASSERT(m_memoryAboveCutoffBytes <= m_memoryAvailableBytes);

    // Put backings in eviction/recycling order.
    for (BackingSet::iterator it = m_backings.begin(); it != m_backings.end(); ++it)
        sortedBackings.append(*it);
    std::sort(sortedBackings.begin(), sortedBackings.end(), compareBackings);
    for (BackingVector::iterator it = sortedBackings.begin(); it != sortedBackings.end(); ++it) {
        // remove+add re-inserts each backing in sorted order — presumably
        // m_backings is an insertion-ordered set (e.g. ListHashSet) where
        // re-adding moves the entry to the end; confirm container semantics.
        m_backings.remove(*it);
        m_backings.add(*it);
    }

    // Release the scratch storage contents (capacity is retained by the
    // member vectors for the next call).
    sortedTextures.clear();
    sortedBackings.clear();

#if !ASSERT_DISABLED
    assertInvariants();
    ASSERT(memoryAboveCutoffBytes() <= maxMemoryLimitBytes());
#endif
}
/// Destroy this blob.  In safe mode, verify the class invariants one
/// last time before the buffer vector is released.
Blob::~Blob()
{
    BSLS_ASSERT_SAFE(0 == assertInvariants());
}