// Drops the current image contents and leaves the (now empty) image in the
// CPU state.
void Image::discard()
{
    assertValid();
    setEmpty();
    // setEmpty() may have torn down the GPU surface; reflect that in state.
    m_State = CPU;
    assertValid();
}
// Loads the bitmap for sFilename and makes it this image's source, optionally
// converting it to B5G6R5. If the image currently lives on the GPU, the
// surface is destroyed and rebuilt from the new bitmap.
// Throws AVG_ERR_UNSUPPORTED if B5G6R5 compression is requested for a bitmap
// with an alpha channel.
void Image::setFilename(const std::string& sFilename, TextureCompression comp)
{
    assertValid();
    AVG_TRACE(Logger::category::MEMORY, Logger::severity::INFO, "Loading " << sFilename);
    BitmapPtr pBmp = loadBitmap(sFilename);
    if (comp == TEXTURECOMPRESSION_B5G6R5 && pBmp->hasAlpha()) {
        throw Exception(AVG_ERR_UNSUPPORTED, "B5G6R5-compressed textures with an alpha channel are not supported.");
    }
    changeSource(FILE);
    m_pBmp = pBmp;
    m_sFilename = sFilename;
    switch (comp) {
        case TEXTURECOMPRESSION_B5G6R5:
            // Replace m_pBmp with a B5G6R5 conversion of the loaded bitmap.
            m_pBmp = BitmapPtr(new Bitmap(pBmp->getSize(), B5G6R5, sFilename));
            if (!BitmapLoader::get()->isBlueFirst()) {
                // Normalize channel order before the conversion copy below.
                // pBmp is local here, so mutating it in place is safe.
                FilterFlipRGB().applyInPlace(pBmp);
            }
            m_pBmp->copyPixels(*pBmp);
            break;
        case TEXTURECOMPRESSION_NONE:
            break;
        default:
            assert(false);
    }
    if (m_State == GPU) {
        // Already uploaded: recreate the surface from the new bitmap.
        m_pSurface->destroy();
        setupSurface();
    }
    assertValid();
}
// Makes a private copy of pBmp this image's source (the caller's bitmap is
// never modified), optionally converting the copy to B5G6R5, and rebuilds the
// GPU surface if the image is currently on the GPU.
// Throws AVG_ERR_UNSUPPORTED if pBmp is null or if B5G6R5 compression is
// requested for a bitmap with an alpha channel.
void GPUImage::setBitmap(BitmapPtr pBmp, TexCompression comp)
{
    assertValid();
    if (!pBmp) {
        throw Exception(AVG_ERR_UNSUPPORTED, "setBitmap(): bitmap must not be None!");
    }
    if (comp == TEXCOMPRESSION_B5G6R5 && pBmp->hasAlpha()) {
        throw Exception(AVG_ERR_UNSUPPORTED, "B5G6R5-compressed textures with an alpha channel are not supported.");
    }
    unload();
    changeSource(BITMAP);
    // Deep-copy the input so the in-place channel flip below can't touch the
    // caller's bitmap.
    m_pBmp = BitmapPtr(new Bitmap(pBmp->getSize(), pBmp->getPixelFormat(), ""));
    m_pBmp->copyPixels(*pBmp);
    if (comp == TEXCOMPRESSION_B5G6R5) {
        BitmapPtr pDestBmp = BitmapPtr(new Bitmap(pBmp->getSize(), B5G6R5, ""));
        if (!BitmapLoader::get()->isBlueFirst()) {
            // Normalize channel order on our copy before converting.
            FilterFlipRGB().applyInPlace(m_pBmp);
        }
        pDestBmp->copyPixels(*m_pBmp);
        m_pBmp = pDestBmp;
    }
    if (m_State == GPU) {
        setupBitmapSurface();
    }
    assertValid();
}
// Releases any loaded data and resets the image to have no source.
void GPUImage::setEmpty()
{
    assertValid();
    unload();
    changeSource(NONE);
    assertValid();
}
void ImageCache::checkGPUUnload() { if (m_GPUCacheUsed > m_GPUCacheCapacity) { LRUListType::reverse_iterator it = m_pLRUList.rbegin(); // Find first item that actually has a texture loaded. while (it != m_pLRUList.rend() && !((*it)->hasTex())) { it++; } if (it != m_pLRUList.rend()) { while (m_GPUCacheUsed > m_GPUCacheCapacity) { assertValid(); CachedImagePtr pImg = *it; if (pImg->getRefCount(CachedImage::STORAGE_GPU) == 0) { if (pImg->hasTex()) { m_GPUCacheUsed -= pImg->getMemUsed(CachedImage::STORAGE_GPU); pImg->unloadTex(); } ++it; } else { // Cache full, but everything's in use. break; } } } } assertValid(); }
// Moves the image back to CPU-only storage, destroying the GPU surface.
// No-op if the image isn't on the GPU.
void Image::moveToCPU()
{
    assertValid();
    if (m_State != GPU) {
        assertValid();
        return;
    }
    m_State = CPU;
    m_pSurface->destroy();
    assertValid();
}
// Clears the image source; tears down the GPU surface first if one exists.
void Image::setEmpty()
{
    assertValid();
    const bool bOnGPU = (m_State == GPU);
    if (bOnGPU) {
        m_pSurface->destroy();
    }
    changeSource(NONE);
    assertValid();
}
// Moves the image back to CPU-only storage: destroys the surface and drops
// the texture reference held on the cached image (if any).
// No-op if the image isn't on the GPU.
void GPUImage::moveToCPU()
{
    assertValid();
    if (m_State != GPU) {
        assertValid();
        return;
    }
    m_State = CPU;
    m_pSurface->destroy();
    if (m_pImage) {
        m_pImage->decTexRef();
    }
    assertValid();
}
// Uses an offscreen canvas as the image source. If this exact canvas is
// already the current source, nothing happens. When the image is on the GPU,
// the surface is recreated immediately from the canvas texture.
void Image::setCanvas(OffscreenCanvasPtr pCanvas)
{
    assertValid();
    if (m_Source == SCENE && pCanvas == m_pCanvas) {
        // Same canvas already set; nothing to do.
        return;
    }
    changeSource(SCENE);
    m_pCanvas = pCanvas;
    if (m_State == GPU) {
        // Canvas images render B8G8R8X8 from the canvas's own texture; the
        // three extra texture slots are unused.
        m_pSurface->create(B8G8R8X8, m_pCanvas->getTex(), MCTexturePtr(), MCTexturePtr(), MCTexturePtr(), true);
    }
    assertValid();
}
// Applies a (possibly negative) memory-usage delta to the accounting of the
// cache tier identified by st.
void ImageCache::onSizeChange(int sizeDiff, CachedImage::StorageType st)
{
    const bool bIsCPUStorage = (st == CachedImage::STORAGE_CPU);
    if (bIsCPUStorage) {
        m_CPUCacheUsed += sizeDiff;
    } else {
        m_GPUCacheUsed += sizeDiff;
    }
    assertValid();
}
// Creates an empty GPUImage in CPU state with no source, rendering to
// pSurface. bUseMipmaps is stored for later surface/texture setup.
GPUImage::GPUImage(OGLSurface * pSurface, bool bUseMipmaps)
    : m_sFilename(""),
      m_pSurface(pSurface),
      m_State(CPU),
      m_Source(NONE),
      m_bUseMipmaps(bUseMipmaps)
{
    // Register with the leak-tracking object counter.
    ObjectCounter::get()->incRef(&typeid(*this));
    assertValid();
}
// Creates an empty Image in CPU state with no source, rendering to pSurface
// with the given material properties.
Image::Image(OGLSurface * pSurface, const MaterialInfo& material)
    : m_sFilename(""),
      m_pSurface(pSurface),
      m_State(CPU),
      m_Source(NONE),
      m_Material(material)
{
    // Register with the leak-tracking object counter.
    ObjectCounter::get()->incRef(&typeid(*this));
    assertValid();
}
// Makes a copy of pBmp this image's source, optionally converting it to
// B5G6R5, and (if on the GPU) uploads it, recreating the texture when the
// size/format/source changed.
// Throws AVG_ERR_UNSUPPORTED if pBmp is null or if B5G6R5 compression is
// requested for a bitmap with an alpha channel.
//
// Fix: the original called FilterFlipRGB().applyInPlace(pBmp) directly on the
// caller's bitmap, mutating it as a side effect. GPUImage::setBitmap copies
// before flipping; this version now does the same, flipping a private copy.
void Image::setBitmap(BitmapPtr pBmp, TextureCompression comp)
{
    assertValid();
    if (!pBmp) {
        throw Exception(AVG_ERR_UNSUPPORTED, "setBitmap(): bitmap must not be None!");
    }
    if (comp == TEXTURECOMPRESSION_B5G6R5 && pBmp->hasAlpha()) {
        throw Exception(AVG_ERR_UNSUPPORTED, "B5G6R5-compressed textures with an alpha channel are not supported.");
    }
    bool bSourceChanged = changeSource(BITMAP);
    PixelFormat pf;
    switch (comp) {
        case TEXTURECOMPRESSION_NONE:
            pf = pBmp->getPixelFormat();
            break;
        case TEXTURECOMPRESSION_B5G6R5:
            pf = B5G6R5;
            if (!BitmapLoader::get()->isBlueFirst()) {
                // Flip a private copy rather than the caller's bitmap.
                BitmapPtr pFlippedBmp(new Bitmap(pBmp->getSize(), pBmp->getPixelFormat(), ""));
                pFlippedBmp->copyPixels(*pBmp);
                FilterFlipRGB().applyInPlace(pFlippedBmp);
                pBmp = pFlippedBmp;
            }
            break;
        default:
            assert(false);
    }
    // Convert/copy into the bitmap that backs this image.
    m_pBmp = BitmapPtr(new Bitmap(pBmp->getSize(), pf, ""));
    m_pBmp->copyPixels(*pBmp);
    if (m_State == GPU) {
        MCTexturePtr pTex = m_pSurface->getTex();
        // A new texture is needed if the source kind, size or pixel format
        // no longer matches the existing surface.
        if (bSourceChanged || m_pSurface->getSize() != m_pBmp->getSize() ||
                m_pSurface->getPixelFormat() != pf)
        {
            pTex = GLContextManager::get()->createTexture(m_pBmp->getSize(), pf,
                    m_Material.getUseMipmaps(), m_Material.getWrapSMode(),
                    m_Material.getWrapTMode());
            m_pSurface->create(pf, pTex);
        }
        GLContextManager::get()->scheduleTexUpload(pTex, m_pBmp);
    }
    assertValid();
}
// Documented in header. zvalue symtabFromZassoc(zassoc ass) { if (DAT_CONSTRUCTION_PARANOIA) { for (zint i = 0; i < ass.size; i++) { assertValid(ass.elems[i].key); assertValid(ass.elems[i].value); } } if (ass.size == 0) { return EMPTY_SYMBOL_TABLE; } zvalue result = allocInstance(ass.size); SymbolTableInfo *info = getInfo(result); for (zint i = 0; i < ass.size; i++) { putInto(&result, &info, ass.elems[i]); } return result; }
void Image::moveToGPU() { assertValid(); if (m_State == CPU) { switch (m_Source) { case FILE: case BITMAP: setupSurface(); break; case SCENE: m_pSurface->create(B8G8R8X8, m_pCanvas->getTex(), MCTexturePtr(), MCTexturePtr(), MCTexturePtr(), true); break; case NONE: break; default: AVG_ASSERT(false); } m_State = GPU; } assertValid(); }
// Switches the image source to a file, sharing the bitmap through the global
// ImageCache. If the image is on the GPU, the surface is rebuilt from the
// cached image. Throws AVG_ERR_UNSUPPORTED (after releasing the cache
// reference) if B5G6R5 compression is requested for a bitmap with alpha.
void GPUImage::setFilename(const std::string& sFilename, TexCompression comp)
{
    assertValid();
    CachedImagePtr pImage = ImageCache::get()->getImage(sFilename, comp);
    BitmapPtr pBmp = pImage->getBmp();
    if (comp == TEXCOMPRESSION_B5G6R5 && pBmp->hasAlpha()) {
        // Release the bitmap reference obtained via getImage() before bailing.
        pImage->decBmpRef();
        throw Exception(AVG_ERR_UNSUPPORTED, "B5G6R5-compressed textures with an alpha channel are not supported.");
    }
    unload();
    m_pImage = pImage;
    m_pBmp = m_pImage->getBmp();
    changeSource(FILE);
    m_sFilename = sFilename;
    if (m_State == GPU) {
        // Already uploaded: recreate the surface from the cached image.
        m_pSurface->destroy();
        setupImageSurface();
    }
    assertValid();
}
void ImageCache::checkCPUUnload() { while (m_CPUCacheUsed > m_CPUCacheCapacity) { CachedImagePtr pImg = *(m_pLRUList.rbegin()); if (pImg->getRefCount(CachedImage::STORAGE_CPU) == 0) { m_pImageMap.erase(pImg->getFilename()); m_pLRUList.pop_back(); m_CPUCacheUsed -= pImg->getMemUsed(CachedImage::STORAGE_CPU); m_GPUCacheUsed -= pImg->getMemUsed(CachedImage::STORAGE_GPU); } else { // Cache full, but everything's in use. break; } } assertValid(); checkGPUUnload(); }
// Recursively validates this btree bucket and all of its children, returning
// the total count of used keys in the subtree. If unusedCount is non-null,
// unused key slots are tallied into it. Checks (via wassert) that every
// child bucket points back at this bucket as its parent.
int BucketBasics::fullValidate(const DiskLoc& thisLoc, const BSONObj &order, int *unusedCount) {
    {
        // Sanity check that assert() is compiled in and active in this build.
        bool f = false;
        assert( f = true );
        massert( 10281 , "assert is misdefined", f);
    }
    killCurrentOp.checkForInterrupt();
    // Validate this bucket's own invariants first.
    assertValid(order, true);
    // if( bt_fv==0 )
    //   return;
    if ( bt_dmp ) {
        // Debug dump of the bucket when the bt_dmp flag is set.
        out() << thisLoc.toString() << ' ';
        ((BtreeBucket *) this)->dump();
    }
    // keycount
    int kc = 0;
    for ( int i = 0; i < n; i++ ) {
        _KeyNode& kn = k(i);
        if ( kn.isUsed() ) {
            kc++;
        } else {
            if ( unusedCount ) {
                ++( *unusedCount );
            }
        }
        if ( !kn.prevChildBucket.isNull() ) {
            // Recurse into the left child of this key.
            DiskLoc left = kn.prevChildBucket;
            BtreeBucket *b = left.btree();
            wassert( b->parent == thisLoc );
            kc += b->fullValidate(kn.prevChildBucket, order, unusedCount);
        }
    }
    if ( !nextChild.isNull() ) {
        // Recurse into the rightmost child.
        BtreeBucket *b = nextChild.btree();
        wassert( b->parent == thisLoc );
        kc += b->fullValidate(nextChild, order, unusedCount);
    }
    return kc;
}
// Returns the cached image for sFilename, loading and inserting it on a
// cache miss. On a hit, the bitmap refcount is incremented and the entry is
// moved to the front (most-recently-used end) of the LRU list.
// NOTE(review): the miss path does not call incBmpRef() — presumably
// CachedImage's constructor starts with a reference for the creator; confirm
// against CachedImage.
CachedImagePtr ImageCache::getImage(const std::string& sFilename, TexCompression compression)
{
    ImageMap::iterator it = m_pImageMap.find(sFilename);
    CachedImagePtr pImg;
    if (it == m_pImageMap.end()) {
        // Miss: load and register as most recently used. The map stores list
        // iterators so hits can splice in O(1).
        pImg = CachedImagePtr(new CachedImage(sFilename, compression));
        m_pLRUList.push_front(pImg);
        m_pImageMap.insert(make_pair(sFilename, m_pLRUList.begin()));
        m_CPUCacheUsed += pImg->getMemUsed(CachedImage::STORAGE_CPU);
        // Adding the image may have pushed the cache over capacity.
        checkCPUUnload();
    } else {
        pImg = *(it->second);
        pImg->incBmpRef(compression);
        // Move item to front of list
        m_pLRUList.splice(m_pLRUList.begin(), m_pLRUList, it->second);
    }
    assertValid();
    return pImg;
}
// Documented in header. zvalue symtabFromZarray(zarray arr) { if (DAT_CONSTRUCTION_PARANOIA) { for (zint i = 0; i < arr.size; i++) { assertValid(arr.elems[i]); } } if (arr.size == 0) { return EMPTY_SYMBOL_TABLE; } else if ((arr.size & 1) != 0) { die("Odd argument count for symbol table construction."); } zvalue result = allocInstance(arr.size >> 1); SymbolTableInfo *info = getInfo(result); for (zint i = 0; i < arr.size; i += 2) { putInto(&result, &info, (zmapping) {arr.elems[i], arr.elems[i + 1]}); } return result; }
// Rebuilds the range map from scratch from the given chunk map.
// Validation runs only in debug builds (DEV macro).
void ChunkRangeManager::reloadAll(const ChunkMap& chunks)
{
    _ranges.clear();
    _insertRange(chunks.begin(), chunks.end());
    DEV assertValid();
}