//--------------------------------------------------------------------------------------------- void GLTexture::_createSurfaceList() { mSurfaceList.clear(); // For all faces and mipmaps, store surfaces as HardwarePixelBufferSharedPtr bool wantGeneratedMips = (mUsage & TU_AUTOMIPMAP)!=0; // Do mipmapping in software? (uses GLU) For some cards, this is still needed. Of course, // only when mipmap generation is desired. bool doSoftware = wantGeneratedMips && !mMipmapsHardwareGenerated && getNumMipmaps(); for(size_t face=0; face<getNumFaces(); face++) { for(size_t mip=0; mip<=getNumMipmaps(); mip++) { GLHardwarePixelBuffer *buf = new GLTextureBuffer(mName, getGLTextureTarget(), mTextureID, face, mip, static_cast<HardwareBuffer::Usage>(mUsage), doSoftware && mip==0, mHwGamma, mFSAA); mSurfaceList.push_back(HardwarePixelBufferSharedPtr(buf)); /// Check for error if(buf->getWidth()==0 || buf->getHeight()==0 || buf->getDepth()==0) { OGRE_EXCEPT( Exception::ERR_RENDERINGAPI_ERROR, "Zero sized texture surface on texture "+getName()+ " face "+StringConverter::toString(face)+ " mipmap "+StringConverter::toString(mip)+ ". Probably, the GL driver refused to create the texture.", "GLTexture::_createSurfaceList"); } } } }
void GLTexture::loadImpl() { if( mUsage & TU_RENDERTARGET ) { createRenderTexture(); return; } // Now the only copy is on the stack and will be cleaned in case of // exceptions being thrown from _loadImages LoadedImages loadedImages = mLoadedImages; mLoadedImages.setNull(); // Call internal _loadImages, not loadImage since that's external and // will determine load status etc again ConstImagePtrList imagePtrs; for (size_t i=0 ; i<loadedImages->size() ; ++i) { imagePtrs.push_back(&(*loadedImages)[i]); } _loadImages(imagePtrs); // Generate mipmaps after all texture levels have been loaded // This is required for compressed formats such as DXT // If we can do automip generation and the user desires this, do so if((mUsage & TU_AUTOMIPMAP) && mNumRequestedMipmaps && mMipmapsHardwareGenerated) { glGenerateMipmapEXT(getGLTextureTarget()); } }
/// Copy the channels that were NOT processed (per processChannels) from
/// originalImage back into this image, restricted to roi. Dispatches to a GL
/// implementation for GL-backed storage, otherwise to a bit-depth-templated
/// CPU implementation. Premultiplication states of both images control how
/// channel values are combined unless ignorePremult is set.
void
Image::copyUnProcessedChannels(const RectI& roi,
                               const ImagePremultiplicationEnum outputPremult,
                               const ImagePremultiplicationEnum originalImagePremult,
                               const std::bitset<4> processChannels,
                               const ImagePtr& originalImage,
                               bool ignorePremult,
                               const OSGLContextPtr& glContext)
{
    int numComp = getComponents().getNumComponents();

    if (numComp == 0) {
        return;
    }
    // Early out: if every channel this image actually has was processed,
    // there is nothing to copy back.
    if ( (numComp == 1) && processChannels[3] ) { // 1 component is alpha
        return;
    } else if ( (numComp == 2) && processChannels[0] && processChannels[1] ) {
        return;
    } else if ( (numComp == 3) && processChannels[0] && processChannels[1] && processChannels[2] ) {
        return;
    } else if ( (numComp == 4) && processChannels[0] && processChannels[1] && processChannels[2] && processChannels[3] ) {
        return;
    }

    // Both images must be at the same pyramid level for a 1:1 pixel mapping.
    if ( originalImage && ( getMipMapLevel() != originalImage->getMipMapLevel() ) ) {
        qDebug() << "WARNING: attempting to call copyUnProcessedChannels on images with different mipMapLevel";
        return;
    }

    QWriteLocker k(&_entryLock);
    assert( !originalImage || getBitDepth() == originalImage->getBitDepth() );

    // Clamp the roi to this image's bounds for the GL path.
    RectI srcRoi;
    roi.intersect(_bounds, &srcRoi);

    if (getStorageMode() == eStorageModeGLTex) {
        assert(glContext);
        // NOTE(review): originalImage may be null here (the checks above allow
        // it), yet it is dereferenced unconditionally below — confirm callers
        // always pass a valid originalImage on the OpenGL path.
        if (glContext->isGPUContext()) {
            copyUnProcessedChannelsGL<GL_GPU>(roi, outputPremult, originalImagePremult, processChannels, originalImage, ignorePremult, glContext, _bounds, srcRoi, getGLTextureTarget(), getGLTextureID(), originalImage->getGLTextureID());
        } else {
            copyUnProcessedChannelsGL<GL_CPU>(roi, outputPremult, originalImagePremult, processChannels, originalImage, ignorePremult, glContext, _bounds, srcRoi, getGLTextureTarget(), getGLTextureID(), originalImage->getGLTextureID());
        }
        return;
    }

    bool premult = (outputPremult == eImagePremultiplicationPremultiplied);
    bool originalPremult = (originalImagePremult == eImagePremultiplicationPremultiplied);

    // CPU path: dispatch on this image's bit depth. The template max value
    // (255 / 65535 / 1) is the depth's full-scale value.
    // NOTE(review): this path receives the unclamped roi while the GL path
    // uses srcRoi — presumably the depth helper clamps internally; verify.
    switch ( getBitDepth() ) {
    case eImageBitDepthByte:
        copyUnProcessedChannelsForDepth<unsigned char, 255>(premult, roi, processChannels, originalImage, originalPremult, ignorePremult);
        break;
    case eImageBitDepthShort:
        copyUnProcessedChannelsForDepth<unsigned short, 65535>(premult, roi, processChannels, originalImage, originalPremult, ignorePremult);
        break;
    case eImageBitDepthFloat:
        copyUnProcessedChannelsForDepth<float, 1>(premult, roi, processChannels, originalImage, originalPremult, ignorePremult);
        break;
    default:
        // Unsupported bit depth: silently do nothing.
        return;
    }
} // copyUnProcessedChannels
//* Creation / loading methods ********************************************
/// Create the GL texture object and pre-allocate storage for every mip level
/// of every face (via glTexImageXD / glCompressedTexImageXD with null/zeroed
/// data), so later uploads can use glTexSubImageXD. Also decides between
/// hardware and software mipmap generation and builds the surface list.
void GLTexture::createInternalResourcesImpl(void)
{
    // 3D textures require GL 1.2.
    if (!GLEW_VERSION_1_2 && mTextureType == TEX_TYPE_3D)
        OGRE_EXCEPT(Exception::ERR_NOT_IMPLEMENTED,
            "3D Textures not supported before OpenGL 1.2",
            "GLTexture::createInternalResourcesImpl");

    // Convert to nearest power-of-two size if required
    mWidth = GLPixelUtil::optionalPO2(mWidth);
    mHeight = GLPixelUtil::optionalPO2(mHeight);
    mDepth = GLPixelUtil::optionalPO2(mDepth);

    // Adjust format if required
    mFormat = TextureManager::getSingleton().getNativeFormat(mTextureType, mFormat, mUsage);

    // Check requested number of mipmaps: clamp to what the dimensions allow.
    size_t maxMips = GLPixelUtil::getMaxMipmaps(mWidth, mHeight, mDepth, mFormat);
    mNumMipmaps = mNumRequestedMipmaps;
    if(mNumMipmaps>maxMips)
        mNumMipmaps = maxMips;

    // Generate texture name
    glGenTextures( 1, &mTextureID );

    // Set texture type
    glBindTexture( getGLTextureTarget(), mTextureID );

    // This needs to be set otherwise the texture doesn't get rendered
    if (GLEW_VERSION_1_2)
        glTexParameteri( getGLTextureTarget(), GL_TEXTURE_MAX_LEVEL, mNumMipmaps );

    // Set some misc default parameters so NVidia won't complain, these can of course be changed later
    glTexParameteri(getGLTextureTarget(), GL_TEXTURE_MIN_FILTER, GL_NEAREST);
    glTexParameteri(getGLTextureTarget(), GL_TEXTURE_MAG_FILTER, GL_NEAREST);
    if (GLEW_VERSION_1_2)
    {
        glTexParameteri(getGLTextureTarget(), GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
        glTexParameteri(getGLTextureTarget(), GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    }

    // If we can do automip generation and the user desires this, do so
    mMipmapsHardwareGenerated = Root::getSingleton().getRenderSystem()->getCapabilities()->hasCapability(RSC_AUTOMIPMAP);
    // NVIDIA 175.16 drivers break hardware mip generation for non-compressed
    // textures - disable until fixed
    // Leave hardware gen on compressed textures since that's the only way we
    // can realistically do it since GLU doesn't support DXT
    // However DON'T do this on Apple, their drivers aren't subject to this
    // problem yet and in fact software generation appears to cause a crash
    // in some cases which I've yet to track down
#if OGRE_PLATFORM != OGRE_PLATFORM_APPLE
    if (Root::getSingleton().getRenderSystem()->getCapabilities()->getVendor() == GPU_NVIDIA
        && !PixelUtil::isCompressed(mFormat))
    {
        mMipmapsHardwareGenerated = false;
    }
#endif
    if((mUsage & TU_AUTOMIPMAP) && mNumRequestedMipmaps && mMipmapsHardwareGenerated)
    {
        glTexParameteri( getGLTextureTarget(), GL_GENERATE_MIPMAP, GL_TRUE );
    }

    // Allocate internal buffer so that glTexSubImageXD can be used
    // Internal format
    GLenum format = GLPixelUtil::getClosestGLInternalFormat(mFormat, mHwGamma);
    size_t width = mWidth;
    size_t height = mHeight;
    size_t depth = mDepth;

    if(PixelUtil::isCompressed(mFormat))
    {
        // Compressed formats
        size_t size = PixelUtil::getMemorySize(mWidth, mHeight, mDepth, mFormat);
        // Provide temporary buffer filled with zeroes as glCompressedTexImageXD does not
        // accept a 0 pointer like normal glTexImageXD
        // Run through this process for every mipmap to pregenerate mipmap piramid
        uint8 *tmpdata = new uint8[size];
        memset(tmpdata, 0, size);

        for(size_t mip=0; mip<=mNumMipmaps; mip++)
        {
            // Recompute the compressed size for this mip's dimensions.
            size = PixelUtil::getMemorySize(width, height, depth, mFormat);
            switch(mTextureType)
            {
                case TEX_TYPE_1D:
                    glCompressedTexImage1DARB(GL_TEXTURE_1D, mip, format, width, 0, size, tmpdata);
                    break;
                case TEX_TYPE_2D:
                    glCompressedTexImage2DARB(GL_TEXTURE_2D, mip, format, width, height, 0, size, tmpdata);
                    break;
                case TEX_TYPE_3D:
                    glCompressedTexImage3DARB(GL_TEXTURE_3D, mip, format, width, height, depth, 0, size, tmpdata);
                    break;
                case TEX_TYPE_CUBE_MAP:
                    // Cube maps allocate one 2D image per face.
                    for(int face=0; face<6; face++) {
                        glCompressedTexImage2DARB(GL_TEXTURE_CUBE_MAP_POSITIVE_X + face, mip, format, width, height, 0, size, tmpdata);
                    }
                    break;
            };
            // Halve each dimension for the next mip, never going below 1.
            if(width>1) width = width/2;
            if(height>1) height = height/2;
            if(depth>1) depth = depth/2;
        }
        delete [] tmpdata;
    }
    else
    {
        // Run through this process to pregenerate mipmap piramid
        for(size_t mip=0; mip<=mNumMipmaps; mip++)
        {
            // Normal formats: a null data pointer allocates storage only.
            switch(mTextureType)
            {
                case TEX_TYPE_1D:
                    glTexImage1D(GL_TEXTURE_1D, mip, format, width, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0);
                    break;
                case TEX_TYPE_2D:
                    glTexImage2D(GL_TEXTURE_2D, mip, format, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0);
                    break;
                case TEX_TYPE_3D:
                    glTexImage3D(GL_TEXTURE_3D, mip, format, width, height, depth, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0);
                    break;
                case TEX_TYPE_CUBE_MAP:
                    for(int face=0; face<6; face++) {
                        glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_X + face, mip, format, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0);
                    }
                    break;
            };
            // Halve each dimension for the next mip, never going below 1.
            if(width>1) width = width/2;
            if(height>1) height = height/2;
            if(depth>1) depth = depth/2;
        }
    }
    _createSurfaceList();
    // Get final internal format: the driver may have substituted a format.
    mFormat = getBuffer(0,0)->getFormat();
}
//* Creation / loading methods ********************************************
/// Create the GL texture object and pre-allocate storage for every mip level
/// (via glTexImageXD / glCompressedTexImageXD with null/zeroed data) so later
/// uploads can use glTexSubImageXD. This variant routes GL state changes
/// through the state cache manager and supports 2D array textures.
void GLTexture::createInternalResourcesImpl(void)
{
    // 3D textures require GL 1.2.
    if (!GLEW_VERSION_1_2 && mTextureType == TEX_TYPE_3D)
        OGRE_EXCEPT(Exception::ERR_NOT_IMPLEMENTED,
            "3D Textures not supported before OpenGL 1.2",
            "GLTexture::createInternalResourcesImpl");

    // 2D array textures require GL 2.0.
    if (!GLEW_VERSION_2_0 && mTextureType == TEX_TYPE_2D_ARRAY)
        OGRE_EXCEPT(Exception::ERR_NOT_IMPLEMENTED,
            "2D texture arrays not supported before OpenGL 2.0",
            "GLTexture::createInternalResourcesImpl");

    // Convert to nearest power-of-two size if required
    mWidth = GLPixelUtil::optionalPO2(mWidth);
    mHeight = GLPixelUtil::optionalPO2(mHeight);
    mDepth = GLPixelUtil::optionalPO2(mDepth);

    // Adjust format if required
    mFormat = TextureManager::getSingleton().getNativeFormat(mTextureType, mFormat, mUsage);

    // Check requested number of mipmaps: clamp to what the dimensions allow.
    size_t maxMips = GLPixelUtil::getMaxMipmaps(mWidth, mHeight, mDepth, mFormat);
    mNumMipmaps = mNumRequestedMipmaps;
    if(mNumMipmaps>maxMips)
        mNumMipmaps = maxMips;

    // Check if we can do HW mipmap generation
    mMipmapsHardwareGenerated = Root::getSingleton().getRenderSystem()->getCapabilities()->hasCapability(RSC_AUTOMIPMAP);

    // Generate texture name
    glGenTextures( 1, &mTextureID );

    // Set texture type (bound through the state cache to avoid redundant binds)
    mGLSupport.getStateCacheManager()->bindGLTexture( getGLTextureTarget(), mTextureID );

    // This needs to be set otherwise the texture doesn't get rendered
    if (GLEW_VERSION_1_2)
        mGLSupport.getStateCacheManager()->setTexParameteri(getGLTextureTarget(), GL_TEXTURE_MAX_LEVEL, mNumMipmaps);

    // Set some misc default parameters so NVidia won't complain, these can of course be changed later
    mGLSupport.getStateCacheManager()->setTexParameteri(getGLTextureTarget(), GL_TEXTURE_MIN_FILTER, GL_NEAREST);
    mGLSupport.getStateCacheManager()->setTexParameteri(getGLTextureTarget(), GL_TEXTURE_MAG_FILTER, GL_NEAREST);
    if (GLEW_VERSION_1_2)
    {
        mGLSupport.getStateCacheManager()->setTexParameteri(getGLTextureTarget(), GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
        mGLSupport.getStateCacheManager()->setTexParameteri(getGLTextureTarget(), GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    }

    // If we can do automip generation and the user desires this, enable it.
    if((mUsage & TU_AUTOMIPMAP) && mNumRequestedMipmaps && mMipmapsHardwareGenerated)
    {
        mGLSupport.getStateCacheManager()->setTexParameteri( getGLTextureTarget(), GL_GENERATE_MIPMAP, GL_TRUE );
    }

    // Allocate internal buffer so that glTexSubImageXD can be used
    // Internal format
    GLenum format = GLPixelUtil::getClosestGLInternalFormat(mFormat, mHwGamma);
    uint32 width = mWidth;
    uint32 height = mHeight;
    uint32 depth = mDepth;

    if(PixelUtil::isCompressed(mFormat))
    {
        // Compressed formats
        GLsizei size = static_cast<GLsizei>(PixelUtil::getMemorySize(mWidth, mHeight, mDepth, mFormat));

        // Provide temporary buffer filled with zeroes as glCompressedTexImageXD does not
        // accept a 0 pointer like normal glTexImageXD
        // Run through this process for every mipmap to pregenerate mipmap piramid
        uint8 *tmpdata = new uint8[size];
        memset(tmpdata, 0, size);
        for(uint8 mip=0; mip<=mNumMipmaps; mip++)
        {
            // Recompute the compressed size for this mip's dimensions.
            size = static_cast<GLsizei>(PixelUtil::getMemorySize(width, height, depth, mFormat));
            switch(mTextureType)
            {
                case TEX_TYPE_1D:
                    glCompressedTexImage1DARB(GL_TEXTURE_1D, mip, format, width, 0, size, tmpdata);
                    break;
                case TEX_TYPE_2D:
                    glCompressedTexImage2DARB(GL_TEXTURE_2D, mip, format, width, height, 0, size, tmpdata);
                    break;
                case TEX_TYPE_2D_ARRAY:    // arrays share the 3D upload path
                case TEX_TYPE_3D:
                    glCompressedTexImage3DARB(getGLTextureTarget(), mip, format, width, height, depth, 0, size, tmpdata);
                    break;
                case TEX_TYPE_CUBE_MAP:
                    // Cube maps allocate one 2D image per face.
                    for(int face=0; face<6; face++) {
                        glCompressedTexImage2DARB(GL_TEXTURE_CUBE_MAP_POSITIVE_X + face, mip, format, width, height, 0, size, tmpdata);
                    }
                    break;
                case TEX_TYPE_2D_RECT:
                    break;
            };
            // Halve each dimension for the next mip, never going below 1;
            // array textures keep their layer count (depth) across mips.
            if(width>1) width = width/2;
            if(height>1) height = height/2;
            if(depth>1 && mTextureType != TEX_TYPE_2D_ARRAY) depth = depth/2;
        }
        delete [] tmpdata;
    }
    else
    {
        // Run through this process to pregenerate mipmap pyramid
        for(uint8 mip=0; mip<=mNumMipmaps; mip++)
        {
            // Normal formats: a null data pointer allocates storage only.
            switch(mTextureType)
            {
                case TEX_TYPE_1D:
                    glTexImage1D(GL_TEXTURE_1D, mip, format, width, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0);
                    break;
                case TEX_TYPE_2D:
                    glTexImage2D(GL_TEXTURE_2D, mip, format, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0);
                    break;
                case TEX_TYPE_2D_ARRAY:    // arrays share the 3D upload path
                case TEX_TYPE_3D:
                    glTexImage3D(getGLTextureTarget(), mip, format, width, height, depth, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0);
                    break;
                case TEX_TYPE_CUBE_MAP:
                    for(int face=0; face<6; face++) {
                        glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_X + face, mip, format, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0);
                    }
                    break;
                case TEX_TYPE_2D_RECT:
                    break;
            };
            // Halve each dimension for the next mip, never going below 1;
            // array textures keep their layer count (depth) across mips.
            if(width>1) width = width/2;
            if(height>1) height = height/2;
            if(depth>1 && mTextureType != TEX_TYPE_2D_ARRAY) depth = depth/2;
        }
    }
    _createSurfaceList();
    // Get final internal format: the driver may have substituted a format.
    mFormat = getBuffer(0,0)->getFormat();
}
/// Blend this image with originalImg inside roi according to a mix factor and
/// an optional alpha mask. Dispatches to a shader-based GL path for GL-backed
/// storage, otherwise to a component-count-templated CPU implementation.
void
Image::applyMaskMix(const RectI& roi,
                    const Image* maskImg,
                    const Image* originalImg,
                    bool masked,
                    bool maskInvert,
                    float mix,
                    const OSGLContextPtr& glContext)
{
    ///!masked && mix == 1 has nothing to do
    if ( !masked && (mix == 1) ) {
        return;
    }

    // Write-lock this image; read-lock the two source images if present.
    QWriteLocker k(&_entryLock);
    boost::shared_ptr<QReadLocker> originalLock;
    boost::shared_ptr<QReadLocker> maskLock;
    if (originalImg) {
        originalLock.reset( new QReadLocker(&originalImg->_entryLock) );
    }
    if (maskImg) {
        maskLock.reset( new QReadLocker(&maskImg->_entryLock) );
    }
    // Clamp the roi to this image's bounds.
    RectI realRoI;
    roi.intersect(_bounds, &realRoI);

    assert( !originalImg || getBitDepth() == originalImg->getBitDepth() );
    assert( !masked || !maskImg || maskImg->getComponents() == ImageComponents::getAlphaComponents() );

    if (getStorageMode() == eStorageModeGLTex) {
        assert(glContext);
        // NOTE(review): originalImg is dereferenced below guarded only by this
        // assert — in release builds a null originalImg would crash on the GL
        // path; confirm callers always pass it.
        assert(originalImg->getStorageMode() == eStorageModeGLTex);

        boost::shared_ptr<GLShader> shader = glContext->getOrCreateDefaultShader(OSGLContext::eDefaultGLShaderCopyUnprocessedChannels);
        assert(shader);
        GLuint fboID = glContext->getFBOId();

        // Render into this image's texture through the context FBO:
        // unit 0 = output, unit 1 = original, unit 2 = mask (0 if no mask).
        glBindFramebuffer(GL_FRAMEBUFFER, fboID);
        int target = getGLTextureTarget();
        glEnable(target);
        glActiveTexture(GL_TEXTURE0);
        glBindTexture( target, getGLTextureID() );
        glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, target, getGLTextureID(), 0 /*LoD*/);
        glCheckFramebufferError();
        glActiveTexture(GL_TEXTURE1);
        glBindTexture( target, originalImg->getGLTextureID() );
        glActiveTexture(GL_TEXTURE2);
        glBindTexture(target, maskImg ? maskImg->getGLTextureID() : 0);

        // Viewport/projection map the clamped RoI onto the texture region.
        glViewport( realRoI.x1 - _bounds.x1, realRoI.y1 - _bounds.y1, realRoI.width(), realRoI.height() );
        glMatrixMode(GL_PROJECTION);
        glLoadIdentity();
        glOrtho( realRoI.x1, realRoI.x2, realRoI.y1, realRoI.y2, -10.0 * (realRoI.y2 - realRoI.y1), 10.0 * (realRoI.y2 - realRoI.y1) );
        glMatrixMode(GL_MODELVIEW);
        glLoadIdentity();
        glCheckError();

        // Compute the texture coordinates to match the srcRoi
        // (normalized against this image's bounds).
        Point srcTexCoords[4], vertexCoords[4];
        vertexCoords[0].x = realRoI.x1;
        vertexCoords[0].y = realRoI.y1;
        srcTexCoords[0].x = (realRoI.x1 - _bounds.x1) / (double)_bounds.width();
        srcTexCoords[0].y = (realRoI.y1 - _bounds.y1) / (double)_bounds.height();

        vertexCoords[1].x = realRoI.x2;
        vertexCoords[1].y = realRoI.y1;
        srcTexCoords[1].x = (realRoI.x2 - _bounds.x1) / (double)_bounds.width();
        srcTexCoords[1].y = (realRoI.y1 - _bounds.y1) / (double)_bounds.height();

        vertexCoords[2].x = realRoI.x2;
        vertexCoords[2].y = realRoI.y2;
        srcTexCoords[2].x = (realRoI.x2 - _bounds.x1) / (double)_bounds.width();
        srcTexCoords[2].y = (realRoI.y2 - _bounds.y1) / (double)_bounds.height();

        vertexCoords[3].x = realRoI.x1;
        vertexCoords[3].y = realRoI.y2;
        srcTexCoords[3].x = (realRoI.x1 - _bounds.x1) / (double)_bounds.width();
        srcTexCoords[3].y = (realRoI.y2 - _bounds.y1) / (double)_bounds.height();

        // Bind the shader, wire the sampler uniforms to the units above,
        // then draw one textured quad covering the RoI.
        shader->bind();
        shader->setUniform("originalImageTex", 1);
        shader->setUniform("maskImageTex", 2);
        shader->setUniform("outputImageTex", 0);
        shader->setUniform("mixValue", mix);
        shader->setUniform("maskEnabled", maskImg ? 1 : 0);
        glBegin(GL_POLYGON);
        for (int i = 0; i < 4; ++i) {
            glTexCoord2d(srcTexCoords[i].x, srcTexCoords[i].y);
            glVertex2d(vertexCoords[i].x, vertexCoords[i].y);
        }
        glEnd();
        shader->unbind();

        // Unbind all three texture units (active unit is still GL_TEXTURE2
        // from the mask bind above), ending with unit 0 active.
        glBindTexture(target, 0);
        glActiveTexture(GL_TEXTURE1);
        glBindTexture(target, 0);
        glActiveTexture(GL_TEXTURE0);
        glBindTexture(target, 0);
        glCheckError();
        return;
    }

    // CPU path: dispatch on the original image's component count.
    int srcNComps = originalImg ? (int)originalImg->getComponentsCount() : 0;
    //assert(0 < srcNComps && srcNComps <= 4);
    switch (srcNComps) {
    //case 0:
    //    applyMaskMixForSrcComponents<0>(realRoI, maskImg, originalImg, masked, maskInvert, mix);
    //    break;
    case 1:
        applyMaskMixForSrcComponents<1>(realRoI, maskImg, originalImg, masked, maskInvert, mix);
        break;
    case 2:
        applyMaskMixForSrcComponents<2>(realRoI, maskImg, originalImg, masked, maskInvert, mix);
        break;
    case 3:
        applyMaskMixForSrcComponents<3>(realRoI, maskImg, originalImg, masked, maskInvert, mix);
        break;
    case 4:
        applyMaskMixForSrcComponents<4>(realRoI, maskImg, originalImg, masked, maskInvert, mix);
        break;
    default:
        // Unsupported component count (including null originalImg): no-op.
        break;
    }
} // applyMaskMix
//* Creation / loading methods ********************************************
/// Create the GL texture object and pre-allocate storage for every mip level
/// (via glTexImageXD / glCompressedTexImageXD with null/zeroed data) so later
/// uploads can use glTexSubImageXD. This variant assumes GL 1.2+ features
/// (no GLEW version guards) and takes no hardware-gamma argument when
/// choosing the internal format.
void GLTexture::createInternalResourcesImpl(void)
{
    // Convert to nearest power-of-two size if required
    mWidth = GLPixelUtil::optionalPO2(mWidth);
    mHeight = GLPixelUtil::optionalPO2(mHeight);
    mDepth = GLPixelUtil::optionalPO2(mDepth);

    // Adjust format if required
    mFormat = TextureManager::getSingleton().getNativeFormat(mTextureType, mFormat, mUsage);

    // Check requested number of mipmaps: clamp to what the dimensions allow.
    size_t maxMips = GLPixelUtil::getMaxMipmaps(mWidth, mHeight, mDepth, mFormat);
    mNumMipmaps = mNumRequestedMipmaps;
    if(mNumMipmaps>maxMips)
        mNumMipmaps = maxMips;

    // Generate texture name
    glGenTextures( 1, &mTextureID );

    // Set texture type
    glBindTexture( getGLTextureTarget(), mTextureID );

    // This needs to be set otherwise the texture doesn't get rendered
    glTexParameteri( getGLTextureTarget(), GL_TEXTURE_MAX_LEVEL, mNumMipmaps );

    // Set some misc default parameters so NVidia won't complain, these can of course be changed later
    glTexParameteri(getGLTextureTarget(), GL_TEXTURE_MIN_FILTER, GL_NEAREST);
    glTexParameteri(getGLTextureTarget(), GL_TEXTURE_MAG_FILTER, GL_NEAREST);
    glTexParameteri(getGLTextureTarget(), GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    glTexParameteri(getGLTextureTarget(), GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);

    // If we can do automip generation and the user desires this, do so
    mMipmapsHardwareGenerated = Root::getSingleton().getRenderSystem()->getCapabilities()->hasCapability(RSC_AUTOMIPMAP);
    if((mUsage & TU_AUTOMIPMAP) && mNumRequestedMipmaps && mMipmapsHardwareGenerated)
    {
        glTexParameteri( getGLTextureTarget(), GL_GENERATE_MIPMAP, GL_TRUE );
    }

    // Allocate internal buffer so that glTexSubImageXD can be used
    // Internal format
    GLenum format = GLPixelUtil::getClosestGLInternalFormat(mFormat);
    size_t width = mWidth;
    size_t height = mHeight;
    size_t depth = mDepth;

    if(PixelUtil::isCompressed(mFormat))
    {
        // Compressed formats
        size_t size = PixelUtil::getMemorySize(mWidth, mHeight, mDepth, mFormat);
        // Provide temporary buffer filled with zeroes as glCompressedTexImageXD does not
        // accept a 0 pointer like normal glTexImageXD
        // Run through this process for every mipmap to pregenerate mipmap piramid
        uint8 *tmpdata = new uint8[size];
        memset(tmpdata, 0, size);

        for(size_t mip=0; mip<=mNumMipmaps; mip++)
        {
            // Recompute the compressed size for this mip's dimensions.
            size = PixelUtil::getMemorySize(width, height, depth, mFormat);
            switch(mTextureType)
            {
                case TEX_TYPE_1D:
                    glCompressedTexImage1DARB(GL_TEXTURE_1D, mip, format, width, 0, size, tmpdata);
                    break;
                case TEX_TYPE_2D:
                    glCompressedTexImage2DARB(GL_TEXTURE_2D, mip, format, width, height, 0, size, tmpdata);
                    break;
                case TEX_TYPE_3D:
                    glCompressedTexImage3DARB(GL_TEXTURE_3D, mip, format, width, height, depth, 0, size, tmpdata);
                    break;
                case TEX_TYPE_CUBE_MAP:
                    // Cube maps allocate one 2D image per face.
                    for(int face=0; face<6; face++) {
                        glCompressedTexImage2DARB(GL_TEXTURE_CUBE_MAP_POSITIVE_X + face, mip, format, width, height, 0, size, tmpdata);
                    }
                    break;
            };
            // Halve each dimension for the next mip, never going below 1.
            if(width>1) width = width/2;
            if(height>1) height = height/2;
            if(depth>1) depth = depth/2;
        }
        delete [] tmpdata;
    }
    else
    {
        // Run through this process to pregenerate mipmap piramid
        for(size_t mip=0; mip<=mNumMipmaps; mip++)
        {
            // Normal formats: a null data pointer allocates storage only.
            switch(mTextureType)
            {
                case TEX_TYPE_1D:
                    glTexImage1D(GL_TEXTURE_1D, mip, format, width, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0);
                    break;
                case TEX_TYPE_2D:
                    glTexImage2D(GL_TEXTURE_2D, mip, format, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0);
                    break;
                case TEX_TYPE_3D:
                    glTexImage3D(GL_TEXTURE_3D, mip, format, width, height, depth, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0);
                    break;
                case TEX_TYPE_CUBE_MAP:
                    for(int face=0; face<6; face++) {
                        glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_X + face, mip, format, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0);
                    }
                    break;
            };
            // Halve each dimension for the next mip, never going below 1.
            if(width>1) width = width/2;
            if(height>1) height = height/2;
            if(depth>1) depth = depth/2;
        }
    }
    _createSurfaceList();
    // Get final internal format: the driver may have substituted a format.
    mFormat = getBuffer(0,0)->getFormat();
}