//--------------------------------------------------------------------- Codec::DecodeResult PVRTCCodec::decodeV2(DataStreamPtr& stream) const { PVRTCTexHeaderV2 header; uint32 flags = 0, formatFlags = 0; size_t numFaces = 1; // Assume one face until we know otherwise ImageData *imgData = OGRE_NEW ImageData(); MemoryDataStreamPtr output; // Read the PVRTC header stream->read(&header, sizeof(PVRTCTexHeaderV2)); // Get format flags flags = header.flags; flipEndian(reinterpret_cast<void*>(flags), sizeof(uint32)); formatFlags = flags & PVR_TEXTURE_FLAG_TYPE_MASK; uint32 bitmaskAlpha = header.bitmaskAlpha; flipEndian(reinterpret_cast<void*>(bitmaskAlpha), sizeof(uint32)); if (formatFlags == kPVRTextureFlagTypePVRTC_4 || formatFlags == kPVRTextureFlagTypePVRTC_2) { if (formatFlags == kPVRTextureFlagTypePVRTC_4) { imgData->format = bitmaskAlpha ? PF_PVRTC_RGBA4 : PF_PVRTC_RGB4; } else if (formatFlags == kPVRTextureFlagTypePVRTC_2) { imgData->format = bitmaskAlpha ? PF_PVRTC_RGBA2 : PF_PVRTC_RGB2; } imgData->depth = 1; imgData->width = header.width; imgData->height = header.height; imgData->num_mipmaps = static_cast<ushort>(header.numMipmaps); // PVRTC is a compressed format imgData->flags |= IF_COMPRESSED; } // Calculate total size from number of mipmaps, faces and size imgData->size = Image::calculateSize(imgData->num_mipmaps, numFaces, imgData->width, imgData->height, imgData->depth, imgData->format); // Bind output buffer output.bind(OGRE_NEW MemoryDataStream(imgData->size)); // Now deal with the data void *destPtr = output->getPtr(); stream->read(destPtr, imgData->size); destPtr = static_cast<void*>(static_cast<uchar*>(destPtr)); DecodeResult ret; ret.first = output; ret.second = CodecDataPtr(imgData); return ret; }
//--------------------------------------------------------------------- Codec::DecodeResult STBIImageCodec::decode(DataStreamPtr& input) const { // Buffer stream into memory (TODO: override IO functions instead?) MemoryDataStream memStream(input, true); int width, height, components; stbi_uc* pixelData = stbi_load_from_memory(memStream.getPtr(), static_cast<int>(memStream.size()), &width, &height, &components, 0); if (!pixelData) { OGRE_EXCEPT(Exception::ERR_INTERNAL_ERROR, "Error decoding image: " + String(stbi_failure_reason()), "STBIImageCodec::decode"); } SharedPtr<ImageData> imgData(OGRE_NEW ImageData()); MemoryDataStreamPtr output; imgData->depth = 1; // only 2D formats handled by this codec imgData->width = width; imgData->height = height; imgData->num_mipmaps = 0; // no mipmaps in non-DDS imgData->flags = 0; switch( components ) { case 1: imgData->format = PF_BYTE_L; break; case 2: imgData->format = PF_BYTE_LA; break; case 3: imgData->format = PF_BYTE_RGB; break; case 4: imgData->format = PF_BYTE_RGBA; break; default: stbi_image_free(pixelData); OGRE_EXCEPT(Exception::ERR_ITEM_NOT_FOUND, "Unknown or unsupported image format", "STBIImageCodec::decode"); break; } size_t dstPitch = imgData->width * PixelUtil::getNumElemBytes(imgData->format); imgData->size = dstPitch * imgData->height; output.bind(OGRE_NEW MemoryDataStream(pixelData, imgData->size, true)); DecodeResult ret; ret.first = output; ret.second = imgData; return ret; }
//--------------------------------------------------------------------- void STBIImageCodec::encodeToFile(MemoryDataStreamPtr& input, const String& outFileName, Codec::CodecDataPtr& pData) const { MemoryDataStreamPtr data = encode(input, pData).staticCast<MemoryDataStream>(); std::ofstream f(outFileName.c_str(), std::ios::out | std::ios::binary); if(!f.is_open()) { OGRE_EXCEPT(Exception::ERR_INTERNAL_ERROR, "could not open file", "STBIImageCodec::encodeToFile" ) ; } f.write((char*)data->getPtr(), data->size()); }
//--------------------------------------------------------------------- void ILImageCodec::codeToFile(MemoryDataStreamPtr& input, const String& outFileName, Codec::CodecDataPtr& pData) const { ILuint ImageName; ilGenImages( 1, &ImageName ); ilBindImage( ImageName ); ImageData* pImgData = static_cast< ImageData * >( pData.getPointer() ); PixelBox src(pImgData->width, pImgData->height, pImgData->depth, pImgData->format, input->getPtr()); // Convert image from OGRE to current IL image ILUtil::fromOgre(src); iluFlipImage(); // Implicitly pick DevIL codec ilSaveImage(const_cast< char * >( outFileName.c_str() ) ); // Check if everything was ok ILenum PossibleError = ilGetError() ; if( PossibleError != IL_NO_ERROR ) { ilDeleteImages(1, &ImageName); OGRE_EXCEPT( Exception::ERR_NOT_IMPLEMENTED, "IL Error, could not save file: " + outFileName, iluErrorString(PossibleError) ) ; } ilDeleteImages(1, &ImageName); }
//--------------------------------------------------------------------- DataStreamPtr STBIImageCodec::encode(MemoryDataStreamPtr& input, Codec::CodecDataPtr& pData) const { if(mType != "png") { OGRE_EXCEPT(Exception::ERR_NOT_IMPLEMENTED, "currently only encoding to PNG supported", "STBIImageCodec::encode" ) ; } ImageData* pImgData = static_cast<ImageData*>(pData.getPointer()); int channels = PixelUtil::getComponentCount(pImgData->format); int len; uchar *data = stbi_write_png_to_mem(input->getPtr(), pImgData->width*channels, pImgData->width, pImgData->height, channels, &len); if (!data) { OGRE_EXCEPT(Exception::ERR_INTERNAL_ERROR, "Error encoding image: " + String(stbi_failure_reason()), "STBIImageCodec::encode"); } return DataStreamPtr(new MemoryDataStream(data, len, true)); }
//--------------------------------------------------------------------------
/// Upload one or more Images into this texture's hardware buffers.
/// Either a single Image providing all faces (e.g. a cubemap DDS) or one
/// Image per face may be supplied. Sets size/format members, creates the
/// internal resources, then blits every face/mip, applying gamma if needed.
void Texture::_loadImages( const ConstImagePtrList& images )
{
    if(images.size() < 1)
        OGRE_EXCEPT(Exception::ERR_INVALIDPARAMS, "Cannot load empty vector of images", "Texture::loadImages");

    // Set desired texture size and properties from images[0]
    mSrcWidth = mWidth = images[0]->getWidth();
    mSrcHeight = mHeight = images[0]->getHeight();
    mSrcDepth = mDepth = images[0]->getDepth();

    // Get source image format and adjust if required
    mSrcFormat = images[0]->getFormat();
    if (mTreatLuminanceAsAlpha && mSrcFormat == PF_L8)
    {
        mSrcFormat = PF_A8;
    }

    if (mDesiredFormat != PF_UNKNOWN)
    {
        // If have desired format, use it
        mFormat = mDesiredFormat;
    }
    else
    {
        // Get the format according with desired bit depth
        mFormat = PixelUtil::getFormatForBitDepths(mSrcFormat, mDesiredIntegerBitDepth, mDesiredFloatBitDepth);
    }

    // The custom mipmaps in the image have priority over everything
    size_t imageMips = images[0]->getNumMipmaps();

    if(imageMips > 0)
    {
        mNumMipmaps = mNumRequestedMipmaps = images[0]->getNumMipmaps();
        // Disable flag for auto mip generation
        mUsage &= ~TU_AUTOMIPMAP;
    }

    // Create the texture
    createInternalResources();

    // Check if we're loading one image with multiple faces
    // or a vector of images representing the faces
    size_t faces;
    bool multiImage; // Load from multiple images?
    if(images.size() > 1)
    {
        faces = images.size();
        multiImage = true;
    }
    else
    {
        faces = images[0]->getNumFaces();
        multiImage = false;
    }

    // Check wether number of faces in images exceeds number of faces
    // in this texture. If so, clamp it.
    if(faces > getNumFaces())
        faces = getNumFaces();

    if (TextureManager::getSingleton().getVerbose()) {
        // Say what we're doing
        StringUtil::StrStreamType str;
        str << "Texture: " << mName << ": Loading " << faces << " faces"
            << "(" << PixelUtil::getFormatName(images[0]->getFormat()) << "," <<
            images[0]->getWidth() << "x" << images[0]->getHeight() << "x" << images[0]->getDepth() <<
            ") with ";
        if (!(mMipmapsHardwareGenerated && mNumMipmaps == 0))
            str << mNumMipmaps;
        if(mUsage & TU_AUTOMIPMAP)
        {
            if (mMipmapsHardwareGenerated)
                str << " hardware";
            str << " generated mipmaps";
        }
        else
        {
            str << " custom mipmaps";
        }
        if(multiImage)
            str << " from multiple Images.";
        else
            str << " from Image.";
        // Scoped
        {
            // Print data about first destination surface
            HardwarePixelBufferSharedPtr buf = getBuffer(0, 0);
            str << " Internal format is " << PixelUtil::getFormatName(buf->getFormat()) <<
            "," << buf->getWidth() << "x" << buf->getHeight() << "x" << buf->getDepth() << ".";
        }
        LogManager::getSingleton().logMessage(
                LML_NORMAL, str.str());
    }

    // Main loading loop
    // imageMips == 0 if the image has no custom mipmaps, otherwise contains the number of custom mips
    for(size_t mip = 0; mip<=imageMips; ++mip)
    {
        for(size_t i = 0; i < faces; ++i)
        {
            PixelBox src;
            if(multiImage)
            {
                // Load from multiple images
                src = images[i]->getPixelBox(0, mip);
            }
            else
            {
                // Load from faces of images[0]
                src = images[0]->getPixelBox(i, mip);
            }

            // Sets to treated format in case is difference
            src.format = mSrcFormat;

            if(mGamma != 1.0f) {
                // Apply gamma correction
                // Do not overwrite original image but do gamma correction in temporary buffer
                MemoryDataStreamPtr buf; // for scoped deletion of conversion buffer
                buf.bind(OGRE_NEW MemoryDataStream(
                    PixelUtil::getMemorySize(
                        src.getWidth(), src.getHeight(), src.getDepth(), src.format)));

                PixelBox corrected = PixelBox(src.getWidth(), src.getHeight(), src.getDepth(), src.format, buf->getPtr());
                PixelUtil::bulkPixelConversion(src, corrected);

                Image::applyGamma(static_cast<uint8*>(corrected.data), mGamma, corrected.getConsecutiveSize(),
                    static_cast<uchar>(PixelUtil::getNumElemBits(src.format)));

                // Destination: entire texture. blitFromMemory does the scaling to
                // a power of two for us when needed
                getBuffer(i, mip)->blitFromMemory(corrected);
            }
            else
            {
                // Destination: entire texture. blitFromMemory does the scaling to
                // a power of two for us when needed
                getBuffer(i, mip)->blitFromMemory(src);
            }

        }
    }
    // Update size (the final size, not including temp space)
    mSize = getNumFaces() * PixelUtil::getMemorySize(mWidth, mHeight, mDepth, mFormat);
}
//--------------------------------------------------------------------- Codec::DecodeResult FreeImageCodec::decode(DataStreamPtr& input) const { // Set error handler FreeImage_SetOutputMessage(FreeImageLoadErrorHandler); // Buffer stream into memory (TODO: override IO functions instead?) MemoryDataStream memStream(input, true); FIMEMORY* fiMem = FreeImage_OpenMemory(memStream.getPtr(), static_cast<DWORD>(memStream.size())); FIBITMAP* fiBitmap = FreeImage_LoadFromMemory( (FREE_IMAGE_FORMAT)mFreeImageType, fiMem); if (!fiBitmap) { OGRE_EXCEPT(Exception::ERR_INTERNAL_ERROR, "Error decoding image", "FreeImageCodec::decode"); } ImageData* imgData = OGRE_NEW ImageData(); MemoryDataStreamPtr output; imgData->depth = 1; // only 2D formats handled by this codec imgData->width = FreeImage_GetWidth(fiBitmap); imgData->height = FreeImage_GetHeight(fiBitmap); imgData->num_mipmaps = 0; // no mipmaps in non-DDS imgData->flags = 0; // Must derive format first, this may perform conversions FREE_IMAGE_TYPE imageType = FreeImage_GetImageType(fiBitmap); FREE_IMAGE_COLOR_TYPE colourType = FreeImage_GetColorType(fiBitmap); unsigned bpp = FreeImage_GetBPP(fiBitmap); switch(imageType) { case FIT_UNKNOWN: case FIT_COMPLEX: case FIT_UINT32: case FIT_INT32: case FIT_DOUBLE: default: OGRE_EXCEPT(Exception::ERR_ITEM_NOT_FOUND, "Unknown or unsupported image format", "FreeImageCodec::decode"); break; case FIT_BITMAP: // Standard image type // Perform any colour conversions for greyscale if (colourType == FIC_MINISWHITE || colourType == FIC_MINISBLACK) { FIBITMAP* newBitmap = FreeImage_ConvertToGreyscale(fiBitmap); // free old bitmap and replace FreeImage_Unload(fiBitmap); fiBitmap = newBitmap; // get new formats bpp = FreeImage_GetBPP(fiBitmap); colourType = FreeImage_GetColorType(fiBitmap); } // Perform any colour conversions for RGB else if (bpp < 8 || colourType == FIC_PALETTE || colourType == FIC_CMYK) { FIBITMAP* newBitmap = NULL; if (FreeImage_IsTransparent(fiBitmap)) { // convert to 32 
bit to preserve the transparency // (the alpha byte will be 0 if pixel is transparent) newBitmap = FreeImage_ConvertTo32Bits(fiBitmap); } else { // no transparency - only 3 bytes are needed newBitmap = FreeImage_ConvertTo24Bits(fiBitmap); } // free old bitmap and replace FreeImage_Unload(fiBitmap); fiBitmap = newBitmap; // get new formats bpp = FreeImage_GetBPP(fiBitmap); colourType = FreeImage_GetColorType(fiBitmap); } // by this stage, 8-bit is greyscale, 16/24/32 bit are RGB[A] switch(bpp) { case 8: imgData->format = PF_L8; break; case 16: // Determine 555 or 565 from green mask // cannot be 16-bit greyscale since that's FIT_UINT16 if(FreeImage_GetGreenMask(fiBitmap) == FI16_565_GREEN_MASK) { imgData->format = PF_R5G6B5; } else { // FreeImage doesn't support 4444 format so must be 1555 imgData->format = PF_A1R5G5B5; } break; case 24: // FreeImage differs per platform // PF_BYTE_BGR[A] for little endian (== PF_ARGB native) // PF_BYTE_RGB[A] for big endian (== PF_RGBA native) #if FREEIMAGE_COLORORDER == FREEIMAGE_COLORORDER_RGB imgData->format = PF_BYTE_RGB; #else imgData->format = PF_BYTE_BGR; #endif break; case 32: #if FREEIMAGE_COLORORDER == FREEIMAGE_COLORORDER_RGB imgData->format = PF_BYTE_RGBA; #else imgData->format = PF_BYTE_BGRA; #endif break; }; break; case FIT_UINT16: case FIT_INT16: // 16-bit greyscale imgData->format = PF_L16; break; case FIT_FLOAT: // Single-component floating point data imgData->format = PF_FLOAT32_R; break; case FIT_RGB16: imgData->format = PF_SHORT_RGB; break; case FIT_RGBA16: imgData->format = PF_SHORT_RGBA; break; case FIT_RGBF: imgData->format = PF_FLOAT32_RGB; break; case FIT_RGBAF: imgData->format = PF_FLOAT32_RGBA; break; }; unsigned char* srcData = FreeImage_GetBits(fiBitmap); unsigned srcPitch = FreeImage_GetPitch(fiBitmap); // Final data - invert image and trim pitch at the same time size_t dstPitch = imgData->width * PixelUtil::getNumElemBytes(imgData->format); imgData->size = dstPitch * imgData->height; // Bind output 
buffer output.bind(OGRE_NEW MemoryDataStream(imgData->size)); uchar* pSrc; uchar* pDst = output->getPtr(); for (size_t y = 0; y < imgData->height; ++y) { pSrc = srcData + (imgData->height - y - 1) * srcPitch; memcpy(pDst, pSrc, dstPitch); pDst += dstPitch; } FreeImage_Unload(fiBitmap); FreeImage_CloseMemory(fiMem); DecodeResult ret; ret.first = output; ret.second = CodecDataPtr(imgData); return ret; }
//----------------------------------------------------------------------------- void D3D10HardwarePixelBuffer::blitFromMemory(const PixelBox &src, const Image::Box &dstBox) { bool isDds = false; switch(mFormat) { case PF_DXT1: case PF_DXT2: case PF_DXT3: case PF_DXT4: case PF_DXT5: isDds = true; break; default: break; } if (isDds && (dstBox.getWidth() % 4 != 0 || dstBox.getHeight() % 4 != 0 )) { return; } // for scoped deletion of conversion buffer MemoryDataStreamPtr buf; PixelBox converted = src; D3D10_BOX dstBoxDx10 = OgreImageBoxToDx10Box(dstBox); // convert to pixelbuffer's native format if necessary if (src.format != mFormat) { buf.bind(new MemoryDataStream( PixelUtil::getMemorySize(src.getWidth(), src.getHeight(), src.getDepth(), mFormat))); converted = PixelBox(src.getWidth(), src.getHeight(), src.getDepth(), mFormat, buf->getPtr()); PixelUtil::bulkPixelConversion(src, converted); } // In d3d10 the Row Pitch is defined as: "The size of one row of the source data" and not // the same as the OGRE row pitch - meaning that we need to multiple the OGRE row pitch // with the size in bytes of the element to get the d3d10 row pitch. 
UINT d3dRowPitch = static_cast<UINT>(converted.rowPitch) * static_cast<UINT>(PixelUtil::getNumElemBytes(mFormat)); switch(mParentTexture->getTextureType()) { case TEX_TYPE_1D: { mDevice->UpdateSubresource( mParentTexture->GetTex1D(), 0, &dstBoxDx10, converted.data, 0, 0 ); if (mDevice.isError()) { String errorDescription = mDevice.getErrorDescription(); OGRE_EXCEPT(Exception::ERR_RENDERINGAPI_ERROR, "D3D10 device cannot update 1d subresource\nError Description:" + errorDescription, "D3D10HardwarePixelBuffer::blitFromMemory"); } } break; case TEX_TYPE_CUBE_MAP: case TEX_TYPE_2D: { mDevice->UpdateSubresource( mParentTexture->GetTex2D(), static_cast<UINT>(mSubresourceIndex), &dstBoxDx10, converted.data, d3dRowPitch, mFace ); if (mDevice.isError()) { String errorDescription = mDevice.getErrorDescription(); OGRE_EXCEPT(Exception::ERR_RENDERINGAPI_ERROR, "D3D10 device cannot update 2d subresource\nError Description:" + errorDescription, "D3D10HardwarePixelBuffer::blitFromMemory"); } } break; case TEX_TYPE_3D: { mDevice->UpdateSubresource( mParentTexture->GetTex2D(), static_cast<UINT>(mSubresourceIndex), &dstBoxDx10, converted.data, d3dRowPitch, static_cast<UINT>(converted.slicePitch) ); if (mDevice.isError()) { String errorDescription = mDevice.getErrorDescription(); OGRE_EXCEPT(Exception::ERR_RENDERINGAPI_ERROR, "D3D10 device cannot update 3d subresource\nError Description:" + errorDescription, "D3D10HardwarePixelBuffer::blitFromMemory"); } } break; } if (!isDds) { _genMipmaps(); } }
//-----------------------------------------------------------------------------
/// Copy a system-memory PixelBox into this pixel buffer, converting to the
/// buffer's native format first if necessary. Dynamic buffers go through
/// lock/memcpy/unlock; default buffers go through UpdateSubresource.
/// DXT updates whose destination box is not 4x4-block aligned are skipped.
void D3D11HardwarePixelBuffer::blitFromMemory(const PixelBox &src, const Image::Box &dstBox)
{
    bool isDds = false;
    switch(mFormat)
    {
    case PF_DXT1:
    case PF_DXT2:
    case PF_DXT3:
    case PF_DXT4:
    case PF_DXT5:
        isDds = true;
        break;
    default:
        break;
    }

    // DXT blocks are 4x4 texels — refuse partial-block destination boxes
    if (isDds && (dstBox.getWidth() % 4 != 0 || dstBox.getHeight() % 4 != 0 ))
    {
        return;
    }

    // for scoped deletion of conversion buffer
    MemoryDataStreamPtr buf;
    PixelBox converted = src;
    D3D11_BOX dstBoxDx11 = OgreImageBoxToDx11Box(dstBox);
    dstBoxDx11.front = 0;
    dstBoxDx11.back = converted.getDepth();

    // convert to pixelbuffer's native format if necessary
    if (src.format != mFormat)
    {
        buf.bind(new MemoryDataStream(
            PixelUtil::getMemorySize(src.getWidth(), src.getHeight(), src.getDepth(), mFormat)));
        converted = PixelBox(src.getWidth(), src.getHeight(), src.getDepth(), mFormat, buf->getPtr());
        PixelUtil::bulkPixelConversion(src, converted);
    }

    if (mUsage & HBU_DYNAMIC)
    {
        size_t sizeinbytes;
        if (PixelUtil::isCompressed(converted.format))
        {
            // D3D wants the width of one row of cells in bytes
            if (converted.format == PF_DXT1)
            {
                // 64 bits (8 bytes) per 4x4 block
                sizeinbytes = std::max<size_t>(1, converted.getWidth() / 4) * std::max<size_t>(1, converted.getHeight() / 4) * 8;
            }
            else
            {
                // 128 bits (16 bytes) per 4x4 block
                sizeinbytes = std::max<size_t>(1, converted.getWidth() / 4) * std::max<size_t>(1, converted.getHeight() / 4) * 16;
            }
        }
        else
        {
            sizeinbytes = converted.getHeight() * converted.getWidth() * PixelUtil::getNumElemBytes(converted.format);
        }

        // NOTE(review): this memcpy assumes the locked region's pitch
        // matches the converted box's pitch exactly — confirm for
        // sub-box updates on dynamic buffers.
        const Ogre::PixelBox &locked = lock(dstBox, HBL_DISCARD);
        memcpy(locked.data, converted.data, sizeinbytes);
        unlock();
    }
    else
    {
        size_t rowWidth;
        if (PixelUtil::isCompressed(converted.format))
        {
            // D3D wants the width of one row of cells in bytes
            if (converted.format == PF_DXT1)
            {
                // 64 bits (8 bytes) per 4x4 block
                rowWidth = (converted.rowPitch / 4) * 8;
            }
            else
            {
                // 128 bits (16 bytes) per 4x4 block
                rowWidth = (converted.rowPitch / 4) * 16;
            }
        }
        else
        {
            rowWidth = converted.rowPitch * PixelUtil::getNumElemBytes(converted.format);
        }

        switch(mParentTexture->getTextureType())
        {
        case TEX_TYPE_1D:
            {
                D3D11RenderSystem* rsys = reinterpret_cast<D3D11RenderSystem*>(Root::getSingleton().getRenderSystem());
                if (rsys->_getFeatureLevel() >= D3D_FEATURE_LEVEL_10_0)
                {
                    mDevice.GetImmediateContext()->UpdateSubresource(
                        mParentTexture->GetTex1D(),
                        0,
                        &dstBoxDx11,
                        converted.data,
                        rowWidth,
                        0 );
                    if (mDevice.isError())
                    {
                        String errorDescription = mDevice.getErrorDescription();
                        OGRE_EXCEPT(Exception::ERR_RENDERINGAPI_ERROR,
                            "D3D11 device cannot update 1d subresource\nError Description:" + errorDescription,
                            "D3D11HardwarePixelBuffer::blitFromMemory");
                    }
                    break;
                    // For Feature levels that do not support 1D textures, revert to creating a 2D texture.
                }
            }
            // deliberate fallthrough to the 2D path when the feature level
            // does not support 1D textures (the break above only fires for
            // feature level >= 10.0)
        case TEX_TYPE_CUBE_MAP:
        case TEX_TYPE_2D:
            {
                mDevice.GetImmediateContext()->UpdateSubresource(
                    mParentTexture->GetTex2D(),
                    D3D11CalcSubresource(static_cast<UINT>(mSubresourceIndex), mFace, mParentTexture->getNumMipmaps()+1),
                    &dstBoxDx11,
                    converted.data,
                    rowWidth,
                    0 );
                if (mDevice.isError())
                {
                    String errorDescription = mDevice.getErrorDescription();
                    OGRE_EXCEPT(Exception::ERR_RENDERINGAPI_ERROR,
                        "D3D11 device cannot update 2d subresource\nError Description:" + errorDescription,
                        "D3D11HardwarePixelBuffer::blitFromMemory");
                }
            }
            break;
        case TEX_TYPE_2D_ARRAY:
            {
                // array slice index comes from the source box's front coordinate
                mDevice.GetImmediateContext()->UpdateSubresource(
                    mParentTexture->GetTex2D(),
                    D3D11CalcSubresource(static_cast<UINT>(mSubresourceIndex), src.front, mParentTexture->getNumMipmaps()+1),
                    &dstBoxDx11,
                    converted.data,
                    rowWidth,
                    0 );
                if (mDevice.isError())
                {
                    String errorDescription = mDevice.getErrorDescription();
                    OGRE_EXCEPT(Exception::ERR_RENDERINGAPI_ERROR,
                        "D3D11 device cannot update 2d array subresource\nError Description:" + errorDescription,
                        "D3D11HardwarePixelBuffer::blitFromMemory");
                }
            }
            break;
        case TEX_TYPE_3D:
            {
                // copied from dx9
                size_t sliceWidth;
                if (PixelUtil::isCompressed(converted.format))
                {
                    // D3D wants the width of one slice of cells in bytes
                    if (converted.format == PF_DXT1)
                    {
                        // 64 bits (8 bytes) per 4x4 block
                        sliceWidth = (converted.slicePitch / 16) * 8;
                    }
                    else
                    {
                        // 128 bits (16 bytes) per 4x4 block
                        sliceWidth = (converted.slicePitch / 16) * 16;
                    }
                }
                else
                {
                    sliceWidth = converted.slicePitch * PixelUtil::getNumElemBytes(converted.format);
                }

                mDevice.GetImmediateContext()->UpdateSubresource(
                    mParentTexture->GetTex3D(),
                    static_cast<UINT>(mSubresourceIndex),
                    &dstBoxDx11,
                    converted.data,
                    rowWidth,
                    sliceWidth
                    );
                if (mDevice.isError())
                {
                    String errorDescription = mDevice.getErrorDescription();
                    OGRE_EXCEPT(Exception::ERR_RENDERINGAPI_ERROR,
                        "D3D11 device cannot update 3d subresource\nError Description:" + errorDescription,
                        "D3D11HardwarePixelBuffer::blitFromMemory");
                }
            }
            break;
        }

        if (!isDds)
        {
            _genMipmaps();
        }
    }
}
//--------------------------------------------------------------------- bool ETCCodec::decodePKM(DataStreamPtr& stream, DecodeResult& result) const { PKMHeader header; // Read the ETC header stream->read(&header, sizeof(PKMHeader)); if (PKM_MAGIC != FOURCC(header.name[0], header.name[1], header.name[2], header.name[3]) ) // "PKM 10" return false; uint16 width = (header.iWidthMSB << 8) | header.iWidthLSB; uint16 height = (header.iHeightMSB << 8) | header.iHeightLSB; uint16 paddedWidth = (header.iPaddedWidthMSB << 8) | header.iPaddedWidthLSB; uint16 paddedHeight = (header.iPaddedHeightMSB << 8) | header.iPaddedHeightLSB; uint16 type = (header.iTextureTypeMSB << 8) | header.iTextureTypeLSB; ImageData *imgData = OGRE_NEW ImageData(); imgData->depth = 1; imgData->width = width; imgData->height = height; // File version 2.0 supports ETC2 in addition to ETC1 if(header.version[0] == '2' && header.version[1] == '0') { switch (type) { case 0: imgData->format = PF_ETC1_RGB8; break; // GL_COMPRESSED_RGB8_ETC2 case 1: imgData->format = PF_ETC2_RGB8; break; // GL_COMPRESSED_RGBA8_ETC2_EAC case 3: imgData->format = PF_ETC2_RGBA8; break; // GL_COMPRESSED_RGB8_PUNCHTHROUGH_ALPHA1_ETC2 case 4: imgData->format = PF_ETC2_RGB8A1; break; // Default case is ETC1 default: imgData->format = PF_ETC1_RGB8; break; } } else imgData->format = PF_ETC1_RGB8; // ETC has no support for mipmaps - malideveloper.com has a example // where the load mipmap levels from different external files imgData->num_mipmaps = 0; // ETC is a compressed format imgData->flags |= IF_COMPRESSED; // Calculate total size from number of mipmaps, faces and size imgData->size = (paddedWidth * paddedHeight) >> 1; // Bind output buffer MemoryDataStreamPtr output; output.bind(OGRE_NEW MemoryDataStream(imgData->size)); // Now deal with the data void *destPtr = output->getPtr(); stream->read(destPtr, imgData->size); destPtr = static_cast<void*>(static_cast<uchar*>(destPtr)); DecodeResult ret; ret.first = output; ret.second = 
CodecDataPtr(imgData); return true; }
//-----------------------------------------------------------------------
/// Resample src into scaled (sizes taken from the boxes themselves) using
/// the requested filter. With NEWSCALING defined, nearest/linear paths use
/// the built-in resamplers (with a format-conversion temp buffer when the
/// formats differ); any other filter — or builds without NEWSCALING —
/// falls back to DevIL, or throws if DevIL is compiled out.
void Image::scale(const PixelBox &src, const PixelBox &scaled, Filter filter)
{
    assert(PixelUtil::isAccessible(src.format));
    assert(PixelUtil::isAccessible(scaled.format));
#ifdef NEWSCALING
    MemoryDataStreamPtr buf; // For auto-delete
    PixelBox temp;
    switch (filter)
    {
    case FILTER_NEAREST:
        if(src.format == scaled.format)
        {
            // No intermediate buffer needed
            temp = scaled;
        }
        else
        {
            // Allocate temporary buffer of destination size in source format
            temp = PixelBox(scaled.getWidth(), scaled.getHeight(), scaled.getDepth(), src.format);
            buf.bind(new MemoryDataStream(temp.getConsecutiveSize()));
            temp.data = buf->getPtr();
        }
        // super-optimized: no conversion — dispatch on bytes-per-pixel
        switch (PixelUtil::getNumElemBytes(src.format))
        {
        case 1: NearestResampler<1>::scale(src, temp); break;
        case 2: NearestResampler<2>::scale(src, temp); break;
        case 3: NearestResampler<3>::scale(src, temp); break;
        case 4: NearestResampler<4>::scale(src, temp); break;
        case 6: NearestResampler<6>::scale(src, temp); break;
        case 8: NearestResampler<8>::scale(src, temp); break;
        case 12: NearestResampler<12>::scale(src, temp); break;
        case 16: NearestResampler<16>::scale(src, temp); break;
        default:
            // never reached
            assert(false);
        }
        if(temp.data != scaled.data)
        {
            // Blit temp buffer (converts format into the destination)
            PixelUtil::bulkPixelConversion(temp, scaled);
        }
        break;

    case FILTER_LINEAR:
    case FILTER_BILINEAR:
        switch (src.format)
        {
        // byte-component formats get the fast byte-math resampler
        case PF_L8: case PF_A8: case PF_BYTE_LA:
        case PF_R8G8B8: case PF_B8G8R8:
        case PF_R8G8B8A8: case PF_B8G8R8A8:
        case PF_A8B8G8R8: case PF_A8R8G8B8:
        case PF_X8B8G8R8: case PF_X8R8G8B8:
            if(src.format == scaled.format)
            {
                // No intermediate buffer needed
                temp = scaled;
            }
            else
            {
                // Allocate temp buffer of destination size in source format
                temp = PixelBox(scaled.getWidth(), scaled.getHeight(), scaled.getDepth(), src.format);
                buf.bind(new MemoryDataStream(temp.getConsecutiveSize()));
                temp.data = buf->getPtr();
            }
            // super-optimized: byte-oriented math, no conversion
            switch (PixelUtil::getNumElemBytes(src.format))
            {
            case 1: LinearResampler_Byte<1>::scale(src, temp); break;
            case 2: LinearResampler_Byte<2>::scale(src, temp); break;
            case 3: LinearResampler_Byte<3>::scale(src, temp); break;
            case 4: LinearResampler_Byte<4>::scale(src, temp); break;
            default:
                // never reached
                assert(false);
            }
            if(temp.data != scaled.data)
            {
                // Blit temp buffer
                PixelUtil::bulkPixelConversion(temp, scaled);
            }
            break;
        case PF_FLOAT32_RGB:
        case PF_FLOAT32_RGBA:
            if (scaled.format == PF_FLOAT32_RGB || scaled.format == PF_FLOAT32_RGBA)
            {
                // float32 to float32, avoid unpack/repack overhead
                LinearResampler_Float32::scale(src, scaled);
                break;
            }
            // else, fall through
        default:
            // non-optimized: floating-point math, performs conversion but always works
            LinearResampler::scale(src, scaled);
        }
        break;
    default:
        // fall back to old, slow, wildly incorrect DevIL code
#endif
#if OGRE_NO_DEVIL == 0
        ILuint ImageName;
        ilGenImages( 1, &ImageName );
        ilBindImage( ImageName );

        // Convert image from OGRE to current IL image
        ILUtil::fromOgre(src);

        // set filter
        iluImageParameter(ILU_FILTER, getILFilter(filter));

        // do the scaling
        if(!iluScale(scaled.getWidth(), scaled.getHeight(), scaled.getDepth()))
        {
            OGRE_EXCEPT( Exception::ERR_INTERNAL_ERROR,
                iluErrorString(ilGetError()),
                "Image::scale" ) ;
        }
        ILUtil::toOgre(scaled);

        ilDeleteImages(1, &ImageName);

        // return to default filter
        iluImageParameter(ILU_FILTER, ILU_NEAREST);
#else
        OGRE_EXCEPT( Exception::UNIMPLEMENTED_FEATURE,
            "Scaling algorithm not implemented without DevIL",
            "Image::scale" ) ;
#endif
#ifdef NEWSCALING
    }
#endif
}
/// Decode an OpenEXR image into a PF_FLOAT32_RGB[A] ImageData.
/// Reads the data window from the EXR header, builds an interleaved
/// float framebuffer (4 bytes per component), and lets OpenEXR fill it.
/// @throws ERR_INTERNAL_ERROR wrapping any OpenEXR exception.
Codec::DecodeResult EXRCodec::decode(DataStreamPtr& input) const
{
    ImageData * imgData = new ImageData;
    MemoryDataStreamPtr output;

    try {
        // Make a mutable clone of input to be able to change file pointer
        MemoryDataStream myIn(input);

        // Now we can simulate an OpenEXR file with that
        O_IStream str(myIn, "SomeChunk.exr");
        InputFile file(str);

        Box2i dw = file.header().dataWindow();
        int width = dw.max.x - dw.min.x + 1;
        int height = dw.max.y - dw.min.y + 1;
        int components = 3;

        // Alpha channel present?
        const ChannelList &channels = file.header().channels();
        if(channels.findChannel("A"))
            components = 4;

        // Allocate memory: width * height * components floats (4 bytes each)
        output.bind(new MemoryDataStream(width*height*components*4));

        // Construct frame buffer: interleaved RGB[A] floats, so each
        // channel slice starts at a 4-byte component offset within the
        // pixel and strides by the full pixel (xStride) / row (yStride).
        uchar *pixels = output->getPtr();
        FrameBuffer frameBuffer;
        frameBuffer.insert("R",             // name
                    Slice (FLOAT,           // type
                       ((char *) pixels)+0, // base
                       4 * components,      // xStride
                       4 * components * width)); // yStride
        frameBuffer.insert("G",             // name
                    Slice (FLOAT,           // type
                       ((char *) pixels)+4, // base
                       4 * components,      // xStride
                       4 * components * width)); // yStride
        frameBuffer.insert("B",             // name
                    Slice (FLOAT,           // type
                       ((char *) pixels)+8, // base
                       4 * components,      // xStride
                       4 * components * width)); // yStride
        if(components==4) {
            frameBuffer.insert("A",             // name
                        Slice (FLOAT,           // type
                           ((char *) pixels)+12, // base
                           4 * components,      // xStride
                           4 * components * width)); // yStride
        }

        file.setFrameBuffer (frameBuffer);
        file.readPixels (dw.min.y, dw.max.y);

        imgData->format = components==3 ? PF_FLOAT32_RGB : PF_FLOAT32_RGBA;
        imgData->width = width;
        imgData->height = height;
        imgData->depth = 1;
        imgData->size = width*height*components*4;
        imgData->num_mipmaps = 0;
        imgData->flags = 0;
    } catch (const std::exception &exc) {
        // clean up our ImageData before rethrowing as an OGRE exception
        delete imgData;
        throw(Exception(Exception::ERR_INTERNAL_ERROR,
            "OpenEXR Error",
            exc.what()));
    }

    DecodeResult ret;
    ret.first = output;
    ret.second = CodecDataPtr(imgData);
    return ret;
}
//--------------------------------------------------------------------- Codec::DecodeResult PVRTCCodec::decodeV3(DataStreamPtr& stream) const { PVRTCTexHeaderV3 header; PVRTCMetadata metadata; uint32 flags = 0; size_t numFaces = 1; // Assume one face until we know otherwise ImageData *imgData = OGRE_NEW ImageData(); MemoryDataStreamPtr output; // Read the PVRTC header stream->read(&header, sizeof(PVRTCTexHeaderV3)); // Read the PVRTC metadata if(header.metaDataSize) { stream->read(&metadata, sizeof(PVRTCMetadata)); } // Identify the pixel format switch (header.pixelFormat) { case kPVRTC1_PF_2BPP_RGB: imgData->format = PF_PVRTC_RGB2; break; case kPVRTC1_PF_2BPP_RGBA: imgData->format = PF_PVRTC_RGBA2; break; case kPVRTC1_PF_4BPP_RGB: imgData->format = PF_PVRTC_RGB4; break; case kPVRTC1_PF_4BPP_RGBA: imgData->format = PF_PVRTC_RGBA4; break; case kPVRTC2_PF_2BPP: imgData->format = PF_PVRTC2_2BPP; break; case kPVRTC2_PF_4BPP: imgData->format = PF_PVRTC2_4BPP; break; } // Get format flags flags = header.flags; flipEndian(reinterpret_cast<void*>(flags), sizeof(uint32)); imgData->depth = header.depth; imgData->width = header.width; imgData->height = header.height; imgData->num_mipmaps = static_cast<ushort>(header.mipMapCount); // PVRTC is a compressed format imgData->flags |= IF_COMPRESSED; if(header.numFaces == 6) imgData->flags |= IF_CUBEMAP; if(header.depth > 1) imgData->flags |= IF_3D_TEXTURE; // Calculate total size from number of mipmaps, faces and size imgData->size = Image::calculateSize(imgData->num_mipmaps, numFaces, imgData->width, imgData->height, imgData->depth, imgData->format); // Bind output buffer output.bind(OGRE_NEW MemoryDataStream(imgData->size)); // Now deal with the data void *destPtr = output->getPtr(); uint width = imgData->width; uint height = imgData->height; uint depth = imgData->depth; // All mips for a surface, then each face for(size_t mip = 0; mip <= imgData->num_mipmaps; ++mip) { for(size_t surface = 0; surface < header.numSurfaces; 
++surface) { for(size_t i = 0; i < numFaces; ++i) { // Load directly size_t pvrSize = PixelUtil::getMemorySize(width, height, depth, imgData->format); stream->read(destPtr, pvrSize); destPtr = static_cast<void*>(static_cast<uchar*>(destPtr) + pvrSize); } } // Next mip if(width!=1) width /= 2; if(height!=1) height /= 2; if(depth!=1) depth /= 2; } DecodeResult ret; ret.first = output; ret.second = CodecDataPtr(imgData); return ret; }
//---------------------------------------------------------------------
/// Decodes an image through the DevIL library.
/// Loads the stream into a DevIL image, normalises the pixel type/format to
/// something Ogre can represent, then copies every mip of every face into one
/// contiguous output buffer — keeping DXT-compressed data as-is when the
/// current render system supports it, otherwise converting via ILUtil.
/// @param input  stream positioned at the start of the encoded image
/// @return DecodeResult with the decoded buffer and its ImageData descriptor
/// @throws Exception on DevIL errors or unsupported pixel formats
Codec::DecodeResult ILImageCodec::decode(DataStreamPtr& input) const
{
    // DevIL variables
    ILuint ImageName;
    ILint ImageFormat, BytesPerPixel, ImageType;
    ImageData* imgData = new ImageData();
    MemoryDataStreamPtr output;

    // Load the image
    ilGenImages( 1, &ImageName );
    ilBindImage( ImageName );

    // Put it right side up
    ilEnable(IL_ORIGIN_SET);
    ilSetInteger(IL_ORIGIN_MODE, IL_ORIGIN_UPPER_LEFT);

    // Keep DXTC(compressed) data if present
    ilSetInteger(IL_KEEP_DXTC_DATA, IL_TRUE);

    // Load image from stream, cache into memory
    // (mIlType selects the DevIL loader matching this codec's file type)
    MemoryDataStream memInput(input);
    ilLoadL( mIlType, memInput.getPtr(), static_cast< ILuint >(memInput.size()));

    // Check if everything was ok
    ILenum PossibleError = ilGetError() ;
    if( PossibleError != IL_NO_ERROR )
    {
        OGRE_EXCEPT( Exception::ERR_NOT_IMPLEMENTED, "IL Error", iluErrorString(PossibleError) ) ;
    }

    ImageFormat = ilGetInteger( IL_IMAGE_FORMAT );
    ImageType = ilGetInteger( IL_IMAGE_TYPE );

    // Convert image if ImageType is incompatible with us (double or long):
    // anything outside the byte/short/float set is converted to float.
    if(ImageType != IL_BYTE && ImageType != IL_UNSIGNED_BYTE &&
       ImageType != IL_FLOAT &&
       ImageType != IL_UNSIGNED_SHORT && ImageType != IL_SHORT)
    {
        ilConvertImage(ImageFormat, IL_FLOAT);
        ImageType = IL_FLOAT;
    }
    // Converted paletted images (Ogre has no indexed-colour pixel format)
    if(ImageFormat == IL_COLOUR_INDEX)
    {
        ilConvertImage(IL_BGRA, IL_UNSIGNED_BYTE);
        ImageFormat = IL_BGRA;
        ImageType = IL_UNSIGNED_BYTE;
    }

    // Now sets some variables
    // NOTE(review): BytesPerPixel is queried but never used below — presumably
    // leftover; confirm before removing.
    BytesPerPixel = ilGetInteger( IL_IMAGE_BYTES_PER_PIXEL );
    imgData->format = ILUtil::ilFormat2OgreFormat( ImageFormat, ImageType );
    imgData->width = ilGetInteger( IL_IMAGE_WIDTH );
    imgData->height = ilGetInteger( IL_IMAGE_HEIGHT );
    imgData->depth = ilGetInteger( IL_IMAGE_DEPTH );
    imgData->num_mipmaps = ilGetInteger ( IL_NUM_MIPMAPS );
    imgData->flags = 0;

    if(imgData->format == PF_UNKNOWN)
    {
        std::stringstream err;
        err << "Unsupported devil format ImageFormat=" << std::hex << ImageFormat <<
            " ImageType="<< ImageType << std::dec;
        // Free the DevIL image before throwing so it doesn't leak
        ilDeleteImages( 1, &ImageName );

        OGRE_EXCEPT( Exception::ERR_NOT_IMPLEMENTED, err.str(), "ILImageCodec::decode" ) ;
    }

    // Check for cubemap
    //ILuint cubeflags = ilGetInteger ( IL_IMAGE_CUBEFLAGS );
    size_t numFaces = ilGetInteger ( IL_NUM_IMAGES ) + 1;
    if(numFaces == 6)
        imgData->flags |= IF_CUBEMAP;
    else
        numFaces = 1; // Support only 1 or 6 face images for now

    // Keep DXT data (if present at all and the GPU supports it)
    ILuint dxtFormat = ilGetInteger( IL_DXTC_DATA_FORMAT );
    if(dxtFormat != IL_DXT_NO_COMP &&
       Root::getSingleton().getRenderSystem()->getCapabilities()->hasCapability( RSC_TEXTURE_COMPRESSION_DXT ))
    {
        imgData->format = ILUtil::ilFormat2OgreFormat( dxtFormat, ImageType );
        imgData->flags |= IF_COMPRESSED;

        // Validate that this devil version saves DXT mipmaps: probe mip 1 and
        // drop all custom mipmaps if its DXT format doesn't match mip 0's.
        if(imgData->num_mipmaps>0)
        {
            ilBindImage(ImageName);
            ilActiveMipmap(1);
            if((size_t)ilGetInteger( IL_DXTC_DATA_FORMAT ) != dxtFormat)
            {
                imgData->num_mipmaps=0;
                LogManager::getSingleton().logMessage(
                    "Warning: Custom mipmaps for compressed image "+input->getName()+" were ignored because they are not loaded by this DevIL version");
            }
        }
    }

    // Calculate total size from number of mipmaps, faces and size
    imgData->size = Image::calculateSize(imgData->num_mipmaps, numFaces,
                                         imgData->width, imgData->height, imgData->depth, imgData->format);

    // Bind output buffer
    output.bind(new MemoryDataStream(imgData->size));
    size_t offset = 0;

    // Dimensions of current mipmap
    size_t width = imgData->width;
    size_t height = imgData->height;
    size_t depth = imgData->depth;

    // Transfer data: outer loop over mips, inner over faces, so each face's
    // data for a given mip level is laid out consecutively.
    for(size_t mip=0; mip<=imgData->num_mipmaps; ++mip)
    {
        for(size_t i = 0; i < numFaces; ++i)
        {
            // Re-select the face/mip in DevIL (ilActiveImage/ilActiveMipmap
            // are relative to the currently bound base image, so rebind first)
            ilBindImage(ImageName);
            if(numFaces > 1)
                ilActiveImage(i);
            if(imgData->num_mipmaps > 0)
                ilActiveMipmap(mip);
            /// Size of this face
            size_t imageSize = PixelUtil::getMemorySize(
                    width, height, depth, imgData->format);
            if(imgData->flags & IF_COMPRESSED)
            {
                // Compare DXT size returned by DevIL with our idea of the compressed size
                if(imageSize == ilGetDXTCData(NULL, 0, dxtFormat))
                {
                    // Retrieve data from DevIL
                    ilGetDXTCData((unsigned char*)output->getPtr()+offset, imageSize, dxtFormat);
                }
                else
                {
                    // Size mismatch: log and leave this face's bytes untouched
                    LogManager::getSingleton().logMessage(
                        "Warning: compressed image "+input->getName()+" size mismatch, devilsize="+StringConverter::toString(ilGetDXTCData(NULL, 0, dxtFormat))+" oursize="+
                        StringConverter::toString(imageSize));
                }
            }
            else
            {
                /// Retrieve data from DevIL (converts into the Ogre layout)
                PixelBox dst(width, height, depth, imgData->format, (unsigned char*)output->getPtr()+offset);
                ILUtil::toOgre(dst);
            }
            offset += imageSize;
        }
        /// Next mip: halve each dimension, clamping at 1
        if(width!=1) width /= 2;
        if(height!=1) height /= 2;
        if(depth!=1) depth /= 2;
    }

    // Restore IL state
    // NOTE(review): IL_FORMAT_SET is disabled here but never enabled in this
    // function — presumably defensive; confirm against other codec paths.
    ilDisable(IL_ORIGIN_SET);
    ilDisable(IL_FORMAT_SET);

    ilDeleteImages( 1, &ImageName );

    DecodeResult ret;
    ret.first = output;
    ret.second = CodecDataPtr(imgData);

    return ret;
}
//-----------------------------------------------------------------------------
// blitFromMemory doing hardware trilinear scaling
/// Blits @p src_orig into @p dstBox of this buffer using the GPU to scale:
/// the source pixels are uploaded into a temporary texture, which is then
/// rendered into the destination region via blitFromTexture().
/// Falls back to the software path when scaling isn't needed or can't be
/// done in hardware.
void GLESTextureBuffer::blitFromMemory(const PixelBox &src_orig, const Image::Box &dstBox)
{
    // Fall back to normal GLHardwarePixelBuffer::blitFromMemory in case
    // - FBO is not supported
    // - Either source or target is luminance due doesn't looks like supported by hardware
    // - the source dimensions match the destination ones, in which case no scaling is needed
    if(!GL_OES_framebuffer_object ||
       PixelUtil::isLuminance(src_orig.format) ||
       PixelUtil::isLuminance(mFormat) ||
       (src_orig.getWidth() == dstBox.getWidth() &&
        src_orig.getHeight() == dstBox.getHeight() &&
        src_orig.getDepth() == dstBox.getDepth()))
    {
        GLESHardwarePixelBuffer::blitFromMemory(src_orig, dstBox);
        return;
    }

    if(!mBuffer.contains(dstBox))
        OGRE_EXCEPT(Exception::ERR_INVALIDPARAMS, "Destination box out of range",
                    "GLESTextureBuffer::blitFromMemory");

    // For scoped deletion of conversion buffer
    MemoryDataStreamPtr buf;
    PixelBox src;

    // First, convert the srcbox to a OpenGL compatible pixel format:
    // if the source has no direct GL upload format, convert it into this
    // buffer's internal format in a temporary buffer.
    if(GLESPixelUtil::getGLOriginFormat(src_orig.format) == 0)
    {
        // Convert to buffer internal format
        buf.bind(OGRE_NEW MemoryDataStream(PixelUtil::getMemorySize(src_orig.getWidth(), src_orig.getHeight(),
                                                                    src_orig.getDepth(), mFormat)));
        src = PixelBox(src_orig.getWidth(), src_orig.getHeight(), src_orig.getDepth(), mFormat, buf->getPtr());
        PixelUtil::bulkPixelConversion(src_orig, src);
    }
    else
    {
        // No conversion needed
        src = src_orig;
    }

    // Create temporary texture to store source data
    GLuint id;
    GLenum target = GL_TEXTURE_2D;
    // optionalPO2 rounds up to a power of two only when the hardware needs it
    GLsizei width = GLESPixelUtil::optionalPO2(src.getWidth());
    GLsizei height = GLESPixelUtil::optionalPO2(src.getHeight());
    GLenum format = GLESPixelUtil::getClosestGLInternalFormat(src.format);
    GLenum datatype = GLESPixelUtil::getGLOriginDataType(src.format);

    // Generate texture name
    glGenTextures(1, &id);
    GL_CHECK_ERROR;

    // Set texture type
    glBindTexture(target, id);
    GL_CHECK_ERROR;

    // Set automatic mipmap generation; nice for minimisation
    glTexParameteri(target, GL_GENERATE_MIPMAP, GL_TRUE );
    GL_CHECK_ERROR;

    // Allocate texture memory (data pointer 0 = storage only, no upload yet)
    glTexImage2D(target, 0, format, width, height, 0, format, datatype, 0);
    GL_CHECK_ERROR;

    // GL texture buffer wrapping the temporary texture; TU_AUTOMIPMAP so
    // mip levels exist for the scaled blit
    GLESTextureBuffer tex(BLANKSTRING, target, id, width, height, format, src.format,
                          0, 0, (Usage)(TU_AUTOMIPMAP|HBU_STATIC_WRITE_ONLY), false, false, 0);

    // Upload data to 0,0,0 in temporary texture
    Image::Box tempTarget(0, 0, 0, src.getWidth(), src.getHeight(), src.getDepth());
    tex.upload(src, tempTarget);

    // Blit from the temporary texture into our destination box
    blitFromTexture(&tex, tempTarget, dstBox);

    // Delete temp texture
    glDeleteTextures(1, &id);
    GL_CHECK_ERROR;
}
//---------------------------------------------------------------------
/// Attempts to decode @p stream as a KTX container.
/// Verifies the 12-byte KTX file identifier, maps the GL internal format to
/// an Ogre PixelFormat (ETC2/ATC/DXT, defaulting to ETC1), then reads each
/// mip level (prefixed by its uint32 imageSize, per the KTX layout) into one
/// contiguous buffer.
/// @return false if the stream is not a KTX file; true with @p result filled
///         in on success.
bool ETCCodec::decodeKTX(DataStreamPtr& stream, DecodeResult& result) const
{
    KTXHeader header;
    // Read the ETC1 header
    stream->read(&header, sizeof(KTXHeader));

    // Canonical KTX 1.1 identifier bytes; reject anything else
    const uint8 KTXFileIdentifier[12] = { 0xAB, 0x4B, 0x54, 0x58, 0x20, 0x31, 0x31, 0xBB, 0x0D, 0x0A, 0x1A, 0x0A };
    if (memcmp(KTXFileIdentifier, &header.identifier, sizeof(KTXFileIdentifier)) != 0 )
        return false;

    // NOTE(review): only header.glType is byte-swapped here although the rest
    // of the header (pixelWidth, numberOfMipmapLevels, ...) and each per-level
    // imageSize are read below as-is — reversed-endian files would decode
    // incorrectly; confirm whether such files are expected in practice.
    if (header.endianness == KTX_ENDIAN_REF_REV)
        flipEndian(&header.glType, sizeof(uint32), 1);

    ImageData *imgData = OGRE_NEW ImageData();
    imgData->depth = 1;
    imgData->width = header.pixelWidth;
    imgData->height = header.pixelHeight;
    // KTX stores the level count; Ogre counts mipmaps excluding the base level
    imgData->num_mipmaps = static_cast<ushort>(header.numberOfMipmapLevels - 1);

    // Map the GL internal format enum value to an Ogre PixelFormat
    switch(header.glInternalFormat)
    {
    case 37492: // GL_COMPRESSED_RGB8_ETC2
        imgData->format = PF_ETC2_RGB8;
        break;
    case 37496:// GL_COMPRESSED_RGBA8_ETC2_EAC
        imgData->format = PF_ETC2_RGBA8;
        break;
    case 37494: // GL_COMPRESSED_RGB8_PUNCHTHROUGH_ALPHA1_ETC2
        imgData->format = PF_ETC2_RGB8A1;
        break;
    case 35986: // ATC_RGB
        imgData->format = PF_ATC_RGB;
        break;
    case 35987: // ATC_RGB_Explicit
        imgData->format = PF_ATC_RGBA_EXPLICIT_ALPHA;
        break;
    case 34798: // ATC_RGB_Interpolated
        imgData->format = PF_ATC_RGBA_INTERPOLATED_ALPHA;
        break;
    case 33777: // DXT 1
        imgData->format = PF_DXT1;
        break;
    case 33778: // DXT 3
        imgData->format = PF_DXT3;
        break;
    case 33779: // DXT 5
        imgData->format = PF_DXT5;
        break;
    default:
        imgData->format = PF_ETC1_RGB8;
        break;
    }

    imgData->flags = 0;
    // Per the KTX spec, glType == 0 / glFormat == 0 marks compressed data
    if (header.glType == 0 || header.glFormat == 0)
        imgData->flags |= IF_COMPRESSED;

    size_t numFaces = 1; // Assume one face until we know otherwise
    // Calculate total size from number of mipmaps, faces and size
    imgData->size = Image::calculateSize(imgData->num_mipmaps, numFaces,
                                         imgData->width, imgData->height, imgData->depth, imgData->format);

    // Skip the key/value metadata section to reach the first mip level
    stream->skip(header.bytesOfKeyValueData);

    // Bind output buffer
    MemoryDataStreamPtr output;
    output.bind(OGRE_NEW MemoryDataStream(imgData->size));

    // Now deal with the data: each level is prefixed by its byte size.
    // NOTE(review): KTX pads each level to a 4-byte boundary; no padding is
    // skipped here — presumably fine for the compressed formats above whose
    // block sizes keep levels aligned, but verify for other inputs.
    uchar* destPtr = output->getPtr();
    for (uint32 level = 0; level < header.numberOfMipmapLevels; ++level)
    {
        uint32 imageSize = 0;
        stream->read(&imageSize, sizeof(uint32));
        stream->read(destPtr, imageSize);
        destPtr += imageSize;
    }

    result.first = output;
    result.second = CodecDataPtr(imgData);

    return true;
}
//-----------------------------------------------------------------------
/// Resizes the pixels of @p src into @p scaled (dimensions and formats are
/// taken from the two PixelBoxes; both formats must be directly accessible,
/// i.e. not compressed).
/// Dispatch strategy: pick a resampler specialised for the pixel size or
/// format when possible, resampling in the *source* format into a temporary
/// buffer and bulk-converting afterwards only if the formats differ.
void Image::scale(const PixelBox &src, const PixelBox &scaled, Filter filter)
{
    assert(PixelUtil::isAccessible(src.format));
    assert(PixelUtil::isAccessible(scaled.format));
    MemoryDataStreamPtr buf; // For auto-delete
    PixelBox temp;
    switch (filter)
    {
    default:
    case FILTER_NEAREST:
        if(src.format == scaled.format)
        {
            // No intermediate buffer needed: resample straight into the target
            temp = scaled;
        }
        else
        {
            // Allocate temporary buffer of destination size in source format
            temp = PixelBox(scaled.getWidth(), scaled.getHeight(), scaled.getDepth(), src.format);
            buf.bind(OGRE_NEW MemoryDataStream(temp.getConsecutiveSize()));
            temp.data = buf->getPtr();
        }
        // super-optimized: no conversion — dispatch on bytes-per-pixel to a
        // resampler template that copies whole texels
        switch (PixelUtil::getNumElemBytes(src.format))
        {
        case 1: NearestResampler<1>::scale(src, temp); break;
        case 2: NearestResampler<2>::scale(src, temp); break;
        case 3: NearestResampler<3>::scale(src, temp); break;
        case 4: NearestResampler<4>::scale(src, temp); break;
        case 6: NearestResampler<6>::scale(src, temp); break;
        case 8: NearestResampler<8>::scale(src, temp); break;
        case 12: NearestResampler<12>::scale(src, temp); break;
        case 16: NearestResampler<16>::scale(src, temp); break;
        default:
            // never reached
            assert(false);
        }
        if(temp.data != scaled.data)
        {
            // Blit temp buffer (converts source format -> destination format)
            PixelUtil::bulkPixelConversion(temp, scaled);
        }
        break;

    case FILTER_LINEAR:
    case FILTER_BILINEAR:
        switch (src.format)
        {
        // Byte-per-channel formats get the fast byte-oriented resampler
        case PF_L8: case PF_A8: case PF_BYTE_LA:
        case PF_R8G8B8: case PF_B8G8R8:
        case PF_R8G8B8A8: case PF_B8G8R8A8:
        case PF_A8B8G8R8: case PF_A8R8G8B8:
        case PF_X8B8G8R8: case PF_X8R8G8B8:
            if(src.format == scaled.format)
            {
                // No intermediate buffer needed
                temp = scaled;
            }
            else
            {
                // Allocate temp buffer of destination size in source format
                temp = PixelBox(scaled.getWidth(), scaled.getHeight(), scaled.getDepth(), src.format);
                buf.bind(OGRE_NEW MemoryDataStream(temp.getConsecutiveSize()));
                temp.data = buf->getPtr();
            }
            // super-optimized: byte-oriented math, no conversion
            switch (PixelUtil::getNumElemBytes(src.format))
            {
            case 1: LinearResampler_Byte<1>::scale(src, temp); break;
            case 2: LinearResampler_Byte<2>::scale(src, temp); break;
            case 3: LinearResampler_Byte<3>::scale(src, temp); break;
            case 4: LinearResampler_Byte<4>::scale(src, temp); break;
            default:
                // never reached
                assert(false);
            }
            if(temp.data != scaled.data)
            {
                // Blit temp buffer
                PixelUtil::bulkPixelConversion(temp, scaled);
            }
            break;
        case PF_FLOAT32_RGB:
        case PF_FLOAT32_RGBA:
            if (scaled.format == PF_FLOAT32_RGB || scaled.format == PF_FLOAT32_RGBA)
            {
                // float32 to float32, avoid unpack/repack overhead
                LinearResampler_Float32::scale(src, scaled);
                break;
            }
            // else, fall through (intentional: destination isn't float32, so
            // use the generic converting resampler below)
        default:
            // non-optimized: floating-point math, performs conversion but always works
            LinearResampler::scale(src, scaled);
        }
        break;
    }
}
//----------------------------------------------------------------------------- // blitFromMemory doing hardware trilinear scaling void GLES2TextureBuffer::blitFromMemory(const PixelBox &src_orig, const Image::Box &dstBox) { // Fall back to normal GLHardwarePixelBuffer::blitFromMemory in case // - FBO is not supported // - Either source or target is luminance due doesn't looks like supported by hardware // - the source dimensions match the destination ones, in which case no scaling is needed // TODO: Check that extension is NOT available if(PixelUtil::isLuminance(src_orig.format) || PixelUtil::isLuminance(mFormat) || (src_orig.getWidth() == dstBox.getWidth() && src_orig.getHeight() == dstBox.getHeight() && src_orig.getDepth() == dstBox.getDepth())) { GLES2HardwarePixelBuffer::blitFromMemory(src_orig, dstBox); return; } if(!mBuffer.contains(dstBox)) OGRE_EXCEPT(Exception::ERR_INVALIDPARAMS, "Destination box out of range", "GLES2TextureBuffer::blitFromMemory"); // For scoped deletion of conversion buffer MemoryDataStreamPtr buf; PixelBox src; // First, convert the srcbox to a OpenGL compatible pixel format if(GLES2PixelUtil::getGLOriginFormat(src_orig.format) == 0) { // Convert to buffer internal format buf.bind(OGRE_NEW MemoryDataStream(PixelUtil::getMemorySize(src_orig.getWidth(), src_orig.getHeight(), src_orig.getDepth(), mFormat))); src = PixelBox(src_orig.getWidth(), src_orig.getHeight(), src_orig.getDepth(), mFormat, buf->getPtr()); PixelUtil::bulkPixelConversion(src_orig, src); } else { // No conversion needed src = src_orig; } // Create temporary texture to store source data GLuint id; GLenum target = #if OGRE_NO_GLES3_SUPPORT == 0 (src.getDepth() != 1) ? 
GL_TEXTURE_3D : #endif GL_TEXTURE_2D; GLsizei width = GLES2PixelUtil::optionalPO2(src.getWidth()); GLsizei height = GLES2PixelUtil::optionalPO2(src.getHeight()); GLenum format = GLES2PixelUtil::getClosestGLInternalFormat(src.format); GLenum datatype = GLES2PixelUtil::getGLOriginDataType(src.format); // Generate texture name OGRE_CHECK_GL_ERROR(glGenTextures(1, &id)); // Set texture type OGRE_CHECK_GL_ERROR(glBindTexture(target, id)); #if GL_APPLE_texture_max_level && OGRE_PLATFORM != OGRE_PLATFORM_NACL OGRE_CHECK_GL_ERROR(glTexParameteri(target, GL_TEXTURE_MAX_LEVEL_APPLE, 1000 )); #elif OGRE_NO_GLES3_SUPPORT == 0 OGRE_CHECK_GL_ERROR(glTexParameteri(target, GL_TEXTURE_MAX_LEVEL, 1000 )); #endif // Allocate texture memory #if OGRE_NO_GLES3_SUPPORT == 0 if(target == GL_TEXTURE_3D || target == GL_TEXTURE_2D_ARRAY) glTexImage3D(target, 0, src.format, src.getWidth(), src.getHeight(), src.getDepth(), 0, GL_RGBA, GL_UNSIGNED_BYTE, 0); else #endif OGRE_CHECK_GL_ERROR(glTexImage2D(target, 0, format, width, height, 0, format, datatype, 0)); // GL texture buffer GLES2TextureBuffer tex(StringUtil::BLANK, target, id, width, height, format, src.format, 0, 0, (Usage)(TU_AUTOMIPMAP|HBU_STATIC_WRITE_ONLY), false, false, 0); // Upload data to 0,0,0 in temporary texture Image::Box tempTarget(0, 0, 0, src.getWidth(), src.getHeight(), src.getDepth()); tex.upload(src, tempTarget); // Blit blitFromTexture(&tex, tempTarget, dstBox); // Delete temp texture OGRE_CHECK_GL_ERROR(glDeleteTextures(1, &id)); }