Example #1
    //---------------------------------------------------------------------
    DataStreamPtr Image::encode(const String& formatextension)
    {
        if( !mBuffer )
        {
            OGRE_EXCEPT(Exception::ERR_INVALIDPARAMS, "No image data loaded", 
                "Image::encode");
        }

        Codec * pCodec = Codec::getCodec(formatextension);
        if( !pCodec )
            OGRE_EXCEPT(
            Exception::ERR_INVALIDPARAMS, 
            "Unable to encode image data as '" + formatextension + "' - invalid extension.",
            "Image::encode" );

        ImageCodec::ImageData* imgData = OGRE_NEW ImageCodec::ImageData();
        imgData->format = mFormat;
        imgData->height = mHeight;
        imgData->width = mWidth;
        imgData->depth = mDepth;
        // Wrap in CodecDataPtr, this will delete
        Codec::CodecDataPtr codeDataPtr(imgData);
        // Wrap memory, be sure not to delete when stream destroyed
        MemoryDataStreamPtr wrapper(OGRE_NEW MemoryDataStream(mBuffer, mBufSize, false));

        return pCodec->encode(wrapper, codeDataPtr);
    }
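A minimal usage sketch (an assumption on my part, not part of the example above): with OGRE's codecs registered and a resource group initialised, encode() can re-encode a loaded image entirely in memory. The file name and extension are placeholders.

    #include <OgreImage.h>
    #include <OgreDataStream.h>
    #include <OgreResourceGroupManager.h>

    // Hypothetical helper: load a texture from the resource system and
    // re-encode it as PNG in memory.
    void encodeToPngInMemory()
    {
        using namespace Ogre;

        Image img;
        img.load("rockwall.tga", ResourceGroupManager::DEFAULT_RESOURCE_GROUP_NAME);

        // Throws ERR_INVALIDPARAMS if no data is loaded or no codec is
        // registered for the extension (see the checks above).
        DataStreamPtr encoded = img.encode("png");

        // Buffer the encoded bytes if random access is needed.
        MemoryDataStream copy(encoded);
        size_t encodedBytes = copy.size();
        (void)encodedBytes;
    }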
Example #2
    //---------------------------------------------------------------------
    DataStreamPtr FreeImageCodec::code(MemoryDataStreamPtr& input, Codec::CodecDataPtr& pData) const
    {        
		// Set error handler
		FreeImage_SetOutputMessage(FreeImageSaveErrorHandler);

		FIBITMAP* fiBitmap = encode(input, pData);

		// open memory chunk allocated by FreeImage
		FIMEMORY* mem = FreeImage_OpenMemory();
		// write data into memory
		FreeImage_SaveToMemory((FREE_IMAGE_FORMAT)mFreeImageType, fiBitmap, mem);
		// Grab data information
		BYTE* data;
		DWORD size;
		FreeImage_AcquireMemory(mem, &data, &size);
		// Copy data into our own buffer
		// Because we're asking MemoryDataStream to free this, must create in a compatible way
		BYTE* ourData = OGRE_ALLOC_T(BYTE, size, MEMCATEGORY_GENERAL);
		memcpy(ourData, data, size);
		// Wrap data in stream, tell it to free on close 
		DataStreamPtr outstream(OGRE_NEW MemoryDataStream(ourData, size, true));
		// Now free FreeImage memory buffers
		FreeImage_CloseMemory(mem);
		// Unload bitmap
		FreeImage_Unload(fiBitmap);

		return outstream;


    }
Example #3
	//---------------------------------------------------------------------
	void GpuProgramManager::loadMicrocodeCache( DataStreamPtr stream )
	{
		mMicrocodeCache.clear();

		// read the size of the array
		size_t sizeOfArray = 0;
		stream->read(&sizeOfArray, sizeof(size_t));
		
		// loop the array and load it

		for ( size_t i = 0 ; i < sizeOfArray ; i++ )
		{
			String nameOfShader;
			// loads the name of the shader
			size_t stringLength  = 0;
			stream->read(&stringLength, sizeof(size_t));
			nameOfShader.resize(stringLength);				
			stream->read(&nameOfShader[0], stringLength);

			// loads the microcode
			size_t microcodeLength = 0;
			stream->read(&microcodeLength, sizeof(size_t));		

			Microcode microcodeOfShader(OGRE_NEW MemoryDataStream(nameOfShader, microcodeLength)); 		
			microcodeOfShader->seek(0);
			stream->read(microcodeOfShader->getPtr(), microcodeLength);

			mMicrocodeCache.insert(make_pair(nameOfShader, microcodeOfShader));
		}
	}
Example #4
    //---------------------------------------------------------------------
    void GpuProgramManager::loadMicrocodeCache( DataStreamPtr stream )
    {
        mMicrocodeCache.clear();

        // read the size of the array
        uint32 sizeOfArray = 0;
        stream->read(&sizeOfArray, sizeof(uint32));
        
        // loop the array and load it

        for ( uint32 i = 0 ; i < sizeOfArray ; i++ )
        {
            String nameOfShader;
            // loads the name of the shader
            uint32 stringLength  = 0;
            stream->read(&stringLength, sizeof(uint32));
            nameOfShader.resize(stringLength);              
            stream->read(&nameOfShader[0], stringLength);

            // loads the microcode
            uint32 microcodeLength = 0;
            stream->read(&microcodeLength, sizeof(uint32));     

            Microcode microcodeOfShader(OGRE_NEW MemoryDataStream(nameOfShader, microcodeLength));      
            microcodeOfShader->seek(0);
            stream->read(microcodeOfShader->getPtr(), microcodeLength);

            mMicrocodeCache.insert(make_pair(nameOfShader, microcodeOfShader));
        }

        // the cache now matches what is on disk, so mark it as clean.
        mCacheDirty = false;
        
    }
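For reference, the byte layout the two loaders above expect is: a 32-bit entry count, then for each entry a 32-bit name length, the name bytes, a 32-bit microcode length, and the microcode bytes. The standalone writer below is only a sketch of that layout (little-endian, fixed-width counts), not OGRE's own saveMicrocodeCache.

    #include <cstdint>
    #include <fstream>
    #include <map>
    #include <string>
    #include <vector>

    // Write [count][nameLen, name, codeLen, code]... mirroring the reads above.
    void writeMicrocodeCacheSketch(const std::string& path,
                                   const std::map<std::string, std::vector<unsigned char> >& cache)
    {
        std::ofstream out(path.c_str(), std::ios::binary);

        uint32_t count = static_cast<uint32_t>(cache.size());
        out.write(reinterpret_cast<const char*>(&count), sizeof(count));

        for (std::map<std::string, std::vector<unsigned char> >::const_iterator it = cache.begin();
             it != cache.end(); ++it)
        {
            uint32_t nameLen = static_cast<uint32_t>(it->first.size());
            out.write(reinterpret_cast<const char*>(&nameLen), sizeof(nameLen));
            out.write(it->first.data(), nameLen);

            uint32_t codeLen = static_cast<uint32_t>(it->second.size());
            out.write(reinterpret_cast<const char*>(&codeLen), sizeof(codeLen));
            if (codeLen)
                out.write(reinterpret_cast<const char*>(&it->second[0]), codeLen);
        }
    }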
Example #5
	//-----------------------------------------------------------------------
	void ConfigFile::loadDirect(const String& filename, const String& separators, 
		bool trimWhitespace)
	{
#if OGRE_PLATFORM == OGRE_PLATFORM_NACL
        OGRE_EXCEPT(Exception::ERR_CANNOT_WRITE_TO_FILE, "loadDirect is not supported on NaCl - tried to open: " + filename,
            "ConfigFile::loadDirect");
#endif

        /* Open the configuration file */
		std::ifstream fp;
        // Always open in binary mode
		fp.open(filename.c_str(), std::ios::in | std::ios::binary);
		if(!fp)
			OGRE_EXCEPT(
			Exception::ERR_FILE_NOT_FOUND, "'" + filename + "' file not found!", "ConfigFile::load" );

		// Wrap as a stream
		DataStreamPtr stream(OGRE_NEW FileStreamDataStream(filename, &fp, false));

#if OGRE_PLATFORM == OGRE_PLATFORM_SYMBIAN
		// readLine doesn't seem to work correctly with file streams on Symbian
		DataStreamPtr memoryStream(OGRE_NEW MemoryDataStream(stream));
		stream = memoryStream;
#endif

		load(stream, separators, trimWhitespace);

	}
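A brief usage sketch (assuming OGRE 1.x with a resources.cfg next to the executable): load() for a plain path goes through loadDirect(), and the parsed sections can then be walked to register resource locations.

    #include <OgreConfigFile.h>
    #include <OgreResourceGroupManager.h>

    // Parse resources.cfg and register every location it lists.
    void addResourceLocationsSketch()
    {
        Ogre::ConfigFile cf;
        cf.load("resources.cfg");

        Ogre::ConfigFile::SectionIterator seci = cf.getSectionIterator();
        while (seci.hasMoreElements())
        {
            Ogre::String section = seci.peekNextKey();
            Ogre::ConfigFile::SettingsMultiMap* settings = seci.getNext();
            for (Ogre::ConfigFile::SettingsMultiMap::iterator i = settings->begin();
                 i != settings->end(); ++i)
            {
                // key = archive type (e.g. "FileSystem"), value = path
                Ogre::ResourceGroupManager::getSingleton().addResourceLocation(
                    i->second, i->first, section);
            }
        }
    }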
Example #6
	void Texture::prepareImpl()
	{
		if (isManualLoaded())
			return;

		DataStreamPtr dataStream = ResourceGroupMgr::getSingltonPtr()->openResource(mName, mGroup);
		mPreparedData = MemoryDataStreamPtr(TITAN_NEW MemoryDataStream(dataStream));
	}
Example #7
    //---------------------------------------------------------------------    
	Codec::DecodeResult PVRTCCodec::decodeV2(DataStreamPtr& stream) const
	{
		PVRTCTexHeaderV2 header;
        uint32 flags = 0, formatFlags = 0;
        size_t numFaces = 1; // Assume one face until we know otherwise

        ImageData *imgData = OGRE_NEW ImageData();
		MemoryDataStreamPtr output;

        // Read the PVRTC header
        stream->read(&header, sizeof(PVRTCTexHeaderV2));

        // Get format flags
        flags = header.flags;
        flipEndian(&flags, sizeof(uint32));
        formatFlags = flags & PVR_TEXTURE_FLAG_TYPE_MASK;

        uint32 bitmaskAlpha = header.bitmaskAlpha;
        flipEndian(&bitmaskAlpha, sizeof(uint32));

        if (formatFlags == kPVRTextureFlagTypePVRTC_4 || formatFlags == kPVRTextureFlagTypePVRTC_2)
        {
            if (formatFlags == kPVRTextureFlagTypePVRTC_4)
            {
                imgData->format = bitmaskAlpha ? PF_PVRTC_RGBA4 : PF_PVRTC_RGB4;
            }
            else if (formatFlags == kPVRTextureFlagTypePVRTC_2)
            {
                imgData->format = bitmaskAlpha ? PF_PVRTC_RGBA2 : PF_PVRTC_RGB2;
            }

            imgData->depth = 1;
            imgData->width = header.width;
            imgData->height = header.height;
            imgData->num_mipmaps = static_cast<ushort>(header.numMipmaps);

            // PVRTC is a compressed format
            imgData->flags |= IF_COMPRESSED;
        }

        // Calculate total size from number of mipmaps, faces and size
		imgData->size = Image::calculateSize(imgData->num_mipmaps, numFaces, 
                                             imgData->width, imgData->height, imgData->depth, imgData->format);

		// Bind output buffer
		output.bind(OGRE_NEW MemoryDataStream(imgData->size));

		// Now deal with the data
		void *destPtr = output->getPtr();
        stream->read(destPtr, imgData->size);
        destPtr = static_cast<void*>(static_cast<uchar*>(destPtr));

		DecodeResult ret;
		ret.first = output;
		ret.second = CodecDataPtr(imgData);

		return ret;
	}
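For orientation, the legacy (v2) PVR header read above is conventionally laid out as below. This sketch follows Imagination's legacy header layout and is only meant to show where the fields used by decodeV2 sit; it is not a verified copy of OGRE's declaration.

    #include <cstdint>

    // Legacy PVR v2 header: thirteen little-endian 32-bit fields (52 bytes).
    struct PVRTCTexHeaderV2Sketch
    {
        uint32_t headerLength;   // size of this header
        uint32_t height;
        uint32_t width;
        uint32_t numMipmaps;
        uint32_t flags;          // low bits hold the pixel type (PVR_TEXTURE_FLAG_TYPE_MASK)
        uint32_t dataLength;
        uint32_t bpp;
        uint32_t bitmaskRed;
        uint32_t bitmaskGreen;
        uint32_t bitmaskBlue;
        uint32_t bitmaskAlpha;   // non-zero when the data carries alpha
        uint32_t pvrTag;         // 'PVR!'
        uint32_t numSurfs;
    };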
Example #8
    //---------------------------------------------------------------------
    Codec::DecodeResult STBIImageCodec::decode(DataStreamPtr& input) const
    {
        // Buffer stream into memory (TODO: override IO functions instead?)
        MemoryDataStream memStream(input, true);

        int width, height, components;
        stbi_uc* pixelData = stbi_load_from_memory(memStream.getPtr(),
                static_cast<int>(memStream.size()), &width, &height, &components, 0);

        if (!pixelData)
        {
            OGRE_EXCEPT(Exception::ERR_INTERNAL_ERROR, 
                "Error decoding image: " + String(stbi_failure_reason()),
                "STBIImageCodec::decode");
        }

        SharedPtr<ImageData> imgData(OGRE_NEW ImageData());
        MemoryDataStreamPtr output;

        imgData->depth = 1; // only 2D formats handled by this codec
        imgData->width = width;
        imgData->height = height;
        imgData->num_mipmaps = 0; // no mipmaps in non-DDS 
        imgData->flags = 0;

        switch( components )
        {
            case 1:
                imgData->format = PF_BYTE_L;
                break;
            case 2:
                imgData->format = PF_BYTE_LA;
                break;
            case 3:
                imgData->format = PF_BYTE_RGB;
                break;
            case 4:
                imgData->format = PF_BYTE_RGBA;
                break;
            default:
                stbi_image_free(pixelData);
                OGRE_EXCEPT(Exception::ERR_ITEM_NOT_FOUND,
                            "Unknown or unsupported image format",
                            "STBIImageCodec::decode");
                break;
        }
        
        size_t dstPitch = imgData->width * PixelUtil::getNumElemBytes(imgData->format);
        imgData->size = dstPitch * imgData->height;
        output.bind(OGRE_NEW MemoryDataStream(pixelData, imgData->size, true));
        
        DecodeResult ret;
        ret.first = output;
        ret.second = imgData;
        return ret;
    }
Example #9
    //-------------------------------------------------------------------------
    void HeightmapTerrainPageSource::loadHeightmap(void)
    {
        size_t imgSize;
        // Special-case RAW format
        if (mIsRaw)
        {
            // Image size comes from setting (since RAW is not self-describing)
            imgSize = mRawSize;
            
            // Load data
            mRawData.setNull();
            DataStreamPtr stream = 
                ResourceGroupManager::getSingleton().openResource(
                    mSource, ResourceGroupManager::getSingleton().getWorldResourceGroupName());
            mRawData = MemoryDataStreamPtr(OGRE_NEW MemoryDataStream(mSource, stream));

            // Validate size
            size_t numBytes = imgSize * imgSize * mRawBpp;
            if (mRawData->size() != numBytes)
            {
                shutdown();
                OGRE_EXCEPT(Exception::ERR_INVALIDPARAMS, 
                    "RAW size (" + StringConverter::toString(mRawData->size()) + 
                    ") does not agree with configuration settings.", 
                    "HeightmapTerrainPageSource::loadHeightmap");
            }
        }
        else
        {
            mImage.load(mSource, ResourceGroupManager::getSingleton().getWorldResourceGroupName());
            // Must be square (dimensions checked later)
            if ( mImage.getWidth() != mImage.getHeight())
            {
                shutdown();
                OGRE_EXCEPT(Exception::ERR_INVALIDPARAMS,
                    "Heightmap must be square",
                    "HeightmapTerrainPageSource::loadHeightmap");
            }
            imgSize = mImage.getWidth();
        }
        //check to make sure it's the expected size
        if ( imgSize != mPageSize)
        {
            shutdown();
            String err = "Error: Invalid heightmap size : " +
                StringConverter::toString( imgSize ) +
                ". Should be " + StringConverter::toString(mPageSize);
            OGRE_EXCEPT( Exception::ERR_INVALIDPARAMS, err, 
                "HeightmapTerrainPageSource::loadHeightmap" );
        }

    }
Example #10
    //-----------------------------------------------------------------------------
    void Image::save(const String& filename)
    {
        if( !mBuffer )
        {
            OGRE_EXCEPT(Exception::ERR_INVALIDPARAMS, "No image data loaded", 
                "Image::save");
        }

        String strExt;
        size_t pos = filename.find_last_of(".");
        if( pos == String::npos )
            OGRE_EXCEPT(
            Exception::ERR_INVALIDPARAMS, 
            "Unable to save image file '" + filename + "' - invalid extension.",
            "Image::save" );

        while( pos != filename.length() - 1 )
            strExt += filename[++pos];

        Codec * pCodec = Codec::getCodec(strExt);
        if( !pCodec )
            OGRE_EXCEPT(
            Exception::ERR_INVALIDPARAMS, 
            "Unable to save image file '" + filename + "' - invalid extension.",
            "Image::save" );

        ImageCodec::ImageData* imgData = OGRE_NEW ImageCodec::ImageData();
        imgData->format = mFormat;
        imgData->height = mHeight;
        imgData->width = mWidth;
        imgData->depth = mDepth;
        imgData->size = mBufSize;
		imgData->num_mipmaps = mNumMipmaps;
        // Wrap in CodecDataPtr, this will delete
        Codec::CodecDataPtr codeDataPtr(imgData);
        // Wrap memory, be sure not to delete when stream destroyed
        MemoryDataStreamPtr wrapper(OGRE_NEW MemoryDataStream(mBuffer, mBufSize, false));

        pCodec->encodeToFile(wrapper, filename, codeDataPtr);
    }
Example #11
    //-----------------------------------------------------------------------------
    // blitFromMemory doing hardware trilinear scaling
    void GLESTextureBuffer::blitFromMemory(const PixelBox &src_orig, const Image::Box &dstBox)
    {
        // Fall back to normal GLHardwarePixelBuffer::blitFromMemory in case 
        // - FBO is not supported
        // - either the source or the target is a luminance format, which the hardware doesn't appear to support
        // - the source dimensions match the destination ones, in which case no scaling is needed
        if(!GL_OES_framebuffer_object ||
           PixelUtil::isLuminance(src_orig.format) ||
           PixelUtil::isLuminance(mFormat) ||
           (src_orig.getWidth() == dstBox.getWidth() &&
            src_orig.getHeight() == dstBox.getHeight() &&
            src_orig.getDepth() == dstBox.getDepth()))
        {
            GLESHardwarePixelBuffer::blitFromMemory(src_orig, dstBox);
            return;
        }
        if(!mBuffer.contains(dstBox))
            OGRE_EXCEPT(Exception::ERR_INVALIDPARAMS, "Destination box out of range",
                        "GLESTextureBuffer::blitFromMemory");
        // For scoped deletion of conversion buffer
        MemoryDataStreamPtr buf;
        PixelBox src;
        
        // First, convert the srcbox to an OpenGL-compatible pixel format
        if(GLESPixelUtil::getGLOriginFormat(src_orig.format) == 0)
        {
            // Convert to buffer internal format
            buf.bind(OGRE_NEW MemoryDataStream(PixelUtil::getMemorySize(src_orig.getWidth(), src_orig.getHeight(), src_orig.getDepth(),
                                                                   mFormat)));
            src = PixelBox(src_orig.getWidth(), src_orig.getHeight(), src_orig.getDepth(), mFormat, buf->getPtr());
            PixelUtil::bulkPixelConversion(src_orig, src);
        }
        else
        {
            // No conversion needed
            src = src_orig;
        }
        
        // Create temporary texture to store source data
        GLuint id;
        GLenum target = GL_TEXTURE_2D;
        GLsizei width = GLESPixelUtil::optionalPO2(src.getWidth());
        GLsizei height = GLESPixelUtil::optionalPO2(src.getHeight());
        GLenum format = GLESPixelUtil::getClosestGLInternalFormat(src.format);
        GLenum datatype = GLESPixelUtil::getGLOriginDataType(src.format);
        // Generate texture name
        glGenTextures(1, &id);
        GL_CHECK_ERROR;
        
        // Set texture type
        glBindTexture(target, id);
        GL_CHECK_ERROR;
        
        // Set automatic mipmap generation; nice for minimisation
        glTexParameteri(target, GL_GENERATE_MIPMAP, GL_TRUE );
        GL_CHECK_ERROR;

        // Allocate texture memory
        glTexImage2D(target, 0, format, width, height, 0, format, datatype, 0);
        GL_CHECK_ERROR;

        // GL texture buffer
        GLESTextureBuffer tex(BLANKSTRING, target, id, width, height, format, src.format,
                              0, 0, (Usage)(TU_AUTOMIPMAP|HBU_STATIC_WRITE_ONLY), false, false, 0);
        
        // Upload data to 0,0,0 in temporary texture
        Image::Box tempTarget(0, 0, 0, src.getWidth(), src.getHeight(), src.getDepth());
        tex.upload(src, tempTarget);
        
        // Blit
        blitFromTexture(&tex, tempTarget, dstBox);
        
        // Delete temp texture
        glDeleteTextures(1, &id);
        GL_CHECK_ERROR;
    }
Example #12
	//---------------------------------------------------------------------    
	Codec::DecodeResult PVRTCCodec::decodeV3(DataStreamPtr& stream) const
	{
		PVRTCTexHeaderV3 header;
        PVRTCMetadata metadata;
        uint32 flags = 0;
        size_t numFaces = 1; // Assume one face until we know otherwise

        ImageData *imgData = OGRE_NEW ImageData();
		MemoryDataStreamPtr output;

        // Read the PVRTC header
        stream->read(&header, sizeof(PVRTCTexHeaderV3));

        // Read the PVRTC metadata
        if(header.metaDataSize)
        {
            stream->read(&metadata, sizeof(PVRTCMetadata));
        }

        // Identify the pixel format
        switch (header.pixelFormat)
        {
            case kPVRTC1_PF_2BPP_RGB:
                imgData->format = PF_PVRTC_RGB2;
                break;
            case kPVRTC1_PF_2BPP_RGBA:
                imgData->format = PF_PVRTC_RGBA2;
                break;
            case kPVRTC1_PF_4BPP_RGB:
                imgData->format = PF_PVRTC_RGB4;
                break;
            case kPVRTC1_PF_4BPP_RGBA:
                imgData->format = PF_PVRTC_RGBA4;
                break;
            case kPVRTC2_PF_2BPP:
                imgData->format = PF_PVRTC2_2BPP;
                break;
            case kPVRTC2_PF_4BPP:
                imgData->format = PF_PVRTC2_4BPP;
                break;
        }

        // Get format flags
        flags = header.flags;
        flipEndian(&flags, sizeof(uint32));

        imgData->depth = header.depth;
        imgData->width = header.width;
        imgData->height = header.height;
        imgData->num_mipmaps = static_cast<ushort>(header.mipMapCount);

        // PVRTC is a compressed format
        imgData->flags |= IF_COMPRESSED;

        if(header.numFaces == 6)
            imgData->flags |= IF_CUBEMAP;

        if(header.depth > 1)
            imgData->flags |= IF_3D_TEXTURE;

        // Calculate total size from number of mipmaps, faces and size
		imgData->size = Image::calculateSize(imgData->num_mipmaps, numFaces, 
                                             imgData->width, imgData->height, imgData->depth, imgData->format);

		// Bind output buffer
		output.bind(OGRE_NEW MemoryDataStream(imgData->size));

		// Now deal with the data
		void *destPtr = output->getPtr();
        
        uint width = imgData->width;
        uint height = imgData->height;
        uint depth = imgData->depth;

        // All mips for a surface, then each face
        for(size_t mip = 0; mip <= imgData->num_mipmaps; ++mip)
		{
            for(size_t surface = 0; surface < header.numSurfaces; ++surface)
            {
                for(size_t i = 0; i < numFaces; ++i)
                {
                    // Load directly
                    size_t pvrSize = PixelUtil::getMemorySize(width, height, depth, imgData->format);
                    stream->read(destPtr, pvrSize);
                    destPtr = static_cast<void*>(static_cast<uchar*>(destPtr) + pvrSize);
                }
            }

            // Next mip
            if(width!=1) width /= 2;
            if(height!=1) height /= 2;
            if(depth!=1) depth /= 2;
		}

        DecodeResult ret;
		ret.first = output;
		ret.second = CodecDataPtr(imgData);

		return ret;
	}
Example #13
	//--------------------------------------------------------------------------
    void Texture::_loadImages( const ConstImagePtrList& images )
    {
		if(images.size() < 1)
			OGRE_EXCEPT(Exception::ERR_INVALIDPARAMS, "Cannot load empty vector of images",
			 "Texture::loadImages");
        
		// Set desired texture size and properties from images[0]
		mSrcWidth = mWidth = images[0]->getWidth();
		mSrcHeight = mHeight = images[0]->getHeight();
		mSrcDepth = mDepth = images[0]->getDepth();

        // Get source image format and adjust if required
        mSrcFormat = images[0]->getFormat();
        if (mTreatLuminanceAsAlpha && mSrcFormat == PF_L8)
        {
            mSrcFormat = PF_A8;
        }

        if (mDesiredFormat != PF_UNKNOWN)
        {
            // If we have a desired format, use it
            mFormat = mDesiredFormat;
        }
        else
        {
            // Otherwise derive the format from the desired bit depths
            mFormat = PixelUtil::getFormatForBitDepths(mSrcFormat, mDesiredIntegerBitDepth, mDesiredFloatBitDepth);
        }

		// The custom mipmaps in the image have priority over everything
        size_t imageMips = images[0]->getNumMipmaps();

		if(imageMips > 0) {
			mNumMipmaps = mNumRequestedMipmaps = images[0]->getNumMipmaps();
			// Disable flag for auto mip generation
			mUsage &= ~TU_AUTOMIPMAP;
		}
		
        // Create the texture
        createInternalResources();
		// Check if we're loading one image with multiple faces
		// or a vector of images representing the faces
		size_t faces;
		bool multiImage; // Load from multiple images?
		if(images.size() > 1)
		{
			faces = images.size();
			multiImage = true;
		}
		else
		{
			faces = images[0]->getNumFaces();
			multiImage = false;
		}
		
		// Check whether the number of faces in images exceeds the number of faces
		// in this texture. If so, clamp it.
		if(faces > getNumFaces())
			faces = getNumFaces();
		
        if (TextureManager::getSingleton().getVerbose()) {
            // Say what we're doing
            StringUtil::StrStreamType str;
            str << "Texture: " << mName << ": Loading " << faces << " faces"
                << "(" << PixelUtil::getFormatName(images[0]->getFormat()) << "," <<
                images[0]->getWidth() << "x" << images[0]->getHeight() << "x" << images[0]->getDepth() <<
                ") with ";
            if (!(mMipmapsHardwareGenerated && mNumMipmaps == 0))
                str << mNumMipmaps;
            if(mUsage & TU_AUTOMIPMAP)
            {
                if (mMipmapsHardwareGenerated)
                    str << " hardware";

                str << " generated mipmaps";
            }
            else
            {
                str << " custom mipmaps";
            }
            if(multiImage)
                str << " from multiple Images.";
            else
                str << " from Image.";
            // Scoped
            {
                // Print data about first destination surface
                HardwarePixelBufferSharedPtr buf = getBuffer(0, 0); 
                str << " Internal format is " << PixelUtil::getFormatName(buf->getFormat()) << 
                "," << buf->getWidth() << "x" << buf->getHeight() << "x" << buf->getDepth() << ".";
            }
            LogManager::getSingleton().logMessage( 
                    LML_NORMAL, str.str());
        }
		
		// Main loading loop
        // imageMips == 0 if the image has no custom mipmaps, otherwise contains the number of custom mips
        for(size_t mip = 0; mip<=imageMips; ++mip)
        {
            for(size_t i = 0; i < faces; ++i)
            {
                PixelBox src;
                if(multiImage)
                {
                    // Load from multiple images
                    src = images[i]->getPixelBox(0, mip);
                }
                else
                {
                    // Load from faces of images[0]
                    src = images[0]->getPixelBox(i, mip);
                }
    
                // Set to the adjusted source format in case it differs
                src.format = mSrcFormat;

                if(mGamma != 1.0f) {
                    // Apply gamma correction
                    // Do not overwrite original image but do gamma correction in temporary buffer
                    MemoryDataStreamPtr buf; // for scoped deletion of conversion buffer
                    buf.bind(OGRE_NEW MemoryDataStream(
                        PixelUtil::getMemorySize(
                            src.getWidth(), src.getHeight(), src.getDepth(), src.format)));
                    
                    PixelBox corrected = PixelBox(src.getWidth(), src.getHeight(), src.getDepth(), src.format, buf->getPtr());
                    PixelUtil::bulkPixelConversion(src, corrected);
                    
                    Image::applyGamma(static_cast<uint8*>(corrected.data), mGamma, corrected.getConsecutiveSize(), 
                        static_cast<uchar>(PixelUtil::getNumElemBits(src.format)));
    
                    // Destination: entire texture. blitFromMemory does the scaling to
                    // a power of two for us when needed
                    getBuffer(i, mip)->blitFromMemory(corrected);
                }
                else 
                {
                    // Destination: entire texture. blitFromMemory does the scaling to
                    // a power of two for us when needed
                    getBuffer(i, mip)->blitFromMemory(src);
                }
                
            }
        }
        // Update size (the final size, not including temp space)
        mSize = getNumFaces() * PixelUtil::getMemorySize(mWidth, mHeight, mDepth, mFormat);

    }
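In practice this function is normally reached through TextureManager rather than called directly. A hedged sketch of that route (texture and file names are placeholders):

    #include <OgreTextureManager.h>
    #include <OgreImage.h>
    #include <OgreResourceGroupManager.h>

    // Load an Image manually and hand it to the texture manager; loadImage()
    // ends up forwarding the image to Texture::_loadImages() internally.
    Ogre::TexturePtr createTextureFromImageSketch()
    {
        using namespace Ogre;

        Image img;
        img.load("diffuse.png", ResourceGroupManager::DEFAULT_RESOURCE_GROUP_NAME);

        return TextureManager::getSingleton().loadImage(
            "MyDiffuseTex",
            ResourceGroupManager::DEFAULT_RESOURCE_GROUP_NAME,
            img,
            TEX_TYPE_2D);
    }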
Example #14
    //---------------------------------------------------------------------
    Codec::DecodeResult FreeImageCodec::decode(DataStreamPtr& input) const
    {
		// Set error handler
		FreeImage_SetOutputMessage(FreeImageLoadErrorHandler);

		// Buffer stream into memory (TODO: override IO functions instead?)
		MemoryDataStream memStream(input, true);

		FIMEMORY* fiMem = 
			FreeImage_OpenMemory(memStream.getPtr(), static_cast<DWORD>(memStream.size()));

		FIBITMAP* fiBitmap = FreeImage_LoadFromMemory(
			(FREE_IMAGE_FORMAT)mFreeImageType, fiMem);
		if (!fiBitmap)
		{
			OGRE_EXCEPT(Exception::ERR_INTERNAL_ERROR, 
				"Error decoding image", 
				"FreeImageCodec::decode");
		}


		ImageData* imgData = OGRE_NEW ImageData();
		MemoryDataStreamPtr output;

		imgData->depth = 1; // only 2D formats handled by this codec
		imgData->width = FreeImage_GetWidth(fiBitmap);
		imgData->height = FreeImage_GetHeight(fiBitmap);
        imgData->num_mipmaps = 0; // no mipmaps in non-DDS 
        imgData->flags = 0;

		// Must derive format first, this may perform conversions
		
		FREE_IMAGE_TYPE imageType = FreeImage_GetImageType(fiBitmap);
		FREE_IMAGE_COLOR_TYPE colourType = FreeImage_GetColorType(fiBitmap);
		unsigned bpp = FreeImage_GetBPP(fiBitmap);

		switch(imageType)
		{
		case FIT_UNKNOWN:
		case FIT_COMPLEX:
		case FIT_UINT32:
		case FIT_INT32:
		case FIT_DOUBLE:
        default:
			OGRE_EXCEPT(Exception::ERR_ITEM_NOT_FOUND, 
				"Unknown or unsupported image format", 
				"FreeImageCodec::decode");
				
			break;
		case FIT_BITMAP:
			// Standard image type
			// Perform any colour conversions for greyscale
			if (colourType == FIC_MINISWHITE || colourType == FIC_MINISBLACK)
			{
				FIBITMAP* newBitmap = FreeImage_ConvertToGreyscale(fiBitmap);
				// free old bitmap and replace
				FreeImage_Unload(fiBitmap);
				fiBitmap = newBitmap;
				// get new formats
				bpp = FreeImage_GetBPP(fiBitmap);
				colourType = FreeImage_GetColorType(fiBitmap);
			}
			// Perform any colour conversions for RGB
			else if (bpp < 8 || colourType == FIC_PALETTE || colourType == FIC_CMYK)
			{
				FIBITMAP* newBitmap =  NULL;	
				if (FreeImage_IsTransparent(fiBitmap))
				{
					// convert to 32 bit to preserve the transparency 
					// (the alpha byte will be 0 if pixel is transparent)
					newBitmap = FreeImage_ConvertTo32Bits(fiBitmap);
				}
				else
				{
					// no transparency - only 3 bytes are needed
					newBitmap = FreeImage_ConvertTo24Bits(fiBitmap);
				}

				// free old bitmap and replace
				FreeImage_Unload(fiBitmap);
				fiBitmap = newBitmap;
				// get new formats
				bpp = FreeImage_GetBPP(fiBitmap);
				colourType = FreeImage_GetColorType(fiBitmap);
			}

			// by this stage, 8-bit is greyscale, 16/24/32 bit are RGB[A]
			switch(bpp)
			{
			case 8:
				imgData->format = PF_L8;
				break;
			case 16:
				// Determine 555 or 565 from green mask
				// cannot be 16-bit greyscale since that's FIT_UINT16
				if(FreeImage_GetGreenMask(fiBitmap) == FI16_565_GREEN_MASK)
				{
					imgData->format = PF_R5G6B5;
				}
				else
				{
					// FreeImage doesn't support 4444 format so must be 1555
					imgData->format = PF_A1R5G5B5;
				}
				break;
			case 24:
				// FreeImage differs per platform
				//     PF_BYTE_BGR[A] for little endian (== PF_ARGB native)
				//     PF_BYTE_RGB[A] for big endian (== PF_RGBA native)
#if FREEIMAGE_COLORORDER == FREEIMAGE_COLORORDER_RGB
				imgData->format = PF_BYTE_RGB;
#else
				imgData->format = PF_BYTE_BGR;
#endif
				break;
			case 32:
#if FREEIMAGE_COLORORDER == FREEIMAGE_COLORORDER_RGB
				imgData->format = PF_BYTE_RGBA;
#else
				imgData->format = PF_BYTE_BGRA;
#endif
				break;
				
				
			};
			break;
		case FIT_UINT16:
		case FIT_INT16:
			// 16-bit greyscale
			imgData->format = PF_L16;
			break;
		case FIT_FLOAT:
			// Single-component floating point data
			imgData->format = PF_FLOAT32_R;
			break;
		case FIT_RGB16:
			imgData->format = PF_SHORT_RGB;
			break;
		case FIT_RGBA16:
			imgData->format = PF_SHORT_RGBA;
			break;
		case FIT_RGBF:
			imgData->format = PF_FLOAT32_RGB;
			break;
		case FIT_RGBAF:
			imgData->format = PF_FLOAT32_RGBA;
			break;
			
			
		};

		unsigned char* srcData = FreeImage_GetBits(fiBitmap);
		unsigned srcPitch = FreeImage_GetPitch(fiBitmap);

		// Final data - invert image and trim pitch at the same time
		size_t dstPitch = imgData->width * PixelUtil::getNumElemBytes(imgData->format);
		imgData->size = dstPitch * imgData->height;
        // Bind output buffer
        output.bind(OGRE_NEW MemoryDataStream(imgData->size));

		uchar* pSrc;
		uchar* pDst = output->getPtr();
		for (size_t y = 0; y < imgData->height; ++y)
		{
			pSrc = srcData + (imgData->height - y - 1) * srcPitch;
			memcpy(pDst, pSrc, dstPitch);
			pDst += dstPitch;
		}

		
		FreeImage_Unload(fiBitmap);
		FreeImage_CloseMemory(fiMem);


        DecodeResult ret;
        ret.first = output;
        ret.second = CodecDataPtr(imgData);
		return ret;

    }
Example #15
    //---------------------------------------------------------------------
	bool ETCCodec::decodePKM(DataStreamPtr& stream, DecodeResult& result) const
	{
        PKMHeader header;

        // Read the PKM header
        stream->read(&header, sizeof(PKMHeader));

        if (PKM_MAGIC != FOURCC(header.name[0], header.name[1], header.name[2], header.name[3]) ) // "PKM "
            return false;

        uint16 width = (header.iWidthMSB << 8) | header.iWidthLSB;
        uint16 height = (header.iHeightMSB << 8) | header.iHeightLSB;
        uint16 paddedWidth = (header.iPaddedWidthMSB << 8) | header.iPaddedWidthLSB;
        uint16 paddedHeight = (header.iPaddedHeightMSB << 8) | header.iPaddedHeightLSB;
        uint16 type = (header.iTextureTypeMSB << 8) | header.iTextureTypeLSB;

        ImageData *imgData = OGRE_NEW ImageData();
        imgData->depth = 1;
        imgData->width = width;
        imgData->height = height;

        // File version 2.0 supports ETC2 in addition to ETC1
        if(header.version[0] == '2' && header.version[1] == '0')
        {
            switch (type) {
                case 0:
                    imgData->format = PF_ETC1_RGB8;
                    break;

                    // GL_COMPRESSED_RGB8_ETC2
                case 1:
                    imgData->format = PF_ETC2_RGB8;
                    break;

                    // GL_COMPRESSED_RGBA8_ETC2_EAC
                case 3:
                    imgData->format = PF_ETC2_RGBA8;
                    break;

                    // GL_COMPRESSED_RGB8_PUNCHTHROUGH_ALPHA1_ETC2
                case 4:
                    imgData->format = PF_ETC2_RGB8A1;
                    break;

                    // Default case is ETC1
                default:
                    imgData->format = PF_ETC1_RGB8;
                    break;
            }
        }
        else
            imgData->format = PF_ETC1_RGB8;

        // ETC has no support for mipmaps - malideveloper.com has an example
        // where mipmap levels are loaded from separate external files
        imgData->num_mipmaps = 0;

        // ETC is a compressed format
        imgData->flags |= IF_COMPRESSED;

        // Calculate total size from number of mipmaps, faces and size
		imgData->size = (paddedWidth * paddedHeight) >> 1;

		// Bind output buffer
		MemoryDataStreamPtr output;
		output.bind(OGRE_NEW MemoryDataStream(imgData->size));

		// Now deal with the data
		void *destPtr = output->getPtr();
        stream->read(destPtr, imgData->size);
        destPtr = static_cast<void*>(static_cast<uchar*>(destPtr));
        
		// Fill in the caller's result (this function only returns success/failure)
		result.first = output;
		result.second = CodecDataPtr(imgData);

        return true;
    }
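For orientation, the PKM container parsed above is a fixed 16-byte header followed by the ETC payload; all multi-byte fields are stored big-endian, which is why the code reassembles them from MSB/LSB byte pairs. The struct below is a sketch of that layout (field names mirror the code above), not a verified copy of OGRE's declaration.

    #include <cstdint>

    // 16-byte PKM header as written by Ericsson's etcpack tools.
    struct PKMHeaderSketch
    {
        char    name[4];                            // "PKM "
        char    version[2];                         // "10" (ETC1) or "20" (ETC2)
        uint8_t iTextureTypeMSB, iTextureTypeLSB;   // 0 = ETC1_RGB_NO_MIPMAPS, ...
        uint8_t iPaddedWidthMSB, iPaddedWidthLSB;   // width rounded up to a multiple of 4
        uint8_t iPaddedHeightMSB, iPaddedHeightLSB; // height rounded up to a multiple of 4
        uint8_t iWidthMSB, iWidthLSB;               // original width
        uint8_t iHeightMSB, iHeightLSB;             // original height
    };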
Example #16
    //---------------------------------------------------------------------
	bool ETCCodec::decodeKTX(DataStreamPtr& stream, DecodeResult& result) const
	{
        KTXHeader header;
        // Read the KTX header
        stream->read(&header, sizeof(KTXHeader));

		const uint8 KTXFileIdentifier[12] = { 0xAB, 0x4B, 0x54, 0x58, 0x20, 0x31, 0x31, 0xBB, 0x0D, 0x0A, 0x1A, 0x0A };
		if (memcmp(KTXFileIdentifier, &header.identifier, sizeof(KTXFileIdentifier)) != 0 )
			return false;

		if (header.endianness == KTX_ENDIAN_REF_REV)
			flipEndian(&header.glType, sizeof(uint32), 1);

        ImageData *imgData = OGRE_NEW ImageData();
        imgData->depth = 1;
        imgData->width = header.pixelWidth;
        imgData->height = header.pixelHeight;
		imgData->num_mipmaps = static_cast<ushort>(header.numberOfMipmapLevels - 1);

		switch(header.glInternalFormat)
		{
		case 37492: // GL_COMPRESSED_RGB8_ETC2
            imgData->format = PF_ETC2_RGB8;
			break;
        case 37496:// GL_COMPRESSED_RGBA8_ETC2_EAC
			imgData->format = PF_ETC2_RGBA8;
            break;
		case 37494: // GL_COMPRESSED_RGB8_PUNCHTHROUGH_ALPHA1_ETC2
            imgData->format = PF_ETC2_RGB8A1;
			break;
		case 35986: // ATC_RGB
			imgData->format = PF_ATC_RGB;
			break;
		case 35987: // ATC_RGB_Explicit
			imgData->format = PF_ATC_RGBA_EXPLICIT_ALPHA;
			break;
		case 34798: // ATC_RGB_Interpolated
			imgData->format = PF_ATC_RGBA_INTERPOLATED_ALPHA;
			break;
		case 33777: // DXT 1
	        imgData->format = PF_DXT1;
			break;
		case 33778: // DXT 3
	        imgData->format = PF_DXT3;
			break;
		case 33779: // DXT 5
	        imgData->format = PF_DXT5;
			break;
		default:		
	        imgData->format = PF_ETC1_RGB8;
			break;
		}
		
		imgData->flags = 0;
		if (header.glType == 0 || header.glFormat == 0)
	        imgData->flags |= IF_COMPRESSED;

		size_t numFaces = 1; // Assume one face until we know otherwise

		// Calculate total size from number of mipmaps, faces and size
		imgData->size = Image::calculateSize(imgData->num_mipmaps, numFaces,
                                             imgData->width, imgData->height, imgData->depth, imgData->format);

		stream->skip(header.bytesOfKeyValueData);

		// Bind output buffer
		MemoryDataStreamPtr output;
		output.bind(OGRE_NEW MemoryDataStream(imgData->size));

		// Now deal with the data
		uchar* destPtr = output->getPtr();
		for (uint32 level = 0; level < header.numberOfMipmapLevels; ++level)
		{
			uint32 imageSize = 0;
			stream->read(&imageSize, sizeof(uint32));
			stream->read(destPtr, imageSize);
			destPtr += imageSize;
		}

		result.first = output;
		result.second = CodecDataPtr(imgData);
        
		return true;
	}
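For reference, the KTX header read here follows the Khronos KTX 1 specification; the sketch below lists the fields the loader touches (identifier, endianness, glType/glFormat, glInternalFormat, dimensions, mipmap count and the key/value block that gets skipped).

    #include <cstdint>

    // KTX 1 file header (64 bytes), per the Khronos KTX 1 specification.
    struct KTXHeaderSketch
    {
        uint8_t  identifier[12];       // 0xAB "KTX 11" 0xBB "\r\n\x1A\n"
        uint32_t endianness;           // 0x04030201, byte-swapped if the writer was big-endian
        uint32_t glType;               // 0 for compressed formats
        uint32_t glTypeSize;
        uint32_t glFormat;             // 0 for compressed formats
        uint32_t glInternalFormat;     // e.g. 37492 = GL_COMPRESSED_RGB8_ETC2
        uint32_t glBaseInternalFormat;
        uint32_t pixelWidth;
        uint32_t pixelHeight;
        uint32_t pixelDepth;
        uint32_t numberOfArrayElements;
        uint32_t numberOfFaces;
        uint32_t numberOfMipmapLevels;
        uint32_t bytesOfKeyValueData;  // skipped before reading the mip levels
    };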
Example #17
Ogre::Codec::DecodeResult MTGACodec::decode(Ogre::DataStreamPtr& stream) const
{
	// Read 4 character code
	mtga_header_s hdr;
	//uint32 fileType;
	stream->read(&hdr, sizeof(hdr));
	if (LodResource_Bitmap != hdr.magic)
	{
		OGRE_EXCEPT(Exception::ERR_INVALIDPARAMS, 
			"This is not a MTGA file!", "MTGACodec::decode");
	}
	
	mtga_pal_s pal[256];
	assert(stream->size() == sizeof(hdr)+hdr.uncompressedSize + sizeof(pal));

	stream->seek(sizeof(mtga_header_s)+hdr.uncompressedSize);
	stream->read(pal,sizeof(pal));

	bool isTransparent = false;
	mtga_pal_s& clr = pal[0];
	if( (clr.r == 0 && clr.g >250 && clr.b > 250) || (clr.r > 250 && clr.g ==0 && clr.b > 250))
		isTransparent = true;


	Ogre::ImageCodec::ImageData* imgData = OGRE_NEW Ogre::ImageCodec::ImageData();
	imgData->format = PF_BYTE_BGRA;
	imgData->width = hdr.width;
	imgData->height = hdr.height;
	imgData->num_mipmaps = 3;
	imgData->depth = 1;

	imgData->size = Image::calculateSize(imgData->num_mipmaps, 1, 
			imgData->width, imgData->height, imgData->depth, imgData->format);
	
	Ogre::MemoryDataStreamPtr pixelData;
	
	pixelData.bind(OGRE_NEW MemoryDataStream(imgData->size));

	// Now deal with the data
	unsigned char* destPtr = pixelData->getPtr();
	stream->seek(sizeof(mtga_header_s));
	size_t width = hdr.width;
	size_t height = hdr.height;
	for(size_t mip = 0; mip <= imgData->num_mipmaps; ++mip)
	{
			for (size_t y = 0; y < height; y ++)
			{
				for (size_t x = 0; x < width; x ++)
				{
					unsigned char idx;
					stream->read(&idx,1);
					mtga_pal_s& clr = pal[idx];
					assert(destPtr-pixelData->getPtr() < imgData->size);
					
					*destPtr++ = clr.b;
					*destPtr++ = clr.g;
					*destPtr++ = clr.r;
					*destPtr++ = (idx == 0 && isTransparent)?0:255;
				}
			}
			width /=2;
			height /=2;

	}
	


	DecodeResult ret;
	ret.first = pixelData;
	ret.second = CodecDataPtr(imgData);
	return ret;
}
Example #18
 //---------------------------------------------------------------------
 GpuProgramManager::Microcode GpuProgramManager::createMicrocode( const uint32 size ) const
 {   
     return Microcode(OGRE_NEW MemoryDataStream(size));  
 }
Example #19
    //-----------------------------------------------------------------------
    void Image::scale(const PixelBox &src, const PixelBox &scaled, Filter filter) 
    {
        assert(PixelUtil::isAccessible(src.format));
        assert(PixelUtil::isAccessible(scaled.format));
        MemoryDataStreamPtr buf; // For auto-delete
        PixelBox temp;
        switch (filter) 
        {
        default:
        case FILTER_NEAREST:
            if(src.format == scaled.format) 
            {
                // No intermediate buffer needed
                temp = scaled;
            }
            else
            {
                // Allocate temporary buffer of destination size in source format 
                temp = PixelBox(scaled.getWidth(), scaled.getHeight(), scaled.getDepth(), src.format);
                buf.bind(OGRE_NEW MemoryDataStream(temp.getConsecutiveSize()));
                temp.data = buf->getPtr();
            }
            // super-optimized: no conversion
            switch (PixelUtil::getNumElemBytes(src.format)) 
            {
            case 1: NearestResampler<1>::scale(src, temp); break;
            case 2: NearestResampler<2>::scale(src, temp); break;
            case 3: NearestResampler<3>::scale(src, temp); break;
            case 4: NearestResampler<4>::scale(src, temp); break;
            case 6: NearestResampler<6>::scale(src, temp); break;
            case 8: NearestResampler<8>::scale(src, temp); break;
            case 12: NearestResampler<12>::scale(src, temp); break;
            case 16: NearestResampler<16>::scale(src, temp); break;
            default:
                // never reached
                assert(false);
            }
            if(temp.data != scaled.data)
            {
                // Blit temp buffer
                PixelUtil::bulkPixelConversion(temp, scaled);
            }
            break;

        case FILTER_LINEAR:
        case FILTER_BILINEAR:
            switch (src.format) 
            {
            case PF_L8: case PF_A8: case PF_BYTE_LA:
            case PF_R8G8B8: case PF_B8G8R8:
            case PF_R8G8B8A8: case PF_B8G8R8A8:
            case PF_A8B8G8R8: case PF_A8R8G8B8:
            case PF_X8B8G8R8: case PF_X8R8G8B8:
                if(src.format == scaled.format) 
                {
                    // No intermediate buffer needed
                    temp = scaled;
                }
                else
                {
                    // Allocate temp buffer of destination size in source format 
                    temp = PixelBox(scaled.getWidth(), scaled.getHeight(), scaled.getDepth(), src.format);
                    buf.bind(OGRE_NEW MemoryDataStream(temp.getConsecutiveSize()));
                    temp.data = buf->getPtr();
                }
                // super-optimized: byte-oriented math, no conversion
                switch (PixelUtil::getNumElemBytes(src.format)) 
                {
                case 1: LinearResampler_Byte<1>::scale(src, temp); break;
                case 2: LinearResampler_Byte<2>::scale(src, temp); break;
                case 3: LinearResampler_Byte<3>::scale(src, temp); break;
                case 4: LinearResampler_Byte<4>::scale(src, temp); break;
                default:
                    // never reached
                    assert(false);
                }
                if(temp.data != scaled.data)
                {
                    // Blit temp buffer
                    PixelUtil::bulkPixelConversion(temp, scaled);
                }
                break;
            case PF_FLOAT32_RGB:
            case PF_FLOAT32_RGBA:
                if (scaled.format == PF_FLOAT32_RGB || scaled.format == PF_FLOAT32_RGBA)
                {
                    // float32 to float32, avoid unpack/repack overhead
                    LinearResampler_Float32::scale(src, scaled);
                    break;
                }
                // else, fall through
            default:
                // non-optimized: floating-point math, performs conversion but always works
                LinearResampler::scale(src, scaled);
            }
            break;
        }
    }
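A short usage sketch tying this back to MemoryDataStream: allocate the destination buffer through a MemoryDataStreamPtr (for scoped deletion, as in the snippets above) and scale into it. The file name and target size are placeholders.

    #include <OgreImage.h>
    #include <OgreDataStream.h>
    #include <OgrePixelFormat.h>
    #include <OgreResourceGroupManager.h>

    // Downscale a loaded image to half size with bilinear filtering.
    void halveImageSketch()
    {
        using namespace Ogre;

        Image src;
        src.load("terrain_texture.jpg", ResourceGroupManager::DEFAULT_RESOURCE_GROUP_NAME);

        size_t dstWidth  = src.getWidth()  / 2;
        size_t dstHeight = src.getHeight() / 2;

        // Destination buffer in the source format; freed when the stream goes out of scope.
        MemoryDataStreamPtr buf(OGRE_NEW MemoryDataStream(
            PixelUtil::getMemorySize(dstWidth, dstHeight, 1, src.getFormat())));

        PixelBox scaled(dstWidth, dstHeight, 1, src.getFormat(), buf->getPtr());
        Image::scale(src.getPixelBox(), scaled, Image::FILTER_BILINEAR);
    }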
Example #20
    //-----------------------------------------------------------------------------  
    // blitFromMemory doing hardware trilinear scaling
    void GLES2TextureBuffer::blitFromMemory(const PixelBox &src_orig, const Image::Box &dstBox)
    {
        // Fall back to normal GLHardwarePixelBuffer::blitFromMemory in case 
        // - FBO is not supported
        // - Either source or target is luminance due doesn't looks like supported by hardware
        // - the source dimensions match the destination ones, in which case no scaling is needed
        // TODO: Check that extension is NOT available
        if(PixelUtil::isLuminance(src_orig.format) ||
           PixelUtil::isLuminance(mFormat) ||
           (src_orig.getWidth() == dstBox.getWidth() &&
            src_orig.getHeight() == dstBox.getHeight() &&
            src_orig.getDepth() == dstBox.getDepth()))
        {
            GLES2HardwarePixelBuffer::blitFromMemory(src_orig, dstBox);
            return;
        }
        if(!mBuffer.contains(dstBox))
            OGRE_EXCEPT(Exception::ERR_INVALIDPARAMS, "Destination box out of range",
                        "GLES2TextureBuffer::blitFromMemory");
        // For scoped deletion of conversion buffer
        MemoryDataStreamPtr buf;
        PixelBox src;
        
        // First, convert the srcbox to an OpenGL-compatible pixel format
        if(GLES2PixelUtil::getGLOriginFormat(src_orig.format) == 0)
        {
            // Convert to buffer internal format
            buf.bind(OGRE_NEW MemoryDataStream(PixelUtil::getMemorySize(src_orig.getWidth(), src_orig.getHeight(),
                                                                        src_orig.getDepth(), mFormat)));
            src = PixelBox(src_orig.getWidth(), src_orig.getHeight(), src_orig.getDepth(), mFormat, buf->getPtr());
            PixelUtil::bulkPixelConversion(src_orig, src);
        }
        else
        {
            // No conversion needed
            src = src_orig;
        }
        
        // Create temporary texture to store source data
        GLuint id;
        GLenum target =
#if OGRE_NO_GLES3_SUPPORT == 0
        (src.getDepth() != 1) ? GL_TEXTURE_3D :
#endif
            GL_TEXTURE_2D;

        GLsizei width = GLES2PixelUtil::optionalPO2(src.getWidth());
        GLsizei height = GLES2PixelUtil::optionalPO2(src.getHeight());
        GLenum format = GLES2PixelUtil::getClosestGLInternalFormat(src.format);
        GLenum datatype = GLES2PixelUtil::getGLOriginDataType(src.format);

        // Generate texture name
        OGRE_CHECK_GL_ERROR(glGenTextures(1, &id));
        
        // Set texture type
        OGRE_CHECK_GL_ERROR(glBindTexture(target, id));

#if GL_APPLE_texture_max_level && OGRE_PLATFORM != OGRE_PLATFORM_NACL
        OGRE_CHECK_GL_ERROR(glTexParameteri(target, GL_TEXTURE_MAX_LEVEL_APPLE, 1000 ));
#elif OGRE_NO_GLES3_SUPPORT == 0
        OGRE_CHECK_GL_ERROR(glTexParameteri(target, GL_TEXTURE_MAX_LEVEL, 1000 ));
#endif

        // Allocate texture memory
#if OGRE_NO_GLES3_SUPPORT == 0
        if(target == GL_TEXTURE_3D || target == GL_TEXTURE_2D_ARRAY)
            glTexImage3D(target, 0, format, src.getWidth(), src.getHeight(), src.getDepth(), 0, format, datatype, 0);
        else
#endif
            OGRE_CHECK_GL_ERROR(glTexImage2D(target, 0, format, width, height, 0, format, datatype, 0));

        // GL texture buffer
        GLES2TextureBuffer tex(StringUtil::BLANK, target, id, width, height, format, src.format,
                              0, 0, (Usage)(TU_AUTOMIPMAP|HBU_STATIC_WRITE_ONLY), false, false, 0);
        
        // Upload data to 0,0,0 in temporary texture
        Image::Box tempTarget(0, 0, 0, src.getWidth(), src.getHeight(), src.getDepth());
        tex.upload(src, tempTarget);
        
        // Blit
        blitFromTexture(&tex, tempTarget, dstBox);
        
        // Delete temp texture
        OGRE_CHECK_GL_ERROR(glDeleteTextures(1, &id));
    }