Example #1
//-----------------------------------------------------------------------------
// Generate spheremap based on the current images (only works for cubemaps)
// The look dir indicates the direction of the center of the sphere
//-----------------------------------------------------------------------------
void CVTFTexture::GenerateSpheremap( LookDir_t lookDir )
{
	if (!IsCubeMap())
		return;

	Assert( m_Format == IMAGE_FORMAT_RGBA8888 );

	// We'll be doing our work in IMAGE_FORMAT_RGBA8888 mode 'cause it's easier
	unsigned char *pCubeMaps[6];

	// Allocate the bits for the spheremap
	int iMemRequired = ImageLoader::GetMemRequired( m_nWidth, m_nHeight, IMAGE_FORMAT_RGBA8888, false );
	unsigned char *pSphereMapBits = (unsigned char *)MemAllocScratch(iMemRequired);

	// Generate a spheremap for each frame of the cubemap
	for (int iFrame = 0; iFrame < m_nFrameCount; ++iFrame)
	{
		// Point to our own textures (highest mip level)
		for (int iFace = 0; iFace < 6; ++iFace)
		{
			pCubeMaps[iFace] = ImageData( iFrame, iFace, 0 );
		}

		// Compute the spheremap of the top LOD
		ComputeSpheremapFrame( pCubeMaps, pSphereMapBits, lookDir );

		// Compute the mip levels of the spheremap, converting from RGBA8888 to our format
		unsigned char *pFinalSphereMapBits = ImageData( iFrame, CUBEMAP_FACE_SPHEREMAP, 0 );
		ImageLoader::GenerateMipmapLevels( pSphereMapBits, pFinalSphereMapBits, 
			m_nWidth, m_nHeight, m_Format, 2.2, 2.2 );
	}

	// Free memory
	MemFreeScratch();
}
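ComputeSpheremapFrame is not shown in this listing; the core of any spheremap generator is mapping each output texel to the reflection direction it represents and then sampling the six cube faces along that direction. Below is a minimal, self-contained sketch of the texel-to-direction step, assuming the classic GL_SPHERE_MAP parameterization with the viewer looking down -Z; Valve's actual code may use different conventions and handles the look direction and filtering itself.

// Sketch only: maps a spheremap texel to the reflection direction it stores,
// assuming the standard sphere-map parameterization (not Valve's implementation).
#include <cmath>

struct Vec3 { float x, y, z; };

// u, v in [0,1]; returns false for texels outside the inscribed unit circle.
bool SpheremapTexelToDirection( float u, float v, Vec3 &dir )
{
	float a = 2.0f * u - 1.0f;          // sphere-space x in [-1,1]
	float b = 2.0f * v - 1.0f;          // sphere-space y in [-1,1]
	float d2 = a * a + b * b;
	if ( d2 > 1.0f )
		return false;                   // outside the sphere silhouette

	// Normal of a unit sphere seen orthographically along -Z
	float nz = sqrtf( 1.0f - d2 );

	// Reflect the view direction (0,0,-1) about the normal (a, b, nz)
	dir.x = 2.0f * a * nz;
	dir.y = 2.0f * b * nz;
	dir.z = 2.0f * nz * nz - 1.0f;
	return true;
}

Each returned direction would then pick a cube face (largest-magnitude component) and a bilinear sample from the corresponding entry in pCubeMaps.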
Example #2
    ImageData NativeTextureGLES::lock(lock_flags flags, const Rect* src)
    {
        assert(_lockFlags == 0);


        _lockFlags = flags;
        Rect r(0, 0, _width, _height);

        if (src)
            r = *src;

        OX_ASSERT(r.getX() + r.getWidth() <= _width);
        OX_ASSERT(r.getY() + r.getHeight() <= _height);

        _lockRect = r;

        assert(_lockFlags != 0);

        if (_lockRect.isEmpty())
        {
            OX_ASSERT(!"_lockRect.IsEmpty()");
            return ImageData();
        }

        if (_data.empty())
        {
            //_data.resize(_width)
        }

        ImageData im =  ImageData(_width, _height, (int)(_data.size() / _height), _format, &_data.front());
        return im.getRect(_lockRect);
    }
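A hypothetical caller-side sketch of the lock/unlock pair (the matching unlock() appears in a later example in this listing). The lock_write flag and the ImageData members w, h, pitch, and data are inferred from the other snippets here, and the 4-bytes-per-pixel assumption is illustrative only.

// Sketch: update a sub-region of a NativeTextureGLES through lock()/unlock().
#include <cstring>

void fillSubRect( NativeTextureGLES &tex )
{
    Rect region( 8, 8, 16, 16 );                     // sub-rectangle to update
    ImageData im = tex.lock( lock_write, &region );  // records _lockRect internally

    for ( int y = 0; y < im.h; ++y )
    {
        unsigned char *row = (unsigned char *)im.data + y * im.pitch;
        memset( row, 0xFF, im.w * 4 );               // assumes a 4-byte pixel format
    }

    tex.unlock();                                    // uploads _lockRect via glTexSubImage2D
}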
Example #3
//-----------------------------------------------------------------------------
// Generates mipmaps from the base mip levels
//-----------------------------------------------------------------------------
void CVTFTexture::GenerateMipmaps()
{
	Assert( m_Format == IMAGE_FORMAT_RGBA8888 );

	// FIXME: Should we be doing anything special for normalmaps
	// other than a final normalization pass?
	// FIXME: I don't think that we ever normalize the top mip level!!!!!!
	bool bNormalMap = ( Flags() & ( TEXTUREFLAGS_NORMAL | TEXTUREFLAGS_NORMALTODUDV ) ) != 0;
	for (int iMipLevel = 1; iMipLevel < m_nMipCount; ++iMipLevel)
	{
		int nMipWidth, nMipHeight;
		ComputeMipLevelDimensions( iMipLevel, &nMipWidth, &nMipHeight );
		float mipColorScale = 1.0f;
		if( Flags() & TEXTUREFLAGS_PREMULTCOLORBYONEOVERMIPLEVEL )
		{
			mipColorScale = 1.0f / ( float )( 1 << iMipLevel );
		}

		for (int iFrame = 0; iFrame < m_nFrameCount; ++iFrame)
		{
			for (int iFace = 0; iFace < m_nFaceCount; ++iFace)
			{
				unsigned char *pSrcLevel = ImageData( iFrame, iFace, 0 );
				unsigned char *pDstLevel = ImageData( iFrame, iFace, iMipLevel );
				ImageLoader::ResampleRGBA8888( pSrcLevel, pDstLevel, m_nWidth, m_nHeight,
					nMipWidth, nMipHeight, 2.2, 2.2, mipColorScale, bNormalMap );

				if( m_nFlags & TEXTUREFLAGS_NORMAL )
				{
					ImageLoader::NormalizeNormalMapRGBA8888( pDstLevel, nMipWidth * nMipHeight );
				}
			}
		}
	}
}
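ComputeMipLevelDimensions is not shown in this listing; the conventional implementation simply halves the base dimensions per level and clamps at 1, along these lines (a sketch, not the actual Valve code):

// Sketch of the usual mip-dimension rule: shift right per level, clamp to 1.
void ComputeMipLevelDimensionsSketch( int nBaseWidth, int nBaseHeight,
                                      int iMipLevel, int *pWidth, int *pHeight )
{
	*pWidth  = nBaseWidth  >> iMipLevel;
	*pHeight = nBaseHeight >> iMipLevel;
	if ( *pWidth  < 1 ) *pWidth  = 1;
	if ( *pHeight < 1 ) *pHeight = 1;
}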
Example #4
int test_perlin()
{
	std::size_t const Size = 256;

	{
		std::vector<glm::byte> ImageData(Size * Size * 3);
		
		for(std::size_t y = 0; y < Size; ++y)
		for(std::size_t x = 0; x < Size; ++x)
		{
			ImageData[(x + y * Size) * 3 + 0] = glm::byte(glm::perlin(glm::vec2(x / 64.f, y / 64.f)) * 128.f + 127.f);
			ImageData[(x + y * Size) * 3 + 1] = ImageData[(x + y * Size) * 3 + 0];
			ImageData[(x + y * Size) * 3 + 2] = ImageData[(x + y * Size) * 3 + 0];
		}

		gli::texture2D Texture(1);
		Texture[0] = gli::image2D(glm::uvec2(Size), gli::RGB8U);
		memcpy(Texture[0].data(), &ImageData[0], ImageData.size());
		gli::saveDDS9(Texture, "texture_perlin2d_256.dds");
	}

	{
		std::vector<glm::byte> ImageData(Size * Size * 3);
		
		for(std::size_t y = 0; y < Size; ++y)
		for(std::size_t x = 0; x < Size; ++x)
		{
			ImageData[(x + y * Size) * 3 + 0] = glm::byte(glm::perlin(glm::vec3(x / 64.f, y / 64.f, 0.5f)) * 128.f + 127.f);
			ImageData[(x + y * Size) * 3 + 1] = ImageData[(x + y * Size) * 3 + 0];
			ImageData[(x + y * Size) * 3 + 2] = ImageData[(x + y * Size) * 3 + 0];
		}

		gli::texture2D Texture(1);
		Texture[0] = gli::image2D(glm::uvec2(Size), gli::RGB8U);
		memcpy(Texture[0].data(), &ImageData[0], ImageData.size());
		gli::saveDDS9(Texture, "texture_perlin3d_256.dds");
	}
	
	{
		std::vector<glm::byte> ImageData(Size * Size * 3);
		
		for(std::size_t y = 0; y < Size; ++y)
		for(std::size_t x = 0; x < Size; ++x)
		{
			ImageData[(x + y * Size) * 3 + 0] = glm::byte(glm::perlin(glm::vec4(x / 64.f, y / 64.f, 0.5f, 0.5f)) * 128.f + 127.f);
			ImageData[(x + y * Size) * 3 + 1] = ImageData[(x + y * Size) * 3 + 0];
			ImageData[(x + y * Size) * 3 + 2] = ImageData[(x + y * Size) * 3 + 0];
		}

		gli::texture2D Texture(1);
		Texture[0] = gli::image2D(glm::uvec2(Size), gli::RGB8U);
		memcpy(Texture[0].data(), &ImageData[0], ImageData.size());
		gli::saveDDS9(Texture, "texture_perlin4d_256.dds");
	}

	return 0;
}
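The `* 128.f + 127.f` above maps glm::perlin's roughly [-1, 1] output into a byte; a clamped variant of the same arithmetic (sketch) avoids unsigned wrap-around if a sample falls slightly outside that range:

#include <algorithm>

// Sketch: clamped [-1, 1] -> [0, 255] mapping for noise samples.
inline unsigned char noiseToByte( float n )            // n expected in [-1, 1]
{
    float v = n * 128.0f + 127.0f;                     // [-1, 1] -> [-1, 255]
    return (unsigned char)std::min( 255.0f, std::max( 0.0f, v ) );
}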
Example #5
/** Calls TWAIN to actually get the image */
bool KSaneWidgetPrivate::GetImage(TW_IMAGEINFO& info)
{
    TW_MEMREF pdata;
    CallTwainProc(&m_AppId, &m_Source, DG_IMAGE, DAT_IMAGENATIVEXFER, MSG_GET, &pdata);

    switch(m_returnCode)
    {
        case TWRC_XFERDONE:
            //qDebug()<< "GetImage:TWRC_XFERDONE";
            ImageData(pdata, info);
            break;

        case TWRC_CANCEL:
            //qDebug()<< "GetImage:TWRC_CANCEL";
            break;

        case TWRC_FAILURE:
            //qDebug()<< "GetImage:TWRC_FAILURE";
            CancelTransfer();
            return false;
            break;
    }

    GlobalFree(pdata);
    return EndTransfer();
}
Example #6
void CVTFTexture::PutOneOverMipLevelInAlpha()
{
	Assert( m_Format == IMAGE_FORMAT_RGBA8888 );

	for (int iMipLevel = 0; iMipLevel < m_nMipCount; ++iMipLevel)
	{
		int nMipWidth, nMipHeight;
		ComputeMipLevelDimensions( iMipLevel, &nMipWidth, &nMipHeight );
		int size = nMipWidth * nMipHeight;
		unsigned char ooMipLevel = ( unsigned char )( 255.0f * ( 1.0f / ( float )( 1 << iMipLevel ) ) );

		for (int iFrame = 0; iFrame < m_nFrameCount; ++iFrame)
		{
			for (int iFace = 0; iFace < m_nFaceCount; ++iFace)
			{
				unsigned char *pDstLevel = ImageData( iFrame, iFace, iMipLevel );
				unsigned char *pDst;
				for( pDst = pDstLevel; pDst < pDstLevel + size * 4; pDst += 4 )
				{
					pDst[3] = ooMipLevel;
				}
			}
		}
	}
}
Example #7
void SmallImageTest()
{
	CPUTracker *tracker = new CPUTracker(50,50, 16);

	GenerateTestImage(ImageData(tracker->srcImage, tracker->GetWidth(), tracker->GetHeight()), tracker->width/2,tracker->height/2, 9, 0.0f);
	FloatToJPEGFile("smallimg.jpg", tracker->srcImage, tracker->width, tracker->height);

	vector2f com = tracker->ComputeMeanAndCOM(0);
	dbgout(SPrintf("COM: %f,%f\n", com.x, com.y));
	
	vector2f initial(25,25);
	bool boundaryHit = false;
	vector2f xcor = tracker->ComputeXCorInterpolated(initial, 2, 16, boundaryHit);
	dbgout(SPrintf("XCor: %f,%f\n", xcor.x, xcor.y));
	//assert(fabsf(xcor.x-15.0f) < 1e-6 && fabsf(xcor.y-15.0f) < 1e-6);

	int I=4;
	vector2f pos = initial;
	for (int i=0;i<I;i++) {
		bool bhit;
		vector2f np = tracker->ComputeQI(pos, 1, 32, 4, 1, 1, 16, bhit);
		dbgprintf("qi[%d]. New=%.4f, %.4f;\tOld=%.4f, %.4f\n", i, np.x, np.y, pos.x, pos.y);
	}


	FloatToJPEGFile("debugimg.jpg", tracker->GetDebugImage(), tracker->width, tracker->height);
	delete tracker;
}
Example #8
//-----------------------------------------------------------------------------
// Computes the reflectivity
//-----------------------------------------------------------------------------
void CVTFTexture::ComputeReflectivity( )
{
	Assert( m_Format == IMAGE_FORMAT_RGBA8888 );

	int divisor = 0;
	m_vecReflectivity.Init( 0.0f, 0.0f, 0.0f );
	for( int iFrame = 0; iFrame < m_nFrameCount; ++iFrame )
	{
		for( int iFace = 0; iFace < m_nFaceCount; ++iFace )
		{
			Vector vecFaceReflect;
			unsigned char* pSrc = ImageData( iFrame, iFace, 0 );
			int nNumPixels = m_nWidth * m_nHeight;

			VectorClear( vecFaceReflect );
			for (int i = 0; i < nNumPixels; ++i, pSrc += 4 )
			{
				vecFaceReflect[0] += TextureToLinear( pSrc[0] );
				vecFaceReflect[1] += TextureToLinear( pSrc[1] );
				vecFaceReflect[2] += TextureToLinear( pSrc[2] );
			}	

			vecFaceReflect /= nNumPixels;

			m_vecReflectivity += vecFaceReflect;
			++divisor;
		}
	}
	m_vecReflectivity /= divisor;
}
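TextureToLinear is not defined in this listing; given the 2.2 gamma used by the resampling calls in the other VTF examples, a plausible stand-in is a plain gamma-2.2 decode of the 8-bit texel. The real implementation most likely precomputes a 256-entry lookup table; treat this as a sketch.

#include <cmath>

// Sketch: convert an 8-bit gamma-2.2 texel to a linear-space float.
inline float TextureToLinearSketch( unsigned char c )
{
	return powf( c / 255.0f, 2.2f );
}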
Example #9
void TestBoundCheck()
{
	CPUTracker *tracker = new CPUTracker(32,32, 16);
	bool boundaryHit;

	for (int i=0;i<10;i++) {
		float xp = tracker->GetWidth()/2+(rand_uniform<float>() - 0.5) * 20;
		float yp = tracker->GetHeight()/2+(rand_uniform<float>() - 0.5) * 20;
		
		GenerateTestImage(ImageData(tracker->srcImage, tracker->GetWidth(), tracker->GetHeight()), xp, yp, 1, 0.0f);

		vector2f com = tracker->ComputeMeanAndCOM();
		dbgout(SPrintf("COM: %f,%f\n", com.x-xp, com.y-yp));
	
		vector2f initial = com;
		boundaryHit=false;
		vector2f xcor = tracker->ComputeXCorInterpolated(initial, 3, 16, boundaryHit);
		dbgprintf("XCor: %f,%f. Err: %d\n", xcor.x-xp, xcor.y-yp, boundaryHit);

		boundaryHit=false;
		vector2f qi = tracker->ComputeQI(initial, 3, 64, 32, ANGSTEPF, 1, 10, boundaryHit);
		dbgprintf("QI: %f,%f. Err: %d\n", qi.x-xp, qi.y-yp, boundaryHit);
	}

	delete tracker;
}
Example #10
ImageData Context2D::getImageData(qreal sx, qreal sy, qreal sw, qreal sh)
{
    Q_UNUSED(sx);
    Q_UNUSED(sy);
    Q_UNUSED(sw);
    Q_UNUSED(sh);
    return ImageData();
}
Example #11
    //---------------------------------------------------------------------    
	Codec::DecodeResult PVRTCCodec::decodeV2(DataStreamPtr& stream) const
	{
		PVRTCTexHeaderV2 header;
        uint32 flags = 0, formatFlags = 0;
        size_t numFaces = 1; // Assume one face until we know otherwise

        ImageData *imgData = OGRE_NEW ImageData();
		MemoryDataStreamPtr output;

        // Read the PVRTC header
        stream->read(&header, sizeof(PVRTCTexHeaderV2));

        // Get format flags
        flags = header.flags;
        flipEndian(&flags, sizeof(uint32));
        formatFlags = flags & PVR_TEXTURE_FLAG_TYPE_MASK;

        uint32 bitmaskAlpha = header.bitmaskAlpha;
        flipEndian(&bitmaskAlpha, sizeof(uint32));

        if (formatFlags == kPVRTextureFlagTypePVRTC_4 || formatFlags == kPVRTextureFlagTypePVRTC_2)
        {
            if (formatFlags == kPVRTextureFlagTypePVRTC_4)
            {
                imgData->format = bitmaskAlpha ? PF_PVRTC_RGBA4 : PF_PVRTC_RGB4;
            }
            else if (formatFlags == kPVRTextureFlagTypePVRTC_2)
            {
                imgData->format = bitmaskAlpha ? PF_PVRTC_RGBA2 : PF_PVRTC_RGB2;
            }

            imgData->depth = 1;
            imgData->width = header.width;
            imgData->height = header.height;
            imgData->num_mipmaps = static_cast<ushort>(header.numMipmaps);

            // PVRTC is a compressed format
            imgData->flags |= IF_COMPRESSED;
        }

        // Calculate total size from number of mipmaps, faces and size
		imgData->size = Image::calculateSize(imgData->num_mipmaps, numFaces, 
                                             imgData->width, imgData->height, imgData->depth, imgData->format);

		// Bind output buffer
		output.bind(OGRE_NEW MemoryDataStream(imgData->size));

		// Now deal with the data
		void *destPtr = output->getPtr();
        stream->read(destPtr, imgData->size);
        destPtr = static_cast<void*>(static_cast<uchar*>(destPtr));

		DecodeResult ret;
		ret.first = output;
		ret.second = CodecDataPtr(imgData);

		return ret;
	}
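Image::calculateSize belongs to OGRE; as a rough cross-check, the per-mip PVRTC data size follows the commonly documented block rules, sketched below (4x4 blocks at 4 bpp, 8x4 blocks at 2 bpp, a minimum of 2x2 blocks, 8 bytes per block). This is the usual formula, not OGRE's code.

#include <cstddef>
#include <algorithm>

// Sketch: size in bytes of one PVRTC mip level.
size_t pvrtcLevelSize( size_t width, size_t height, bool fourBpp )
{
    size_t blockWidth   = fourBpp ? 4 : 8;
    size_t widthBlocks  = std::max<size_t>( width / blockWidth, 2 );
    size_t heightBlocks = std::max<size_t>( height / 4, 2 );
    return widthBlocks * heightBlocks * 8;   // every PVRTC block is 8 bytes
}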
Example #12
	ImageData LoadImage(const std::string& filename) {
		std::string ext = utils::getFileExtension(filename);
		if (ext == "tga") {
			//targa file
			auto tgaData = Targa::LoadTarga(filename);
			return ImageData(tgaData);
		}
		throw utils::not_supported_error("the extension: " + ext + " is not supported");
	}
Example #13
    //---------------------------------------------------------------------
    Codec::DecodeResult STBIImageCodec::decode(DataStreamPtr& input) const
    {
        // Buffer stream into memory (TODO: override IO functions instead?)
        MemoryDataStream memStream(input, true);

        int width, height, components;
        stbi_uc* pixelData = stbi_load_from_memory(memStream.getPtr(),
                static_cast<int>(memStream.size()), &width, &height, &components, 0);

        if (!pixelData)
        {
            OGRE_EXCEPT(Exception::ERR_INTERNAL_ERROR, 
                "Error decoding image: " + String(stbi_failure_reason()),
                "STBIImageCodec::decode");
        }

        SharedPtr<ImageData> imgData(OGRE_NEW ImageData());
        MemoryDataStreamPtr output;

        imgData->depth = 1; // only 2D formats handled by this codec
        imgData->width = width;
        imgData->height = height;
        imgData->num_mipmaps = 0; // no mipmaps in non-DDS 
        imgData->flags = 0;

        switch( components )
        {
            case 1:
                imgData->format = PF_BYTE_L;
                break;
            case 2:
                imgData->format = PF_BYTE_LA;
                break;
            case 3:
                imgData->format = PF_BYTE_RGB;
                break;
            case 4:
                imgData->format = PF_BYTE_RGBA;
                break;
            default:
                stbi_image_free(pixelData);
                OGRE_EXCEPT(Exception::ERR_ITEM_NOT_FOUND,
                            "Unknown or unsupported image format",
                            "STBIImageCodec::decode");
                break;
        }
        
        size_t dstPitch = imgData->width * PixelUtil::getNumElemBytes(imgData->format);
        imgData->size = dstPitch * imgData->height;
        output.bind(OGRE_NEW MemoryDataStream(pixelData, imgData->size, true));
        
        DecodeResult ret;
        ret.first = output;
        ret.second = imgData;
        return ret;
    }
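stb_image itself has no OGRE dependency; a minimal standalone decode with the same calls looks like the sketch below. stbi_load_from_memory, stbi_failure_reason and stbi_image_free are real stb_image functions; the buffer arguments are placeholders for whatever encoded file contents you have in memory.

#define STB_IMAGE_IMPLEMENTATION
#include "stb_image.h"

// Sketch: decode an in-memory image to tightly packed RGBA with stb_image.
bool decodeToRGBA( const unsigned char *encoded, int encodedLen )
{
    int w = 0, h = 0, comp = 0;
    // Request 4 components so the output is always RGBA regardless of the file.
    stbi_uc *pixels = stbi_load_from_memory( encoded, encodedLen, &w, &h, &comp, 4 );
    if ( !pixels )
        return false;                 // stbi_failure_reason() describes the error

    // ... consume w * h * 4 bytes of pixel data ...

    stbi_image_free( pixels );
    return true;
}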
Example #14
//-----------------------------------------------------------------------------
// Unserialization of image data
//-----------------------------------------------------------------------------
bool CVTFTexture::LoadImageData( CUtlBuffer &buf, const VTFFileHeader_t &header, int nSkipMipLevels )
{
	// Fix up the mip count + size based on how many mip levels we skip...
	if (nSkipMipLevels > 0)
	{
		Assert( m_nMipCount > nSkipMipLevels );
		if (header.numMipLevels < nSkipMipLevels)
		{
			// NOTE: This can only happen with older format .vtf files
			Warning("Warning! Encountered old format VTF file; please rebuild it!\n");
			return false;
		}

		ComputeMipLevelDimensions( nSkipMipLevels, &m_nWidth, &m_nHeight );
		m_nMipCount -= nSkipMipLevels;
	}

	// read the texture image (including mipmaps if they are there and needed.)
	int iImageSize = ComputeFaceSize( );
	iImageSize *= m_nFaceCount * m_nFrameCount;

	// For backwards compatibility, we don't read in the spheremap fallback on
	// older format .VTF files...
	int nFacesToRead = m_nFaceCount;
	if (IsCubeMap())
	{
		if ((header.version[0] == 7) && (header.version[1] < 1))
			nFacesToRead = 6;
	}

	// NOTE: We load the bits this way because we store the bits in memory
	// differently than the way they are stored on disk; the on-disk layout is
	// chosen so that we can load only the mip levels we actually need.
	// NOTE: The smallest mip levels are stored first!!
	AllocateImageData( iImageSize );
	for (int iMip = m_nMipCount; --iMip >= 0; )
	{
		// NOTE: This is for older versions...
		if (header.numMipLevels - nSkipMipLevels <= iMip)
			continue;

		int iMipSize = ComputeMipSize( iMip );

		for (int iFrame = 0; iFrame < m_nFrameCount; ++iFrame)
		{
			for (int iFace = 0; iFace < nFacesToRead; ++iFace)
			{
				unsigned char *pMipBits = ImageData( iFrame, iFace, iMip );
				buf.Get( pMipBits, iMipSize );
			}
		}
	}

	return buf.IsValid();
}
Example #15
Face Face::fromFace(const libface::Face& f, ImageOwnershipMode mode)
{
    Image image;
    switch (mode)
    {
        case ShallowCopy:
            image = ImageData(f.takeFace());
            break;
        case DeepCopy:
            image = ImageData(cvCloneImage(f.getFace()));
            break;
        case IgnoreData:
            break;
    }

    QRect rect = QRect(QPoint(f.getX1(), f.getY1()), QPoint(f.getX2(), f.getY2()));
    Face face(rect, image);
    face.setId(f.getId());
    return face;
}
Example #16
Image* SVGImageCache::lookupOrCreateBitmapImageForRenderer(const RenderObject* renderer)
{
    if (!renderer)
        return Image::nullImage();

    const CachedImageClient* client = renderer;

    // The cache needs to know the size of the renderer before querying an image for it.
    SizeAndScalesMap::iterator sizeIt = m_sizeAndScalesMap.find(renderer);
    if (sizeIt == m_sizeAndScalesMap.end())
        return Image::nullImage();

    IntSize size = sizeIt->second.size;
    float zoom = sizeIt->second.zoom;
    float scale = sizeIt->second.scale;

    // FIXME (85335): This needs to take CSS transform scale into account as well.
    Page* page = renderer->document()->page();
    if (!scale)
        scale = page->deviceScaleFactor() * page->pageScaleFactor();

    ASSERT(!size.isEmpty());

    // Look up the image for the client in the cache and possibly update it.
    ImageDataMap::iterator it = m_imageDataMap.find(client);
    if (it != m_imageDataMap.end()) {
        ImageData& data = it->second;

        // Common case: image size & zoom remained the same.
        if (data.sizeAndScales.size == size && data.sizeAndScales.zoom == zoom && data.sizeAndScales.scale == scale)
            return data.image.get();

        // If the image size for the client changed, we have to delete the buffer, remove the item from the cache and recreate it.
        delete data.buffer;
        m_imageDataMap.remove(it);
    }

    FloatSize scaledSize(size);
    scaledSize.scale(scale);

    // Create and cache new image and image buffer at requested size.
    OwnPtr<ImageBuffer> newBuffer = ImageBuffer::create(expandedIntSize(scaledSize), 1);
    if (!newBuffer)
        return Image::nullImage();

    m_svgImage->drawSVGToImageBuffer(newBuffer.get(), size, zoom, scale, SVGImage::DontClearImageBuffer);

    RefPtr<Image> newImage = newBuffer->copyImage(CopyBackingStore);
    Image* newImagePtr = newImage.get();
    ASSERT(newImagePtr);

    m_imageDataMap.add(client, ImageData(newBuffer.leakPtr(), newImage.release(), SizeAndScales(size, zoom, scale)));
    return newImagePtr;
}
Example #17
//---------------------------------------------------------------------------
void File_Dpx::Data_Parse()
{
    if (!IsDpx) // Is Cineon
    {
        switch (Element_Code)
        {
            case Pos_GenericSection   : GenericSectionHeader_Cineon(); break;
            case Pos_IndustrySpecific : IndustrySpecificHeader_Cineon(); break;
            case Pos_UserDefined      : UserDefinedHeader_Cineon(); break;
            case Pos_Padding          : Padding(); break;
            case Pos_ImageData        : ImageData(); break;
            default                   : ;
        }
    }
    else
    {
        switch (Element_Code)
        {
            case Pos_GenericSection   : GenericSectionHeader_Dpx(); break;
            case Pos_IndustrySpecific : IndustrySpecificHeader_Dpx(); break;
            case Pos_UserDefined      : UserDefinedHeader_Dpx(); break;
            case Pos_Padding          : Padding(); break;
            case Pos_ImageData        : ImageData(); break;
            default                   : ;
        }
    }

    do
        Sizes_Pos++; //We go automatically to the next block
    while (Sizes_Pos<Sizes.size() && Sizes[Sizes_Pos]==0);
    if (Sizes_Pos>=Sizes.size())
    {
        Sizes.clear();
        Sizes_Pos=0;

        if (!Status[IsFilled])
            Fill();
        if (File_Offset+Buffer_Offset+Element_Size<Config->File_Current_Size)
            GoTo(Config->File_Current_Size);
    }
}
Example #18
ImageData ReadJPEGFile(const char*fn)
{
	int w, h;
	uchar* imgdata;
	std::vector<uchar> jpgdata = ReadToByteBuffer(fn);
	ReadJPEGFile(&jpgdata[0], jpgdata.size(), &imgdata, &w,&h);

	float* fbuf = new float[w*h];
	for (int x=0;x<w*h;x++)
		fbuf[x] = imgdata[x]/255.0f;
	delete[] imgdata;

	return ImageData(fbuf,w,h);
}
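ReadJPEGFile above normalizes 8-bit samples into floats in [0, 1]; the inverse mapping, a sketch of what a FloatToJPEGFile-style helper has to do before encoding, is the clamped scale-and-round below.

#include <algorithm>

// Sketch: convert normalized float samples back to 8-bit with clamping.
void FloatToByteBuffer( const float *src, unsigned char *dst, int count )
{
	for ( int i = 0; i < count; ++i )
	{
		float v = src[i] * 255.0f + 0.5f;                        // scale and round
		dst[i] = (unsigned char)std::min( 255.0f, std::max( 0.0f, v ) );
	}
}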
Example #19
//-----------------------------------------------------------------------------
// Returns a pointer to the data associated with a particular frame, face, mip level, and offset
//-----------------------------------------------------------------------------
unsigned char *CVTFTexture::ImageData( int iFrame, int iFace, int iMipLevel, int x, int y )
{
#ifdef _DEBUG
	int nWidth, nHeight;
	ComputeMipLevelDimensions( iMipLevel, &nWidth, &nHeight );
	Assert( (x >= 0) && (x <= nWidth) && (y >= 0) && (y <= nHeight) );
#endif

	int nRowBytes = RowSizeInBytes( iMipLevel );
	int nTexelBytes = ImageLoader::SizeInBytes( m_Format );

	unsigned char *pMipBits = ImageData( iFrame, iFace, iMipLevel );
	pMipBits += y * nRowBytes + x * nTexelBytes;
	return pMipBits;
}
Example #20
//-----------------------------------------------------------------------------
// Computes the alpha flags
//-----------------------------------------------------------------------------
void CVTFTexture::ComputeAlphaFlags()
{
	Assert( m_Format == IMAGE_FORMAT_RGBA8888 );

	m_nFlags &= ~(TEXTUREFLAGS_EIGHTBITALPHA | TEXTUREFLAGS_ONEBITALPHA);
	
	if( m_nFlags & TEXTUREFLAGS_ONEOVERMIPLEVELINALPHA )
	{
		m_nFlags |= TEXTUREFLAGS_EIGHTBITALPHA;
		return;
	}
	
	for( int iFrame = 0; iFrame < m_nFrameCount; ++iFrame )
	{
		for( int iFace = 0; iFace < m_nFaceCount; ++iFace )
		{
			// If we're all 0 or all 255, assume it's opaque
			bool bHasZero = false;
			bool bHas255 = false;

			unsigned char* pSrcBits = ImageData( iFrame, iFace, 0 );
			int nNumPixels = m_nWidth * m_nHeight;
			while (--nNumPixels >= 0)
			{
				if (pSrcBits[3] == 0)
					bHasZero = true;
				else if (pSrcBits[3] == 255)
					bHas255 = true;
				else
				{
					// Have grey at all? 8 bit alpha baby
					m_nFlags &= ~TEXTUREFLAGS_ONEBITALPHA;
					m_nFlags |= TEXTUREFLAGS_EIGHTBITALPHA;
					return;
				}

				pSrcBits += 4;
			}

			// If we have both 0 and 255, we're at least one-bit alpha
			if (bHasZero && bHas255)
			{
				m_nFlags |= TEXTUREFLAGS_ONEBITALPHA;
			}
		}
	}
}
Example #21
ImageDecoderQt::ReadContext::ReadResult
        ImageDecoderQt::ReadContext::read(bool allDataReceived)
{
    // Complete mode: read only once all data has been received
    if (m_loadMode == LoadComplete && !allDataReceived)
        return ReadPartial;

    // Attempt to read out all images
    while (true) {
        if (m_target.empty() || m_target.back().m_imageState == ImageComplete) {
            // Start a new image.
            if (!m_reader.canRead())
                return ReadEOF;

            // Attempt to construct an empty image of the matching size and format
            // for efficient reading
            QImage newImage = m_dataFormat != QImage::Format_Invalid  ?
                          QImage(m_size,m_dataFormat) : QImage();
            m_target.push_back(ImageData(newImage));
        }

        // read chunks
        switch (readImageLines(m_target.back())) {
        case IncrementalReadFailed:
            m_target.pop_back();
            return ReadFailed;
        case IncrementalReadPartial:
            return ReadPartial;
        case IncrementalReadComplete:
            m_target.back().m_imageState = ImageComplete;
            //store for next
            m_dataFormat = m_target.back().m_image.format();
            m_size = m_target.back().m_image.size();
            const bool supportsAnimation = m_reader.supportsAnimation();

            if (debugImageDecoderQt)
                qDebug() << "readImage(): #" << m_target.size() << " complete, " << m_size << " format " << m_dataFormat
                <<  " supportsAnimation=" <<  supportsAnimation ;
            // No point in reading further
            if (!supportsAnimation)
                return ReadComplete;

            break;
        }
    }
    return ReadComplete;
}
Example #22
//-----------------------------------------------------------------------------
// Generate the low-res image bits
//-----------------------------------------------------------------------------
bool CVTFTexture::ConstructLowResImage()
{
	Assert( m_Format == IMAGE_FORMAT_RGBA8888 );
	Assert( m_pLowResImageData );

	CUtlMemory<unsigned char> lowResSizeImage;
	lowResSizeImage.EnsureCapacity( m_nLowResImageWidth * m_nLowResImageHeight * 4 );
	unsigned char *tmpImage = lowResSizeImage.Base();
	if( !ImageLoader::ResampleRGBA8888( ImageData(0, 0, 0), tmpImage, 
		m_nWidth, m_nHeight, m_nLowResImageWidth, m_nLowResImageHeight, 2.2f, 2.2f ) )
	{
		return false;
	}
	
	// convert to the low-res size version with the correct image format
	return ImageLoader::ConvertImageFormat( tmpImage, IMAGE_FORMAT_RGBA8888, 
		m_pLowResImageData, m_LowResImageFormat, m_nLowResImageWidth, m_nLowResImageHeight ); 
}
Example #23
//---------------------------------------------------------------------------
void File_Exr::Data_Parse()
{
         if (name_End==0)
        ImageData();
    else if (name=="channels" && type=="chlist")
        channels();
    else if (name=="comments" && type=="string")
        comments();
    else if (name=="compression" && type=="compression" && Element_Size==1)
        compression();
    else if (name=="dataWindow" && type=="box2i" && Element_Size==16)
        dataWindow();
    else if (name=="displayWindow" && type=="box2i" && Element_Size==16)
        displayWindow();
    else if (name=="pixelAspectRatio" && type=="float" && Element_Size==4)
        pixelAspectRatio();
    else
        Skip_XX(Element_Size,                                   "value");
}
Example #24
CDLL_EXPORT ROIPosition* QTrkFindBeads(float* image, int w,int h, int smpCornerPosX, int smpCornerPosY, int roi, float imgRelDist, float acceptance)
{
	BeadFinder::Config cfg;
	cfg.img_distance = imgRelDist;
	cfg.roi = roi;
	cfg.similarity = acceptance;
	ImageData img = ImageData(image, w,h);
	ImageData sampleImg = img.subimage(smpCornerPosX, smpCornerPosY, roi,roi);
	auto results = BeadFinder::Find(&img, sampleImg.data, &cfg);
	sampleImg.free();

	ROIPosition *output=new ROIPosition[results.size()];
	for (int i=0;i<results.size();i++)
	{
		output[i].x = results[i].x;
		output[i].y = results[i].y;
	}

	return output;
}
Example #25
    void NativeTextureGLES::unlock()
    {
        if (!_lockFlags)
            return;

        if (_lockFlags & lock_write)
        {
            glBindTexture(GL_TEXTURE_2D, (GLuint) _id);
            GLenum er = glGetError();

            ImageData src = ImageData(_width, _height, (int)(_data.size() / _height), _format, &_data.front());
            ImageData locked = src.getRect(_lockRect);

            //glPixelStorei (GL_UNPACK_ALIGNMENT,  1);//byte align
            er = glGetError();

            //todo add EXT_unpack_subimage support

            MemoryTexture mt;
            mt.init(_lockRect.getWidth(), _lockRect.getHeight(), _format);
            ImageData q = mt.lock();
            operations::copy(locked, q);
            mt.unlock();

            glPixel glp = SurfaceFormat2GL(_format);



            glTexSubImage2D(GL_TEXTURE_2D, 0,
                            _lockRect.getX(), _lockRect.getY(), _lockRect.getWidth(), _lockRect.getHeight(),
                            glp.format, glp.type, locked.data);

            er = glGetError();

            _lockFlags = 0;
        }

        CHECKGL();
    }
Example #26
//-----------------------------------------------------------------------------
// Fixes the cubemap faces orientation from our standard to what the material system needs
//-----------------------------------------------------------------------------
void CVTFTexture::FixCubemapFaceOrientation( )
{
	if (!IsCubeMap())
		return;

	Assert( m_Format == IMAGE_FORMAT_RGBA8888 );

	for (int iMipLevel = 0; iMipLevel < m_nMipCount; ++iMipLevel)
	{
		int iMipSize, iTemp;
		ComputeMipLevelDimensions( iMipLevel, &iMipSize, &iTemp );
		Assert( iMipSize == iTemp );

		for (int iFrame = 0; iFrame < m_nFrameCount; ++iFrame)
		{
			for (int iFace = 0; iFace < 6; ++iFace)
			{
				FixCubeMapFacing( ImageData( iFrame, iFace, iMipLevel ), iFace, iMipSize, m_Format );
			}
		}
	}
}
Example #27
//-----------------------------------------------------------------------------
// Serialization of image data
//-----------------------------------------------------------------------------
bool CVTFTexture::WriteImageData( CUtlBuffer &buf )
{
	// NOTE: We write the bits this way because we store the bits in memory
	// differently than the way they are stored on disk; the on-disk layout is
	// chosen so that we can load only the mip levels we actually need.
	// NOTE: The smallest mip levels are stored first!!
	for (int iMip = m_nMipCount; --iMip >= 0; )
	{
		int iMipSize = ComputeMipSize( iMip );

		for (int iFrame = 0; iFrame < m_nFrameCount; ++iFrame)
		{
			for (int iFace = 0; iFace < m_nFaceCount; ++iFace)
			{
				unsigned char *pMipBits = ImageData( iFrame, iFace, iMip );
				buf.Put( pMipBits, iMipSize );
			}
		}
	}

	return buf.IsValid();
}
Example #28
void Sculpture::sculpt(const ImageData& imgData, const Cube& bBox) {
    int width = imgData.image.cols;
    int height = imgData.image.rows;
    cameras.push_back(ImageData(imgData));
    boundingBox = bBox;
    for (int i = 0 ; i < SCULPTURE_SIZE ; i++) {
        for (int j = 0 ; j < SCULPTURE_SIZE; j++) {
            for (int k = 0 ; k < SCULPTURE_SIZE; k++) {
                if (!this->getBit(i, j, k)) continue;
                double x = i, y = j, z = k;
                this->getProjection(x, y, z, imgData.P);
                if (x < width && x >= 0 && y < height && y >= 0) {
                    if (pointOffSilhouette(imgData.silhouette,x,y)) {
                        this->unsetBit(i, j, k);
                    }
                } else {
                    this->unsetBit(i, j, k);
                }
            }
        }
    }
}
Example #29
void PixelationErrorTest()
{
	CPUTracker *tracker = new CPUTracker(128,128, 64);

	float X = tracker->GetWidth()/2;
	float Y = tracker->GetHeight()/2;
	int N = 20;
	for (int x=0;x<N;x++)  {
		float xpos = X + 2.0f * x / (float)N;
		GenerateTestImage(ImageData(tracker->srcImage, tracker->GetWidth(), tracker->GetHeight()), xpos, X, 1, 0.0f);

		vector2f com = tracker->ComputeMeanAndCOM();
		//dbgout(SPrintf("COM: %f,%f\n", com.x, com.y));

		vector2f initial(X,Y);
		bool boundaryHit = false;
		vector2f xcorInterp = tracker->ComputeXCorInterpolated(initial, 3, 32, boundaryHit);
		vector2f qipos = tracker->ComputeQI(initial, 3, tracker->GetWidth(), 128, 1, 2.0f, tracker->GetWidth()/2-10, boundaryHit);
		dbgprintf("xpos:%f, COM err: %f, XCorInterp err: %f. QI err: %f\n", xpos, com.x-xpos, xcorInterp.x-xpos, qipos.x-xpos);

	}
	delete tracker;
}
Example #30
	ImageData::ImageData(const ImageData &b, void *Data)
	{
		*this = ImageData(b.w, b.h, b.pitch, b.format, Data);
	}
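Across these examples ImageData is a different type in each project (a raw pixel view, an OGRE codec record, a Qt decoder entry, and so on). The constructor above suggests the simplest variant; a minimal shape consistent with the members it uses (w, h, pitch, format, data) would look like this, purely as an illustration of that variant and not any project's real definition:

// Sketch: minimal pixel-view ImageData consistent with the copy constructor above.
struct ImageDataSketch
{
	int   w;        // width in pixels
	int   h;        // height in pixels
	int   pitch;    // bytes per row
	int   format;   // pixel-format enum defined by the owning library
	void *data;     // pointer to the first row (not owned)

	ImageDataSketch( int W, int H, int Pitch, int Format, void *Data )
		: w( W ), h( H ), pitch( Pitch ), format( Format ), data( Data ) {}
};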