Example #1
void Renderer2dApp::draw()
{
	// Render using CoreGraphics on the Mac
#if defined( CINDER_COCOA )
	CGContextRef context = cocoa::getWindowContext();
	CGColorSpaceRef baseSpace = CGColorSpaceCreateDeviceRGB();
	CGFloat colors[8] = { 0, 0, 0, 1, 0.866, 0.866, 0.866, 1 };
	CGGradientRef gradient = CGGradientCreateWithColorComponents( baseSpace, colors, NULL, 2 );
	::CGColorSpaceRelease( baseSpace ), baseSpace = NULL;
	::CGContextDrawLinearGradient( context, gradient, CGPointMake( 0, 0 ), CGPointMake( 0, getWindowHeight() ), 0 );
	::CGGradientRelease(gradient), gradient = NULL;

	// CoreGraphics is "upside down" by default; set up the CTM to flip and center the image
	ivec2 imgSize( ::CGImageGetWidth( mImage ), ::CGImageGetHeight( mImage ) );
	ivec2 centerMargin( ( getWindowWidth() - imgSize.x ) / 2, ( getWindowHeight() - imgSize.y ) / 2 );	
	::CGContextTranslateCTM( context, centerMargin.x, imgSize.y + centerMargin.y );
	::CGContextScaleCTM( context, 1.0, -1.0 );
	::CGContextDrawImage( context, CGRectMake( 0, 0, imgSize.x, imgSize.y ), mImage );
#elif defined( CINDER_MSW ) // Render using GDI+ on Windows
	Gdiplus::Graphics graphics( getWindow()->getDc() );
	Gdiplus::LinearGradientBrush brush( Gdiplus::Rect( 0, 0, getWindowWidth(), getWindowHeight() ),
		Gdiplus::Color( 0, 0, 0 ), Gdiplus::Color( 220, 220, 220 ), Gdiplus::LinearGradientModeVertical );
	graphics.FillRectangle( &brush, 0, 0, getWindowWidth(), getWindowHeight() ); 
	graphics.DrawImage( mImage, ( getWindowWidth() - mImageSurface.getWidth() ) / 2, ( getWindowHeight() - mImageSurface.getHeight() ) / 2,
		mImageSurface.getWidth(), mImageSurface.getHeight() );
#endif
}
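The CGFloat array above packs two RGBA gradient stops (black and a light gray, 0.866 ≈ 220/255), mirroring the GDI+ gradient in the Windows branch. For context, here is a minimal setup() sketch for how mImage and mImageSurface might be prepared per platform; the asset name and the cocoa::createCgImage() / msw::createGdiplusBitmap() helpers are assumptions, not taken from this example.
// Hypothetical companion setup(); asset name and helper calls are assumptions
void Renderer2dApp::setup()
{
	mImageSurface = Surface8u( loadImage( loadAsset( "image.png" ) ) );	// placeholder asset
#if defined( CINDER_COCOA )
	mImage = cocoa::createCgImage( mImageSurface );	// assumed cinder::cocoa helper returning a CGImageRef
#elif defined( CINDER_MSW )
	mImage = msw::createGdiplusBitmap( mImageSurface );	// assumed cinder::msw helper returning a Gdiplus::Bitmap*
#endif
}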
Example #2
Texture::Texture( const Surface8u &surface, Format format )
	: mObj( shared_ptr<Obj>( new Obj( surface.getWidth(), surface.getHeight() ) ) )
{
	if( format.mInternalFormat < 0 )
		format.mInternalFormat = surface.hasAlpha() ? GL_RGBA : GL_RGB;
	mObj->mInternalFormat = format.mInternalFormat;
	mObj->mTarget = format.mTarget;

	GLint dataFormat;
	GLenum type;
	SurfaceChannelOrderToDataFormatAndType( surface.getChannelOrder(), &dataFormat, &type );

	init( surface.getData(), surface.getRowBytes() / surface.getChannelOrder().getPixelInc(), dataFormat, type, format );	
}
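A minimal usage sketch for the constructor above, assuming the legacy cinder::gl API these examples target; the app class and asset name are placeholders.
// Hypothetical usage of gl::Texture( Surface8u, Format )
void MyApp::setup()
{
	Surface8u photo( loadImage( loadAsset( "photo.png" ) ) );	// placeholder asset
	mTexture = gl::Texture( photo );	// internal format chosen from photo.hasAlpha()
}

void MyApp::draw()
{
	gl::clear( Color( 0, 0, 0 ) );
	gl::draw( mTexture, Rectf( 0, 0, (float)getWindowWidth(), (float)getWindowHeight() ) );
}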
Example #3
void ocvPerspectiveApp::updateImage()
{
	cv::Mat input( toOcv( mInputImage ) ), output;

	cv::Point2f src[4];
	src[0] = cv::Point2f( 0, 0 );
	src[1] = cv::Point2f( mInputImage.getWidth(), 0 );
	src[2] = cv::Point2f( mInputImage.getWidth(), mInputImage.getHeight() );
	src[3] = cv::Point2f( 0, mInputImage.getHeight() );
	
	cv::Point2f dst[4];
	for( int i = 0; i < 4; ++i )
		dst[i] = toOcv( mPoints[i] );
	
	cv::Mat warpMatrix = cv::getPerspectiveTransform( src, dst );
	cv::warpPerspective( input, output, warpMatrix, toOcv( getWindowSize() ), cv::INTER_CUBIC );

	mTexture = gl::Texture( fromOcv( output ) );
}
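A hypothetical companion setup() showing how the four warp handles in mPoints might be initialized before updateImage() runs; only mInputImage and mPoints come from the example above, everything else is a placeholder.
// Hypothetical setup() for the perspective-warp example
void ocvPerspectiveApp::setup()
{
	mInputImage = Surface8u( loadImage( loadAsset( "photo.jpg" ) ) );	// placeholder asset
	// Start the warp handles at the image corners, matching the src[] order above
	mPoints[0] = Vec2f( 0, 0 );
	mPoints[1] = Vec2f( mInputImage.getWidth(), 0 );
	mPoints[2] = Vec2f( mInputImage.getWidth(), mInputImage.getHeight() );
	mPoints[3] = Vec2f( 0, mInputImage.getHeight() );
	updateImage();
}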
Example #4
	void Kinect::pixelToVideoSurface( Surface8u &surface, uint8_t *buffer )
	{
		if ( mNewVideoFrame ) {
			return;
		}

		int32_t height = surface.getHeight();
		int32_t width = surface.getWidth();
		int32_t size = width * height * 4;

		// Swap red/blue channels
		for ( int32_t i = 0; i < size; i += 4 ) {
			uint8_t b = buffer[ i ];
			buffer[ i ] = buffer[ i + 2 ];
			buffer[ i + 2 ] = b;
		}

		memcpy( surface.getData(), buffer, size );
		mNewVideoFrame = true;
	}
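The loop above swaps bytes 0 and 2 of each 4-byte pixel, converting BGRA data to RGBA before the copy into the surface. An equivalent standalone sketch of that swap, shown only for illustration:
// Standalone BGRA -> RGBA channel swap (illustrative, not part of the Kinect block)
#include <algorithm>
#include <cstdint>

void swapRedBlue( uint8_t *bgra, int32_t width, int32_t height )
{
	const int32_t size = width * height * 4;	// 4 bytes per pixel
	for ( int32_t i = 0; i < size; i += 4 ) {
		std::swap( bgra[ i ], bgra[ i + 2 ] );	// swap B and R; G and A are untouched
	}
}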
Example #5
void cApp::setup(){
    
    mPln.setSeed( 345 );
    mPln.setOctaves( 4 );
    
    openDir();
    
    fs::path path = dir/("f_00000.png");
    sur = Surface8u( loadImage( path) );
    int w = sur.getWidth();
    int h = sur.getHeight();
    
    pcam = CameraPersp(w, h, 50, 1, 10000);
    camUi.setCamera( &pcam );
    mExp.setup( w, h, 0, 3000-1, GL_RGB, mt::getRenderPath(), 0 );
    setWindowSize( w*0.5, h*0.5 );
    setWindowPos(0, 0);
    
    
#ifdef RENDER
    mExp.startRender();
#endif
}
Example #6
void Fluid2DCamAppApp::update()
{
	
	if( mCapture && mCapture.checkNewFrame() ) {
		if( ! mTexCam ) {
			mTexCam = gl::Texture( mCapture.getSurface() );
		}

		// Flip the image; allocate the flipped surface only once
		Surface8u srcImg = mCapture.getSurface();
		if( ! mFlipped ) {
			mFlipped = Surface8u( srcImg.getWidth(), srcImg.getHeight(), srcImg.hasAlpha(), srcImg.getChannelOrder() );
		}
		for( int y = 0; y < mCapture.getHeight(); ++y ) {
			const Color8u* src = (const Color8u*)(srcImg.getData() + (y + 1)*srcImg.getRowBytes() - srcImg.getPixelInc());
			Color8u* dst = (Color8u*)(mFlipped.getData() + y*mFlipped.getRowBytes());
			for( int x = 0; x < mCapture.getWidth(); ++x ) {
				*dst = *src;
				++dst;
				--src;
			} 
		}
		
		// Create scaled image
		if( ! mCurScaled  ) {
			mCurScaled = Surface8u( mFlipped.getWidth()/kFlowScale, mFlipped.getHeight()/kFlowScale, mFlipped.hasAlpha(), mFlipped.getChannelOrder() );
		}		
		ip::resize( mFlipped, &mCurScaled );

		// Optical flow 
		if( mCurScaled && mPrvScaled ) {
			mPrvCvData = mCurCvData;
			mCurCvData = cv::Mat( toOcv( Channel( mCurScaled ) ) );

			if( mPrvCvData.data && mCurCvData.data ) {
				int pyrLvels		= 3;
				int winSize			= 3;
				int iters			= 5;
				int poly_n			= 7;
				double poly_sigma	= 1.5;
				cv::calcOpticalFlowFarneback( mPrvCvData, mCurCvData, mFlow, 0.5, pyrLvels, 2*winSize + 1, iters, poly_n, poly_sigma, cv::OPTFLOW_FARNEBACK_GAUSSIAN );

				if( mFlow.data ) {
					if( mFlowVectors.empty() ) {
						mFlowVectors.resize( mCurScaled.getWidth()*mCurScaled.getHeight() );
					}
					
					//memset( &mFlowVectors[0], 0, mCurScaled.getWidth()*mCurScaled.getHeight()*sizeof( Vec2f ) );
					mNumActiveFlowVectors = 0;
					for( int j = 0; j < mCurScaled.getHeight(); ++j ) {
						for( int i = 0; i < mCurScaled.getWidth(); ++i ) {
							const float* fptr = reinterpret_cast<float*>(mFlow.data + j*mFlow.step + i*sizeof(float)*2);
							//
							Vec2f v = Vec2f( fptr[0], fptr[1] ); 
							if( v.lengthSquared() >= mVelThreshold ) {
								if( mNumActiveFlowVectors >= (int)mFlowVectors.size() ) {
									mFlowVectors.push_back( std::make_pair( Vec2i( i, j ), v ) );
								}
								else {
									mFlowVectors[mNumActiveFlowVectors] = std::make_pair( Vec2i( i, j ), v );
								}
								++mNumActiveFlowVectors;
							}
						}
					}
				}
			}
		}

		// Update texture
		mTexCam.update( mFlipped );

		// Save previous frame
		if( ! mPrvScaled ) {
			mPrvScaled = Surface8u( mCurScaled.getWidth(), mCurScaled.getHeight(), mCurScaled.hasAlpha(), mCurScaled.getChannelOrder() );
		}
		memcpy( mPrvScaled.getData(), mCurScaled.getData(), mCurScaled.getHeight()*mCurScaled.getRowBytes() );
	}

	// Update fluid
	float dx = (mFluid2DResX - 2)/(float)(640/kFlowScale);
	float dy = (mFluid2DResY - 2)/(float)(480/kFlowScale);
	for( int i = 0; i < mNumActiveFlowVectors; ++i ) {
		Vec2f P = mFlowVectors[i].first;
		const Vec2f& v = mFlowVectors[i].second;
		mFluid2D.splatDensity( P.x*dx + 1, P.y*dy + 1, mDenScale*v.lengthSquared() );
		mFluid2D.splatVelocity( P.x*dx + 1, P.y*dy + 1, v*mVelScale );
	}
	mFluid2D.step();

	// Update velocity
	const Vec2f* srcVel0 = mFluid2D.dbgVel0().data();
	const Vec2f* srcVel1 = mFluid2D.dbgVel1().data();
	Colorf* dstVel0 = (Colorf*)mSurfVel0.getData();
	Colorf* dstVel1 = (Colorf*)mSurfVel1.getData();
	for( int j = 0; j < mFluid2DResY; ++j ) {
		for( int i = 0; i < mFluid2DResX; ++i ) {
			*dstVel0 = Colorf( srcVel0->x, srcVel0->y, 0.0f );
			*dstVel1 = Colorf( srcVel1->x, srcVel1->y, 0.0f );
			++srcVel0;
			++srcVel1;
			++dstVel0;
			++dstVel1;
		}
	}
	
	// Update Density
	mChanDen0 = Channel32f( mFluid2DResX, mFluid2DResY, mFluid2DResX*sizeof(float), 1, mFluid2D.dbgDen0().data() );
	mChanDen1 = Channel32f( mFluid2DResX, mFluid2DResY, mFluid2DResX*sizeof(float), 1, mFluid2D.dbgDen1().data() );
	
	mTexDen0.update( mChanDen0 );
	mTexDen1.update( mChanDen1 );
	
	// Update velocity textures
	mTexVel0.update( mSurfVel0 );
	mTexVel1.update( mSurfVel1 );
	
	// Update Divergence
	mChanDiv = Channel32f( mFluid2DResX, mFluid2DResY, mFluid2DResX*sizeof(float), 1, mFluid2D.dbgDivergence().data() );
	mTexDiv.update( mChanDiv );

	// Update Pressure
	mChanPrs = Channel32f( mFluid2DResX, mFluid2DResY, mFluid2DResX*sizeof(float), 1, mFluid2D.dbgPressure().data() );
	mTexPrs.update( mChanPrs );

	// Update Curl, Curl Length
	mChanCurl = Channel32f( mFluid2DResX, mFluid2DResY, mFluid2DResX*sizeof(float), 1, mFluid2D.dbgCurl().data() );
	mTexCurl.update( mChanCurl );
	mChanCurlLen = Channel32f( mFluid2DResX, mFluid2DResY, mFluid2DResX*sizeof(float), 1, mFluid2D.dbgCurlLength().data() );
	mTexCurlLen.update( mChanCurlLen );
}
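A hypothetical draw() companion for the update() above, showing one way the camera image and the debug textures might be displayed; the layout rectangles are illustrative only.
// Hypothetical draw(); positions and sizes are arbitrary
void Fluid2DCamAppApp::draw()
{
	gl::clear( Color( 0, 0, 0 ) );
	if( mTexCam )
		gl::draw( mTexCam, Rectf( 0, 0, (float)getWindowWidth(), (float)getWindowHeight() ) );
	// Small debug views of the simulation fields
	gl::draw( mTexDen0, Rectf( 0, 0, 160, 120 ) );
	gl::draw( mTexVel0, Rectf( 160, 0, 320, 120 ) );
	gl::draw( mTexPrs, Rectf( 320, 0, 480, 120 ) );
}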
Example #7
TextureCache::Obj::Obj( const Surface8u &prototypeSurface, const Texture::Format &format )
	: mWidth( prototypeSurface.getWidth() ), mHeight( prototypeSurface.getHeight() ), mFormat( format ), mNextId( 0 )
{
}
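A usage sketch for the cache this constructor backs, assuming the legacy gl::TextureCache interface (construction from a prototype Surface8u plus a Texture::Format, and a cache() call that returns a recycled gl::Texture); the capture setup and member names are placeholders.
// Hypothetical usage of gl::TextureCache with a camera capture
void MyCaptureApp::setup()
{
	mCapture = Capture( 640, 480 );
	mCapture.start();
	Surface8u prototype( 640, 480, false );	// same size/format as the frames to be cached
	mTexCache = gl::TextureCache( prototype, gl::Texture::Format() );
}

void MyCaptureApp::update()
{
	if( mCapture.checkNewFrame() )
		mTexture = mTexCache.cache( mCapture.getSurface() );	// reuses textures matching the prototype
}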