Example #1
void FaceApp::draw()
{
	gl::setViewport( getWindowBounds() );
	gl::clear();
	gl::setMatricesWindow( getWindowSize() );

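	// Draw the color frame stretched to fill the window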
	if ( mSurface ) {	
		gl::color( Colorf::white() );
		gl::enable( GL_TEXTURE_2D );
		gl::TextureRef tex = gl::Texture::create( mSurface );
		gl::draw( tex, tex->getBounds(), Rectf( getWindowBounds() ) );
	
		gl::disable( GL_TEXTURE_2D );
		gl::pushMatrices();
		gl::scale( Vec2f( getWindowSize() ) / Vec2f( mSurface.getSize() ) );
		
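		// Project each tracked 3D face mesh into color-image space and draw it as a wireframe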
		for ( const Kinect2::Face3d& face : mFaces3d ) {
			const TriMesh& mesh = face.getMesh();
			if ( mesh.getNumIndices() > 0 ) {
				vector<Vec2f> verts;
				for ( const Vec3f& i : mesh.getVertices() ) {
					Vec2f v = mDevice->mapCameraToColor( i );
					verts.push_back( v );
				}

				gl::lineWidth( 0.5f );
				gl::enableWireframe();
				TriMesh2d mesh2d;
				mesh2d.appendIndices( &mesh.getIndices()[ 0 ], mesh.getNumIndices() );
				mesh2d.appendVertices( &verts[ 0 ], mesh.getNumVertices() );
				gl::draw( mesh2d );
				gl::disableWireframe();
			}
		}
		
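		// Draw 2D face bounds and landmark points (red when 3D face tracking is enabled, thicker lines otherwise)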
		if ( mEnabledFace3d ) {
			gl::color( Colorf( 1.0f, 0.0f, 0.0f ) );
		} else {
			gl::lineWidth( 2.0f );
		}
		for ( const Kinect2::Face2d& face : mFaces2d ) {
			if ( face.isTracked() ) {
				gl::drawStrokedRect( face.getBoundsColor() );
				for ( const Vec2f& i : face.getPointsColor() ) {
					gl::drawSolidCircle( i, 3.0f, 16 );
				}
			}
		}
		gl::popMatrices();
	}

	mParams->draw();
}
Example #2
void Fluid2DCamAppApp::update()
{
	
	if( mCapture && mCapture.checkNewFrame() ) {
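		// Lazily create the camera texture on the first frame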
		if( ! mTexCam ) {
			mTexCam = gl::Texture( mCapture.getSurface() );
		}

		// Flip the image
		if( ! mFlipped ) {
			Surface8u srcImg = mCapture.getSurface();
			mFlipped = Surface8u( srcImg.getWidth(), srcImg.getHeight(), srcImg.hasAlpha(), srcImg.getChannelOrder() );
		}
		Surface8u srcImg = mCapture.getSurface();
		for( int y = 0; y < mCapture.getHeight(); ++y ) {
			const Color8u* src = (const Color8u*)(srcImg.getData() + (y + 1)*srcImg.getRowBytes() - srcImg.getPixelInc());
			Color8u* dst = (Color8u*)(mFlipped.getData() + y*mFlipped.getRowBytes());
			for( int x = 0; x < mCapture.getWidth(); ++x ) {
				*dst = *src;
				++dst;
				--src;
			} 
		}
		
		// Create scaled image
		if( ! mCurScaled  ) {
			mCurScaled = Surface8u( mFlipped.getWidth()/kFlowScale, mFlipped.getHeight()/kFlowScale, mFlipped.hasAlpha(), mFlipped.getChannelOrder() );
		}		
		ip::resize( mFlipped, &mCurScaled );

		// Optical flow 
		if( mCurScaled && mPrvScaled ) {
			mPrvCvData = mCurCvData;
			mCurCvData = cv::Mat( toOcv( Channel( mCurScaled ) ) );

			if( mPrvCvData.data && mCurCvData.data ) {
				int pyrLvels		= 3;
				int winSize			= 3;
				int iters			= 5;
				int poly_n			= 7;
				double poly_sigma	= 1.5;
				cv::calcOpticalFlowFarneback( mPrvCvData, mCurCvData, mFlow, 0.5, pyrLvels, 2*winSize + 1, iters, poly_n, poly_sigma, cv::OPTFLOW_FARNEBACK_GAUSSIAN );

				if( mFlow.data ) {
					if( mFlowVectors.empty() ) {
						mFlowVectors.resize( mCurScaled.getWidth()*mCurScaled.getHeight() );
					}
					
					//memset( &mFlowVectors[0], 0, mCurScaled.getWidth()*mCurScaled.getHeight()*sizeof( Vec2f ) );
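					// Compact flow vectors above the velocity threshold into mFlowVectors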
					mNumActiveFlowVectors = 0;
					for( int j = 0; j < mCurScaled.getHeight(); ++j ) {
						for( int i = 0; i < mCurScaled.getWidth(); ++i ) {
							const float* fptr = reinterpret_cast<float*>(mFlow.data + j*mFlow.step + i*sizeof(float)*2);
							// mFlow stores a 2-channel float displacement (dx, dy) per pixel
							Vec2f v = Vec2f( fptr[0], fptr[1] ); 
							if( v.lengthSquared() >= mVelThreshold ) {
								if( mNumActiveFlowVectors >= (int)mFlowVectors.size() ) {
									mFlowVectors.push_back( std::make_pair( Vec2i( i, j ), v ) );
								}
								else {
									mFlowVectors[mNumActiveFlowVectors] = std::make_pair( Vec2i( i, j ), v );
								}
								++mNumActiveFlowVectors;
							}
						}
					}
				}
			}
		}

		// Update texture
		mTexCam.update( mFlipped );

		// Save previous frame
		if( ! mPrvScaled ) {
			mPrvScaled = Surface8u( mCurScaled.getWidth(), mCurScaled.getHeight(), mCurScaled.hasAlpha(), mCurScaled.getChannelOrder() );
		}
		memcpy( mPrvScaled.getData(), mCurScaled.getData(), mCurScaled.getHeight()*mCurScaled.getRowBytes() );
	}

	// Update fluid
	float dx = (mFluid2DResX - 2)/(float)(640/kFlowScale);
	float dy = (mFluid2DResY - 2)/(float)(480/kFlowScale);
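	// Splat each active flow vector into the fluid grid as density and velocity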
	for( int i = 0; i < mNumActiveFlowVectors; ++i ) {
		Vec2f P = mFlowVectors[i].first;
		const Vec2f& v = mFlowVectors[i].second;
		mFluid2D.splatDensity( P.x*dx + 1, P.y*dy + 1, mDenScale*v.lengthSquared() );
		mFluid2D.splatVelocity( P.x*dx + 1, P.y*dy + 1, v*mVelScale );
	}
	mFluid2D.step();

	// Update velocity
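	// Pack the XY velocity fields into the RG channels of surfaces so they can be uploaded as textures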
	const Vec2f* srcVel0 = mFluid2D.dbgVel0().data();
	const Vec2f* srcVel1 = mFluid2D.dbgVel1().data();
	Colorf* dstVel0 = (Colorf*)mSurfVel0.getData();
	Colorf* dstVel1 = (Colorf*)mSurfVel1.getData();
	for( int j = 0; j < mFluid2DResY; ++j ) {
		for( int i = 0; i < mFluid2DResX; ++i ) {
			*dstVel0 = Colorf( srcVel0->x, srcVel0->y, 0.0f );
			*dstVel1 = Colorf( srcVel1->x, srcVel1->y, 0.0f );
			++srcVel0;
			++srcVel1;
			++dstVel0;
			++dstVel1;
		}
	}
	
	// Update Density
	mChanDen0 = Channel32f( mFluid2DResX, mFluid2DResY, mFluid2DResX*sizeof(float), 1, mFluid2D.dbgDen0().data() );
	mChanDen1 = Channel32f( mFluid2DResX, mFluid2DResY, mFluid2DResX*sizeof(float), 1, mFluid2D.dbgDen1().data() );
	
	mTexDen0.update( mChanDen0 );
	mTexDen1.update( mChanDen1 );
	
	// Update velocity textures
	mTexVel0.update( mSurfVel0 );
	mTexVel1.update( mSurfVel1 );
	
	// Update Divergence
	mChanDiv = Channel32f( mFluid2DResX, mFluid2DResY, mFluid2DResX*sizeof(float), 1, mFluid2D.dbgDivergence().data() );
	mTexDiv.update( mChanDiv );

	// Update Pressure
	mChanPrs = Channel32f( mFluid2DResX, mFluid2DResY, mFluid2DResX*sizeof(float), 1, mFluid2D.dbgPressure().data() );
	mTexPrs.update( mChanPrs );

	// Update Curl, Curl Length
	mChanCurl = Channel32f( mFluid2DResX, mFluid2DResY, mFluid2DResX*sizeof(float), 1, mFluid2D.dbgCurl().data() );
	mTexCurl.update( mChanCurl );
	mChanCurlLen = Channel32f( mFluid2DResX, mFluid2DResY, mFluid2DResX*sizeof(float), 1, mFluid2D.dbgCurlLength().data() );
	mTexCurlLen.update( mChanCurlLen );
}
Example #3
// Render
void KinectApp::draw()
{

	// Clear window
	gl::setViewport( getWindowBounds() );
	gl::clear( Colorf( 0.1f, 0.1f, 0.1f ) );

	// We're capturing
	if ( mKinect->isCapturing() ) {

		// Set up camera for 3D
		gl::setMatrices( mCamera );

		// Move skeletons down below the rest of the interface
		gl::pushMatrices();
		gl::translate( 0.0f, -0.62f, 0.0f );

		// Iterate through skeletons
		uint32_t i = 0;
		for ( vector<Skeleton>::const_iterator skeletonIt = mSkeletons.cbegin(); skeletonIt != mSkeletons.cend(); ++skeletonIt, i++ ) {

			// Skeleton is valid when all joints are present
			if ( skeletonIt->size() == JointName::NUI_SKELETON_POSITION_COUNT ) {

				// Set color
				gl::color( mKinect->getUserColor( i ) );

				// Draw joints
				for ( Skeleton::const_iterator jointIt = skeletonIt->cbegin(); jointIt != skeletonIt->cend(); ++jointIt ) {
					gl::drawSphere( jointIt->second * Vec3f( -1.0f, 1.0f, 1.0f ), 0.025f, 16 );
				}

				// Draw body
				for ( vector<vector<JointName> >::const_iterator segmentIt = mSegments.cbegin(); segmentIt != mSegments.cend(); ++segmentIt ) {
					drawSegment( *skeletonIt, *segmentIt );
				}

			}

		}

		// Switch to 2D
		gl::popMatrices();
		gl::setMatricesWindow( getWindowSize(), true );

		// Draw depth and video textures
		gl::color( Colorf::white() );
		if ( mDepthSurface ) {
			Area srcArea( 0, 0, mDepthSurface.getWidth(), mDepthSurface.getHeight() );
			Rectf destRect( 265.0f, 15.0f, 505.0f, 195.0f );
			gl::draw( gl::Texture( mDepthSurface ), srcArea, destRect );
		}
		if ( mVideoSurface ) {
			Area srcArea( 0, 0, mVideoSurface.getWidth(), mVideoSurface.getHeight() );
			Rectf destRect( 508.0f, 15.0f, 748.0f, 195.0f );
			gl::draw( gl::Texture( mVideoSurface ), srcArea, destRect);
		}

	}

	// Check audio data
	if ( mData != 0 ) {

		// Get dimensions
		int32_t dataSize = mInput->getDataSize();
		float scale = 240.0f / (float)dataSize;
		float height = 180.0f;
		Vec2f position( 751.0f, 15.0f );

		// Draw background
		gl::color( ColorAf::black() );
		Rectf background( position.x, position.y, position.x + 240.0f, position.y + 180.0f );
		gl::drawSolidRect( background );

		// Draw audio input
		gl::color( ColorAf::white() );
		PolyLine<Vec2f> mLine;
		for ( int32_t i = 0; i < dataSize; i++ ) {
			mLine.push_back( position + Vec2f( i * scale, math<float>::clamp( mData[ i ], -1.0f, 1.0f ) * height * 0.5f + height * 0.5f ) );
		}
		if ( mLine.size() > 0 ) {
			gl::draw( mLine );
		}

	}

	// Draw the interface
	params::InterfaceGl::draw();

}
Example #4
void TextParticlesApp::setupVBO()
{
	if( mString.length() == 0 )
		return;
	
	// the number of pixels in the buffer depends on the text area
	int w = mTextSize.x, h = mTextSize.y;
	int totalParticles = w * h;
	mTextParticleCount = totalParticles;
	
	// INITIALIZE the particles
	vector<Particle> particles;
	particles.assign( totalParticles, Particle() );
	
	// CENTER of the text texture
	vec3 center = vec3(mTextSize.x/2.0f, mTextSize.y/2.0f, 0 ) + mCenter;
	for( int i = 0; i < particles.size(); ++i )
	{	// assign starting values to particles.
		int x = i % w;
		int y = floor( float(i) / float(w) );
		int z = 1;
		
		vec3 pos	   = vec3( x, y, z );
		vec3 dir	   = normalize( pos - center );
		vec3 offsetVel = ( dir * vec3( mStartVelocity ) );
		ColorA color   = mTextSurf.getPixel( ivec2( x, y ) );
		
		auto &p = particles.at( i );
		p.pos      = pos;
		p.texcoord = vec2( float(x) / float(w), float(y) / float(h) );
		p.ppos	   = p.pos - offsetVel;
		p.damping  = Rand::randFloat( mDampingBase, mDampingBase + 0.2f );
		p.color    = ColorA( Color(color), color.a );
		p.invmass  = Rand::randFloat( 0.1f, 1.0f );
	}
	
	// Create particle buffers on GPU and copy data into the first buffer.
	// Mark as static since we only write from the CPU once.
	mParticleBuffer[mSourceIndex] = gl::Vbo::create( GL_ARRAY_BUFFER, particles.size() * sizeof(Particle), particles.data(), GL_STATIC_DRAW );
	mParticleBuffer[mDestinationIndex] = gl::Vbo::create( GL_ARRAY_BUFFER, particles.size() * sizeof(Particle), nullptr, GL_STATIC_DRAW );
	
	for( int i = 0; i < 2; ++i )
	{	// Describe the particle layout for OpenGL.
		mAttributes[i] = gl::Vao::create();
		gl::ScopedVao vao( mAttributes[i] );
		
		// Define attributes as offsets into the bound particle buffer
		gl::ScopedBuffer buffer( mParticleBuffer[i] );
		gl::enableVertexAttribArray( 0 );
		gl::enableVertexAttribArray( 1 );
		gl::enableVertexAttribArray( 2 );
		gl::enableVertexAttribArray( 3 );
		gl::enableVertexAttribArray( 4 );
		gl::enableVertexAttribArray( 5 );
		gl::vertexAttribPointer( 0, 3, GL_FLOAT, GL_FALSE, sizeof(Particle), (const GLvoid*)offsetof(Particle, pos ) );
		gl::vertexAttribPointer( 1, 4, GL_FLOAT, GL_FALSE, sizeof(Particle), (const GLvoid*)offsetof(Particle, color ) );
		gl::vertexAttribPointer( 2, 3, GL_FLOAT, GL_FALSE, sizeof(Particle), (const GLvoid*)offsetof(Particle, ppos ) );
		gl::vertexAttribPointer( 3, 1, GL_FLOAT, GL_FALSE, sizeof(Particle), (const GLvoid*)offsetof(Particle, damping ) );
		gl::vertexAttribPointer( 4, 2, GL_FLOAT, GL_FALSE, sizeof(Particle), (const GLvoid*)offsetof(Particle, texcoord ) );
		gl::vertexAttribPointer( 5, 1, GL_FLOAT, GL_FALSE, sizeof(Particle), (const GLvoid*)offsetof(Particle, invmass ) );
	}
	
	// ANIMATE mStep
	timeline().apply( &mStep, 1.0f, mStepMax, 1.0f );
	
	// As long as we're active, we'll draw the particles
	mActive = true;
}
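
The six vertexAttribPointer() calls above assume a tightly packed, CPU-side Particle struct whose members match each attribute's offsetof() and component count. A minimal sketch of such a struct, using the field names referenced in the code (the actual app may order the members differently or include extra fields):

struct Particle {
	vec3	pos;       // attribute 0: current position (3 floats)
	vec3	ppos;      // attribute 2: previous position (3 floats)
	ColorA	color;     // attribute 1: RGBA color sampled from the text surface (4 floats)
	float	damping;   // attribute 3: per-particle damping (1 float)
	vec2	texcoord;  // attribute 4: texture coordinate into the text texture (2 floats)
	float	invmass;   // attribute 5: inverse mass (1 float)
};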