//-----------------------------------------------------------------------------
bool HaarHandDetector::doUpdateDetection(ci::Surface pSurface)
{
	//Downscale factor: processing a smaller image reduces detection time
	int lScale = 2;

	//Create a GrayScale copy of the input image
	cv::Mat	lGrayScaleImage(toOcv(pSurface, CV_8UC1));

	//Downscale the grayscale image
	cv::Mat lSmallGrayScaleImage(pSurface.getHeight() / lScale, pSurface.getWidth() / lScale, CV_8UC1);
	cv::resize(lGrayScaleImage, lSmallGrayScaleImage, lSmallGrayScaleImage.size(), 0,0,cv::INTER_LINEAR);

	//Equalize the histogram to improve contrast for the detector
	cv::equalizeHist(lSmallGrayScaleImage, lSmallGrayScaleImage);

	//Clear out the previously detected closed hands
	mClosedHands.clear();

	//Run the cascade detector on the downscaled image
	//(scale factor 1.2, min 2 neighbors, 24x24 minimum window)
	std::vector<cv::Rect> lCVRectangles;
	mClosedHandCascade.detectMultiScale(lSmallGrayScaleImage, lCVRectangles, 1.2f, 2, CV_HAAR_FIND_BIGGEST_OBJECT, cv::Size(24,24));

	//Map each detection back to the coordinates of the full-size input
	for (std::vector<cv::Rect>::const_iterator lIter = lCVRectangles.begin(); lIter != lCVRectangles.end(); ++lIter)
	{
		ci::Rectf lClosedHand(ci::fromOcv(*lIter));
		lClosedHand *= lScale;
		mClosedHands.push_back(lClosedHand);
	}

	//Report whether any closed hands were detected
	return !mClosedHands.empty();
}
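
The cascade member is presumably a cv::CascadeClassifier. As a minimal sketch of how it might be loaded and the detector driven (the setup method, update method, and cascade file path below are assumptions for illustration, not part of the example above):

//-----------------------------------------------------------------------------
void HaarHandDetector::setup()
{
	//Load a trained closed-hand Haar cascade (hypothetical file path)
	if (!mClosedHandCascade.load("closed_hand_cascade.xml"))
	{
		ci::app::console() << "Failed to load cascade file" << std::endl;
	}
}

//-----------------------------------------------------------------------------
void HaarHandDetector::update(ci::Surface pSurface)
{
	//After a successful detection, mClosedHands holds rectangles in
	//full-resolution surface coordinates
	if (doUpdateDetection(pSurface))
	{
		//e.g. draw or track the detected rectangles here
	}
}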
Example 2
void KinectUser::update()
{
	mNIUserTracker.setSmoothing( mHandSmoothing );
	if ( mNI.checkNewVideoFrame() && mOutlineEnable )
	{
		// generate user outline shapes
		Surface8u maskSurface = mNIUserTracker.getUserMask();

		cv::Mat cvMask, cvMaskFiltered;
		cvMask = toOcv( Channel8u( maskSurface ) );
		cv::blur( cvMask, cvMaskFiltered, cv::Size( mOutlineBlurAmt, mOutlineBlurAmt ) );
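
		// morphological cleanup: the erode removes speckle noise, the
		// three dilate passes close gaps in the silhouette, and the
		// final erode pulls the outline back toward its original size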

		cv::Mat dilateElm = cv::getStructuringElement( cv::MORPH_RECT,
				cv::Size( mOutlineDilateAmt, mOutlineDilateAmt ) );
		cv::Mat erodeElm = cv::getStructuringElement( cv::MORPH_RECT,
				cv::Size( mOutlineErodeAmt, mOutlineErodeAmt ) );
		cv::erode( cvMaskFiltered, cvMaskFiltered, erodeElm, cv::Point( -1, -1 ), 1 );
		cv::dilate( cvMaskFiltered, cvMaskFiltered, dilateElm, cv::Point( -1, -1 ), 3 );
		cv::erode( cvMaskFiltered, cvMaskFiltered, erodeElm, cv::Point( -1, -1 ), 1 );
		cv::blur( cvMaskFiltered, cvMaskFiltered, cv::Size( mOutlineBlurAmt, mOutlineBlurAmt ) );

		cv::threshold( cvMaskFiltered, cvMaskFiltered, mOutlineThres, 255, CV_THRESH_BINARY);

		vector< vector< cv::Point > > contours;
		cv::findContours( cvMaskFiltered, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE );

		mShape.clear();
		for ( vector< vector< cv::Point > >::const_iterator it = contours.begin();
				it != contours.end(); ++it )
		{
			if ( it->empty() )
				continue;

			vector< cv::Point >::const_iterator pit = it->begin();
			mShape.moveTo( mOutputMapping.map( fromOcv( *pit ) ) );
			++pit;
			for ( ; pit != it->end(); ++pit )
			{
				mShape.lineTo( mOutputMapping.map( fromOcv( *pit ) ) );
			}
			mShape.close();
		}
	}

	// retrieve hand positions
	mHandPositions.clear();

	const XnSkeletonJoint jointIds[] = { XN_SKEL_LEFT_HAND, XN_SKEL_RIGHT_HAND };
	vector< unsigned > users = mNIUserTracker.getUsers();
	for ( vector< unsigned >::const_iterator it = users.begin(); it != users.end(); ++it )
	{
		unsigned userId = *it;
		for ( int i = 0; i < sizeof( jointIds ) / sizeof( jointIds[0] ); ++i )
		{
			Vec2f jointPos = mNIUserTracker.getJoint2d( userId, jointIds[i] );
			float conf = mNIUserTracker.getJointConfidance( userId, jointIds[i] );

			if ( conf > .9 )
			{
				mHandPositions.push_back( mOutputMapping.map( jointPos ) );
			}
		}
	}

#ifdef OUTLINE_SHADER
	// update vbo from shape points
	// based on the work of Paul Houx
	// https://forum.libcinder.org/topic/smooth-thick-lines-using-geometry-shader#23286000001297067
	if ( mOutlineEnable )
	{
		mVboMeshes.clear();

		for ( size_t i = 0; i < mShape.getNumContours(); ++i )
		{
			const Path2d &path = mShape.getContour( i );
			const vector< Vec2f > &points = path.getPoints();

			// the wrap-around indexing below breaks for fewer than three points
			if ( points.size() > 2 )
			{
				// create a new vector that can contain 3D vertices
				vector< Vec3f > vertices;

				vertices.reserve( points.size() );

				// add all 2D points as 3D vertices
				vector< Vec2f >::const_iterator it;
				for ( it = points.begin() ; it != points.end(); ++it )
					vertices.push_back( Vec3f( *it ) );

				// now that we have a list of vertices, create the index buffer
				size_t n = vertices.size();

				vector< uint32_t > indices;
				indices.reserve( n * 4 );

				// GL_LINES_ADJACENCY: each segment carries its two
				// neighbouring vertices; wrap around for the first segment
				indices.push_back( n - 1 );
				indices.push_back( 0 );
				indices.push_back( 1 );
				indices.push_back( 2 );
				for ( size_t j = 1; j < n - 2; ++j )
				{
					indices.push_back( j - 1 );
					indices.push_back( j );
					indices.push_back( j + 1 );
					indices.push_back( j + 2 );
				}
				// the last two segments wrap around to close the contour
				indices.push_back( n - 3 );
				indices.push_back( n - 2 );
				indices.push_back( n - 1 );
				indices.push_back( 0 );

				indices.push_back( n - 2 );
				indices.push_back( n - 1 );
				indices.push_back( 0 );
				indices.push_back( 1 );

				// finally, create the mesh
				gl::VboMesh::Layout layout;
				layout.setStaticPositions();
				layout.setStaticIndices();
				gl::VboMesh vboMesh = gl::VboMesh( vertices.size(), indices.size(), layout, GL_LINES_ADJACENCY_EXT );
				vboMesh.bufferPositions( &(vertices.front()), vertices.size() );
				vboMesh.bufferIndices( indices );
				vboMesh.unbindBuffers();

				mVboMeshes.push_back( vboMesh );
			}
		}
	}
#endif
}
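
The four indices per segment are what GL_LINES_ADJACENCY expects: each line segment reaches the geometry shader together with its two neighbouring vertices. As a standalone sketch, the same wrap-around indexing for a closed contour can be written with modular arithmetic (this helper is illustrative, not part of the example above; it assumes n >= 3):

#include <cstdint>
#include <vector>

std::vector< uint32_t > buildAdjacencyIndices( size_t n )
{
	std::vector< uint32_t > indices;
	indices.reserve( n * 4 );
	for ( size_t i = 0; i < n; ++i )
	{
		indices.push_back( ( i + n - 1 ) % n ); // previous neighbour
		indices.push_back( i );                 // segment start
		indices.push_back( ( i + 1 ) % n );     // segment end
		indices.push_back( ( i + 2 ) % n );     // next neighbour
	}
	return indices;
}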
Example 3
void ShapeDetection::onDepth( openni::VideoFrameRef frame, const OpenNI::DeviceOptions& deviceOptions )
{
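    // pipeline: depth frame -> mask invalid depths -> convert to 8 bit ->
    // invert -> threshold -> find contours -> match against tracked shapes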
    // convert frame from the camera to an OpenCV matrix
    mInput = toOcv( OpenNI::toChannel16u(frame) );
    
    cv::Mat thresh;
    cv::Mat eightBit;
    cv::Mat withoutBlack;
    
    // remove black pixels from frame which get detected as noise
    withoutBlack = removeBlack( mInput, mNearLimit, mFarLimit );
    
    // convert the matrix from 16 bit to 8 bit, scaling intensities down
    // (convertTo only changes the depth; the channel count stays the same)
    withoutBlack.convertTo( eightBit, CV_8UC3, 0.1 );
    
    // invert the image
    cv::bitwise_not( eightBit, eightBit );
    
    mContours.clear();
    mApproxContours.clear();
    
    // apply a binary threshold to reduce noise
    cv::threshold( eightBit, thresh, mThresh, mMaxVal, CV_THRESH_BINARY );
    
    // draw lines around shapes
    cv::findContours( thresh, mContours, mHierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE );
    
    vector<cv::Point> approx;
    // approximate each contour with fewer vertices (1 px tolerance)
    for ( size_t i = 0; i < mContours.size(); i++ ) {
        cv::approxPolyDP( mContours[i], approx, 1, true );
        mApproxContours.push_back( approx );
    }
    
    // keep only contours in a plausible size range for matching
    mShapes = getEvaluationSet( mApproxContours, 75, 100000 );
    
    // find the nearest match for each shape
    for ( size_t i = 0; i < mTrackedShapes.size(); i++ ) {
        Shape* nearestShape = findNearestMatch( mTrackedShapes[i], mShapes, 5000 );
        
        // a tracked shape was found, update that tracked shape with the new shape
        if ( nearestShape != NULL ) {
            nearestShape->matchFound = true;
            mTrackedShapes[i].centroid = nearestShape->centroid;
            // get the depth value at the centroid (mInput is 16-bit unsigned)
            float centerDepth = (float)mInput.at<unsigned short>( mTrackedShapes[i].centroid.y, mTrackedShapes[i].centroid.x );
            // map the depth range [mNearLimit, mFarLimit] to [0, 1]
            mTrackedShapes[i].depth = lmap( centerDepth, (float)mNearLimit, (float)mFarLimit, 0.0f, 1.0f );
            mTrackedShapes[i].lastFrameSeen = ci::app::getElapsedFrames();
            mTrackedShapes[i].hull.clear();
            mTrackedShapes[i].hull = nearestShape->hull;
            mTrackedShapes[i].motion = nearestShape->motion;
            Vec3f centerVec = Vec3f( mTrackedShapes[i].centroid.x, mTrackedShapes[i].centroid.y, 0.0f );
            mTrackedShapes[i].mTrailPoint.arrive(centerVec);
            mTrackedShapes[i].mTrailPoint.updateTrail();
        }
    }
    
    // if shape->matchFound is false, add it as a new shape
    for ( size_t i = 0; i < mShapes.size(); i++ ) {
        if( mShapes[i].matchFound == false ){
            // assign a unique ID
            mShapes[i].ID = shapeUID;
            mShapes[i].lastFrameSeen = ci::app::getElapsedFrames();
            // starting point of the trail
            mShapes[i].mTrailPoint.mLocation = Vec3f( mShapes[i].centroid.x, mShapes[i].centroid.y, 0.0f );
            // add this new shape to tracked shapes
            mTrackedShapes.push_back( mShapes[i] );
            shapeUID++;
        }
    }
    
    // if a tracked shape hasn't been matched for more than 20 frames, delete it
    for ( vector<Shape>::iterator it = mTrackedShapes.begin(); it != mTrackedShapes.end(); ) {
        if ( ci::app::getElapsedFrames() - it->lastFrameSeen > 20 ) {
            // remove the tracked shape
            it = mTrackedShapes.erase(it);
        } else {
            ++it;
        }
    }
    // publish the raw depth and the processed frame for drawing
    mSurfaceDepth = Surface8u( fromOcv( mInput ) );
    mSurfaceSubtract = Surface8u( fromOcv( eightBit ) );
}
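
removeBlack is called above but not shown. A plausible minimal sketch, assuming it keeps only depth readings inside [nearLimit, farLimit] and zeroes everything else so that empty (black) pixels stop registering as contours (the implementation below is an assumption, not the example's actual code):

#include <opencv2/imgproc/imgproc.hpp>

cv::Mat removeBlack( const cv::Mat& input, int nearLimit, int farLimit )
{
    // 8-bit mask of pixels whose depth lies inside the valid range
    cv::Mat mask;
    cv::inRange( input, cv::Scalar( nearLimit ), cv::Scalar( farLimit ), mask );

    // copy only the valid pixels; everything else stays zero
    cv::Mat output = cv::Mat::zeros( input.size(), input.type() );
    input.copyTo( output, mask );
    return output;
}

cv::inRange writes 255 into the mask where the depth lies in range and 0 elsewhere, so the masked copyTo discards both the zero-depth noise and anything beyond the far limit.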