void ShapeDetection::draw( bool useBalance, bool showNegativeSpace ) { gl::setMatricesWindow( Vec2i(getWindowWidth(), getWindowHeight()) ); // draw points for( int i=0; i<mTrackedShapes.size(); i++){ if( mTrackedShapes[i].mOffBalance && useBalance ){ glBegin( GL_POLYGON ); } else if (showNegativeSpace) { glLineWidth(2.0f); glBegin( GL_LINE_LOOP ); } else{ glLineWidth(1.0f); glBegin(GL_LINE_LOOP); } glLineWidth(1.0f); glBegin(GL_LINE_LOOP); for( int j=0; j<mTrackedShapes[i].hull.size(); j++ ){ if (showNegativeSpace) { gl::color( Color( 0.0f, 0.0f, 0.0f ) ); } else { gl::color(Color( 0.5f, 0.5f, 0.5f) ); } Vec2f v = fromOcv( mTrackedShapes[i].hull[j] ); // offset the points to align with the camera used for the mesh float newX = lmap(v.x, 0.0f, 320.0f, 0.0f, float(getWindowWidth())); float newY = lmap(v.y, 0.0f, 240.0f, 0.0f, float(getWindowHeight())); Vec2f pos = Vec2f( newX, newY); gl::vertex( pos ); } glEnd(); // glLineWidth(10.0f); // gl::enableAlphaBlending(); //// glEnable(GL_BLEND); // gl::color(0.0f, 0.75f, 1.0f); // glBegin( GL_LINE_STRIP ); // int counter = 50; // for( Vec3f v: mTrackedShapes[i].mTrailPoint.mTrail ) { //// gl::color( ColorA( 0.0f, 0.75f, 1.0f, (1.0 - counter*2/100)) ); //// glColor4f(0.0f, 0.75f, 1.0f, (1.0 - counter*2/100)); // float newX = lmap(v.x, 0.0f, 320.0f, 0.0f, float(getWindowWidth())); // float newY = lmap(v.y, 0.0f, 240.0f, 0.0f, float(getWindowHeight())); // gl::vertex( newX, newY ); // counter--; // } // glEnd(); //// glDisable(GL_BLEND); // gl::disableAlphaBlending(); } }
// Per-frame update. Three stages:
//   1. if a new video frame is available and outlines are enabled, rebuild
//      mShape from the contours of the filtered user mask
//   2. collect confident 2D hand joint positions into mHandPositions
//   3. (OUTLINE_SHADER builds only) convert each contour of mShape into a
//      VBO mesh with adjacency indices for a thick-line geometry shader
void KinectUser::update()
{
	mNIUserTracker.setSmoothing( mHandSmoothing );

	if ( mNI.checkNewVideoFrame() && mOutlineEnable ) { // generate user outline shapes
		Surface8u maskSurface = mNIUserTracker.getUserMask();

		// clean up the user mask: blur, open (erode/dilate/erode), blur
		// again, then binarize at mOutlineThres
		cv::Mat cvMask, cvMaskFiltered;
		cvMask = toOcv( Channel8u( maskSurface ) );
		cv::blur( cvMask, cvMaskFiltered, cv::Size( mOutlineBlurAmt, mOutlineBlurAmt ) );
		cv::Mat dilateElm = cv::getStructuringElement( cv::MORPH_RECT, cv::Size( mOutlineDilateAmt, mOutlineDilateAmt ) );
		cv::Mat erodeElm = cv::getStructuringElement( cv::MORPH_RECT, cv::Size( mOutlineErodeAmt, mOutlineErodeAmt ) );
		cv::erode( cvMaskFiltered, cvMaskFiltered, erodeElm, cv::Point( -1, -1 ), 1 );
		cv::dilate( cvMaskFiltered, cvMaskFiltered, dilateElm, cv::Point( -1, -1 ), 3 );
		cv::erode( cvMaskFiltered, cvMaskFiltered, erodeElm, cv::Point( -1, -1 ), 1 );
		cv::blur( cvMaskFiltered, cvMaskFiltered, cv::Size( mOutlineBlurAmt, mOutlineBlurAmt ) );
		cv::threshold( cvMaskFiltered, cvMaskFiltered, mOutlineThres, 255, CV_THRESH_BINARY);

		vector< vector< cv::Point > > contours;
		cv::findContours( cvMaskFiltered, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE );

		// rebuild mShape: one closed contour per detected outline, with every
		// point remapped through mOutputMapping
		mShape.clear();
		for ( vector< vector< cv::Point > >::const_iterator it = contours.begin();
				it != contours.end(); ++it ) {
			// NOTE(review): pit is initialized by dereferencing the contour
			// *before* the empty() check below; begin() on an empty vector is
			// itself valid (it just can't be dereferenced), so this is safe,
			// but the check would read better first
			vector< cv::Point >::const_iterator pit = it->begin();
			if ( it->empty() )
				continue;
			mShape.moveTo( mOutputMapping.map( fromOcv( *pit ) ) );
			++pit;
			for ( ; pit != it->end(); ++pit ) {
				mShape.lineTo( mOutputMapping.map( fromOcv( *pit ) ) );
			}
			mShape.close();
		}
	}

	// retrieve hand positions
	mHandPositions.clear();

	// only the two hand joints are queried, for every currently tracked user
	const XnSkeletonJoint jointIds[] = { XN_SKEL_LEFT_HAND, XN_SKEL_RIGHT_HAND };
	vector< unsigned > users = mNIUserTracker.getUsers();
	for ( vector< unsigned >::const_iterator it = users.begin(); it < users.end(); ++it ) {
		unsigned userId = *it;
		for ( int i = 0; i < sizeof( jointIds ) / sizeof( jointIds[0] ); ++i ) {
			Vec2f jointPos = mNIUserTracker.getJoint2d( userId, jointIds[i] );
			float conf = mNIUserTracker.getJointConfidance( userId, jointIds[i] );
			// keep only high-confidence joints (> 0.9)
			if ( conf > .9 ) {
				mHandPositions.push_back( mOutputMapping.map( jointPos ) );
			}
		}
	}

#ifdef OUTLINE_SHADER
	// update vbo from shape points
	// based on the work of Paul Houx
	// https://forum.libcinder.org/topic/smooth-thick-lines-using-geometry-shader#23286000001297067
	if ( mOutlineEnable ) {
		mVboMeshes.clear();

		for ( size_t i = 0; i < mShape.getNumContours(); ++i ) {
			const Path2d &path = mShape.getContour( i );
			const vector< Vec2f > &points = path.getPoints();

			if ( points.size() > 1 ) {
				// create a new vector that can contain 3D vertices
				vector< Vec3f > vertices;

				vertices.reserve( points.size() );

				// add all 2D points as 3D vertices
				vector< Vec2f >::const_iterator it;
				for ( it = points.begin() ; it != points.end(); ++it )
					vertices.push_back( Vec3f( *it ) );

				// now that we have a list of vertices, create the index buffer
				// each line segment is emitted as 4 indices (prev, a, b, next)
				// for GL_LINES_ADJACENCY, wrapping around the closed contour
				size_t n = vertices.size();

				vector< uint32_t > indices;

				indices.reserve( n * 4 );

				indices.push_back( n - 1 );
				indices.push_back( 0 );
				indices.push_back( 1 );
				indices.push_back( 2 );
				// NOTE(review): this inner `i` shadows the contour loop's `i`
				// above — harmless here, but worth renaming
				for ( size_t i = 1; i < vertices.size() - 2; ++i ) {
					indices.push_back( i - 1 );
					indices.push_back( i );
					indices.push_back( i + 1 );
					indices.push_back( i + 2 );
				}
				// NOTE(review): the wrap-around segments below use n - 3 and
				// index 2; for contours with only 2 or 3 points these
				// underflow/out-range — presumably contours are always larger,
				// TODO confirm
				indices.push_back( n - 3 );
				indices.push_back( n - 2 );
				indices.push_back( n - 1 );
				indices.push_back( 0 );
				indices.push_back( n - 2 );
				indices.push_back( n - 1 );
				indices.push_back( 0 );
				indices.push_back( 1 );

				// finally, create the mesh
				gl::VboMesh::Layout layout;
				layout.setStaticPositions();
				layout.setStaticIndices();

				gl::VboMesh vboMesh = gl::VboMesh( vertices.size(), indices.size(), layout, GL_LINES_ADJACENCY_EXT );
				vboMesh.bufferPositions( &(vertices.front()), vertices.size() );
				vboMesh.bufferIndices( indices );
				vboMesh.unbindBuffers();

				mVboMeshes.push_back( vboMesh );
			}
		}
	}
#endif
}
void DebugDrawer::draw( const BlobTrackerRef &blobTracker, const Area &bounds, const Options &options ) { if ( options.mDebugMode == Options::DebugMode::NONE ) { return; } gl::TextureRef tex; switch ( options.mDrawMode ) { case Options::DrawMode::ORIGINAL: { cv::Mat img = blobTracker->getImageInput(); if ( img.data ) { tex = gl::Texture::create( fromOcv( img ) ); } break; } case Options::DrawMode::BLURRED: { cv::Mat img = blobTracker->getImageBlurred(); if ( img.data ) { tex = gl::Texture::create( fromOcv( img ) ); } break; } case Options::DrawMode::THRESHOLDED: { cv::Mat img = blobTracker->getImageThresholded(); if ( img.data ) { tex = gl::Texture::create( fromOcv( img ) ); } break; } default: break; } if ( ! tex ) { return; } ci::gl::disableDepthRead(); ci::gl::disableDepthWrite(); bool blendingEnabled = options.mDebugMode == Options::DebugMode::BLENDED; auto ctx = gl::context(); ctx->pushBoolState( GL_BLEND, blendingEnabled ); ctx->pushBlendFuncSeparate( GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA, GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA ); gl::ScopedColor color( ColorA::gray( 1.0f, 0.5f ) ); Area outputArea = bounds; if ( options.mDrawProportionalFit ) { outputArea = Area::proportionalFit( tex->getBounds(), bounds, true, true ); } gl::draw( tex, outputArea ); { const auto &trackerOptions = blobTracker->getOptions(); float s = trackerOptions.mNormalizationScale; RectMapping blobMapping( Rectf( 0.0f, 0.0f, s, s ), Rectf( outputArea ) ); Rectf roi = trackerOptions.mNormalizedRegionOfInterest * s; gl::color( ColorA( 0.0f, 1.0f, 0.0f, 0.8f ) ); gl::drawStrokedRect( blobMapping.map( roi ) ); vec2 offset = outputArea.getUL(); vec2 scale = vec2( outputArea.getSize() ) / vec2( s, s ); for ( const auto &blob : blobTracker->getBlobs() ) { gl::pushModelView(); gl::translate( offset ); gl::scale( scale ); if ( trackerOptions.mBoundsEnabled ) { gl::color( ColorA( 1.0f, 1.0f, 0.0f, 0.5f ) ); gl::drawStrokedRect( blob->mBounds ); } if ( trackerOptions.mConvexHullEnabled && 
blob->mConvexHull ) { gl::color( ColorA( 1.0f, 0.0f, 1.0f, 0.5f ) ); gl::draw( *blob->mConvexHull.get() ); } gl::popModelView(); vec2 pos = blobMapping.map( blob->mPos ); gl::drawSolidCircle( pos, 2.0f ); gl::drawString( toString< int32_t >( blob->mId ), pos + vec2( 3.0f, -3.0f ), ColorA( 1.0f, 0.0f, 0.0f, 0.9f ) ); } } ctx->popBoolState( GL_BLEND ); ctx->popBlendFuncSeparate(); }
// Depth-frame callback. Segments shapes out of the incoming depth image,
// matches them against the currently tracked shapes, creates trackers for
// new shapes and retires trackers that have gone unseen for 20 frames.
// Also refreshes the debug surfaces mSurfaceDepth / mSurfaceSubtract.
void ShapeDetection::onDepth( openni::VideoFrameRef frame, const OpenNI::DeviceOptions& deviceOptions )
{
    // convert frame from the camera to an OpenCV matrix
    mInput = toOcv( OpenNI::toChannel16u( frame ) );

    cv::Mat thresh;
    cv::Mat eightBit;
    cv::Mat withoutBlack;

    // remove black pixels from frame which get detected as noise
    withoutBlack = removeBlack( mInput, mNearLimit, mFarLimit );

    // convert matrix from 16 bit to 8 bit with some color compensation
    // NOTE(review): convertTo only honors the depth of CV_8UC3 — the channel
    // count stays that of the (single-channel) input, so CV_8U would express
    // the same thing; behavior unchanged either way
    withoutBlack.convertTo( eightBit, CV_8UC3, 0.1/1.0 );

    // invert the image
    cv::bitwise_not( eightBit, eightBit );

    mContours.clear();
    mApproxContours.clear();

    // using a threshold to reduce noise
    // BUG FIX: the last argument of cv::threshold is the threshold *type*;
    // the original passed CV_8U (a pixel-depth constant) which only worked
    // because CV_8U == 0 == CV_THRESH_BINARY. Spell out the intended mode.
    cv::threshold( eightBit, thresh, mThresh, mMaxVal, CV_THRESH_BINARY );

    // draw lines around shapes
    cv::findContours( thresh, mContours, mHierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE );

    vector<cv::Point> approx;
    // approximate each contour with fewer points (1px tolerance)
    for ( size_t i = 0; i < mContours.size(); i++ ) {
        cv::approxPolyDP( mContours[i], approx, 1, true );
        mApproxContours.push_back( approx );
    }

    mShapes.clear();
    // get data that we can later compare
    mShapes = getEvaluationSet( mApproxContours, 75, 100000 );

    // find the nearest match for each tracked shape
    for ( size_t i = 0; i < mTrackedShapes.size(); i++ ) {
        Shape* nearestShape = findNearestMatch( mTrackedShapes[i], mShapes, 5000 );

        // a tracked shape was found, update that tracked shape with the new shape
        if ( nearestShape != nullptr ) {
            nearestShape->matchFound = true;
            mTrackedShapes[i].centroid = nearestShape->centroid;

            // get depth value from center point
            float centerDepth = (float)mInput.at<short>( mTrackedShapes[i].centroid.y, mTrackedShapes[i].centroid.x );
            // map the raw depth from [mNearLimit, mFarLimit] to [0, 1]
            mTrackedShapes[i].depth = lmap( centerDepth, (float)mNearLimit, (float)mFarLimit, 0.0f, 1.0f );

            mTrackedShapes[i].lastFrameSeen = ci::app::getElapsedFrames();
            mTrackedShapes[i].hull.clear();
            mTrackedShapes[i].hull = nearestShape->hull;
            mTrackedShapes[i].motion = nearestShape->motion;

            // steer the shape's trail towards the new centroid
            Vec3f centerVec = Vec3f( mTrackedShapes[i].centroid.x, mTrackedShapes[i].centroid.y, 0.0f );
            mTrackedShapes[i].mTrailPoint.arrive( centerVec );
            mTrackedShapes[i].mTrailPoint.updateTrail();
        }
    }

    // if shape->matchFound is false, add it as a new shape
    for ( size_t i = 0; i < mShapes.size(); i++ ) {
        if ( mShapes[i].matchFound == false ) {
            // assign an unique ID
            mShapes[i].ID = shapeUID;
            mShapes[i].lastFrameSeen = ci::app::getElapsedFrames();
            // starting point of the trail
            mShapes[i].mTrailPoint.mLocation = Vec3f( mShapes[i].centroid.x, mShapes[i].centroid.y, 0.0f );
            // add this new shape to tracked shapes
            mTrackedShapes.push_back( mShapes[i] );
            shapeUID++;
        }
    }

    // if we didn't find a match for 20 frames, delete the tracked shape
    for ( vector<Shape>::iterator it = mTrackedShapes.begin(); it != mTrackedShapes.end(); ) {
        if ( ci::app::getElapsedFrames() - it->lastFrameSeen > 20 ) {
            // remove the tracked shape
            it = mTrackedShapes.erase( it );
        } else {
            ++it;
        }
    }

    // refresh debug views of the raw and processed depth images
    mSurfaceDepth = Surface8u( fromOcv( mInput ) );
    mSurfaceSubtract = Surface8u( fromOcv( eightBit ) );
}