void ScheinrieseApp::handleKinect() {
    if ( !hasKinect ) {
        return;
    }
    // keep the device tilt in sync with the GUI value
    if ( mKinectTilt != mKinect.getTilt() ) {
        mKinect.setTilt( mKinectTilt );
    }
    // grab new frames when they are available
    if ( mKinect.checkNewDepthFrame() ) {
        mDepthTexture = mKinect.getDepthImage();
    }
    if ( mKinect.checkNewVideoFrame() ) {
        mColorTexture = mKinect.getVideoImage();
    }

    /* debug view: lazily register the textures with the GUI once they exist */
    if ( mColorTexture && !mDebugViewColor ) {
        mGui->addLabel( "COLOR" );
        mDebugViewColor = mGui->addParam( "COLOR", &mColorTexture );
        mDebugViewColor->var = &mColorTexture;
        console() << "color" << endl;
    }
    if ( mDepthTexture && !mDebugViewDepth ) {
        mGui->addLabel( "DEPTH" );
        mDebugViewDepth = mGui->addParam( "DEPTH", &mDepthTexture );
        mDebugViewDepth->var = &mDepthTexture;
        console() << "depth" << endl;
    }
}
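// A minimal, hypothetical helper (not part of the original app) sketching how the
// textures filled in handleKinect() could be previewed from the app's draw() call.
// The function name and the 320x240 side-by-side layout are assumptions based on
// the Kinect's native 640x480 resolution; the member textures would be passed in,
// e.g. drawKinectPreviews( mDepthTexture, mColorTexture ).
void drawKinectPreviews( const gl::Texture &depthTexture, const gl::Texture &colorTexture ) {
    if ( depthTexture )
        gl::draw( depthTexture, Rectf( 0, 0, 320, 240 ) );   // depth preview, left
    if ( colorTexture )
        gl::draw( colorTexture, Rectf( 320, 0, 640, 240 ) ); // color preview, right
}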
void RogalarmApp::lookingForUser() {
    if ( mKinect.checkNewDepthFrame() ) {
        ImageSourceRef depthImage = mKinect.getDepthImage();

        // make a texture to display
        mDepthTexture = depthImage;
        // make a surface for opencv
        mDepthSurface = depthImage;

        if ( mDepthSurface ) {
            // once the surface is available, pass it to OpenCV.
            // had trouble here with bit depth: the surface comes in full color,
            // so it is crushed down to a single 8-bit channel first
            cv::Mat tmp( toOcv( Channel8u( mDepthSurface ) ) ), input, blurred, thresholded, thresholded2, output;

            // mask out reflective strips at the bottom and top of the depth image
            if ( mReflectionBottom > 0 ) {
                cv::Scalar black( 0, 0, 0 );
                cv::Point p1 = cv::Point( 0, 480 - mReflectionBottom - 1 );
                cv::Point p2 = cv::Point( 640, 480 );
                cv::rectangle( tmp, p1, p2, black, -1, 8, 0 );
            }
            if ( mReflectionTop > 0 ) {
                cv::Scalar black( 0, 0, 0 );
                cv::Point p1 = cv::Point( 0, 0 );
                cv::Point p2 = cv::Point( 640, mReflectionTop );
                cv::rectangle( tmp, p1, p2, black, -1, 8, 0 );
            }

            //tmp.copyTo(input, mBackground);
            cv::blur( tmp, blurred, cv::Size( 10, 10 ) );

            // make two thresholded images: one to display and one to pass to
            // findContours, since that call alters the image
            cv::threshold( blurred, thresholded, mThreshold, 255, CV_THRESH_BINARY );
            cv::threshold( blurred, thresholded2, mThreshold, 255, CV_THRESH_BINARY );

            // 2d vector to store the found contours
            vector<vector<cv::Point> > contours;
            // find them
            cv::findContours( thresholded, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE );

            // convert the threshold image to color for output
            // so we can draw blobs on it
            cv::cvtColor( thresholded2, output, CV_GRAY2RGB );
            cv::Scalar color( 0, 255, 255 );

            mUser = false;
            // loop over the stored contours
            for ( vector<vector<cv::Point> >::iterator it = contours.begin(); it < contours.end(); it++ ) {
                // center and radius for the current blob
                cv::Point2f center;
                float radius;
                // convert the contour points to a matrix
                vector<cv::Point> pts = *it;
                cv::Mat pointsMatrix = cv::Mat( pts );
                // pass it to minEnclosingCircle to make the blob
                cv::minEnclosingCircle( pointsMatrix, center, radius );

                if ( radius > mBlobMin && radius < mBlobMax ) {
                    // blob is within the expected size range: treat it as the user
                    mUserPos = 640 - center.x;
                    mUser = true;
                    if ( mNoUserMessage == false ) {
                        mNoUserMessage = true;
                    }
                    cv::circle( output, center, radius, color, 3 );
                    mStopedTime = getElapsedSeconds();

                    osc::Message message;
                    message.addFloatArg( mUserPos );
                    message.setAddress( "/user/1" );
                    message.setRemoteEndpoint( mHost, mPort );
                    mSender.sendMessage( message );
                } else if ( mNoUserMessage ) {
                    // no valid blob: report the last known position once on "/nouser"
                    osc::Message message;
                    message.addFloatArg( mUserPos );
                    message.setAddress( "/nouser" );
                    message.setRemoteEndpoint( mHost, mPort );
                    mSender.sendMessage( message );
                    mNoUserMessage = false;
                }
            }

            // draw the masked reflection areas in yellow on the debug output
            cv::Scalar yellow( 0, 255, 255 );
            if ( mReflectionBottom > 0 ) {
                cv::Point p1 = cv::Point( 0, 480 - mReflectionBottom - 1 );
                cv::Point p2 = cv::Point( 640, 480 );
                cv::rectangle( output, p1, p2, yellow, -1, 8, 0 );
            }
            if ( mReflectionTop > 0 ) {
                cv::Point p1 = cv::Point( 0, 0 );
                cv::Point p2 = cv::Point( 640, mReflectionTop );
                cv::rectangle( output, p1, p2, yellow, -1, 8, 0 );
            }

            mContourTexture = gl::Texture( fromOcv( output ) );
        }
    }

    if ( mKinect.checkNewVideoFrame() )
        mColorTexture = mKinect.getVideoImage();
}
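// A minimal receiver-side sketch (an assumption, not part of the original project)
// illustrating how the "/user/1" and "/nouser" messages sent above could be consumed
// with Cinder's osc::Listener. The helper name and the out-parameters are invented
// for illustration; the listener would be set up with the same port as mPort.
#include "OscListener.h"

void pollUserMessages( osc::Listener &listener, float &userPos, bool &userPresent ) {
    while ( listener.hasWaitingMessages() ) {
        osc::Message message;
        listener.getNextMessage( &message );
        if ( message.getAddress() == "/user/1" ) {
            userPos = message.getArgAsFloat( 0 ); // mirrored x position of the tracked blob
            userPresent = true;
        } else if ( message.getAddress() == "/nouser" ) {
            userPresent = false;                  // last known position stays in userPos
        }
    }
}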