void ScheinrieseApp::handleKinect() {
    if ( !hasKinect ) {
        return;
    }

    if ( mKinectTilt != mKinect.getTilt() ) {
        mKinect.setTilt( mKinectTilt );
    }

    if ( mKinect.checkNewDepthFrame() ) {
        mDepthTexture = mKinect.getDepthImage();
    }
    if ( mKinect.checkNewVideoFrame() ) {
        mColorTexture = mKinect.getVideoImage();
    }

    /* debug view */
    if ( mColorTexture && !mDebugViewColor ) {
        mGui->addLabel( "COLOR" );
        mDebugViewColor = mGui->addParam( "COLOR", &mColorTexture );
        mDebugViewColor->var = &mColorTexture;
        console() << "color" << endl;
    }
    if ( mDepthTexture && !mDebugViewDepth ) {
        mGui->addLabel( "DEPTH" );
        mDebugViewDepth = mGui->addParam( "DEPTH", &mDepthTexture );
        mDebugViewDepth->var = &mDepthTexture;
        console() << "depth" << endl;
    }
}
void HiKinectApp::update() {
    if( mKinect.checkNewDepthFrame() ) {
        mDepthTexture = mKinect.getDepthImage();
        mDepthSurface = Surface32f( mKinect.getDepthImage() );
        mKinectReady = true;
        if ( !mKinectIR ) {
            mKinectIR = true;
            mKinect.setVideoInfrared( true );
        }
        ci::Surface captureSurface = Surface8u( mKinect.getDepthImage() );
        ci::Surface outputSurface = captureSurface;
        mContours->clear();
        mSilhouetteDetector->processSurface( &captureSurface, mContours, &outputSurface );
    }

    if( mKinect.checkNewColorFrame() )
        mColorTexture = mKinect.getVideoImage();

    // ease toward a small number instead of 0.0f, because the lights
    // go black after a few seconds when easing all the way to 0.0f
    if( mIsMouseDown )
        mDirectional -= ( mDirectional - 0.00001f ) * 0.1f;
    else
        mDirectional -= ( mDirectional - 1.0f ) * 0.1f;

    if( mKinectReady )
        mGridMesh.updateKinect( mKinect );
    else
        mGridMesh.update();
}
void ContoursApp::update() {
    // switch the video stream to infrared once the Kinect is ready;
    // setting the flag prevents re-enabling it on every update
    if ( mKinectReady && !mKinectIR ) {
        mKinect.setVideoInfrared( true );
        mKinectIR = true;
    }

    if( mKinect.checkNewDepthFrame() ) {
        mDepthTexture = mKinect.getDepthImage();
        mDepthSurface = Surface8u( mKinect.getDepthImage() );
        mKinectReady = true;
        ci::Surface captureSurface = Surface8u( mKinect.getDepthImage() );
        ci::Surface outputSurface = captureSurface;
        contours->clear();
        silhouetteDetector->processSurface( &captureSurface, contours, &outputSurface );
        console() << contours->size() << " contours found" << endl;
        mTexture1 = outputSurface;
    }

    if( mKinect.checkNewColorFrame() ) {
        mTexture2 = gl::Texture( mKinect.getVideoImage() );
    }
}
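// SilhouetteDetector itself is not shown in these snippets. Below is a minimal
// sketch of a processSurface() that would satisfy the calls above, assuming the
// class simply wraps cv::findContours and draws the result into the output
// surface; the class layout, the fixed threshold of 128, and the contour type
// (std::vector<std::vector<cv::Point> >*) are guesses, not the original code.
class SilhouetteDetector {
  public:
    void processSurface( ci::Surface *input, std::vector<std::vector<cv::Point> > *contours, ci::Surface *output )
    {
        // crush the input down to a single 8-bit channel and binarize it
        cv::Mat gray( toOcv( ci::Channel8u( *input ) ) ), thresholded;
        cv::threshold( gray, thresholded, 128, 255, CV_THRESH_BINARY );

        // extract the outer silhouettes
        cv::findContours( thresholded, *contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE );

        // draw the contours onto a color copy for display
        cv::Mat colored;
        cv::cvtColor( thresholded, colored, CV_GRAY2RGB );
        cv::drawContours( colored, *contours, -1, cv::Scalar( 255, 0, 0 ) );
        *output = ci::Surface( fromOcv( colored ) );
    }
};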
void kinectPointCloudApp::update() {
    if( mKinect.checkNewDepthFrame() )
        mDepthTexture = mKinect.getDepthImage();

    // This sample does not use the color data
    //if( mKinect.checkNewVideoFrame() )
    //    mColorTexture = mKinect.getVideoImage();

    if( mKinectTilt != mKinect.getTilt() )
        mKinect.setTilt( mKinectTilt );

    mEye = Vec3f( 0.0f, 0.0f, mCameraDistance );
    mCam.lookAt( mEye, mCenter, mUp );
    gl::setMatrices( mCam );
}
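// The draw() side of this sample is not shown. A plausible minimal sketch:
// bind the depth texture and let a vertex shader displace a pre-built grid
// VBO by the sampled depth. The member names (mShader, mVboMesh) and the
// uniform name are assumptions, not from the original.
void kinectPointCloudApp::draw()
{
    gl::clear( Color( 0, 0, 0 ) );
    if( !mDepthTexture )
        return;
    mDepthTexture.bind( 0 );
    mShader.bind();
    mShader.uniform( "depthTex", 0 ); // sampled per-vertex to displace the grid
    gl::draw( mVboMesh );
    mShader.unbind();
    mDepthTexture.unbind();
}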
void kinectSkelApp::update() {
    if( mKinect.checkNewDepthFrame() ) {
        //mDepthTexture = mKinect.getDepthImage();
        depthSurf = Surface8u( mKinect.getDepthImage() );
        cv::Mat input( toOcv( depthSurf ) );
        cv::Mat thr1, thr2, dilated,
                thr3( 640, 480, CV_8UC1 ),
                dist( 640, 480, CV_32FC1 ), distC,
                distLapl( 640, 480, CV_8UC1 ),
                distLapl00( 640, 480, CV_8UC1 ), distLapl45( 640, 480, CV_8UC1 ),
                distLapl90( 640, 480, CV_8UC1 ), distLapl135( 640, 480, CV_8UC1 ),
                distLaplC, distThresh, distThreshC, distBlur, distFinal, distFinalC;

        //cv::medianBlur( input, output, 100 );
        //cv::Sobel( input, output, CV_8U, 0, 1 );

        // band-pass the depth image: keep only values between mLo and mHi
        cv::threshold( input, thr1, mLo * 255.0f, 255, CV_THRESH_TOZERO );
        cv::threshold( thr1, thr2, mHi * 255.0f, 255, CV_THRESH_TOZERO_INV );

        // close holes in the silhouette, then binarize it
        //cv::dilate( thr2, dilated, cv::Mat( mDilateSize, mDilateSize, CV_8UC1 ), cv::Point( -1, -1 ), mDilateIter );
        cv::morphologyEx( thr2, dilated, cv::MORPH_CLOSE, cv::Mat( mDilateSize, mDilateSize, CV_8UC1 ), cv::Point( -1, -1 ), mDilateIter );
        cv::threshold( dilated, thr3, mLo * 255.0f, 255, CV_THRESH_BINARY );

        // distance transform of the binary silhouette
        cv::Mat thr3bin( toOcv( Channel8u( fromOcv( thr3 ) ) ) );
        cv::distanceTransform( thr3bin, dist, CV_DIST_L2, CV_DIST_MASK_5 );
        //cv::cvtColor( dist, distC, CV_GRAY2RGB );
        dist.convertTo( distC, CV_8UC1, 1.0f, 0.0f );

        // ridge detection: filter with four directional kernels
        // and keep the maximum response per pixel
        //cv::Laplacian( distC, distLapl, 8, 3 );
        //cv::Sobel( distC, distLapl, 8, 1, 1 );
        cv::filter2D( distC, distLapl00, 8, kern00, cv::Point( -1, -1 ), 0 );
        cv::filter2D( distC, distLapl45, 8, kern45, cv::Point( -1, -1 ), 0 );
        cv::filter2D( distC, distLapl90, 8, kern90, cv::Point( -1, -1 ), 0 );
        cv::filter2D( distC, distLapl135, 8, kern135, cv::Point( -1, -1 ), 0 );
        distLapl = distLapl.t();
        int index;
        for( int x = 0; x < 640; x++ ) {
            for( int y = 0; y < 480; y++ ) {
                index = y * 640 + x;
                float smax = getMax( distLapl00.data[index], distLapl45.data[index],
                                     distLapl90.data[index], distLapl135.data[index] );
                distLapl.data[index] = smax > 0 ? smax : 0;
            }
        }
        //distLapl = distLapl.t();
        distLapl.convertTo( distLaplC, CV_8UC1, 1.0f, 0.0f );

        // threshold, blur and re-threshold to clean up the skeleton
        cv::threshold( distLaplC, distThresh, mSkelThresh, 255, CV_THRESH_BINARY );
        //cv::adaptiveThreshold( distLaplC, distThresh, 255, cv::ADAPTIVE_THRESH_GAUSSIAN_C, CV_THRESH_BINARY, 7, 0 );
        distThresh.convertTo( distThreshC, CV_8UC1, 1.0f, 0.0f );
        cv::GaussianBlur( distThreshC, distBlur, cv::Size( 5, 5 ), 3, 0 );
        cv::adaptiveThreshold( distBlur, distFinal, 255, cv::ADAPTIVE_THRESH_GAUSSIAN_C, CV_THRESH_BINARY, 7, 0 );
        //cv::threshold( distBlur, distFinal, mSkelThresh, 255, CV_THRESH_TOZERO );
        //cv::medianBlur( distThreshC, distFinal, 3 );
        distFinal.convertTo( distFinalC, CV_8UC1, 1.0f, 0.0f );

        Surface8u surfDist( fromOcv( distThreshC ) );
        Surface8u surfDistRaw( fromOcv( distC ) );
        mDepthTexture = gl::Texture( surfDist );
        mColorTexture = gl::Texture( surfDistRaw );
    }

    //if( mKinect.checkNewColorFrame() )
    //    mColorTexture = mKinect.getColorImage();

    if( mKinectTilt != mKinect.getTilt() )
        mKinect.setTilt( mKinectTilt );

    //console() << "Accel: " << kinect.getAccel() << std::endl;
}
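// getMax() and the directional kernels kern00/kern45/kern90/kern135 are used
// above but not defined in this snippet. A minimal sketch of plausible
// definitions, assuming classic 3x3 line-detection kernels at 0, 45, 90 and
// 135 degrees; the actual values in the original app may differ.
inline float getMax( float a, float b, float c, float d )
{
    return std::max( std::max( a, b ), std::max( c, d ) );
}

// e.g. initialized once in setup():
cv::Mat kern00  = ( cv::Mat_<char>( 3, 3 ) << -1, -1, -1,  2,  2,  2, -1, -1, -1 );
cv::Mat kern45  = ( cv::Mat_<char>( 3, 3 ) << -1, -1,  2, -1,  2, -1,  2, -1, -1 );
cv::Mat kern90  = ( cv::Mat_<char>( 3, 3 ) << -1,  2, -1, -1,  2, -1, -1,  2, -1 );
cv::Mat kern135 = ( cv::Mat_<char>( 3, 3 ) <<  2, -1, -1, -1,  2, -1, -1, -1,  2 );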
void HandTrackingApp::update() {
    if( mKinect.checkNewDepthFrame() ) {
        ImageSourceRef depthImage = mKinect.getDepthImage();

        // make a texture to display
        mDepthTexture = depthImage;
        // make a surface for opencv
        mDepthSurface = depthImage;

        if( mDepthSurface ) {
            // once the surface is available, pass it to opencv.
            // had trouble here with bit depth: the surface comes in
            // as full color, so crush it down to a single channel
            cv::Mat input( toOcv( Channel8u( mDepthSurface ) ) ), blurred, thresholded, thresholded2, output;

            cv::blur( input, blurred, cv::Size( 10, 10 ) );

            // make two thresholded images: one to display, and one to pass
            // to findContours, since that call alters the image
            cv::threshold( blurred, thresholded, mThreshold, 255, CV_THRESH_BINARY );
            cv::threshold( blurred, thresholded2, mThreshold, 255, CV_THRESH_BINARY );

            // 2d vector to store the found contours
            vector<vector<cv::Point> > contours;
            // find them
            cv::findContours( thresholded, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE );

            // convert the thresholded image to color so we can draw blobs on it
            cv::cvtColor( thresholded2, output, CV_GRAY2RGB );

            // loop over the stored contours
            for( vector<vector<cv::Point> >::iterator it = contours.begin(); it < contours.end(); ++it ) {
                // center and radius for the current blob
                cv::Point2f center;
                float radius;
                // convert the contour points to a matrix
                vector<cv::Point> pts = *it;
                cv::Mat pointsMatrix = cv::Mat( pts );
                // pass to minEnclosingCircle to make the blob
                cv::minEnclosingCircle( pointsMatrix, center, radius );

                cv::Scalar color( 0, 255, 0 );
                if( radius > mBlobMin && radius < mBlobMax ) {
                    // draw the blob if it's in range
                    cv::circle( output, center, radius, color );
                    // update the target position, mirroring x
                    mTargetPosition.x = 640 - center.x;
                    mTargetPosition.y = center.y;
                    mTargetPosition.z = 0;
                }
            }
            mCvTexture = gl::Texture( fromOcv( output ) );
        }
    }

    if( mKinect.checkNewColorFrame() )
        mColorTexture = mKinect.getColorImage();

    if( mKinectTilt != mKinect.getTilt() )
        mKinect.setTilt( mKinectTilt );
}
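// mTargetPosition above is left in mirrored 640x480 depth-image coordinates.
// A small hypothetical helper for mapping it into the current window, assuming
// a plain proportional scale; the helper name is not from the original code.
Vec3f depthToWindow( const Vec3f &target )
{
    return Vec3f( target.x / 640.0f * getWindowWidth(),
                  target.y / 480.0f * getWindowHeight(),
                  0.0f );
}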
void RogalarmApp::lookingForUser() {
    if( mKinect.checkNewDepthFrame() ) {
        ImageSourceRef depthImage = mKinect.getDepthImage();

        // make a texture to display
        mDepthTexture = depthImage;
        // make a surface for opencv
        mDepthSurface = depthImage;

        if( mDepthSurface ) {
            // once the surface is available, pass it to opencv.
            // had trouble here with bit depth: the surface comes in
            // as full color, so crush it down to a single channel
            cv::Mat tmp( toOcv( Channel8u( mDepthSurface ) ) ), input, blurred, thresholded, thresholded2, output;

            // mask out reflective strips at the bottom and top of the frame
            if( mReflectionBottom > 0 ) {
                cv::Scalar black( 0, 0, 0 );
                cv::Point p1 = cv::Point( 0, 480 - mReflectionBottom - 1 );
                cv::Point p2 = cv::Point( 640, 480 );
                cv::rectangle( tmp, p1, p2, black, -1, 8, 0 );
            }
            if( mReflectionTop > 0 ) {
                cv::Scalar black( 0, 0, 0 );
                cv::Point p1 = cv::Point( 0, 0 );
                cv::Point p2 = cv::Point( 640, mReflectionTop );
                cv::rectangle( tmp, p1, p2, black, -1, 8, 0 );
            }

            //tmp.copyTo( input, mBackground );
            cv::blur( tmp, blurred, cv::Size( 10, 10 ) );

            // make two thresholded images: one to display, and one to pass
            // to findContours, since that call alters the image
            cv::threshold( blurred, thresholded, mThreshold, 255, CV_THRESH_BINARY );
            cv::threshold( blurred, thresholded2, mThreshold, 255, CV_THRESH_BINARY );

            // 2d vector to store the found contours
            vector<vector<cv::Point> > contours;
            // find them
            cv::findContours( thresholded, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE );

            // convert the thresholded image to color so we can draw blobs on it
            cv::cvtColor( thresholded2, output, CV_GRAY2RGB );

            cv::Scalar color( 0, 255, 255 );
            mUser = false;

            // loop over the stored contours
            for( vector<vector<cv::Point> >::iterator it = contours.begin(); it < contours.end(); ++it ) {
                // center and radius for the current blob
                cv::Point2f center;
                float radius;
                // convert the contour points to a matrix
                vector<cv::Point> pts = *it;
                cv::Mat pointsMatrix = cv::Mat( pts );
                // pass to minEnclosingCircle to make the blob
                cv::minEnclosingCircle( pointsMatrix, center, radius );

                if( radius > mBlobMin && radius < mBlobMax ) {
                    mUserPos = 640 - center.x;
                    mUser = true;
                    if( mNoUserMessage == false ) {
                        mNoUserMessage = true;
                    }
                    cv::circle( output, center, radius, color, 3 );
                    mStopedTime = getElapsedSeconds();

                    osc::Message message;
                    message.addFloatArg( mUserPos );
                    message.setAddress( "/user/1" );
                    message.setRemoteEndpoint( mHost, mPort );
                    mSender.sendMessage( message );
                }
                else if( mNoUserMessage ) {
                    osc::Message message;
                    message.addFloatArg( mUserPos );
                    message.setAddress( "/nouser" );
                    message.setRemoteEndpoint( mHost, mPort );
                    mSender.sendMessage( message );
                    mNoUserMessage = false;
                }
            }

            // draw the masked reflection areas in yellow on the debug output
            cv::Scalar yellow( 0, 255, 255 );
            if( mReflectionBottom > 0 ) {
                cv::Point p1 = cv::Point( 0, 480 - mReflectionBottom - 1 );
                cv::Point p2 = cv::Point( 640, 480 );
                cv::rectangle( output, p1, p2, yellow, -1, 8, 0 );
            }
            if( mReflectionTop > 0 ) {
                cv::Point p1 = cv::Point( 0, 0 );
                cv::Point p2 = cv::Point( 640, mReflectionTop );
                cv::rectangle( output, p1, p2, yellow, -1, 8, 0 );
            }
            mContourTexture = gl::Texture( fromOcv( output ) );
        }
    }

    if( mKinect.checkNewVideoFrame() )
        mColorTexture = mKinect.getVideoImage();
}
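// The receiving end of the OSC link is not shown. A minimal sketch of a Cinder
// osc::Listener loop that would consume the /user/1 and /nouser messages sent
// above; the listener member and its setup are assumptions, not original code.
osc::Listener mListener;
mListener.setup( mPort );

// in the receiver's update():
while( mListener.hasWaitingMessages() ) {
    osc::Message message;
    mListener.getNextMessage( &message );
    if( message.getAddress() == "/user/1" ) {
        float userPos = message.getArgAsFloat( 0 ); // mirrored x position of the tracked blob
        // ... react to the user's position here
    }
    else if( message.getAddress() == "/nouser" ) {
        // ... the user has left the frame
    }
}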