void UserApp::onUser( nite::UserTrackerFrameRef frame, const OpenNI::DeviceOptions& deviceOptions )
{
    mShapeDetection.onDepth( frame.getDepthFrame(), deviceOptions );
    mChannel = OpenNI::toChannel16u( frame.getDepthFrame() );
    mUsers   = OpenNI::toVector( frame.getUsers() );

    // Start skeleton tracking for users who just entered the scene and
    // stop it for users who have left.
    for ( vector<nite::UserData>::iterator iter = mUsers.begin(); iter != mUsers.end(); ++iter ) {
        if ( iter->isNew() ) {
            mDevice->getUserTracker().startSkeletonTracking( iter->getId() );
        } else if ( iter->isLost() ) {
            mDevice->getUserTracker().stopSkeletonTracking( iter->getId() );
        }
    }
}
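// Not from the original app: a minimal sketch of reading joint data once
// startSkeletonTracking() has taken effect. `logHead` is a hypothetical
// helper; the nite::Skeleton / nite::SkeletonJoint calls are standard NiTE2 API.
#include <iostream>

void logHead( const nite::UserData& user )
{
    const nite::Skeleton& skeleton = user.getSkeleton();
    if ( skeleton.getState() != nite::SKELETON_TRACKED ) {
        return; // Tracking has not converged on this user yet.
    }
    const nite::SkeletonJoint& head = skeleton.getJoint( nite::JOINT_HEAD );
    if ( head.getPositionConfidence() > 0.5f ) {
        const nite::Point3f& pos = head.getPosition();
        // Joint positions are in millimeters, in depth-camera space.
        std::cout << "Head: " << pos.x << ", " << pos.y << ", " << pos.z << std::endl;
    }
}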
/* Convert the raw depth data to a grayscale image; returns CV_8UC3. */
cv::Mat depthToImage( nite::UserTrackerFrameRef& userFrame )
{
    cv::Mat depthImage;
    openni::VideoFrameRef depthFrame = userFrame.getDepthFrame();
    if ( depthFrame.isValid() ) {
        // Wrap the depth buffer in depthImage. Depth pixels are unsigned
        // 16-bit, so use CV_16UC1 (CV_16SC1 would misread values above 32767).
        openni::VideoMode videoMode = depthFrame.getVideoMode();
        depthImage = cv::Mat( videoMode.getResolutionY(), videoMode.getResolutionX(),
                              CV_16UC1, (openni::DepthPixel*) depthFrame.getData() );

        // Convert depthImage to a displayable CV_8UC3: scale so that
        // 10000 mm maps to 255, then expand grayscale to BGR.
        depthImage.convertTo( depthImage, CV_8UC1, 255.0 / 10000 );
        cv::cvtColor( depthImage, depthImage, CV_GRAY2BGR );
    }
    return depthImage;
}
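// Not from the original: a minimal usage sketch for depthToImage(), with
// error handling reduced to the essentials. The window name and exit key
// are illustrative choices.
#include <NiTE.h>
#include <opencv2/opencv.hpp>

int main()
{
    if ( nite::NiTE::initialize() != nite::STATUS_OK ) {
        return -1;
    }
    nite::UserTracker userTracker;
    if ( userTracker.create() != nite::STATUS_OK ) {
        return -1;
    }
    while ( true ) {
        nite::UserTrackerFrameRef userFrame;
        if ( userTracker.readFrame( &userFrame ) == nite::STATUS_OK ) {
            cv::Mat image = depthToImage( userFrame );
            if ( !image.empty() ) {
                cv::imshow( "depth", image );
            }
        }
        if ( cv::waitKey( 10 ) == 'q' ) {
            break;
        }
    }
    nite::NiTE::shutdown();
    return 0;
}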
void drawUser( nite::UserTrackerFrameRef& userFrame, cv::Mat& image )
{
    openni::VideoFrameRef depthFrame = userFrame.getDepthFrame();
    if ( depthFrame.isValid() ) {
        openni::VideoMode videoMode = depthFrame.getVideoMode();

        // Get the UserMap as a cv::Mat (one nite::UserId label per pixel;
        // nite::UserId is a 16-bit integer, hence CV_16SC1).
        cv::Mat pMapLabel = cv::Mat( videoMode.getResolutionY(), videoMode.getResolutionX(),
                                     CV_16SC1, (short*) userFrame.getUserMap().getPixels() );
        pMapLabel.convertTo( pMapLabel, CV_8UC1 );

        // Tint the pixels belonging to each detected user. Label 0 means
        // "no user", so user IDs start at 1.
        for ( int i = 0; i < 6; i++ ) {
            cv::Mat mask;
            cv::compare( pMapLabel, i + 1, mask, CV_CMP_EQ );
            cv::add( image, colors[i], image, mask );
        }
    }
}
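// drawUser() relies on an externally defined `colors` array that the
// original does not show. A plausible definition (the exact values are an
// assumption, not from the source): six BGR tints, matching the loop over
// labels 1-6 above.
const cv::Scalar colors[6] = {
    cv::Scalar( 0, 0, 255 ),   // red
    cv::Scalar( 0, 255, 0 ),   // green
    cv::Scalar( 255, 0, 0 ),   // blue
    cv::Scalar( 0, 255, 255 ), // yellow
    cv::Scalar( 255, 255, 0 ), // cyan
    cv::Scalar( 255, 0, 255 )  // magenta
};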