virtual bool tryConfigureVideoMode(VideoMode& mode)
{
    bool result = true;
    VideoMode old = stream_.getVideoMode();

    if (stream_.setVideoMode(mode) != STATUS_OK)
    {
        ROS_ERROR_STREAM_COND(stream_.setVideoMode(old) != STATUS_OK,
                              "Failed to recover old video mode!");
        result = false;
    }

    return result;
}
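// The same try-and-rollback pattern works outside the class as a free function;
// a minimal sketch, assuming an already-created openni::VideoStream (the function
// name and the printf logging are illustrative, not part of the original):
#include <OpenNI.h>
#include <cstdio>

using namespace openni;

// Attempt to switch 'stream' to 'mode'; on failure, try to restore the previous
// mode and report whether the switch succeeded.
bool trySetVideoMode(VideoStream& stream, const VideoMode& mode)
{
    VideoMode old = stream.getVideoMode();
    if (stream.setVideoMode(mode) != STATUS_OK)
    {
        if (stream.setVideoMode(old) != STATUS_OK)
            printf("Failed to recover old video mode!\n%s\n", OpenNI::getExtendedError());
        return false;
    }
    return true;
}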
int kinect_init()
{
    Status rc = OpenNI::initialize();
    if (rc != STATUS_OK)
    {
        printf("Initialize failed\n%s\n", OpenNI::getExtendedError());
        return 1;
    }

    rc = device.open(ANY_DEVICE);
    if (rc != STATUS_OK)
    {
        printf("Couldn't open device\n%s\n", OpenNI::getExtendedError());
        return 2;
    }

    if (device.getSensorInfo(SENSOR_DEPTH) != NULL)
    {
        rc = depth.create(device, SENSOR_DEPTH);
        if (rc != STATUS_OK)
        {
            printf("Couldn't create depth stream\n%s\n", OpenNI::getExtendedError());
            return 3;
        }

        const SensorInfo* sinfo = device.getSensorInfo(SENSOR_DEPTH);
        const Array<VideoMode>& modes = sinfo->getSupportedVideoModes();
        for (int i = 0; i < modes.getSize(); i++)
        {
            printf("%i: %ix%i, %i fps, %i format\n", i,
                   modes[i].getResolutionX(), modes[i].getResolutionY(),
                   modes[i].getFps(), modes[i].getPixelFormat());
        }

        //rc = depth.setVideoMode(modes[0]); // 320x240, 30fps, format: 100
        rc = depth.setVideoMode(modes[4]);   // 640x480, 30fps, format: 100 (PIXEL_FORMAT_DEPTH_1_MM)
        if (rc != openni::STATUS_OK)
        {
            printf("Failed to set depth resolution\n");
            return -1;
        }
    }

    rc = depth.start();
    if (rc != STATUS_OK)
    {
        printf("Couldn't start the depth stream\n%s\n", OpenNI::getExtendedError());
        return 4;
    }

    return 0;
}
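// Hardcoding modes[4] is fragile: the index order of getSupportedVideoModes() can
// differ between devices and driver versions. A hedged alternative that searches
// for an explicit resolution/fps/format (the helper name is illustrative):
#include <OpenNI.h>

using namespace openni;

// Return the index of the first supported mode matching the request, or -1.
int findVideoModeIndex(const SensorInfo& info, int x, int y, int fps, PixelFormat fmt)
{
    const Array<VideoMode>& modes = info.getSupportedVideoModes();
    for (int i = 0; i < modes.getSize(); ++i)
    {
        if (modes[i].getResolutionX() == x && modes[i].getResolutionY() == y &&
            modes[i].getFps() == fps && modes[i].getPixelFormat() == fmt)
            return i;
    }
    return -1;
}

// Usage sketch inside kinect_init(), replacing the hardcoded modes[4]:
//   int idx = findVideoModeIndex(*sinfo, 640, 480, 30, PIXEL_FORMAT_DEPTH_1_MM);
//   if (idx >= 0) rc = depth.setVideoMode(modes[idx]);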
// Copy basic properties between VideoStream
void CopyGeneralProperties(const VideoStream& rSource, VideoStream& rTarget)
{
    // assign basic properties
    rTarget.setVideoMode(rSource.getVideoMode());
    rTarget.setProperty(ONI_STREAM_PROPERTY_VERTICAL_FOV,   rSource.getVerticalFieldOfView());
    rTarget.setProperty(ONI_STREAM_PROPERTY_HORIZONTAL_FOV, rSource.getHorizontalFieldOfView());
    rTarget.setProperty(ONI_STREAM_PROPERTY_MIRRORING,      rSource.getMirroringEnabled());

    // assign depth-only properties
    rTarget.setProperty(ONI_STREAM_PROPERTY_MIN_VALUE, rSource.getMinPixelValue());
    rTarget.setProperty(ONI_STREAM_PROPERTY_MAX_VALUE, rSource.getMaxPixelValue());
}
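// CopyGeneralProperties ignores the Status returned by setProperty(), and a target
// stream may reject individual properties (e.g. the depth-only ones on a color
// stream). A hedged checked variant (wrapper name and logging are illustrative):
#include <OpenNI.h>
#include <cstdio>

using namespace openni;

// Set one stream property and warn on failure instead of silently ignoring it.
template <typename T>
void setPropertyChecked(VideoStream& stream, int propertyId, const T& value)
{
    if (stream.setProperty(propertyId, value) != STATUS_OK)
        fprintf(stderr, "setProperty(%d) failed: %s\n", propertyId, OpenNI::getExtendedError());
}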
SensorStreamManager(ros::NodeHandle& nh, Device& device, SensorType type,
                    std::string name, std::string frame_id, VideoMode& default_mode) :
    device_(device),
    default_mode_(default_mode),
    name_(name),
    frame_id_(frame_id),
    running_(false),
    nh_(nh, name_),
    it_(nh_),
    camera_info_manager_(nh_)
{
    assert(device_.hasSensor(type));

    callback_ = boost::bind(&SensorStreamManager::onSubscriptionChanged, this, _1);
    publisher_ = it_.advertiseCamera("image_raw", 1, callback_, callback_);

    ROS_ERROR_STREAM_COND(stream_.create(device_, type) != STATUS_OK,
                          "Failed to create stream '" << toString(type) << "'!");
    stream_.addNewFrameListener(this);

    ROS_ERROR_STREAM_COND(stream_.setVideoMode(default_mode_) != STATUS_OK,
                          "Failed to set default video mode for stream '" << toString(type) << "'!");
}
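// The constructor above registers onSubscriptionChanged for both the connect and
// disconnect events, but its body is not shown. A hypothetical reconstruction,
// assuming the manager lazily starts the stream on the first subscriber and stops
// it on the last disconnect (this body is a guess, not the original code):
virtual void onSubscriptionChanged(const image_transport::SingleSubscriberPublisher&)
{
    bool has_subscribers = publisher_.getNumSubscribers() > 0;

    if (has_subscribers && !running_)
    {
        ROS_ERROR_STREAM_COND(stream_.start() != STATUS_OK, "Failed to start stream!");
        running_ = true;
    }
    else if (!has_subscribers && running_)
    {
        stream_.stop();
        running_ = false;
    }
}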
int main(int argc, char** argv)
{
    printf("starting\n");
    fflush(stdout);

    ros::init(argc, argv, "xtion", ros::init_options::AnonymousName);
    ros::NodeHandle n("~");

    // Base topic name
    n.param("topic", topic, string("/camera"));
    // Resolution: index into the sensor's supported video mode list
    // (e.g. 0 = 160x120, 1 = 320x240); -1 disables the stream / lists modes
    n.param("depth_mode", _depth_mode, -1);
    n.param("rgb_mode", _rgb_mode, -1);
    n.param("sync", _sync, 0);
    n.param("registration", _registration, 0);
    n.param("frame_id", frame_id, string("camera_frame"));
    n.param("device_num", _device_num, -1);
    n.param("device_uri", _device_uri, string("NA"));
    n.param("frame_skip", _frame_skip, 0);
    n.param("exposure", _exposure, -1);
    n.param("gain", _gain, -1);

    printf("Launched with params:\n");
    printf("_device_num:= %d\n", _device_num);
    printf("_device_uri:= %s\n", _device_uri.c_str());
    printf("_topic:= %s\n", topic.c_str());
    printf("_sync:= %d\n", _sync);
    printf("_registration:= %d\n", _registration);
    printf("_depth_mode:= %d\n", _depth_mode);
    printf("_rgb_mode:= %d\n", _rgb_mode);
    printf("_frame_id:= %s\n", frame_id.c_str());
    printf("_frame_skip:= %d\n", _frame_skip);
    printf("_exposure:= %d\n", _exposure);
    printf("_gain:= %d\n", _gain);
    fflush(stdout);

    if (_frame_skip <= 0)
        _frame_skip = 1;

    // OPENNI2 STUFF
    //===================================================================
    streams = new openni::VideoStream*[2];
    streams[0] = &depth;
    streams[1] = &rgb;

    Status rc = OpenNI::initialize();
    if (rc != STATUS_OK)
    {
        printf("Initialize failed\n%s\n", OpenNI::getExtendedError());
        fflush(stdout);
        return 1;
    }

    // enumerate the devices
    openni::Array<openni::DeviceInfo> device_list;
    openni::OpenNI::enumerateDevices(&device_list);

    Device device;
    if (_device_uri.compare("NA"))
    {
        // open the first device whose URI starts with _device_uri
        string dev_uri("NA");
        for (int i = 0; i < device_list.getSize(); i++)
        {
            if (!string(device_list[i].getUri()).compare(0, _device_uri.size(), _device_uri))
            {
                dev_uri = device_list[i].getUri();
                break;
            }
        }
        if (!dev_uri.compare("NA"))
        {
            cerr << "cannot find device with uri starting with: " << _device_uri << endl;
        }
        rc = device.open(dev_uri.c_str());
    }
    else
    {
        if (_device_num < 0)
        {
            cerr << endl << endl << "found " << device_list.getSize() << " devices" << endl;
            for (int i = 0; i < device_list.getSize(); i++)
                cerr << "\t num: " << i << " uri: " << device_list[i].getUri() << endl;
        }
        if (_device_num >= device_list.getSize() || _device_num < 0)
        {
            cerr << "device num: " << _device_num << " does not exist, aborting" << endl;
            openni::OpenNI::shutdown();
            return 0;
        }
        rc = device.open(device_list[_device_num].getUri());
    }

    if (rc != STATUS_OK)
    {
        printf("Couldn't open device\n%s\n", OpenNI::getExtendedError());
        fflush(stdout);
        return 2;
    }

    if (_depth_mode >= 0)
    {
        if (device.getSensorInfo(SENSOR_DEPTH) != NULL)
        {
            rc = depth.create(device, SENSOR_DEPTH);
            if (rc != STATUS_OK)
            {
                printf("Couldn't create depth stream\n%s\n", OpenNI::getExtendedError());
                fflush(stdout);
                return 3;
            }
            // DEPTH
            pub_depth = n.advertise<sensor_msgs::Image>("/" + topic + "/depth/image_raw", 1);
            pub_camera_info_depth = n.advertise<sensor_msgs::CameraInfo>("/" + topic + "/depth/camera_info", 1);
        }
    }

    if (_rgb_mode >= 0)
    {
        if (device.getSensorInfo(SENSOR_COLOR) != NULL)
        {
            rc = rgb.create(device, SENSOR_COLOR);
            if (rc != STATUS_OK)
            {
                printf("Couldn't create rgb stream\n%s\n", OpenNI::getExtendedError());
                fflush(stdout);
                return 3;
            }
            // RGB
            pub_rgb = n.advertise<sensor_msgs::Image>("/" + topic + "/rgb/image_raw", 1);
            pub_camera_info_rgb = n.advertise<sensor_msgs::CameraInfo>("/" + topic + "/rgb/camera_info", 1);
        }
    }

    // With no mode selected, list the supported modes and exit.
    if (_depth_mode < 0 && _rgb_mode < 0)
    {
        printf("Enums data:\n"
               "PIXEL_FORMAT_DEPTH_1_MM = 100,\n"
               "PIXEL_FORMAT_DEPTH_100_UM = 101,\n"
               "PIXEL_FORMAT_SHIFT_9_2 = 102,\n"
               "PIXEL_FORMAT_SHIFT_9_3 = 103,\n"
               "PIXEL_FORMAT_RGB888 = 200,\n"
               "PIXEL_FORMAT_YUV422 = 201,\n"
               "PIXEL_FORMAT_GRAY8 = 202,\n"
               "PIXEL_FORMAT_GRAY16 = 203,\n"
               "PIXEL_FORMAT_JPEG = 204,\n"
               "PIXEL_FORMAT_YUYV = 205,\n\n");

        cout << "Depth modes" << endl;
        const openni::SensorInfo* sinfo = device.getSensorInfo(openni::SENSOR_DEPTH);
        // e.g. index 4 is 640x480, 30 fps, 1 mm on many devices
        const openni::Array<openni::VideoMode>& modesDepth = sinfo->getSupportedVideoModes();
        for (int i = 0; i < modesDepth.getSize(); i++)
        {
            printf("%i: %ix%i, %i fps, %i format\n", i,
                   modesDepth[i].getResolutionX(), modesDepth[i].getResolutionY(),
                   modesDepth[i].getFps(), modesDepth[i].getPixelFormat());
        }

        cout << "Rgb modes" << endl;
        const openni::SensorInfo* sinfoRgb = device.getSensorInfo(openni::SENSOR_COLOR);
        const openni::Array<openni::VideoMode>& modesRgb = sinfoRgb->getSupportedVideoModes();
        for (int i = 0; i < modesRgb.getSize(); i++)
        {
            printf("%i: %ix%i, %i fps, %i format\n", i,
                   modesRgb[i].getResolutionX(), modesRgb[i].getResolutionY(),
                   modesRgb[i].getFps(), modesRgb[i].getPixelFormat());
        }

        depth.stop();
        depth.destroy();
        rgb.stop();
        rgb.destroy();
        device.close();
        OpenNI::shutdown();
        exit(1);
    }

    if (_depth_mode >= 0)
    {
        rc = depth.setVideoMode(device.getSensorInfo(SENSOR_DEPTH)->getSupportedVideoModes()[_depth_mode]);
        depth.setMirroringEnabled(false);
        rc = depth.start();
    }

    if (_rgb_mode >= 0)
    {
        rc = rgb.setVideoMode(device.getSensorInfo(SENSOR_COLOR)->getSupportedVideoModes()[_rgb_mode]);
        rgb.setMirroringEnabled(false);
        rgb.getCameraSettings()->setAutoExposureEnabled(true);
        rgb.getCameraSettings()->setAutoWhiteBalanceEnabled(true);
        cerr << "Camera settings valid: " << rgb.getCameraSettings()->isValid() << endl;
        rc = rgb.start();
    }

    if (_depth_mode >= 0 && _rgb_mode >= 0 && _sync == 1)
    {
        rc = device.setDepthColorSyncEnabled(true);
        if (rc != STATUS_OK)
        {
            printf("Couldn't enable depth and rgb images synchronization\n%s\n",
                   OpenNI::getExtendedError());
            exit(2);
        }
    }

    if (_depth_mode >= 0 && _rgb_mode >= 0 && _registration == 1)
    {
        device.setImageRegistrationMode(openni::IMAGE_REGISTRATION_DEPTH_TO_COLOR);
    }

    run = true;
    pthread_t runner;
    pthread_create(&runner, 0, camera_thread, 0);

    ros::spin();

    void* result;
    run = false;
    pthread_join(runner, &result);

    depth.stop();
    depth.destroy();
    rgb.stop();
    rgb.destroy();
    device.close();
    OpenNI::shutdown();
    return 0;
}
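// main() hands acquisition off to camera_thread, whose body is not shown here.
// A minimal sketch of a plausible loop over the streams array set up above, using
// OpenNI::waitForAnyStream; the ROS publishing step is elided, and this is a guess
// at the structure, not the original implementation:
void* camera_thread(void*)
{
    VideoFrameRef frame;
    int changed = 0;
    while (run)
    {
        // Blocks until depth (index 0) or rgb (index 1) has a new frame; assumes
        // both streams were started. With only one active stream, wait on that one.
        if (OpenNI::waitForAnyStream(streams, 2, &changed, 1000 /* ms */) != STATUS_OK)
            continue;
        if (changed == 0)
            depth.readFrame(&frame);
        else
            rgb.readFrame(&frame);
        // ... convert 'frame' to a sensor_msgs::Image and publish (elided) ...
    }
    return 0;
}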
int _tmain(int argc, _TCHAR* argv[])
{
    DepthDetector detector(ThresholdMin, ThresholdMax);
    ScanLineSegmenter segmenter;

    OpenNI::initialize();

    Device device;
    if (device.open(ANY_DEVICE) != STATUS_OK)
    {
        std::cout << "could not open any device\r\n";
        return 1;
    }

    if (device.hasSensor(SENSOR_DEPTH))
    {
        auto info = device.getSensorInfo(SENSOR_DEPTH);
        auto& modes = info->getSupportedVideoModes();
        std::cout << "depth sensor supported modes:\r\n";
        for (int i = 0; i < modes.getSize(); ++i)
        {
            auto& mode = modes[i];
            std::cout << "pixel format: " << mode.getPixelFormat()
                      << "\t resolution: " << mode.getResolutionX() << "x" << mode.getResolutionY()
                      << "@" << mode.getFps() << " fps\r\n";
        }
    }

    VideoStream stream;
    stream.create(device, SENSOR_DEPTH);

    VideoMode mode;
    mode.setFps(25);
    mode.setPixelFormat(PIXEL_FORMAT_DEPTH_1_MM);
    mode.setResolution(320, 240);
    stream.setMirroringEnabled(true);
    stream.setVideoMode(mode);
    stream.start();

    std::cout << "press ENTER to capture background\r\n";
    std::cin.get();

    VideoFrameRef frame;
    stream.readFrame(&frame);

    DepthImage image(320, 240);
    copyFrameToImage(frame, image);
    detector.background(image);

    std::cout << "starting capture loop\r\n";

    CenterPointExtractor centerPointExtractor(MinBlobSize);

    std::chrono::high_resolution_clock timer;
    auto startTime = timer.now();
    int frameId = 0;

    while (true)
    {
        stream.readFrame(&frame);
        copyFrameToImage(frame, image);
        detector.detect(image);

        std::vector<LineSegment> segments;
        segmenter.segment(detector.mask(), segments);

        std::vector<std::pair<float, float>> centerPoints;
        centerPointExtractor.extract(segments, centerPoints);

        if (centerPoints.size())
        {
            std::cout << "point count: " << centerPoints.size();
            std::cout << "\t points: ";
            for (auto& point : centerPoints)
            {
                std::cout << "(" << point.first << ", " << point.second << ") ";
            }
            std::cout << "\r\n";
        }

        ++frameId;
        if (frameId % 64 == 0)
        {
            auto stopTime = timer.now();
            auto elapsedMilliseconds =
                std::chrono::duration_cast<std::chrono::milliseconds>(stopTime - startTime).count();
            // frames / elapsed seconds; the original printed ms-per-64-frames under the "fps" label
            std::cout << "\t total frames: " << frameId
                      << "\t fps: " << (64.0 * 1000.0 / elapsedMilliseconds) << std::endl;
            startTime = stopTime;
        }
    }

    openni::OpenNI::shutdown();
    return 0;
}
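// The original loop printed elapsed milliseconds per 64 frames under an "fps"
// label; the corrected computation divides frames by elapsed seconds. The same
// bookkeeping as a small reusable helper (illustrative, not part of the original):
#include <chrono>

// Tracks frames-per-second over a window of 'window' frames.
class FpsCounter
{
public:
    explicit FpsCounter(int window)
        : window_(window), frames_(0), start_(std::chrono::high_resolution_clock::now()) {}

    // Call once per frame; returns the fps when a window completes, else -1.
    double tick()
    {
        if (++frames_ % window_ != 0)
            return -1.0;
        auto now = std::chrono::high_resolution_clock::now();
        auto ms  = std::chrono::duration_cast<std::chrono::milliseconds>(now - start_).count();
        start_ = now;
        return ms > 0 ? 1000.0 * window_ / ms : 0.0;
    }

private:
    int window_;
    int frames_;
    std::chrono::high_resolution_clock::time_point start_;
};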
int _tmain(int argc, _TCHAR* argv[])
{
    sdl::Application app;

    DepthDetector detector(ThresholdMin, ThresholdMax);
    ScanLineSegmenter segmenter;

    OpenNI::initialize();

    Device device;
    if (device.open(ANY_DEVICE) != STATUS_OK)
    {
        std::cout << "could not open any device\r\n";
        return 1;
    }

    if (device.hasSensor(SENSOR_DEPTH))
    {
        auto info = device.getSensorInfo(SENSOR_DEPTH);
        auto& modes = info->getSupportedVideoModes();
        std::cout << "depth sensor supported modes:\r\n";
        for (int i = 0; i < modes.getSize(); ++i)
        {
            auto& mode = modes[i];
            std::cout << "pixel format: " << mode.getPixelFormat()
                      << "\t resolution: " << mode.getResolutionX() << "x" << mode.getResolutionY()
                      << "@" << mode.getFps() << " fps\r\n";
        }
    }

    VideoStream stream;
    stream.create(device, SENSOR_DEPTH);

    VideoMode mode;
    mode.setFps(25);
    mode.setPixelFormat(PIXEL_FORMAT_DEPTH_1_MM);
    mode.setResolution(320, 240);
    stream.setMirroringEnabled(true);
    stream.setVideoMode(mode);
    stream.start();

    std::cout << "press ENTER to capture background\r\n";
    std::cin.get();

    VideoFrameRef frame;
    stream.readFrame(&frame);

    DepthImage image(320, 240);
    copyFrameToImage(frame, image);
    detector.background(image);

    std::cout << "starting capture loop\r\n";

    sdl::GLContext::setVersion(4, 3);

    // four 320x240 panels: mask, depth image, background, difference
    ImageViewer viewer;
    viewer.add(0, 0, 320, 240);
    viewer.add(320, 0, 320, 240);
    viewer.add(0, 240, 320, 240);
    viewer.add(320, 240, 320, 240);

    CenterPointExtractor centerPointExtractor(MinBlobSize);
    MotionRecorder recorder;

    while (true)
    {
        stream.readFrame(&frame);
        copyFrameToImage(frame, image);
        detector.detect(image);

        std::vector<LineSegment> segments;
        segmenter.segment(detector.mask(), segments);

        std::vector<std::pair<float, float>> centerPoints;
        centerPointExtractor.extract(segments, centerPoints);
        recorder.track(centerPoints);

        viewer.crosses.clear();
        std::transform(begin(centerPoints), end(centerPoints), std::back_inserter(viewer.crosses),
                       [](std::pair<float, float>& coord) { return Cross{ coord.first, coord.second }; });

        viewer.lines.clear();
        std::transform(begin(recorder.motions()), end(recorder.motions()), std::back_inserter(viewer.lines),
                       [](const Motion& motion) { return Lines{ motion.points }; });

        viewer[0].update(detector.mask());
        viewer[1].update(image);
        viewer[2].update(detector.background());
        viewer[3].update(detector.difference());
        viewer.update();
    }

    openni::OpenNI::shutdown();
    return 0;
}
int main()
{
    // Write the CSV header for the shape features that shapeFeatures() appends
    FILE* fptrI = fopen("C:\\Users\\Alan\\Documents\\ShapeFeatures.csv", "w");
    fprintf(fptrI, "Classtype, Area, Perimeter, Circularity, Extent\n");
    fclose(fptrI);

    Mat input  = imread("C:\\Users\\Alan\\Pictures\\Science Fair 2014\\SVM\\Shape Features\\Fingers.bmp", 1);
    Mat input2 = imread("C:\\Users\\Alan\\Pictures\\Science Fair 2014\\SVM\\Shape Features\\NotFingers.bmp", 1);
    Mat inputF = imread("C:\\Users\\Alan\\Pictures\\Science Fair 2014\\SVM\\Shape Features\\ImageFeaturesBinaryF.bmp", 1);

    // single-channel destinations for cvtColor
    Mat gray(input.rows, input.cols, CV_8UC1);
    Mat gray2(input.rows, input.cols, CV_8UC1);
    Mat grayF(input.rows, input.cols, CV_8UC1);
    cvtColor(input, gray, CV_BGR2GRAY);
    cvtColor(input2, gray2, CV_BGR2GRAY);
    cvtColor(inputF, grayF, CV_BGR2GRAY);

    shapeFeatures(gray, input, 1);
    shapeFeatures(gray2, input2, 2);

    namedWindow("Image");
    imshow("Image", input);
    namedWindow("Image2");
    imshow("Image2", input2);

    //------------------------------------------------------
    //--------[SVM]--------
    // Read the training data back from the file created above
    double parameters[5];
    vector<double> svmI, svmA, svmP, svmC, svmE;
    int size = 1;
    char buffer[1024];
    char* line;

    FILE* fptrR = fopen("C:\\Users\\Alan\\Documents\\ShapeFeatures.csv", "r");
    fscanf(fptrR, "%*[^\n]\n"); // skip the header row

    svmI.resize(size);
    svmA.resize(size);
    svmP.resize(size);
    svmC.resize(size);
    svmE.resize(size);

    while ((line = fgets(buffer, sizeof(buffer), fptrR)) != NULL)
    {
        size++;
        svmI.resize(size);
        svmA.resize(size);
        svmP.resize(size);
        svmC.resize(size);
        svmE.resize(size);

        // comma-separated record: class label, then four feature values
        char* pch = strtok(line, ",");
        parameters[0] = atoi(pch);
        int j = 1;
        while (j < 5)
        {
            pch = strtok(NULL, ",");
            parameters[j] = atof(pch);
            j++;
        }

        svmI[size - 1] = parameters[0];
        svmA[size - 1] = parameters[1];
        svmP[size - 1] = parameters[2];
        svmC[size - 1] = parameters[3];
        svmE[size - 1] = parameters[4];
    }
    fclose(fptrR);
    //---------------------

    // Data for visual representation
    int width = 512, height = 512;
    Mat image = Mat::zeros(height, width, CV_8UC3);

    // Set up training data (assumes at most 1000 samples)
    float labels[1000];
    for (int i = 0; i < svmI.size() - 1; i++)
    {
        labels[i] = svmI[i + 1];
    }
    Mat labelsMat(1000, 1, CV_32FC1, labels);

    float trainingData[1000][4];
    for (int i = 0; i < svmE.size() - 1; i++)
    {
        trainingData[i][0] = svmE[i + 1];
        trainingData[i][1] = svmC[i + 1];
        trainingData[i][2] = svmA[i + 1];
        trainingData[i][3] = svmP[i + 1];
    }
    Mat trainingDataMat(1000, 4, CV_32FC1, trainingData);

    // Set up SVM's parameters
    CvSVMParams params;
    params = SVMFinger.get_params();
    //params.svm_type = CvSVM::C_SVC;
    //params.kernel_type = CvSVM::LINEAR;
    //params.term_crit = cvTermCriteria(CV_TERMCRIT_ITER, 100, 1e-6);

    // Train the SVM
    SVMFinger.train_auto(trainingDataMat, labelsMat, Mat(), Mat(), params);

    // Mat sampleMat = (Mat_<float>(1,2) << 138.5, 57);
    // float response = SVMFinger.predict(sampleMat);

    waitKey();
    destroyWindow("Image");
    destroyWindow("Image2");
    //------------------------------------------

    OpenNI::initialize();
    Device devAnyDevice;
    devAnyDevice.open(ANY_DEVICE);

    //----------------[Define Video Settings]-------------------
    // Set Properties of Depth Stream
    VideoMode mModeDepth;
    mModeDepth.setResolution(640, 480);
    mModeDepth.setFps(30);
    mModeDepth.setPixelFormat(PIXEL_FORMAT_DEPTH_100_UM);

    // Set Properties of Color Stream
    VideoMode mModeColor;
    mModeColor.setResolution(640, 480);
    mModeColor.setFps(30);
    mModeColor.setPixelFormat(PIXEL_FORMAT_RGB888);
    //----------------------------------------------------------

    //----------------------[Initial Streams]---------------------
    VideoStream streamInitDepth;
    streamInitDepth.create(devAnyDevice, SENSOR_DEPTH);
    VideoStream streamInitColor;
    streamInitColor.create(devAnyDevice, SENSOR_COLOR);

    streamInitDepth.setVideoMode(mModeDepth);
    streamInitColor.setVideoMode(mModeColor);

    namedWindow("Depth Image (Init)", CV_WINDOW_AUTOSIZE);
    namedWindow("Color Image (Init)", CV_WINDOW_AUTOSIZE);
    //namedWindow("Thresholded Image (Init)", CV_WINDOW_AUTOSIZE);

    VideoFrameRef frameDepthInit;
    VideoFrameRef frameColorInit;

    streamInitDepth.start();
    streamInitColor.start();

    cv::Mat BackgroundFrame;
    int avgDist = 0;
    int iMaxDepthInit = streamInitDepth.getMaxPixelValue();

    OutX.clear();
    OutY.clear();
    vector<int> OldOutX, OldOutY;
    OldOutX.clear();
    OldOutY.clear();
    //------------------------------------------------------------

    //--------------------[Initiation Process]--------------------
    while (true)
    {
        streamInitDepth.readFrame(&frameDepthInit);
        streamInitColor.readFrame(&frameColorInit);

        const cv::Mat mImageDepth(frameDepthInit.getHeight(), frameDepthInit.getWidth(),
                                  CV_16UC1, (void*)frameDepthInit.getData());
        cv::Mat mScaledDepth;
        mImageDepth.convertTo(mScaledDepth, CV_8U, 255.0 / iMaxDepthInit);
        cv::imshow("Depth Image (Init)", mScaledDepth);

        const cv::Mat mImageRGB(frameColorInit.getHeight(), frameColorInit.getWidth(),
                                CV_8UC3, (void*)frameColorInit.getData());
        cv::Mat cImageBGR;
        cv::cvtColor(mImageRGB, cImageBGR, CV_RGB2BGR);

        //--------------------[Get Average Distance]---------------------
        int depthVal = 0;
        int frameHeight = frameDepthInit.getHeight();
        int frameWidth  = frameDepthInit.getWidth();

        for (int i = 0; i < frameHeight; i++)
        {
            for (int j = 0; j < frameWidth; j++)
            {
                depthVal = mImageDepth.at<unsigned short>(i, j) + depthVal;
            }
        }
        avgDist = depthVal / (frameHeight * frameWidth);
        cout << "Average Distance: " << avgDist << endl;
        //----------------------------------------------------------------

        cv::imshow("Color Image (Init)", cImageBGR);

        // press 'q' to freeze the current depth frame as the background
        if (cv::waitKey(1) == 'q')
        {
            mImageDepth.copyTo(BackgroundFrame);
            break;
        }
    }

    streamInitDepth.destroy();
    streamInitColor.destroy();
    destroyWindow("Depth Image (Init)");
    destroyWindow("Color Image (Init)");

    VideoStream streamDepth;
    streamDepth.create(devAnyDevice, SENSOR_DEPTH);
    VideoStream streamColor;
    streamColor.create(devAnyDevice, SENSOR_COLOR);

    streamDepth.setVideoMode(mModeDepth);
    streamColor.setVideoMode(mModeColor);

    streamDepth.start();
    streamColor.start();

    namedWindow("Depth Image", CV_WINDOW_AUTOSIZE);
    namedWindow("Color Image", CV_WINDOW_AUTOSIZE);
    namedWindow("Thresholded Image", CV_WINDOW_AUTOSIZE);

    int iMaxDepth = streamDepth.getMaxPixelValue();

    VideoFrameRef frameColor;
    VideoFrameRef frameDepth;

    OutX.clear();
    OutY.clear();
    //------------------------------------------------------------

    //-----------------------[Main Process]-----------------------
    while (true)
    {
        streamDepth.readFrame(&frameDepth);
        streamColor.readFrame(&frameColor);

        const cv::Mat mImageDepth(frameDepth.getHeight(), frameDepth.getWidth(),
                                  CV_16UC1, (void*)frameDepth.getData());
        cv::Mat mScaledDepth;
        mImageDepth.convertTo(mScaledDepth, CV_8U, 255.0 / iMaxDepth);

        //---------------------[Tone Mapping]-----------------------------------
        double min;
        double max;
        cv::minMaxIdx(mImageDepth, &min, &max);
        cv::Mat adjMap;
        // expand the range to 0..255, similar to histogram equalization
        float scale = 255.0 / (max - min);
        mImageDepth.convertTo(adjMap, CV_8UC1, scale, -min * scale);

        // applyColorMap converts the grayscale image into a tone-mapped one;
        // the function lives in the contrib module (include contrib.hpp and link accordingly)
        cv::Mat falseColorsMap;
        applyColorMap(adjMap, falseColorsMap, cv::COLORMAP_AUTUMN);
        cv::imshow("Out", falseColorsMap);
        //-----------------------------------------------------------------------

        cv::imshow("Depth Image", mScaledDepth);
        cv::imshow("Depth Image2", adjMap);

        const cv::Mat mImageRGB(frameColor.getHeight(), frameColor.getWidth(),
                                CV_8UC3, (void*)frameColor.getData());
        cv::Mat cImageBGR;
        cv::cvtColor(mImageRGB, cImageBGR, CV_RGB2BGR);

        //-------------[Threshold]-----------------
        // Keep only pixels in a narrow band closer than the recorded background
        // (the frame size is fixed by the 640x480 video mode above)
        cv::Mat mImageThres(frameDepth.getHeight(), frameDepth.getWidth(), CV_8UC1);
        for (int i = 0; i < 480; i++)
        {
            for (int j = 0; j < 640; j++)
            {
                int depthVal = mImageDepth.at<unsigned short>(i, j);
                avgDist = BackgroundFrame.at<unsigned short>(i, j) - 2;
                if ((depthVal > (avgDist - 14)) && (depthVal <= (avgDist - 7)))
                {
                    mImageThres.at<uchar>(i, j) = 255;
                }
                else
                {
                    mImageThres.at<uchar>(i, j) = 0;
                }
            }
        }
        GaussianBlur(mImageThres, mImageThres, Size(3, 3), 0, 0);
        fingerDetection(mImageThres, cImageBGR, OldOutX, OldOutY);
        cv::imshow("Thresholded Image", mImageThres);
        //----------------------------------------

        if (cv::waitKey(1) == 'q')
        {
            break;
        }

        cv::imshow("Color Image", cImageBGR);

        OldOutX.clear();
        OldOutY.clear();
        OldOutX = OutX;
        OldOutY = OutY;
        OutX.clear();
        OutY.clear();
    }
    return 0;
}
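// The strtok-based reader above is easy to get wrong (the original tokenized on
// ';' although the file is comma-separated). A hedged iostream alternative that
// reads the same five comma-separated columns (illustrative, not the original):
#include <fstream>
#include <sstream>
#include <string>
#include <vector>

// Read the five-column CSV produced above into parallel vectors; returns false
// if the file cannot be opened.
bool readShapeFeatures(const std::string& path,
                       std::vector<double>& classType, std::vector<double>& area,
                       std::vector<double>& perimeter, std::vector<double>& circularity,
                       std::vector<double>& extent)
{
    std::ifstream in(path.c_str());
    if (!in)
        return false;

    std::string line;
    std::getline(in, line); // skip the header row
    while (std::getline(in, line))
    {
        std::stringstream ss(line);
        double v[5];
        char comma;
        if (!(ss >> v[0] >> comma >> v[1] >> comma >> v[2] >> comma >> v[3] >> comma >> v[4]))
            continue; // skip malformed rows
        classType.push_back(v[0]);
        area.push_back(v[1]);
        perimeter.push_back(v[2]);
        circularity.push_back(v[3]);
        extent.push_back(v[4]);
    }
    return true;
}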
int initializeOpenNIDevice(int deviceID, const char* deviceName, Device& device,
                           VideoStream& color, VideoStream& depth,
                           unsigned int width, unsigned int height, unsigned int fps)
{
    unsigned int openMode = OPENNI2_OPEN_REGULAR_ENUM; /* 0 = regular deviceID enumeration */

    if (deviceName != 0)
    {
        // If our deviceName contains a .oni we assume that we have an oni file to open
        if (strstr(deviceName, ".oni") != 0)
        {
            fprintf(stderr, "Found an .ONI filename, trying to open it..\n");
            openMode = OPENNI2_OPEN_USING_STRING;
        }
        else if (strlen(deviceName) > 7)
        {
            fprintf(stderr, "deviceName is long (%lu chars), assuming it is a device URI..\n",
                    strlen(deviceName));
            openMode = OPENNI2_OPEN_USING_STRING;
        }
    }

    switch (openMode)
    {
        //-------------------------------------------------------------------------------
        // If we have an ONI file or URI, just pass it as an argument to device.open()
        case OPENNI2_OPEN_USING_STRING:
            if (device.open(deviceName) != STATUS_OK)
            {
                fprintf(stderr, "Could not open using given string ( %s ) : %s \n",
                        deviceName, OpenNI::getExtendedError());
                return 0;
            }
            break;
        //-------------------------------------------------------------------------------
        // Otherwise deviceID selects the device: use the OpenNI enumerator to resolve
        // the URI for device number deviceID and open that
        case OPENNI2_OPEN_REGULAR_ENUM:
        default:
        {
            // We have to supply our own buffer to hold the URI device string
            char devURIBuffer[512] = {0};
            if (device.open(getURIForDeviceNumber(deviceID, devURIBuffer, 512)) != STATUS_OK)
            {
                fprintf(stderr, "Could not open an OpenNI device : %s \n", OpenNI::getExtendedError());
                return 0;
            }
        }
        break;
    }

    if (device.getSensorInfo(SENSOR_DEPTH) != NULL)
    {
        Status rc = depth.create(device, SENSOR_DEPTH);
        if (rc == STATUS_OK)
        {
            VideoMode depthMode = depth.getVideoMode();
            depthMode.setResolution(width, height);
            depthMode.setFps(fps);
            rc = depth.setVideoMode(depthMode);
            if (rc != STATUS_OK)
            {
                fprintf(stderr, "Error setting depth video mode requested %u x %u @ %u fps\n%s\n",
                        width, height, fps, OpenNI::getExtendedError());
            }
            if (depth.start() != STATUS_OK)
            {
                fprintf(stderr, "Couldn't start the depth stream: %s \n", OpenNI::getExtendedError());
                return 0;
            }
        }
        else
        {
            fprintf(stderr, "Couldn't create depth stream: %s \n", OpenNI::getExtendedError());
            return 0;
        }
    }

    if (device.getSensorInfo(SENSOR_COLOR) != NULL)
    {
        Status rc = color.create(device, SENSOR_COLOR);
        if (rc == STATUS_OK)
        {
            VideoMode colorMode = color.getVideoMode();
            colorMode.setResolution(width, height);
            colorMode.setFps(fps);
            rc = color.setVideoMode(colorMode);
            if (rc != STATUS_OK)
            {
                fprintf(stderr, "Error setting color video mode requested %u x %u @ %u fps\n%s\n",
                        width, height, fps, OpenNI::getExtendedError());
            }
            if (color.start() != STATUS_OK)
            {
                fprintf(stderr, "Couldn't start the color stream: %s \n", OpenNI::getExtendedError());
                return 0;
            }
        }
        else
        {
            fprintf(stderr, "Couldn't create color stream: %s \n", OpenNI::getExtendedError());
            return 0;
        }
    }

#if MOD_IR
    if (device.getSensorInfo(SENSOR_IR) != NULL)
    {
        Status rc = ir.create(device, SENSOR_IR); // Create the VideoStream for IR
        if (rc == STATUS_OK)
        {
            rc = ir.start(); // Start the IR VideoStream
        }
        else
        {
            fprintf(stderr, "Couldn't create IR stream: %s \n", OpenNI::getExtendedError());
            return 0;
        }
    }
#endif // MOD_IR

    // Mirroring is disabled
    depth.setMirroringEnabled(false);
    color.setMirroringEnabled(false);

    fprintf(stdout, "Device Initialization Requested %u x %u @ %u fps \n", width, height, fps);
    return 1;
}