Example #1
void TouchTracking::initializeVideo()
{
    OpenNI::initialize();

    if (device.open(ANY_DEVICE) != STATUS_OK)
    {
        throw std::runtime_error("could not open any device!");
    }

    if (!device.hasSensor(SENSOR_DEPTH))
    {
        throw std::runtime_error("sensor cannot receive depth!");
    }

    auto info = device.getSensorInfo(SENSOR_DEPTH);
    auto& modes = info->getSupportedVideoModes();
    //std::cout << "depth sensor supported modes:\r\n";
    for (int i = 0; i < modes.getSize(); ++i)
    {
        m_videoModes.push_back(modes[i]);
        //std::cout << "pixel format: " << mode.getPixelFormat() << "\t with: " << mode.getResolutionX() << "x" << mode.getResolutionY() << "@" << mode.getFps() << " fps\r\n";
    }

    VideoMode mode;
    mode.setFps(60);
    mode.setPixelFormat(PIXEL_FORMAT_DEPTH_1_MM);
    mode.setResolution(320, 240);
    videoMode(mode);

    stream.setMirroringEnabled(false);
}
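
The call to videoMode(mode) above hands the chosen mode to a member helper that this listing does not include. A minimal sketch of what such a helper might look like, assuming TouchTracking keeps the Device and VideoStream used above as members (the stream-creation step is an assumption, not confirmed by the example):

void TouchTracking::videoMode(const VideoMode& mode)
{
    // Hypothetical helper: create the depth stream on the member device
    // and apply the requested mode to it.
    if (stream.create(device, SENSOR_DEPTH) != STATUS_OK)
    {
        throw std::runtime_error("could not create the depth stream!");
    }
    if (stream.setVideoMode(mode) != STATUS_OK)
    {
        throw std::runtime_error("could not set the requested video mode!");
    }
}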
Example #2
void XtionDepthDriverImpl::open()
{
  if(_device.isValid()) return;
  
  Status rc = _device.open(ANY_DEVICE);
  if(rc != STATUS_OK) {
    throw Exception(std::string("Open the device failed with\n")
      + OpenNI::getExtendedError());
  }

  if(!_device.getSensorInfo(SENSOR_DEPTH)) {
    _device.close();
    throw Exception("Device has no depth sensor!");
  }

  rc = _stream.create(_device, SENSOR_DEPTH);
  if(rc != STATUS_OK) {
    _device.close();
    throw Exception(std::string("Create the depth stream failed with\n")
      + OpenNI::getExtendedError());
  }

  VideoMode mode = _stream.getVideoMode();
  mode.setPixelFormat(PIXEL_FORMAT_DEPTH_1_MM);

  rc = _stream.setVideoMode(mode);
  if(rc != STATUS_OK) {
    _stream.destroy();
    _device.close();

    throw Exception(std::string("Setting the pixel format to "
      "PIXEL_FORMAT_DEPTH_1_MM failed with\n")
      + OpenNI::getExtendedError());
  }

  rc = _stream.start();
  if(rc != STATUS_OK) {
    _stream.destroy();
    _device.close();

    throw Exception(std::string("Starting the depth stream failed with\n")
      + OpenNI::getExtendedError());
  }

  rc = _stream.addNewFrameListener(this);
  if(rc != STATUS_OK) {
    _stream.stop();
    _stream.destroy();
    _device.close();

    throw Exception(std::string("Adding the frame listener failed with\n")
      + OpenNI::getExtendedError());
  }
}
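
Each error path in open() unwinds exactly what has been acquired so far, in reverse order. A matching close() would release the same resources; here is a minimal sketch under the assumption that the class holds no state beyond the _stream and _device seen in the listing:

void XtionDepthDriverImpl::close()
{
  // Hypothetical counterpart of open(): tear down in reverse acquisition order.
  _stream.removeNewFrameListener(this);
  _stream.stop();
  _stream.destroy();
  _device.close();
}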
Example #3
int _tmain(int argc, _TCHAR* argv[])
{
	DepthDetector detector(ThresholdMin, ThresholdMax);
	ScanLineSegmenter segmenter;

	OpenNI::initialize();

	Device device;
	if (device.open(ANY_DEVICE) != STATUS_OK)
	{
		std::cout << "could not open any device\r\n";
		return 1;
	}

	if (device.hasSensor(SENSOR_DEPTH))
	{
		auto info = device.getSensorInfo(SENSOR_DEPTH);
		auto& modes = info->getSupportedVideoModes();
		std::cout << "depth sensor supported modes:\r\n";
		for (int i = 0; i < modes.getSize(); ++i)
		{
			auto& mode = modes[i];
			std::cout << "pixel format: " << mode.getPixelFormat() << "\t with: " << mode.getResolutionX() << "x" << mode.getResolutionY() << "@" << mode.getFps() << " fps\r\n";
		}
	}

	VideoStream stream;
	stream.create(device, SENSOR_DEPTH);
	VideoMode mode;
	mode.setFps(25);
	mode.setPixelFormat(PIXEL_FORMAT_DEPTH_1_MM);
	mode.setResolution(320, 240);
	stream.setMirroringEnabled(true);
	stream.setVideoMode(mode);
	stream.start();

	std::cout << "press any key to capture background\r\n";
	std::cin.get();

	VideoFrameRef frame;
	stream.readFrame(&frame);

	DepthImage image(320, 240);
	copyFrameToImage(frame, image);

	detector.background(image);

	std::cout << "starting capture loop\r\n";

	CenterPointExtractor centerPointExtractor(MinBlobSize);
	auto startTime = std::chrono::high_resolution_clock::now();
	int frameId = 0;
	while (true)
	{
		stream.readFrame(&frame);

		copyFrameToImage(frame, image);

		detector.detect(image);

		std::vector<LineSegment> segments;
		segmenter.segment(detector.mask(), segments);


		std::vector<std::pair<float, float>> centerPoints;
		centerPointExtractor.extract(segments, centerPoints);

		if (centerPoints.size())
		{
			std::cout << "point count: " << centerPoints.size();
		
			std::cout << "\t points: ";
		
			for (auto& point : centerPoints)
			{
				std::cout << "(" << point.first << ", " << point.second << ")  ";
			}
			std::cout << "\r\n";
		}

		++frameId;

		
		
		if (frameId % 64 == 0)
		{
			auto stopTime = std::chrono::high_resolution_clock::now();
			
			auto elapsedTime = stopTime - startTime;
			auto elapsedMilliseconds = std::chrono::duration_cast<std::chrono::milliseconds>(elapsedTime).count();

			std::cout << "\t total frames: " << frameId << "\t fps: " << elapsedMilliseconds / 64 << std::endl;

			startTime = stopTime;
		}
		
	}
	
	openni::OpenNI::shutdown();
	return 0;
}
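
copyFrameToImage() is used above but not defined in this listing. With PIXEL_FORMAT_DEPTH_1_MM every pixel is a 16-bit depth value, so a plausible sketch looks like this (DepthImage::data() returning a mutable uint16_t* is an assumption; the example does not show that type):

void copyFrameToImage(const VideoFrameRef& frame, DepthImage& image)
{
	// Assumed layout: densely packed uint16_t depth samples.
	const uint16_t* pixels = static_cast<const uint16_t*>(frame.getData());
	std::copy(pixels, pixels + frame.getWidth() * frame.getHeight(), image.data());
}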
Example #4
int _tmain(int argc, _TCHAR* argv[])
{
	sdl::Application app;

	DepthDetector detector(ThresholdMin, ThresholdMax);
	ScanLineSegmenter segmenter;

	OpenNI::initialize();

	Device device;
	if (device.open(ANY_DEVICE) != STATUS_OK)
	{
		std::cout << "could not open any device\r\n";
		return 1;
	}

	if (device.hasSensor(SENSOR_DEPTH))
	{
		auto info = device.getSensorInfo(SENSOR_DEPTH);
		auto& modes = info->getSupportedVideoModes();
		std::cout << "depth sensor supported modes:\r\n";
		for (int i = 0; i < modes.getSize(); ++i)
		{
			auto& mode = modes[i];
			std::cout << "pixel format: " << mode.getPixelFormat() << "\t with: " << mode.getResolutionX() << "x" << mode.getResolutionY() << "@" << mode.getFps() << " fps\r\n";
		}
	}

	VideoStream stream;
	stream.create(device, SENSOR_DEPTH);
	VideoMode mode;
	mode.setFps(25);
	mode.setPixelFormat(PIXEL_FORMAT_DEPTH_1_MM);
	mode.setResolution(320, 240);
	stream.setMirroringEnabled(true);
	stream.setVideoMode(mode);
	stream.start();

	std::cout << "press any key to capture background\r\n";
	std::cin.get();

	VideoFrameRef frame;
	stream.readFrame(&frame);

	DepthImage image(320, 240);
	copyFrameToImage(frame, image);

	detector.background(image);

	std::cout << "starting capture loop\r\n";

	sdl::GLContext::setVersion(4, 3);

	ImageViewer viewer;
	viewer.add(0, 0, 320, 240);
	viewer.add(320, 0, 320, 240);
	viewer.add(0, 240, 320, 240);
	viewer.add(320, 240, 320, 240);
	
	CenterPointExtractor centerPointExtractor(MinBlobSize);
	MotionRecorder recorder;

	while (true)
	{
		stream.readFrame(&frame);

		copyFrameToImage(frame, image);
		
		detector.detect(image);

		std::vector<LineSegment> segments;
		segmenter.segment(detector.mask(), segments);


		std::vector<std::pair<float, float>> centerPoints;
		centerPointExtractor.extract(segments, centerPoints);

		recorder.track(centerPoints);
		
		viewer.crosses.clear();
		std::transform(begin(centerPoints), end(centerPoints), std::back_inserter(viewer.crosses), [](std::pair<float, float>& coord) {
			return Cross{ coord.first, coord.second };
		});

		viewer.lines.clear();
		std::transform(begin(recorder.motions()), end(recorder.motions()), std::back_inserter(viewer.lines), [](const Motion& motion) {
			return Lines{ motion.points };
		});
		
		viewer[0].update(detector.mask());
		viewer[1].update(image);
		viewer[2].update(detector.background());
		viewer[3].update(detector.difference());
		
		viewer.update();
	}
	
	openni::OpenNI::shutdown();
	return 0;
}
Example #5
int main()
{
	FILE *fptrI = fopen("C:\\Users\\Alan\\Documents\\ShapeFeatures.csv","w");
	fprintf(fptrI, "Classtype, Area, Perimeter, Circularity, Extent\n");
	fclose(fptrI);

	Mat input = imread("C:\\Users\\Alan\\Pictures\\Science Fair 2014\\SVM\\Shape Features\\Fingers.bmp", 1);
	Mat input2 = imread("C:\\Users\\Alan\\Pictures\\Science Fair 2014\\SVM\\Shape Features\\NotFingers.bmp", 1);
	Mat inputF = imread("C:\\Users\\Alan\\Pictures\\Science Fair 2014\\SVM\\Shape Features\\ImageFeaturesBinaryF.bmp", 1);
	Mat gray(input.rows, input.cols, CV_8UC1);
	Mat gray2(input.rows, input.cols, CV_8UC1);
	Mat grayF(input.rows, input.cols, CV_8UC1);
	cvtColor(input, gray, CV_BGR2GRAY);
	cvtColor(input2, gray2, CV_BGR2GRAY);
	cvtColor(inputF, grayF, CV_BGR2GRAY);
	shapeFeatures(gray, input, 1);
	shapeFeatures(gray2, input2, 2);
	namedWindow("Image");
	imshow("Image", input);
	namedWindow("Image2");
	imshow("Image2", input2);

	//------------------------------------------------------
	//--------[SVM]--------
	// Read input data from file created above
	double parameters[5];
	vector<double> svmI, svmA, svmP, svmC, svmE;
	int size = 1;
	char buffer[1024];
	char *line;
	FILE* fptrR = fopen("C:\\Users\\Alan\\Documents\\ShapeFeatures.csv", "r");
	fscanf(fptrR, "%*[^\n]\n", NULL);

	svmI.resize(size); svmA.resize(size); svmP.resize(size); svmC.resize(size); 

	while((line=fgets(buffer, sizeof(buffer), fptrR))!=NULL)
	{
		size++;
		svmI.resize(size);
		svmA.resize(size);
		svmP.resize(size);
		svmC.resize(size);
		svmE.resize(size);

		record = strtok(line, ";");
		for(int i = 0; i < 5; i++);
		{
			double value = atoi(record);
			record = strtok(line,";");
		}
		char *lineCopy = record;
		char *pch;

		pch = strtok(lineCopy, ",");
		parameters[0] = atoi(pch);
		
		int j = 1;
		while( j < 5 )
		{
			pch = strtok (NULL, ",");
			parameters[j] = atof(pch);
			j++;
		}
		svmI[size-1] = parameters[0];
		svmA[size-1] = parameters[1];
		svmP[size-1] = parameters[2];
		svmC[size-1] = parameters[3];
		svmE[size-1] = parameters[4];
	}
	fclose(fptrR);
	//---------------------
	// Data for visual representation
    int width = 512, height = 512;
    Mat image = Mat::zeros(height, width, CV_8UC3);

    // Set up training data
    //float labels[8] = {1.0, -1.0, -1.0, -1.0};
	// Use only the rows actually read from the CSV (the arrays hold at most 1000 samples).
	int sampleCount = (int)svmI.size() - 1;
	float labels[1000];
	for(int i = 0; i < sampleCount; i++)
	{
		labels[i] = svmI[i+1];
	}
    Mat labelsMat(sampleCount, 1, CV_32FC1, labels);

    float trainingData[1000][4];
	for(int i = 0; i < sampleCount; i++)
	{
		trainingData[i][0] = svmE[i+1];
		trainingData[i][1] = svmC[i+1];
		trainingData[i][2] = svmA[i+1];
		trainingData[i][3] = svmP[i+1];
	}
    Mat trainingDataMat(sampleCount, 4, CV_32FC1, trainingData);

    // Set up SVM's parameters
    CvSVMParams params;
	params = SVMFinger.get_params();
    //params.svm_type    = CvSVM::C_SVC;
    //params.kernel_type = CvSVM::LINEAR;
    //params.term_crit   = cvTermCriteria(CV_TERMCRIT_ITER, 100, 1e-6);

    // Train the SVM
    SVMFinger.train_auto(trainingDataMat, labelsMat, Mat(), Mat(), params);

//	Mat sampleMat = (Mat_<float>(1,2) << 138.5, 57);
//	float response = SVMFinger.predict(sampleMat);

	waitKey();
	destroyWindow("Image");
	destroyWindow("Image2");

	//------------------------------------------
	OpenNI::initialize();

	Device devAnyDevice;
    devAnyDevice.open(ANY_DEVICE);

	//----------------[Define Video Settings]-------------------
	//Set Properties of Depth Stream
	VideoMode mModeDepth;
	mModeDepth.setResolution( 640, 480 );
	mModeDepth.setFps( 30 );
	mModeDepth.setPixelFormat( PIXEL_FORMAT_DEPTH_100_UM );

	//Set Properties of Color Stream
	VideoMode mModeColor;
    mModeColor.setResolution( 640, 480 );
    mModeColor.setFps( 30 );
    mModeColor.setPixelFormat( PIXEL_FORMAT_RGB888 );
	//----------------------------------------------------------
	//----------------------[Initial Streams]---------------------
	VideoStream streamInitDepth;
    streamInitDepth.create( devAnyDevice, SENSOR_DEPTH );

	VideoStream streamInitColor;
    streamInitColor.create( devAnyDevice, SENSOR_COLOR );

	streamInitDepth.setVideoMode( mModeDepth );
	streamInitColor.setVideoMode( mModeColor );

	namedWindow( "Depth Image (Init)",  CV_WINDOW_AUTOSIZE );
    namedWindow( "Color Image (Init)",  CV_WINDOW_AUTOSIZE );
	//namedWindow( "Thresholded Image (Init)", CV_WINDOW_AUTOSIZE );

	VideoFrameRef  frameDepthInit;
    VideoFrameRef  frameColorInit;

	streamInitDepth.start();
	streamInitColor.start();
	cv::Mat BackgroundFrame;

	int avgDist = 0;
	int iMaxDepthInit = streamInitDepth.getMaxPixelValue();
	
	OutX.clear();
	OutY.clear();

	vector<int> OldOutX, OldOutY;
	OldOutX.clear();
	OldOutY.clear();
	//------------------------------------------------------------
	//--------------------[Initiation Process]--------------------
	while( true )
	{
		streamInitDepth.readFrame( &frameDepthInit );
		streamInitColor.readFrame( &frameColorInit );

		const cv::Mat mImageDepth( frameDepthInit.getHeight(), frameDepthInit.getWidth(), CV_16UC1, (void*)frameDepthInit.getData());

        cv::Mat mScaledDepth;
        mImageDepth.convertTo( mScaledDepth, CV_8U, 255.0 / iMaxDepthInit );

        cv::imshow( "Depth Image (Init)", mScaledDepth );

        const cv::Mat mImageRGB(frameColorInit.getHeight(), frameColorInit.getWidth(), CV_8UC3, (void*)frameColorInit.getData());

        cv::Mat cImageBGR;
        cv::cvtColor( mImageRGB, cImageBGR, CV_RGB2BGR );

		//--------------------[Get Average Distance]---------------------
		long long depthVal = 0; // 64-bit: summing 640x480 16-bit depths overflows a 32-bit int
		int frameHeight = frameDepthInit.getHeight();
		int frameWidth = frameDepthInit.getWidth();
		//------------
		//backgroundDepth.resize(frameHeight * frameWidth);
		//---------------------------------------------------------------
		
		int initCount = 0;
		for(int i = 0; i < frameHeight; i++)
		{
			for(int j = 0; j < frameWidth; j++)
			{
				depthVal = mImageDepth.at<unsigned short>(i, j) + depthVal;
				initCount++;
			}
		}
		avgDist = (int)(depthVal / (frameHeight * frameWidth));

		cout << "Average Distance: " << avgDist << endl;
		cv::imshow( "Color Image (Init)", cImageBGR );

		if( cv::waitKey(1) == 'q')
		{
			mImageDepth.copyTo(BackgroundFrame);
            break;
		}
	}

	streamInitDepth.destroy();
	streamInitColor.destroy();

	destroyWindow( "Depth Image (Init)" );
	destroyWindow( "Color Image (Init)" );

	VideoStream streamDepth;
    streamDepth.create( devAnyDevice, SENSOR_DEPTH );

	VideoStream streamColor;
    streamColor.create( devAnyDevice, SENSOR_COLOR );

	streamDepth.setVideoMode( mModeDepth );
	streamColor.setVideoMode( mModeColor );

	streamDepth.start();
    streamColor.start();

	namedWindow( "Depth Image",  CV_WINDOW_AUTOSIZE );
    namedWindow( "Color Image",  CV_WINDOW_AUTOSIZE );
	namedWindow( "Thresholded Image", CV_WINDOW_AUTOSIZE );

	int iMaxDepth = streamDepth.getMaxPixelValue();

    VideoFrameRef  frameColor;
	VideoFrameRef  frameDepth;

	OutX.clear();
	OutY.clear();
	//------------------------------------------------------------

	//------------------------------------------------------------
	//-----------------------[Main Process]-----------------------
	while( true ) 
    {
        streamDepth.readFrame( &frameDepth );
        streamColor.readFrame( &frameColor );

        const cv::Mat mImageDepth( frameDepth.getHeight(), frameDepth.getWidth(), CV_16UC1, (void*)frameDepth.getData());

        cv::Mat mScaledDepth;
        mImageDepth.convertTo( mScaledDepth, CV_8U, 255.0 / iMaxDepth );

		////////////////////////////////////////////////////////////////////////////////////////////
		//---------------------[Downsampling]-------------------------------------------------------
		double min;
		double max;
		cv::minMaxIdx(mImageDepth, &min, &max);
		cv::Mat adjMap;
		// stretch the observed depth range to 0..255, similar to histogram equalisation
		float scale = 255 / (max-min);
		mImageDepth.convertTo(adjMap,CV_8UC1, scale, -min*scale); 

		// Tone-map the grayscale depth into a false-color image, which is much
		// easier to read by eye. applyColorMap lives in the contrib module, so
		// include contrib.hpp and link accordingly.
		cv::Mat falseColorsMap;
		applyColorMap(adjMap, falseColorsMap, cv::COLORMAP_AUTUMN);

		cv::imshow("Out", falseColorsMap);
		//------------------------------------------------------------------------------------------
		////////////////////////////////////////////////////////////////////////////////////////////

        cv::imshow( "Depth Image", mScaledDepth );
		cv::imshow( "Depth Image2", adjMap );

        const cv::Mat mImageRGB(frameColor.getHeight(), frameColor.getWidth(), CV_8UC3, (void*)frameColor.getData());

        cv::Mat cImageBGR;
        cv::cvtColor( mImageRGB, cImageBGR, CV_RGB2BGR );
		
		//-------------[Threshold]-----------------
		cv::Mat mImageThres( frameDepth.getHeight(), frameDepth.getWidth(), CV_8UC1 );

		int backgroundPxlCount = 0;
		for(int i = 0; i < 480; i++)
		{
			for(int j = 0; j < 640; j++)
			{
				int depthVal = mImageDepth.at<unsigned short>(i, j);

				avgDist = BackgroundFrame.at<unsigned short>(i, j)-2;

				if((depthVal > (avgDist-14)) && (depthVal <= (avgDist-7)))
				{
					//mImageThres.data[mImageThres.step[0]*i + mImageThres.step[1]*j] = 255;
					mImageThres.at<uchar>(i, j) = 255;
				}
				else
				{
					//mImageThres.data[mImageThres.step[0]*i + mImageThres.step[1]*j] = 0;
					mImageThres.at<uchar>(i, j) = 0;
				}

				backgroundPxlCount++;
			}
		}
		GaussianBlur( mImageThres, mImageThres, Size(3,3), 0, 0 );
		
		fingerDetection( mImageThres, cImageBGR, OldOutX, OldOutY);

		cv::imshow("Thresholded Image", mImageThres);
		//----------------------------------------
        if( cv::waitKey(1) == 'q')
		{
            break;
		}
		//------------------------------------------------
		cv::imshow( "Color Image", cImageBGR );
		//----------------------------------
		OldOutX.clear();
		OldOutY.clear();
		OldOutX = OutX;
		OldOutY = OutY;
		OutX.clear();
		OutY.clear();
    }

	return 0;
}
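
The threshold loop in the main process keeps a pixel when its depth lies in a narrow band just in front of the captured background: with avgDist set to the background value minus 2, the test accepts depths in (background-16, background-9]. The same mask can be built without the explicit loop using standard OpenCV primitives; this is an equivalent sketch, not the author's code:

// diff = background - current depth, as signed 16-bit so negative values survive
cv::Mat diff;
cv::subtract(BackgroundFrame, mImageDepth, diff, cv::noArray(), CV_16S);
// keep pixels 9 to 15 units in front of the background, matching the loop above
cv::Mat mImageThres = (diff >= 9) & (diff < 16);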
Example #6
 void createVideoMode(VideoMode& m, int x, int y, int fps, PixelFormat format)
 {
   m.setResolution(x, y);
   m.setFps(fps);
   m.setPixelFormat(format);
 }
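
The createVideoMode() helper above bundles the three setters that the example otherwise calls one by one; it is defined but never used below. It could shorten the configuration of paramvideo, for instance:

// Equivalent to the setResolution/setFps/setPixelFormat triple used later:
VideoMode paramvideo;
createVideoMode(paramvideo, 640, 480, 30, PIXEL_FORMAT_DEPTH_100_UM);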
int main (int argc, char* argv[])
{
    using namespace openni;

    bool outputIsLibrary = true;
    if (argc > 1 && !strcmp(argv[1], "test_data")) {
        std::cout << "Capturing data for testing.." << std::endl;
        outputIsLibrary = false;
    } else {
        std::cout << "Capturing data for library.." << std::endl;
    }

    CreateDirectory ("Output", NULL);
    int frameGap = 0;

    std::string outputAddress;

    if (outputIsLibrary) {
        CreateDirectory ("Output/image_library", NULL);
        if (argc > 1 && argv[2] != '\0') {
            //if(argv[2] != '\0'){
            char folder[256];
            sprintf(folder, "Output/image_library/%s", argv[2]);
            CreateDirectory (folder, NULL);
            sprintf(folder, "Output/image_library/%s/rgbd", argv[2]);
            outputAddress = folder;
            CreateDirectory (outputAddress.c_str(), NULL);
            sprintf(folder, "Output/image_library/%s/rgbd/rgb", argv[2]);
            CreateDirectory (folder, NULL);
            sprintf(folder, "Output/image_library/%s/rgbd/depth", argv[2]);
            CreateDirectory (folder, NULL);
        } else {
            CreateDirectory ("Output/image_library/item", NULL);
            outputAddress = "Output/image_library/item/rgbd";
            CreateDirectory (outputAddress.c_str(), NULL);
            CreateDirectory ("Output/image_library/item/rgbd/rgb", NULL);
            CreateDirectory ("Output/image_library/item/rgbd/depth", NULL);
        }
        frameGap = 75;
    } else {
        outputAddress = "Output/test_data/";
        CreateDirectory (outputAddress.c_str(), NULL);
        CreateDirectory ("Output/test_data/rgb", NULL);
        CreateDirectory ("Output/test_data/depth", NULL);
        frameGap = 200;
    }

    OpenNI::initialize();
    puts( "Kinect initialization..." );
    Device device;
    if ( device.open( openni::ANY_DEVICE ) != 0 )
    {
        puts( "Kinect not found !" );
        return -1;
    }
    puts( "Kinect opened" );
    VideoStream depthV, colorV;
    colorV.create( device, SENSOR_COLOR );
    colorV.start();
    puts( "Camera ok" );
    depthV.create( device, SENSOR_DEPTH );
    depthV.start();
    puts( "Depth sensor ok" );
    VideoMode paramvideo;
    paramvideo.setResolution( 640, 480 );
    paramvideo.setFps( 30 );
    paramvideo.setPixelFormat( PIXEL_FORMAT_DEPTH_100_UM );
    depthV.setVideoMode( paramvideo );
    paramvideo.setPixelFormat( PIXEL_FORMAT_RGB888 );
    colorV.setVideoMode( paramvideo );

    // If depth/color synchronisation is not needed, startup is faster:
    //device.setDepthColorSyncEnabled( false );

    // Otherwise, the streams can be synchronised and received in whichever order we choose:
    device.setDepthColorSyncEnabled( true );
    device.setImageRegistrationMode( openni::IMAGE_REGISTRATION_DEPTH_TO_COLOR );

    VideoStream** stream = new VideoStream*[2];
    stream[0] = &depthV;
    stream[1] = &colorV;
    puts( "Kinect initialization completed" );

    cv::Mat colorBuffer( cv::Size( 640, 480 ), CV_8UC3 );
    cv::Mat depthBuffer( cv::Size( 640, 480 ), CV_16UC1 ), depthTemp, depthDisplay;
    cv::namedWindow( "RGB", CV_WINDOW_AUTOSIZE );
    cv::namedWindow( "Depth", CV_WINDOW_AUTOSIZE );
    bool firstDepth = true, firstColor = true;

    cv::Mat captureMat = cv::Mat::zeros(200, 200, CV_8UC3);

    int rgbFrameCount = 0, outputFrameCount = 0, lastRgbFrameCount = -1;
    for (;;)
    {
        cv::Mat color, depth, mask, depthShow;

        if ( device.getSensorInfo( SENSOR_DEPTH ) != NULL ) {
            VideoFrameRef depthFrame, colorFrame;
            cv::Mat colorcv( cv::Size( 640, 480 ), CV_8UC3);
            cv::Mat depthcv( cv::Size( 640, 480 ), CV_16UC1);

            int changedIndex;
            if( device.isValid() ) {
                OpenNI::waitForAnyStream( stream, 2, &changedIndex );
                switch ( changedIndex ) {
                case 0:
                    depthV.readFrame( &depthFrame );

                    if ( depthFrame.isValid() ) {
                        if(firstDepth == true)	firstDepth = false;
                        depthcv.data = (uchar*) depthFrame.getData();

                        //To show depth image using entire range of viewed depth instead of default scale.
                        //double min;
                        //double max;
                        //cv::minMaxIdx(depthcv, &min, &max);
                        //cv::Mat adjMap = depthcv * 5;
                        //cv::convertScaleAbs(depthcv, adjMap, 255 / max);
                        //depthBuffer = adjMap.clone();
                        cv::flip(depthcv, depthBuffer, 1);
                        depthBuffer *= 5;
                        depthBuffer.copyTo(depthTemp);
                        depthTemp = 65535 - depthTemp;
                        depthDisplay = cv::Mat::zeros(depthBuffer.size(), CV_16UC1);
                        cv::Mat depthMask = depthBuffer > 0;
                        depthTemp.copyTo(depthDisplay, depthMask);
                        cv::imshow( "Depth", depthDisplay);
                    }
                    break;

                case 1:
                    colorV.readFrame( &colorFrame );

                    if ( colorFrame.isValid() ) {
                        if(firstColor == true)	firstColor = false;
                        colorcv.data = (uchar*) colorFrame.getData();
                        cv::cvtColor( colorcv, colorcv, CV_BGR2RGB );
                        //colorBuffer = colorcv.clone();
                        cv::flip(colorcv, colorBuffer, 1);
                        cv::imshow( "RGB", colorBuffer );
                        rgbFrameCount++;
                    }
                    break;

                default:
                    puts( "Error retrieving a stream" );
                }

            }

            if (!firstDepth && !firstColor) {
                if ((rgbFrameCount > 125) && (rgbFrameCount > lastRgbFrameCount)) {
                    if ((rgbFrameCount % frameGap) == 0) {
                        char rgbChar[256], depthChar[256];

                        if (argc > 2 && !strcmp(argv[1], "library")) {
                            //debugging inputs: [library] [item] [surface(AB, AC, BC)] [Upright?]
                            //if (!strcmp(argv[1], "library")) {
                            //if(argv[3] == '\0' || argv[4] == '\0'){
                            //}
                            sprintf(rgbChar, "%s/rgb/%s_%s_R%04d.png", outputAddress.c_str(), argv[3], argv[4], outputFrameCount);
                            sprintf(depthChar, "%s/depth/%s_%s_R%04d.png", outputAddress.c_str(), argv[3], argv[4], outputFrameCount);
                            //}
                        } else {
                            sprintf(rgbChar, "%s/rgb/%04d.png", outputAddress.c_str(), outputFrameCount);
                            sprintf(depthChar, "%s/depth/%04d.png", outputAddress.c_str(), outputFrameCount);
                        }

                        cv::imwrite(std::string(rgbChar), colorBuffer);
                        cv::imwrite(std::string(depthChar), depthBuffer);

                        printf("Frameset (%d) written to file..\n", outputFrameCount);

                        lastRgbFrameCount = rgbFrameCount;
                        outputFrameCount++;
                    }

                    if ((rgbFrameCount % frameGap) < 5) {
                        captureMat = cv::Scalar(0.0, 255.0, 0.0);
                    } else captureMat = cv::Scalar(0.0, 0.0, 255.0);
                }
                cv::imshow("ready", captureMat);

                int key = cv::waitKey(10) % 255;

                if (key == 27 || key == 'q') break;
            }
        }

    }

    cv::destroyWindow( "RGB" );
    cv::destroyWindow( "Depth" );
    depthV.stop();
    depthV.destroy();
    colorV.stop();
    colorV.destroy();
    device.close();
    OpenNI::shutdown();

    return 0;
}