Example #1
0
/**
 * Minimal Kinect viewer demo.
 *
 * Grabs RGB-D frames from a KinectGrabber, converts the raw depth to
 * meters, and shows three OpenCV windows (color, normalized depth,
 * color-encoded depth) until the user presses 'q'.
 *
 * @return 0 on normal exit.
 */
int main()
{
  KinectGrabber grabber;
  grabber.initialize();
  // Set camera tilt to horizontal.
  grabber.setTiltAngle(0);
  grabber.start();

  // Postprocess raw kinect data.
  // Tell the processor to transform raw depth into meters using the
  // baseline-offset technique.
  RGBDProcessor processor;
  processor.setFilterFlag(RGBDProcessor::ComputeKinectDepthBaseline, true);

  // OpenCV windows.
  namedWindow("color");
  namedWindow("depth");
  namedWindow("depth_as_color");

  // Current image. An RGBDImage stores rgb and depth data.
  RGBDImage current_frame;
  while (true)
  {
    grabber.waitForNextFrame();
    grabber.copyImageTo(current_frame);
    processor.processImage(current_frame);

    // Overlay the grabber's frames-per-second on the color image.
    int fps = grabber.frameRate();
    cv::putText(current_frame.rgbRef(),
                cv::format("%d fps", fps),
                Point(10,20), 0, 0.5, Scalar(255,0,0,255));

    // Display the color image.
    imshow("color", current_frame.rgb());

    // Show the depth image as normalized gray scale.
    imshow_normalized("depth", current_frame.depth());

    // Compute color encoded depth.
    cv::Mat3b depth_as_color;
    compute_color_encoded_depth(current_frame.depth(), depth_as_color);
    imshow("depth_as_color", depth_as_color);

    // Quit on 'q'. Use break instead of exit(0) so that stack objects
    // (in particular the grabber) are destroyed and the device shuts
    // down cleanly via RAII.
    unsigned char c = cv::waitKey(10) & 0xff;
    if (c == 'q')
      break;
  }

  return 0;
}
Example #2
0
/**
 * Kinect hand-tracking mouse demo.
 *
 * Grabs RGB-D frames, runs fingertip/hand detection on the normalized
 * depth image, and — when mouse control is enabled with 'm' — drives
 * the X11 pointer via the XTest extension from the tracked hand
 * center, with a dwell-to-click gesture. Press 'q' or ESC to quit.
 *
 * NOTE(review): this function relies on file/global state not visible
 * in this chunk (display, root_window, SCREEN, screenw/screenh, hand1,
 * hand2, allowMouse, pointerx/pointery, mousex/mousey, tmousex/tmousey,
 * pusx/pusy, pauseTime, detectFingertips, opt::calibration_file) —
 * TODO confirm their declarations elsewhere in the file.
 */
int main(int argc, char** argv) {
  // Command-line handling (ntk arg framework); "-h" prints usage.
  arg_base::set_help_option("-h");
  arg_parse(argc, argv);
  ntk_debug_level = 1;
  cv::setBreakOnError(true);
  // NOTE(review): grabber (and calib_data below) are heap-allocated and
  // never deleted; the process only terminates via exit(0), so the OS
  // reclaims them — cleanup via RAII would be preferable.
  KinectGrabber * grabber = new KinectGrabber();
  grabber->initialize(); 

  // Initialize X11: open the default display and query the screen size
  // used later to map image coordinates to pointer coordinates.
	display = XOpenDisplay(0);
	root_window = DefaultRootWindow(display);

	screenw = XDisplayWidth(display, SCREEN);
	screenh = XDisplayHeight(display, SCREEN);
       
	printf("\nDefault Display Found\n");
	printf("\nSize: %dx%d\n", screenw, screenh);

	// Enlarge the mapped area by 200 px on each axis; the motion event
	// below subtracts 200 again — presumably an overscan trick so the
	// pointer can reach the screen edges. TODO confirm intent.
	screenw += 200;
	screenh += 200; 

  // Load depth/color calibration, either from an explicit --calibration
  // file or from kinect_calibration.yml in the current directory.
  ntk::RGBDCalibration* calib_data = 0;
  if (opt::calibration_file()) {
	//cout << "calibrated" << endl;
    calib_data = new RGBDCalibration();
    calib_data->loadFromFile(opt::calibration_file());
  }
  else if (QDir::current().exists("kinect_calibration.yml")) {
    ntk_dbg(0) << "[WARNING] Using kinect_calibration.yml in current directory";
    ntk_dbg(0) << "[WARNING] use --calibration to specify a different file.";
	
    calib_data = new RGBDCalibration();
    calib_data->loadFromFile("kinect_calibration.yml");
  }
  if (calib_data) {
    grabber->setCalibrationData(*calib_data);
  }
  
  // Set camera tilt.
  grabber->setTiltAngle(25);
  grabber->start();

  // Postprocess raw kinect data.
  // Tell the processor to transform raw depth into meters using baseline-offset technique.
  RGBDProcessor processor;
  processor.setFilterFlag(RGBDProcessor::ComputeKinectDepthBaseline, true);

  // OpenCV windows.
  namedWindow("color");
  //namedWindow("depth_as_color");
  //namedWindow("depth");
  //namedWindow("depth_normalized");
  namedWindow("fingers");

  // OpenCV variables
  // NOTE(review): z is never initialized, so debugFrame below starts
  // with indeterminate values — TODO confirm whether z should be zeroed.
  cv::Mat z(480, 640, CV_32FC1); //our matrix to mask with
  Mat1f debugFrame(480, 640); //our frame to paint fingers and circles and lines
  debugFrame = z * 0.1;//??
  // Seed hand 1's center to an arbitrary on-screen position.
  hand1.center.x=300;
  hand1.center.y=200;

  int counter; // NOTE(review): unused and uninitialized.
  // Current image. An RGBDImage stores rgb and depth data.
  RGBDImage current_frame;
  while (true) {
    grabber->waitForNextFrame();
    grabber->copyImageTo(current_frame);
		
    /**
     * This is where the RGB and depth are processed
     * Take a look at RGBDProcessor.cpp to see whats going on
     */
    processor.processImage(current_frame);
		
    // Overlay the frames-per-second of the grabber on the color image.
    int fps = grabber->frameRate();
    cv::putText(current_frame.rgbRef(),
    		cv::format("%d fps", fps),
    		Point(10,20), 0, 0.5, Scalar(255,0,0,255));
		
    // Show the depth image as normalized gray scale
    //imshow_normalized("depth", current_frame.depth());

    // Compute color encoded depth.
    //cv::Mat3b depth_as_color;
    //compute_color_encoded_depth(current_frame.depth(), depth_as_color);
    //imshow("depth_as_color", depth_as_color);
		
		
    // get the depth channel and show it	
    Mat1f depth = current_frame.depth();
    //imshow("depth", depth);
		
    // Stretch the depth channel to the full 0..255 byte range for the
    // fingertip detector.
    Mat1b depth_normalized;
    normalize(depth, depth_normalized, 0, 255, NORM_MINMAX, 0);
    //imshow("depth_normalized", depth_normalized);

    // failed attempt at thresholding
    //Mat1f thresh;
    //threshold(depth, thresh, 2, 2, THRESH_TOZERO);
    //imshow("thresh", thresh);
		
    /*
      for (int i = 0; i < d.rows; i++) {
      for (int j = 0;j < d.cols; j++) {
      if (d.at<float>(i, j) > 10) {
      cout << d.at<float>(i, j) << " ";
      }
      }
      cout << endl;
      }
    */
    //cout << endl << endl;

    // Run fingertip detection on the normalized depth.
    // NOTE(review): the magic arguments 1 and 45 are presumably
    // detector thresholds — verify against detectFingertips. The local
    // fingerTips vector declared here is never filled or read; results
    // appear to land in the global hand1/hand2 instead.
    std::vector<cv::Point2i> fingerTips; //our fingertips output info
    detectFingertips(depth_normalized, 1, 45, debugFrame);
    
    // Draw the detected hand centers on the color image
    // (hand 1 = red, hand 2 = green).
    if(hand1.isOn) circle (current_frame.rgbRef(), hand1.center, 10, CV_RGB(255,0,0), 10);
    if(hand2.isOn) circle (current_frame.rgbRef(), hand2.center, 10, CV_RGB(0,255,0), 10);
    
    // post smoothing
    //cout << "for hand 1... " << endl;
    hand1.smoothData();
    //cout << "for hand 2... " << endl;
    hand2.smoothData();
    
    // Draw each hand's fingertips on both the debug frame and the
    // color image (hand 1 = red, hand 2 = green).
    for(vector<Point2i>::iterator it = hand1.fingerTips.begin(); it != hand1.fingerTips.end(); it++) {
      circle(debugFrame, (*it), 10, Scalar(1.0f), -1);
      circle (current_frame.rgbRef(), (*it), 5, CV_RGB(255,0,0), 5);
    }
    for(vector<Point2i>::iterator it = hand2.fingerTips.begin(); it != hand2.fingerTips.end(); it++) {
      circle(debugFrame, (*it), 10, Scalar(1.0f), -1);
      circle (current_frame.rgbRef(), (*it), 5, CV_RGB(0,255,0), 5);
    }
    imshow("fingers", debugFrame);
    
    // Display the color image
    imshow("color", current_frame.rgb());
    // Reset fingertip lists for the next frame.
    hand2.fingerTips.clear();
    hand1.fingerTips.clear();

   //X11 Mouse Control Code
    // Pick which hand drives the pointer, giving priority to hand 1.
    // A (0,0) center is treated as "no valid position".
    int px, py, isMouse=0;
    if(hand1.isOn&&(hand1.center.x!=0 && hand1.center.y!=0)){
      px = hand1.center.x; py = hand1.center.y;
      isMouse=1;
    }
    // NOTE(review): the second alternative below tests hand1.center
    // while taking hand2's position — possibly a copy/paste slip;
    // TODO confirm whether hand2.center was intended.
    else if((hand2.isOn&&!hand1.isOn&&(hand2.center.x!=0 && hand2.center.y!=0))||
	    (hand2.isOn&&(hand1.center.x!=0 && hand1.center.y!=0))){
	cout << "made it here" << endl;
      px = hand2.center.x; py = hand2.center.y;
      isMouse=1;
    }
    //cout << "x= " << hand1.center.x << "y = " hand1.center.y << endl;


      if(isMouse&&allowMouse){
	    // Mirror the x coordinate (640-wide image), presumably because
	    // the camera image is horizontally flipped relative to the user.
	    pointerx = ((px-640.0f) / -1);
	    pointery = (py);
	    // Scale image coordinates (~630x470 usable) to the enlarged
	    // screen dimensions.
	    mousex = ((pointerx / 630.0f) * screenw);
	    mousey = ((pointery / 470.0f) * screenh);
	    int mx , my;
	    mx = mousex;
	    my = mousey;

		// Low-pass smoothing: move the tracked pointer 1/7 of the way
		// toward the new target each frame.
		if(mx > tmousex) tmousex+= (mx - tmousex) / 7;
		if(mx < tmousex) tmousex-= (tmousex - mx) / 7;
		if(my > tmousey) tmousey+= (my - tmousey) / 7;
		if(my < tmousey) tmousey-= (tmousey - my) / 7;			

		// Dwell detection: if the target stays within a 15 px box of
		// the last anchor, accumulate pauseTime; otherwise re-anchor.
		if((pusx <= (mx + 15))  && (pusx >= (mx - 15)) && (pusy <= (my + 15))  && (pusy >= (my - 15))) {
			pauseTime++;
			printf("\n%d\n", pauseTime);
		} else {
			pusx = mx;
			pusy = my;
			pauseTime = 0;
		}		

		    // After ~15 steady frames, synthesize a left-button click;
		    // -30 gives a refractory period before the next click.
		    if(pauseTime > 15) {
				pauseTime = -30;
				XTestFakeButtonEvent(display, 1, TRUE, CurrentTime);
				XTestFakeButtonEvent(display, 1, FALSE, CurrentTime);
			}

			//printf("-- %d x %d -- \n", mx, my);

			// Move the pointer; the -200 offsets undo the +200 screen
			// enlargement applied during initialization.
			XTestFakeMotionEvent(display, -1, tmousex-200, tmousey-200, CurrentTime);
			XSync(display, 0);

			//printf("\n\n %d  -  %d \n\n", mx, my);
      }
    
    // Keyboard handling: 'q'/ESC quits, 'm' toggles mouse control.
    unsigned char c = cv::waitKey(10) & 0xff;
    if (c == 'q' || c == 27)
      exit(0);
    else if (c == 'm'){
      if(allowMouse==0)
	allowMouse=1;
      else
	allowMouse=0;

    }
  }

  return 0;
}
/**
 * Thread entry point that runs the grabber's internal processing loop.
 *
 * @param pParam Opaque pointer to the KinectGrabber instance, as
 *               required by the LPVOID thread-procedure signature.
 * @return Always 0.
 */
DWORD ProcessThread(LPVOID pParam) {
	// Named cast instead of C-style cast: greppable and intent-revealing.
	KinectGrabber *p = static_cast<KinectGrabber*>(pParam);
	p->ProcessThreadInternal();
	return 0;
}