Example #1
int main()
{
	Status rc = OpenNI::initialize();
	if (rc != STATUS_OK)
	{
		printf("Initialize failed\n%s\n", OpenNI::getExtendedError());
		return 1;
	}

	OpenNIEventListener eventPrinter;
	OpenNI::addDeviceConnectedListener(&eventPrinter);
	OpenNI::addDeviceDisconnectedListener(&eventPrinter);
	OpenNI::addDeviceStateChangedListener(&eventPrinter);

	Device device;
	rc = device.open(ANY_DEVICE);
	if (rc != STATUS_OK)
	{
		printf("Couldn't open device\n%s\n", OpenNI::getExtendedError());
		return 2;
	}

	VideoStream depth;

	if (device.getSensorInfo(SENSOR_DEPTH) != NULL)
	{
		rc = depth.create(device, SENSOR_DEPTH);
		if (rc != STATUS_OK)
		{
			printf("Couldn't create depth stream\n%s\n", OpenNI::getExtendedError());
		}
	}
	rc = depth.start();
	if (rc != STATUS_OK)
	{
		printf("Couldn't start the depth stream\n%s\n", OpenNI::getExtendedError());
	}


	PrintCallback depthPrinter;

	// Register for new frames
	depth.addNewFrameListener(&depthPrinter);

	// Wait while we're getting frames through the printer
	while (!wasKeyboardHit())
	{
		Sleep(100);
	}

	depth.removeNewFrameListener(&depthPrinter);


	depth.stop();
	depth.destroy();
	device.close();
	OpenNI::shutdown();

	return 0;
}
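The listing assumes a PrintCallback frame listener plus the wasKeyboardHit()/Sleep() helpers from the SDK's OniSampleUtilities.h. A minimal sketch of the listener, modeled on the stock OpenNI2 EventBasedRead sample (printing the middle pixel is that sample's behavior, not a requirement):

class PrintCallback : public VideoStream::NewFrameListener
{
public:
	void onNewFrame(VideoStream& stream)
	{
		stream.readFrame(&m_frame);
		// Print the depth value (mm) of the center pixel of each new frame
		DepthPixel* pDepth = (DepthPixel*)m_frame.getData();
		int middleIndex = (m_frame.getHeight() + 1) * m_frame.getWidth() / 2;
		printf("[%08llu] %8d\n", (long long)m_frame.getTimestamp(), pDepth[middleIndex]);
	}
private:
	VideoFrameRef m_frame;
};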
Example #2
int kinect_init()
{
    Status rc = OpenNI::initialize();
    if (rc != STATUS_OK)
    {
        printf("Initialize failed\n%s\n", OpenNI::getExtendedError());
        return 1;
    }

    rc = device.open(ANY_DEVICE);
    if (rc != STATUS_OK)
    {
        printf("Couldn't open device\n%s\n", OpenNI::getExtendedError());
        return 2;
    }

    if (device.getSensorInfo(SENSOR_DEPTH) != NULL)
    {
        rc = depth.create(device, SENSOR_DEPTH);
        if (rc != STATUS_OK)
        {
            printf("Couldn't create depth stream\n%s\n", OpenNI::getExtendedError());
            return 3;
        }

        const SensorInfo* sinfo = device.getSensorInfo(SENSOR_DEPTH);
        const Array<VideoMode>& modes = sinfo->getSupportedVideoModes();

        for (int i=0; i<modes.getSize(); i++) {
            printf("%i: %ix%i, %i fps, %i format\n",
                i,
                modes[i].getResolutionX(),
                modes[i].getResolutionY(),
                modes[i].getFps(),
                modes[i].getPixelFormat()
                );
        }

        // NOTE: mode indices are device-specific; on the device this was written
        // for, index 4 was 640x480 @ 30 fps, PIXEL_FORMAT_DEPTH_1_MM (100).
        //rc = depth.setVideoMode(modes[0]); // 320x240, 30fps, format: 100
        rc = depth.setVideoMode(modes[4]); // 640x480, 30fps, format: 100

        if (rc != openni::STATUS_OK) {
            printf("Failed to set depth resolution\n");
            return -1;
        }
    }

    rc = depth.start();
    if (rc != STATUS_OK)
    {
        printf("Couldn't start the depth stream\n%s\n", OpenNI::getExtendedError());
        return 4;
    }

    return 0;
}
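kinect_init() reads and writes device and depth without declaring them, so they are presumably file-scope handles shared with the program's capture code. A minimal sketch of the assumed declarations (names taken from the listing):

#include <OpenNI.h>
using namespace openni;

// File-scope handles assumed by kinect_init()
static Device device;
static VideoStream depth;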
Example #3
  virtual void endConfigure()
  {
    if(was_running_)
    {
      Status rc = stream_.start();

      if(rc != STATUS_OK)
      {
        SensorType type = stream_.getSensorInfo().getSensorType();
        ROS_WARN_STREAM("Failed to restart stream '" << name_ << "' after configuration!");

        int max_trials = 1;

        for(int trials = 0; trials < max_trials && rc != STATUS_OK; ++trials)
        {
          ros::Duration(0.1).sleep();

          stream_.removeNewFrameListener(this);
          stream_.destroy();
          stream_.create(device_, type);
          stream_.addNewFrameListener(this);
          //stream_.setVideoMode(default_mode_);
          rc = stream_.start();

          ROS_WARN_STREAM_COND(rc != STATUS_OK, "Recovery trial " << trials << " failed!");
        }

        ROS_ERROR_STREAM_COND(rc != STATUS_OK, "Failed to recover stream '" << name_ << "'! Restart required!");
        ROS_INFO_STREAM_COND(rc == STATUS_OK, "Recovered stream '" << name_ << "'.");
      }

      if(rc == STATUS_OK)
      {
        running_ = true;
      }
    }
  }
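endConfigure() is a method of a stream-wrapper class whose members (stream_, device_, name_, was_running_, running_) are not shown; was_running_ is presumably set by a matching beginConfigure() that stops the stream. A hedged sketch of the enclosing class, reconstructed from usage (the class name is hypothetical; the NewFrameListener base matches the add/removeNewFrameListener(this) calls):

#include <OpenNI.h>
#include <string>

class StreamWrapper : public openni::VideoStream::NewFrameListener
{
public:
	virtual void onNewFrame(openni::VideoStream& stream);  // frame delivery
	virtual void beginConfigure();                         // stops the stream, records was_running_
	virtual void endConfigure();                           // shown above

protected:
	openni::VideoStream stream_;
	openni::Device& device_;      // device the stream is re-created on
	std::string name_;            // stream name used in log output
	bool was_running_;            // state captured before reconfiguration
	bool running_;                // current state
};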
Example #4
 virtual void onSubscriptionChanged(const image_transport::SingleSubscriberPublisher& topic)
 {
   if(topic.getNumSubscribers() > 0)
   {
     if(!running_ && stream_.start() == STATUS_OK)
     {
       running_ = true;
     }
   }
   else
   {
     stream_.stop();
     running_ = false;
   }
 }
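The handler lazily starts the stream for the first subscriber and stops it when the last one goes away. It is presumably registered as both the connect and the disconnect callback of an image_transport publisher; a hedged sketch (class and member names hypothetical):

// it_ (image_transport::ImageTransport) and pub_ would be class members.
image_transport::SubscriberStatusCallback cb =
	boost::bind(&StreamNode::onSubscriptionChanged, this, _1);
pub_ = it_.advertise("image_raw", 1, cb, cb); // same handler for connect and disconnect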
Example #5
void wb_kinect_enable(WbDeviceTag tag, int colorMS, int rangeMS, bool advancedModel) {
  rc = OpenNI::initialize();
  if (rc != STATUS_OK)
  {
    printf("Initialize failed\n%s\n", OpenNI::getExtendedError());
    return;
  }

  rc = device.open(ANY_DEVICE);
  if (rc != STATUS_OK)
  {
    printf("Couldn't open device\n%s\n", OpenNI::getExtendedError());
    return;
  }

  if (device.getSensorInfo(SENSOR_DEPTH) != NULL)
  {
    rc = depth.create(device, SENSOR_DEPTH);
    if (rc != STATUS_OK)
    {
      printf("Couldn't create depth stream\n%s\n", OpenNI::getExtendedError());
      return;
    }
  }

  rc = depth.start();
  if (rc != STATUS_OK)
  {
    printf("Couldn't start the depth stream\n%s\n", OpenNI::getExtendedError());
    return;
  }

  // Do one read to update the width and height.
  kinectenable = true;
  wb_kinect_get_range_image_mm(tag);
}
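wb_kinect_enable() relies on file-scope state shared with the rest of the Webots wrapper. A minimal sketch of the assumed declarations (names from the listing; the range-image helper is defined elsewhere, and its return type here is a guess):

#include <OpenNI.h>
#include <webots/types.h>   // WbDeviceTag
using namespace openni;

// File-scope state assumed by wb_kinect_enable()
static Status rc;
static Device device;
static VideoStream depth;
static bool kinectenable = false;

const short *wb_kinect_get_range_image_mm(WbDeviceTag tag); // does one read, updates width/height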
Example #6
int
main (int argc, char** argv)
{
    Status rc = OpenNI::initialize();
    if (rc != STATUS_OK)
    {
        std::cout << "Initialize failed: " << OpenNI::getExtendedError() << std::endl;
        return 1;
    }

    Device device;
    rc = device.open(ANY_DEVICE);
    if (rc != STATUS_OK)
    {
        std::cout << "Couldn't open device: " << OpenNI::getExtendedError() << std::endl;
        return 2;
    }

    VideoStream stream;
    if (device.getSensorInfo(currentSensor) != NULL)
    {
        rc = stream.create(device, currentSensor);
        if (rc != STATUS_OK)
        {
            std::cout << "Couldn't create stream: " << OpenNI::getExtendedError() << std::endl;
            return 3;
        }
    }

    rc = stream.start();
    if (rc != STATUS_OK)
    {
        std::cout << "Couldn't start the stream: " << OpenNI::getExtendedError() << std::endl;
        return 4;
    }

    VideoFrameRef frame;

    //now open the video writer
    Size S = Size(stream.getVideoMode().getResolutionX(),
                  stream.getVideoMode().getResolutionY());
    VideoWriter outputVideo;
    std::string fileName = "out.avi";
    outputVideo.open(fileName, -1, stream.getVideoMode().getFps(), S, currentSensor == SENSOR_COLOR ? true : false);
    if (!outputVideo.isOpened())
    {
        std::cout  << "Could not open the output video for write: " << fileName << std::endl;
        return -1;
    }

    while (waitKey(50) == -1)
    {
        int changedStreamDummy;
        VideoStream* pStream = &stream;
        rc = OpenNI::waitForAnyStream(&pStream, 1, &changedStreamDummy, SAMPLE_READ_WAIT_TIMEOUT);
        if (rc != STATUS_OK)
        {
            std::cout << "Wait failed! (timeout is " << SAMPLE_READ_WAIT_TIMEOUT << "ms): " << OpenNI::getExtendedError() << std::endl;
            continue;
        }

        rc = stream.readFrame(&frame);
        if (rc != STATUS_OK)
        {
            std::cout << "Read failed:" << OpenNI::getExtendedError() << std::endl;
            continue;
        }

        Mat image;
        switch (currentSensor)
        {
        case SENSOR_COLOR:
            image = Mat(frame.getHeight(), frame.getWidth(), CV_8UC3, (void*)frame.getData());
            break;
        case SENSOR_DEPTH:
            image = Mat(frame.getHeight(), frame.getWidth(), DataType<DepthPixel>::type, (void*)frame.getData());
            break;
        case SENSOR_IR:
            image = Mat(frame.getHeight(), frame.getWidth(), CV_8U, (void*)frame.getData());
            break;
        default:
            break;
        }

        namedWindow( "Display window", WINDOW_AUTOSIZE );    // Create a window for display.
        imshow( "Display window", image );                   // Show our image inside it.

        outputVideo << image;
    }

    stream.stop();
    stream.destroy();
    device.close();
    OpenNI::shutdown();

    return 0;
}
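The listing assumes file-scope currentSensor and SAMPLE_READ_WAIT_TIMEOUT constants. Note that it hands 16-bit depth frames (and RGB-ordered color frames) straight to VideoWriter, which generally expects 8-bit BGR input; a hedged pre-write conversion that could replace the outputVideo << image line (the 10 m depth cap is an assumption):

Mat writable;
if (image.type() == CV_16U)
	image.convertTo(writable, CV_8U, 255.0 / 10000.0); // depth mm -> 8-bit, assumed 10 m cap
else if (currentSensor == SENSOR_COLOR)
	cvtColor(image, writable, CV_RGB2BGR);             // OpenNI delivers RGB888
else
	writable = image;
outputVideo << writable;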
Example #7
int main()
{
	// 2. initialize OpenNI
	Status rc = OpenNI::initialize();
	if (rc != STATUS_OK)
	{
		printf("Initialize failed\n%s\n", OpenNI::getExtendedError());
		return 1;
	}

	// 3. open a device
	Device device;
	rc = device.open(ANY_DEVICE);
	if (rc != STATUS_OK)
	{
		printf("Couldn't open device\n%s\n", OpenNI::getExtendedError());
		return 2;
	}

	// 4. create depth stream
	VideoStream depth;
	if (device.getSensorInfo(SENSOR_DEPTH) != NULL){
		rc = depth.create(device, SENSOR_DEPTH);
		if (rc != STATUS_OK){
			printf("Couldn't create depth stream\n%s\n", OpenNI::getExtendedError());
			return 3;
		}
	}
	VideoStream color;
	if (device.getSensorInfo(SENSOR_COLOR) != NULL){
		rc = color.create(device, SENSOR_COLOR);
		if (rc != STATUS_OK){
			printf("Couldn't create color stream\n%s\n", OpenNI::getExtendedError());
			return 4;
		}
	}

	// 5. create OpenCV Window
	cv::namedWindow("Depth Image", CV_WINDOW_AUTOSIZE);
	cv::namedWindow("Color Image", CV_WINDOW_AUTOSIZE);

	// 6. start
	rc = depth.start();
	if (rc != STATUS_OK)
	{
		printf("Couldn't start the depth stream\n%s\n", OpenNI::getExtendedError());
		return 5;
	}
	rc = color.start();
	if (rc != STATUS_OK){
		printf("Couldn't start the depth stream\n%s\n", OpenNI::getExtendedError());
		return 6;
	}
	
	VideoFrameRef colorframe;
	VideoFrameRef depthframe;
	int iMaxDepth = depth.getMaxPixelValue();
	int iColorFps = color.getVideoMode().getFps();
	cv::Size iColorFrameSize = cv::Size(color.getVideoMode().getResolutionX(), color.getVideoMode().getResolutionY());

	cv::Mat colorimageRGB;
	cv::Mat colorimageBGR;
	cv::Mat depthimage;
	cv::Mat depthimageScaled;

#ifdef F_RECORDVIDEO

	cv::VideoWriter outputvideo_color;
	cv::FileStorage outputfile_depth;
	
	time_t timenow = time(0);
	tm ltime;
	localtime_s(&ltime, &timenow);
	int tyear = 1900 + ltime.tm_year;
	int tmouth = 1 + ltime.tm_mon;
	int tday = ltime.tm_mday;
	int thour = ltime.tm_hour;
	int tmin = ltime.tm_min;
	int tsecond = ltime.tm_sec;

	string filename_rgb = "RGB/rgb_" + to_string(tyear) + "_" + to_string(tmouth) + "_" + to_string(tday)
		+ "_" + to_string(thour) + "_" + to_string(tmin) + "_" + to_string(tsecond) + ".avi";
	string filename_d = "D/d_" + to_string(tyear) + "_" + to_string(tmouth) + "_" + to_string(tday)
		+ "_" + to_string(thour) + "_" + to_string(tmin) + "_" + to_string(tsecond) + ".yml";

	outputvideo_color.open(filename_rgb, CV_FOURCC('I', '4', '2', '0'), iColorFps, iColorFrameSize, true);
	if (!outputvideo_color.isOpened()){
		cout << "Could not open the output color video for write: " << endl;
		return 7;
	}
	outputfile_depth.open(filename_d, cv::FileStorage::WRITE);
	if (!outputfile_depth.isOpened()){
		cout << "Could not open the output depth file for write: " << endl;
		return 8;
	}

#endif // F_RECORDVIDEO


	// 7. main loop, continue read
	while (!wasKeyboardHit())
	{
		// 8. check if the color stream is available
		if (color.isValid()){
			if (color.readFrame(&colorframe) == STATUS_OK){
				colorimageRGB = { colorframe.getHeight(), colorframe.getWidth(), CV_8UC3, (void*)colorframe.getData() };
				cv::cvtColor(colorimageRGB, colorimageBGR, CV_RGB2BGR);
			}
		}

		// 9. check if the depth stream is available
		if (depth.isValid()){
			if (depth.readFrame(&depthframe) == STATUS_OK){
				depthimage = { depthframe.getHeight(), depthframe.getWidth(), CV_16UC1, (void*)depthframe.getData() };
				depthimage.convertTo(depthimageScaled, CV_8U, 255.0 / iMaxDepth);
			}
		}

		cv::imshow("Color Image", colorimageBGR);
		cv::imshow("Depth Image", depthimageScaled);

#ifdef F_RECORDVIDEO
		
		outputvideo_color << colorimageBGR;
		outputfile_depth << "Mat" << depthimage;

#endif // F_RECORDVIDEO

		cv::waitKey(10);
	}

	color.stop();
	depth.stop();

	color.destroy();
	depth.destroy();

	device.close();
	OpenNI::shutdown();

	return 0;
}
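The depth recorder writes the same "Mat" key once per frame, which yields duplicate YAML keys that most readers collapse to a single entry; an indexed key avoids that. A hedged sketch (frame counter, file and key names hypothetical):

// Writing with unique keys instead:
//   outputfile_depth << cv::format("Mat_%06d", framecount) << depthimage;
// Reading one frame back:
cv::FileStorage fs("D/d_2016_1_1_12_0_0.yml", cv::FileStorage::READ);
cv::Mat depthFrame;
fs["Mat_000000"] >> depthFrame; // 16-bit depth image as written above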
Example #8
int main(int argc, char **argv){


  printf("starting\n");
  fflush(stdout);
  ros::init(argc, argv, "xtion",ros::init_options::AnonymousName);
  ros::NodeHandle n("~");

  //Base topic name
  n.param("topic", topic, string("/camera"));
  //Resolution
  //0 = 160x120
  //1 = 320x240
  n.param("depth_mode", _depth_mode, -1);
  n.param("rgb_mode", _rgb_mode, -1);
  n.param("sync", _sync, 0);
  n.param("registration", _registration,0);
  n.param("frame_id", frame_id, string("camera_frame"));
  n.param("device_num", _device_num, -1);
  n.param("device_uri", _device_uri, string("NA"));
  n.param("frame_skip", _frame_skip, 0);
  n.param("exposure", _exposure, -1);
  n.param("gain", _gain, -1);

  printf("Launched with params:\n");
  printf("_device_num:= %d\n",_device_num);
  printf("_device_uri:= %s\n",_device_uri.c_str());
  printf("_topic:= %s\n",topic.c_str());
  printf("_sync:= %d\n",_sync);
  printf("_registration:= %d\n",_registration);
  printf("_depth_mode:= %d\n",_depth_mode);
  printf("_rgb_mode:= %d\n",_rgb_mode);
  printf("_frame_id:= %s\n",frame_id.c_str());
  printf("_frame_skip:= %d\n",_frame_skip);
  printf("_exposure:= %d\n",_exposure);
  printf("_gain:= %d\n",_gain);
  
  fflush(stdout);


  if (_frame_skip<=0)
    _frame_skip = 1;

	
  //OPENNI2 STUFF
  //===================================================================
  streams = new openni::VideoStream*[2];
  streams[0]=&depth;
  streams[1]=&rgb;

  Status rc = OpenNI::initialize();
  if (rc != STATUS_OK)
    {
      printf("Initialize failed\n%s\n", OpenNI::getExtendedError());
      fflush(stdout);
      return 1;
    }

  // enumerate the devices
  openni::Array<openni::DeviceInfo> device_list;
  openni::OpenNI::enumerateDevices(&device_list);
  
  Device device;
  
  if(_device_uri.compare("NA")){
    
    string dev_uri("NA");
    
    for (int i = 0; i<device_list.getSize(); i++){
      if(!string(device_list[i].getUri()).compare(0, _device_uri.size(), _device_uri )){
	dev_uri = device_list[i].getUri();
	break;
      }
    }
    if(!dev_uri.compare("NA")){
      cerr << "cannot find device with uri starting for: " << _device_uri << endl;
    }
    rc = device.open(dev_uri.c_str());
  }
  else{
    
    if (_device_num < 0){
      cerr << endl << endl << "found " << device_list.getSize() << " devices" << endl;
      for (int i = 0; i<device_list.getSize(); i++)
	cerr << "\t num: " << i << " uri: " << device_list[i].getUri() << endl;
    }

    if (_device_num>=device_list.getSize() || _device_num<0 ) {
      cerr << "device num: " << _device_num << " does not exist, aborting" << endl;
      openni::OpenNI::shutdown();
      return 0;
    }

    rc = device.open(device_list[_device_num].getUri());
  }
  
  if (rc != STATUS_OK){
    printf("Couldn't open device\n%s\n", OpenNI::getExtendedError());
    fflush(stdout);
    return 2;
  }
	

  if(_depth_mode>=0){
    if (device.getSensorInfo(SENSOR_DEPTH) != NULL){
      rc = depth.create(device, SENSOR_DEPTH);
      if (rc != STATUS_OK){
	printf("Couldn't create depth stream\n%s\n", OpenNI::getExtendedError());
	fflush(stdout);
	return 3;
      }
      //DEPTH
      pub_depth = n.advertise<sensor_msgs::Image>("/"+topic+"/depth/image_raw", 1);
      pub_camera_info_depth = n.advertise<sensor_msgs::CameraInfo>("/"+topic+"/depth/camera_info", 1);

    }
  }


  if(_rgb_mode>=0){
    if (device.getSensorInfo(SENSOR_COLOR) != NULL){
      rc = rgb.create(device, SENSOR_COLOR);
      if (rc != STATUS_OK){
	printf("Couldn't create rgb stream\n%s\n", OpenNI::getExtendedError());
	fflush(stdout);
	return 3;
      }
      //RGB
      pub_rgb = n.advertise<sensor_msgs::Image>("/"+topic+"/rgb/image_raw", 1);
      pub_camera_info_rgb = n.advertise<sensor_msgs::CameraInfo>("/"+topic+"/rgb/camera_info", 1);
    }

  }
	

  if(_depth_mode<0 && _rgb_mode<0){
    cout << "Depth modes" << endl;
    const openni::SensorInfo* sinfo = device.getSensorInfo(openni::SENSOR_DEPTH); // select index=4 640x480, 30 fps, 1mm
    const openni::Array< openni::VideoMode>& modesDepth = sinfo->getSupportedVideoModes();

    printf("Enums data:\nPIXEL_FORMAT_DEPTH_1_MM = 100,\nPIXEL_FORMAT_DEPTH_100_UM = 101,\nPIXEL_FORMAT_SHIFT_9_2 = 102,\nPIXEL_FORMAT_SHIFT_9_3 = 103,\nPIXEL_FORMAT_RGB888 = 200,\nPIXEL_FORMAT_YUV422 = 201,\nPIXEL_FORMAT_GRAY8 = 202,\nPIXEL_FORMAT_GRAY16 = 203,\nPIXEL_FORMAT_JPEG = 204,\nPIXEL_FORMAT_YUYV = 205,\n\n");
    cout << "Depth modes" << endl;
    for (int i = 0; i<modesDepth.getSize(); i++) {
      printf("%i: %ix%i, %i fps, %i format\n", i, modesDepth[i].getResolutionX(), modesDepth[i].getResolutionY(),modesDepth[i].getFps(), modesDepth[i].getPixelFormat()); //PIXEL_FORMAT_DEPTH_1_MM = 100, PIXEL_FORMAT_DEPTH_100_UM = 101
    }

    cout << "Rgb modes" << endl;
    const openni::SensorInfo* sinfoRgb = device.getSensorInfo(openni::SENSOR_COLOR); // select index=4 640x480, 30 fps, 1mm
    const openni::Array< openni::VideoMode>& modesRgb = sinfoRgb->getSupportedVideoModes();
    for (int i = 0; i<modesRgb.getSize(); i++) {
      printf("%i: %ix%i, %i fps, %i format\n", i, modesRgb[i].getResolutionX(), modesRgb[i].getResolutionY(),modesRgb[i].getFps(), modesRgb[i].getPixelFormat()); //PIXEL_FORMAT_DEPTH_1_MM = 100, PIXEL_FORMAT_DEPTH_100_UM
    }
    depth.stop();
    depth.destroy();
    rgb.stop();
    rgb.destroy();
    device.close();
    OpenNI::shutdown();	
    exit(1);
  }
	
	
  if(_depth_mode>=0){
    rc = depth.setVideoMode(device.getSensorInfo(SENSOR_DEPTH)->getSupportedVideoModes()[_depth_mode]);
    depth.setMirroringEnabled(false);
    rc = depth.start();
  }
  if(_rgb_mode>=0){
    rc = rgb.setVideoMode(device.getSensorInfo(SENSOR_COLOR)->getSupportedVideoModes()[_rgb_mode]);
    rgb.setMirroringEnabled(false);


    rgb.getCameraSettings()->setAutoExposureEnabled(true);
    rgb.getCameraSettings()->setAutoWhiteBalanceEnabled(true);
	  
    cerr << "Camera settings valid: " << rgb.getCameraSettings()->isValid() << endl;
    rc = rgb.start();
  }
  
  if(_depth_mode>=0 && _rgb_mode>=0 && _sync==1){
    rc =device.setDepthColorSyncEnabled(true);
    if (rc != STATUS_OK) {
      printf("Couldn't enable de  pth and rgb images synchronization\n%s\n",
	     OpenNI::getExtendedError());
      exit(2);
    }
  }


  if(_depth_mode>=0 && _rgb_mode>=0 && _registration==1){
    device.setImageRegistrationMode(openni::IMAGE_REGISTRATION_DEPTH_TO_COLOR);
  }

  run = true;
  pthread_t runner;
  pthread_create(&runner, 0, camera_thread, 0);
  ros::spin();
  void* result;
  run =false;

  pthread_join(runner, &result);

  depth.stop();
  depth.destroy();
  rgb.stop();
  rgb.destroy();
  device.close();
  OpenNI::shutdown();	
  return 0;

}
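main() spawns camera_thread but the worker itself is not shown. A hedged sketch of what it presumably does with the file-scope streams, run flag, and publishers from the listing (ROS message packing is elided; the two-stream wait assumes both _depth_mode and _rgb_mode were enabled):

void* camera_thread(void*) {
  VideoFrameRef frame;
  while (run) {
    int changed = -1;
    if (OpenNI::waitForAnyStream(streams, 2, &changed, 1000) != STATUS_OK)
      continue;                                  // timeout: re-check the run flag
    if (changed == 0 && depth.readFrame(&frame) == STATUS_OK) {
      // wrap frame in a sensor_msgs::Image and pub_depth.publish(...)
    } else if (changed == 1 && rgb.readFrame(&frame) == STATUS_OK) {
      // wrap frame in a sensor_msgs::Image and pub_rgb.publish(...)
    }
  }
  return 0;
}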
Example #9
int _tmain(int argc, _TCHAR* argv[])
{
	DepthDetector detector(ThresholdMin, ThresholdMax);
	ScanLineSegmenter segmenter;

	OpenNI::initialize();

	Device device;
	if (device.open(ANY_DEVICE) != STATUS_OK)
	{
		std::cout << "could not open any device\r\n";
		return 1;
	}

	if (device.hasSensor(SENSOR_DEPTH))
	{
		auto info = device.getSensorInfo(SENSOR_DEPTH);
		auto& modes = info->getSupportedVideoModes();
		std::cout << "depth sensor supported modes:\r\n";
		for (int i = 0; i < modes.getSize(); ++i)
		{
			auto& mode = modes[i];
			std::cout << "pixel format: " << mode.getPixelFormat() << "\t with: " << mode.getResolutionX() << "x" << mode.getResolutionY() << "@" << mode.getFps() << " fps\r\n";
		}
	}

	VideoStream stream;
	stream.create(device, SENSOR_DEPTH);
	VideoMode mode;
	mode.setFps(25);
	mode.setPixelFormat(PIXEL_FORMAT_DEPTH_1_MM);
	mode.setResolution(320, 240);
	stream.setMirroringEnabled(true);
	stream.setVideoMode(mode);
	stream.start();

	std::cout << "press any key to capture background\r\n";
	std::cin.get();

	VideoFrameRef frame;
	stream.readFrame(&frame);

	DepthImage image(320, 240);
	copyFrameToImage(frame, image);

	detector.background(image);

	std::cout << "starting capture loop\r\n";

	CenterPointExtractor centerPointExtractor(MinBlobSize);
	std::chrono::high_resolution_clock timer;
	auto startTime = timer.now();
	int frameId = 0;
	while (true)
	{
		stream.readFrame(&frame);

		copyFrameToImage(frame, image);

		detector.detect(image);

		std::vector<LineSegment> segments;
		segmenter.segment(detector.mask(), segments);


		std::vector<std::pair<float, float>> centerPoints;
		centerPointExtractor.extract(segments, centerPoints);

		if (centerPoints.size())
		{
			std::cout << "point count: " << centerPoints.size();
		
			std::cout << "\t points: ";
		
			for (auto& point : centerPoints)
			{
				std::cout << "(" << point.first << ", " << point.second << ")  ";
			}
			std::cout << "\r\n";
		}

		++frameId;

		
		
		if (frameId % 64 == 0)
		{
			auto stopTime = timer.now();
			
			auto elapsedTime = stopTime - startTime;
			auto elapsedMilliseconds = std::chrono::duration_cast<std::chrono::milliseconds>(elapsedTime).count();

			std::cout << "\t total frames: " << frameId << "\t fps: " << elapsedMilliseconds / 64 << std::endl;

			startTime = stopTime;
		}
		
	}
	
	openni::OpenNI::shutdown();
	return 0;
}
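copyFrameToImage(), DepthImage, DepthDetector, ScanLineSegmenter and CenterPointExtractor come from the surrounding project and are not shown. A hedged sketch of the copy helper, assuming DepthImage exposes a mutable 16-bit buffer (the data() accessor is hypothetical):

#include <cstring>

void copyFrameToImage(const openni::VideoFrameRef& frame, DepthImage& image)
{
	// Assumes the image was sized to the stream resolution (320x240 above) and
	// stores openni::DepthPixel values row-major with no padding.
	std::memcpy(image.data(), frame.getData(), frame.getDataSize());
}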
Example #10
int _tmain(int argc, _TCHAR* argv[])
{
	sdl::Application app;

	DepthDetector detector(ThresholdMin, ThresholdMax);
	ScanLineSegmenter segmenter;

	OpenNI::initialize();

	Device device;
	if (device.open(ANY_DEVICE) != STATUS_OK)
	{
		std::cout << "could not open any device\r\n";
		return 1;
	}

	if (device.hasSensor(SENSOR_DEPTH))
	{
		auto info = device.getSensorInfo(SENSOR_DEPTH);
		auto& modes = info->getSupportedVideoModes();
		std::cout << "depth sensor supported modes:\r\n";
		for (int i = 0; i < modes.getSize(); ++i)
		{
			auto& mode = modes[i];
			std::cout << "pixel format: " << mode.getPixelFormat() << "\t with: " << mode.getResolutionX() << "x" << mode.getResolutionY() << "@" << mode.getFps() << " fps\r\n";
		}
	}

	VideoStream stream;
	stream.create(device, SENSOR_DEPTH);
	VideoMode mode;
	mode.setFps(25);
	mode.setPixelFormat(PIXEL_FORMAT_DEPTH_1_MM);
	mode.setResolution(320, 240);
	stream.setMirroringEnabled(true);
	stream.setVideoMode(mode);
	stream.start();

	std::cout << "press any key to capture background\r\n";
	std::cin.get();

	VideoFrameRef frame;
	stream.readFrame(&frame);

	DepthImage image(320, 240);
	copyFrameToImage(frame, image);

	detector.background(image);

	std::cout << "starting capture loop\r\n";

	sdl::GLContext::setVersion(4, 3);

	ImageViewer viewer;
	viewer.add(0, 0, 320, 240);
	viewer.add(320, 0, 320, 240);
	viewer.add(0, 240, 320, 240);
	viewer.add(320, 240, 320, 240);
	
	CenterPointExtractor centerPointExtractor(MinBlobSize);
	MotionRecorder recorder;

	while (true)
	{
		stream.readFrame(&frame);

		copyFrameToImage(frame, image);
		
		detector.detect(image);

		std::vector<LineSegment> segments;
		segmenter.segment(detector.mask(), segments);


		std::vector<std::pair<float, float>> centerPoints;
		centerPointExtractor.extract(segments, centerPoints);

		recorder.track(centerPoints);
		
		viewer.crosses.clear();
		std::transform(begin(centerPoints), end(centerPoints), std::back_inserter(viewer.crosses), [](std::pair<float, float>& coord) {
			return Cross{ coord.first, coord.second };
		});

		viewer.lines.clear();
		std::transform(begin(recorder.motions()), end(recorder.motions()), std::back_inserter(viewer.lines), [](const Motion& motion) {
			return Lines{ motion.points };
		});
		
		viewer[0].update(detector.mask());
		viewer[1].update(image);
		viewer[2].update(detector.background());
		viewer[3].update(detector.difference());
		
		viewer.update();
	}
	
	openni::OpenNI::shutdown();
	return 0;
}
Example #11
int main()
{
	FILE *fptrI = fopen("C:\\Users\\Alan\\Documents\\ShapeFeatures.csv","w");
	fprintf(fptrI, "Classtype, Area, Perimeter, Circularity, Extent\n");
	fclose(fptrI);

	Mat input = imread("C:\\Users\\Alan\\Pictures\\Science Fair 2014\\SVM\\Shape Features\\Fingers.bmp", 1);
	Mat input2 = imread("C:\\Users\\Alan\\Pictures\\Science Fair 2014\\SVM\\Shape Features\\NotFingers.bmp", 1);
	Mat inputF = imread("C:\\Users\\Alan\\Pictures\\Science Fair 2014\\SVM\\Shape Features\\ImageFeaturesBinaryF.bmp", 1);
	Mat gray, gray2, grayF; // cvtColor allocates the single-channel outputs below
	cvtColor(input, gray, CV_BGR2GRAY);
	cvtColor(input2, gray2, CV_BGR2GRAY);
	cvtColor(inputF, grayF, CV_BGR2GRAY);
	shapeFeatures(gray, input, 1);
	shapeFeatures(gray2, input2, 2);
	namedWindow("Image");
	imshow("Image", input);
	namedWindow("Image2");
	imshow("Image2", input2);

	//------------------------------------------------------
	//--------[SVM]--------
	// Read input data from file created above
	double parameters[5];
	vector<double> svmI, svmA, svmP, svmC, svmE;
	int size = 1;
	double index = 0; double area = 0; double perimeter = 0; double circularity = 0;
	char buffer[1024];
	char *line;
	FILE* fptrR = fopen("C:\\Users\\Alan\\Documents\\ShapeFeatures.csv", "r");
	fscanf(fptrR, "%*[^\n]\n"); // skip the CSV header row

	svmI.resize(size); svmA.resize(size); svmP.resize(size); svmC.resize(size); 

	while((line=fgets(buffer, sizeof(buffer), fptrR))!=NULL)
	{
		size++;
		svmI.resize(size);
		svmA.resize(size);
		svmP.resize(size);
		svmC.resize(size);
		svmE.resize(size);

		record = strtok(line, ";");
		for(int i = 0; i < 5; i++);
		{
			double value = atoi(record);
			record = strtok(line,";");
		}
		char *lineCopy = record;
		char *pch;

		pch = strtok(lineCopy, ",");
		parameters[0] = atoi(pch);
		
		int j = 1;
		while( j < 5 )
		{
			pch = strtok (NULL, ",");
			parameters[j] = atof(pch);
			j++;
		}
		svmI[size-1] = parameters[0];
		svmA[size-1] = parameters[1];
		svmP[size-1] = parameters[2];
		svmC[size-1] = parameters[3];
		svmE[size-1] = parameters[4];
	}
	fclose(fptrR);
	//---------------------
	// Data for visual representation
    int width = 512, height = 512;
    Mat image = Mat::zeros(height, width, CV_8UC3);

    // Set up training data
    //float labels[8] = {1.0, -1.0, -1.0, -1.0};
	int numSamples = (int)svmI.size() - 1; // rows actually filled below
	float labels[1000];
	for(int i = 0; i < numSamples; i++)
	{
		labels[i] = (float)svmI[i+1];
	}
	Mat labelsMat(numSamples, 1, CV_32FC1, labels); // only wrap the filled rows

	float trainingData[1000][4];
	for(int i = 0; i < numSamples; i++)
	{
		trainingData[i][0] = (float)svmE[i+1];
		trainingData[i][1] = (float)svmC[i+1];
		trainingData[i][2] = (float)svmA[i+1];
		trainingData[i][3] = (float)svmP[i+1];
	}
	Mat trainingDataMat(numSamples, 4, CV_32FC1, trainingData);

    // Set up SVM's parameters
    CvSVMParams params;
	params = SVMFinger.get_params();
    //params.svm_type    = CvSVM::C_SVC;
    //params.kernel_type = CvSVM::LINEAR;
    //params.term_crit   = cvTermCriteria(CV_TERMCRIT_ITER, 100, 1e-6);

    // Train the SVM
    SVMFinger.train_auto(trainingDataMat, labelsMat, Mat(), Mat(), params);

//	Mat sampleMat = (Mat_<float>(1,2) << 138.5, 57);
//	float response = SVMFinger.predict(sampleMat);

	waitKey();
	destroyWindow("Image");
	destroyWindow("Image2");

	//------------------------------------------
	OpenNI::initialize();

	Device devAnyDevice;
    devAnyDevice.open(ANY_DEVICE);

	//----------------[Define Video Settings]-------------------
	//Set Properties of Depth Stream
	VideoMode mModeDepth;
	mModeDepth.setResolution( 640, 480 );
	mModeDepth.setFps( 30 );
	mModeDepth.setPixelFormat( PIXEL_FORMAT_DEPTH_100_UM );

	//Set Properties of Color Stream
	VideoMode mModeColor;
    mModeColor.setResolution( 640, 480 );
    mModeColor.setFps( 30 );
    mModeColor.setPixelFormat( PIXEL_FORMAT_RGB888 );
	//----------------------------------------------------------
	//----------------------[Initial Streams]---------------------
	VideoStream streamInitDepth;
    streamInitDepth.create( devAnyDevice, SENSOR_DEPTH );

	VideoStream streamInitColor;
    streamInitColor.create( devAnyDevice, SENSOR_COLOR );

	streamInitDepth.setVideoMode( mModeDepth );
	streamInitColor.setVideoMode( mModeColor );

	namedWindow( "Depth Image (Init)",  CV_WINDOW_AUTOSIZE );
    namedWindow( "Color Image (Init)",  CV_WINDOW_AUTOSIZE );
	//namedWindow( "Thresholded Image (Init)", CV_WINDOW_AUTOSIZE );

	VideoFrameRef  frameDepthInit;
    VideoFrameRef  frameColorInit;

	streamInitDepth.start();
	streamInitColor.start();
	cv::Mat BackgroundFrame;

	int avgDist = 0;
	int iMaxDepthInit = streamInitDepth.getMaxPixelValue();
	
	OutX.clear();
	OutY.clear();

	vector<int> OldOutX, OldOutY;
	OldOutX.clear();
	OldOutY.clear();
	//------------------------------------------------------------
	//--------------------[Initiation Process]--------------------
	while( true )
	{
		streamInitDepth.readFrame( &frameDepthInit );
		streamInitColor.readFrame( &frameColorInit );

		const cv::Mat mImageDepth( frameDepthInit.getHeight(), frameDepthInit.getWidth(), CV_16UC1, (void*)frameDepthInit.getData());

        cv::Mat mScaledDepth;
        mImageDepth.convertTo( mScaledDepth, CV_8U, 255.0 / iMaxDepthInit );

        cv::imshow( "Depth Image (Init)", mScaledDepth );

        const cv::Mat mImageRGB(frameColorInit.getHeight(), frameColorInit.getWidth(), CV_8UC3, (void*)frameColorInit.getData());

        cv::Mat cImageBGR;
        cv::cvtColor( mImageRGB, cImageBGR, CV_RGB2BGR );

		//--------------------[Get Average Distance]---------------------
		int depthVal = 0;
		int frameHeight = frameDepthInit.getHeight();
		int frameWidth = frameDepthInit.getWidth();
		//------------
		//backgroundDepth.resize(frameHeight * frameWidth);
		//---------------------------------------------------------------
		
		int initCount = 0;
		for(int i = 0; i < frameHeight; i++)
		{
			for(int j = 0; j < frameWidth; j++)
			{
				depthVal = mImageDepth.at<unsigned short>(i, j) + depthVal;
				initCount++;
			}
		}
		avgDist = depthVal / ((frameHeight) * (frameWidth));

		cout << "Average Distance: " << avgDist << endl;
		cv::imshow( "Color Image (Init)", cImageBGR );

		if( cv::waitKey(1) == 'q')
		{
			mImageDepth.copyTo(BackgroundFrame);
            break;
		}
	}

	streamInitDepth.destroy();
	streamInitColor.destroy();

	destroyWindow( "Depth Image (Init)" );
	destroyWindow( "Color Image (Init)" );

	VideoStream streamDepth;
    streamDepth.create( devAnyDevice, SENSOR_DEPTH );

	VideoStream streamColor;
    streamColor.create( devAnyDevice, SENSOR_COLOR );

	streamDepth.setVideoMode( mModeDepth );
	streamColor.setVideoMode( mModeColor );

	streamDepth.start();
    streamColor.start();

	namedWindow( "Depth Image",  CV_WINDOW_AUTOSIZE );
    namedWindow( "Color Image",  CV_WINDOW_AUTOSIZE );
	namedWindow( "Thresholded Image", CV_WINDOW_AUTOSIZE );

	int iMaxDepth = streamDepth.getMaxPixelValue();

    VideoFrameRef  frameColor;
	VideoFrameRef  frameDepth;

	OutX.clear();
	OutY.clear();
	//------------------------------------------------------------

	//------------------------------------------------------------
	//-----------------------[Main Process]-----------------------
	while( true ) 
    {
        streamDepth.readFrame( &frameDepth );
        streamColor.readFrame( &frameColor );

        const cv::Mat mImageDepth( frameDepth.getHeight(), frameDepth.getWidth(), CV_16UC1, (void*)frameDepth.getData());

        cv::Mat mScaledDepth;
        mImageDepth.convertTo( mScaledDepth, CV_8U, 255.0 / iMaxDepth );

		////////////////////////////////////////////////////////////////////////////////////////////
		//---------------------[Downsampling]-------------------------------------------------------
		double min;
		double max;
		cv::minMaxIdx(mImageDepth, &min, &max);
		cv::Mat adjMap;
		// expand your range to 0..255. Similar to histEq();
		float scale = 255 / (max-min);
		mImageDepth.convertTo(adjMap,CV_8UC1, scale, -min*scale); 

		// this is great. It converts your grayscale image into a tone-mapped one, 
		// much more pleasing for the eye
		// function is found in contrib module, so include contrib.hpp 
		// and link accordingly
		cv::Mat falseColorsMap;
		applyColorMap(adjMap, falseColorsMap, cv::COLORMAP_AUTUMN);

		cv::imshow("Out", falseColorsMap);
		//------------------------------------------------------------------------------------------
		////////////////////////////////////////////////////////////////////////////////////////////

        cv::imshow( "Depth Image", mScaledDepth );
		cv::imshow( "Depth Image2", adjMap );

        const cv::Mat mImageRGB(frameColor.getHeight(), frameColor.getWidth(), CV_8UC3, (void*)frameColor.getData());

        cv::Mat cImageBGR;
        cv::cvtColor( mImageRGB, cImageBGR, CV_RGB2BGR );
		
		//-------------[Threshold]-----------------
		cv::Mat mImageThres( frameDepth.getHeight(), frameDepth.getWidth(), CV_8UC1 );

		int backgroundPxlCount = 0;
		for(int i = 0; i < 480; i++)
		{
			for(int j = 0; j < 640; j++)
			{
				int depthVal = mImageDepth.at<unsigned short>(i, j);

				avgDist = BackgroundFrame.at<unsigned short>(i, j)-2;

				if((depthVal > (avgDist-14)) && (depthVal <= (avgDist-7)))
				{
					//mImageThres.data[mImageThres.step[0]*i + mImageThres.step[1]*j] = 255;
					mImageThres.at<uchar>(i, j) = 255;
				}
				else
				{
					//mImageThres.data[mImageThres.step[0]*i + mImageThres.step[1]*j] = 0;
					mImageThres.at<uchar>(i, j) = 0;
				}

				backgroundPxlCount++;
			}
		}
		GaussianBlur( mImageThres, mImageThres, Size(3,3), 0, 0 );
		
		fingerDetection( mImageThres, cImageBGR, OldOutX, OldOutY);

		cv::imshow("Thresholded Image", mImageThres);
		//----------------------------------------
        if( cv::waitKey(1) == 'q')
		{
            break;
		}
		//------------------------------------------------
		cv::imshow( "Color Image", cImageBGR );
		//----------------------------------
		OldOutX.clear();
		OldOutY.clear();
		OldOutX = OutX;
		OldOutY = OutY;
		OutX.clear();
		OutY.clear();
    }

	return 0;
}
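The listing depends on several helpers and globals defined elsewhere in the project. A hedged sketch of the assumed declarations (signatures reconstructed from the call sites; the old C-API CvSVM comes from opencv2/ml):

#include <opencv2/ml/ml.hpp>
#include <vector>

CvSVM SVMFinger;              // classifier trained in main(), used by fingerDetection()
std::vector<int> OutX, OutY;  // fingertip coordinates found in the current frame

void shapeFeatures(cv::Mat& gray, cv::Mat& display, int classType); // appends rows to ShapeFeatures.csv
void fingerDetection(cv::Mat& thresh, cv::Mat& display,
                     std::vector<int>& oldOutX, std::vector<int>& oldOutY);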
Example #12
int initializeOpenNIDevice(int deviceID, const char *deviceName, Device &device, VideoStream &color, VideoStream &depth, unsigned int width, unsigned int height, unsigned int fps)
{
   unsigned int openMode=OPENNI2_OPEN_REGULAR_ENUM; /* 0 = regular deviceID and enumeration*/
   if (deviceName!=0)
   {
      //If our deviceName contains a .oni we assume that we have an oni file to open
      if (strstr(deviceName,".oni")!=0)
         {
           fprintf(stderr,"Found an .ONI filename , trying to open it..\n");
           openMode=OPENNI2_OPEN_USING_STRING;
         } else
      if (strlen(deviceName)>7)
        {
           fprintf(stderr,"deviceName is too long (%lu chars) , assuming it is a Device URI ..\n",strlen(deviceName));
           openMode=OPENNI2_OPEN_USING_STRING;
        }

   }

   switch (openMode)
   {
     //-------------------------------------------------------------------------------------
     //If we have an ONI file to open just pass it as an argument to device.open(deviceName)
     case OPENNI2_OPEN_USING_STRING :
      if (device.open(deviceName) != STATUS_OK)
      {
        fprintf(stderr,"Could not open using given string ( %s ) : %s \n",deviceName,OpenNI::getExtendedError());
        return 0;
      }
     break;
     //-------------------------------------------------------------------------------------
     //If we don't have a deviceName we assume deviceID points to the device we want to open so we will try to use
     //the openNI enumerator to get the specific device URI for device with number deviceID and use this to device.open( devURI )
     case OPENNI2_OPEN_REGULAR_ENUM :
     default :
      //We have to supply our own buffer to hold the uri device string , so we make one here
      char devURIBuffer[512]={0};
      if (device.open(getURIForDeviceNumber(deviceID,devURIBuffer,512)) != STATUS_OK)
      {
        fprintf(stderr,"Could not open an OpenNI device : %s \n",OpenNI::getExtendedError());
        return 0;
      }
     break;
   }

if (device.getSensorInfo(SENSOR_DEPTH)  != NULL)
    {
        Status rc = depth.create(device, SENSOR_DEPTH);
        if (rc == STATUS_OK)
        {
            VideoMode depthMode = depth.getVideoMode();
            depthMode.setResolution(width,height);
            depthMode.setFps(fps);
            Status rc = depth.setVideoMode(depthMode);
            if (rc != STATUS_OK) { fprintf(stderr,"Error getting color at video mode requested %u x %u @ %u fps\n%s\n",width,height,fps,OpenNI::getExtendedError()); }

            if(depth.start()!= STATUS_OK)
            {
                fprintf(stderr,"Couldn't start the color stream: %s \n",OpenNI::getExtendedError());
                return 0;
            }
        }
        else
        {
            fprintf(stderr,"Couldn't create depth stream: %s \n",OpenNI::getExtendedError());
            return 0;
        }
    }

    if (device.getSensorInfo(SENSOR_COLOR) != NULL)
    {
        Status rc = color.create(device, SENSOR_COLOR);
        if (rc == STATUS_OK)
        {
            VideoMode colorMode = color.getVideoMode();
            colorMode.setResolution(width,height);
            colorMode.setFps(fps);
            Status rc = color.setVideoMode(colorMode);
            if (rc != STATUS_OK) { fprintf(stderr,"Error getting depth at video mode requested %u x %u @ %u fps\n%s\n",width,height,fps,OpenNI::getExtendedError()); }

            if(color.start() != STATUS_OK)
            {
                fprintf(stderr,"Couldn't start the color stream: %s \n",OpenNI::getExtendedError());
                return 0;
            }
        }
        else
        {
            fprintf(stderr,"Couldn't create depth stream: %s \n",OpenNI::getExtendedError());
            OpenNI::getExtendedError();
            return 0;
        }
    }


  #if MOD_IR
    if(device.getSensorInfo(SENSOR_IR) != NULL)
    {
        Status rc = ir.create(device, SENSOR_IR);    // Create the VideoStream for IR
        if (rc == STATUS_OK)
        {
          rc = ir.start();                      // Start the IR VideoStream
        }
         else
        {
            fprintf(stderr,"Couldn't create IR stream: %s \n",OpenNI::getExtendedError());
            OpenNI::getExtendedError();
            return 0;
        }
    }
  #endif // MOD_IR

    //Mirroring is disabled
    depth.setMirroringEnabled (false);
    color.setMirroringEnabled (false);


    fprintf(stdout,"Device Initialization Requested %u x %u @ %u fps \n",width,height,fps);
   return 1;
}
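getURIForDeviceNumber() is not part of OpenNI2 (nor is the ir stream, which is presumably a file-scope VideoStream when MOD_IR is set). A hedged sketch of the enumeration helper, with buffer semantics guessed from the call site:

#include <OpenNI.h>
#include <cstring>

const char* getURIForDeviceNumber(int deviceID, char* buffer, unsigned int bufferSize)
{
    openni::Array<openni::DeviceInfo> list;
    openni::OpenNI::enumerateDevices(&list);
    if (deviceID < 0 || deviceID >= list.getSize())
        return openni::ANY_DEVICE;                 // NULL: let open() pick any device
    strncpy(buffer, list[deviceID].getUri(), bufferSize - 1);
    buffer[bufferSize - 1] = '\0';
    return buffer;
}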
Example #13
int main()
{
	// 2. initialize OpenNI
	Status rc = OpenNI::initialize();
	if (rc != STATUS_OK)
	{
		printf("Initialize failed\n%s\n", OpenNI::getExtendedError());
		return 1;
	}

	// 3. open a device
	Device device;
	rc = device.open(ANY_DEVICE);
	if (rc != STATUS_OK)
	{
		printf("Couldn't open device\n%s\n", OpenNI::getExtendedError());
		return 2;
	}

	// 4. create depth stream
	VideoStream depth;
	if (device.getSensorInfo(SENSOR_DEPTH) != NULL){
		rc = depth.create(device, SENSOR_DEPTH);
		if (rc != STATUS_OK){
			printf("Couldn't create depth stream\n%s\n", OpenNI::getExtendedError());
			return 3;
		}
	}
	VideoStream color;
	if (device.getSensorInfo(SENSOR_COLOR) != NULL){
		rc = color.create(device, SENSOR_COLOR);
		if (rc != STATUS_OK){
			printf("Couldn't create color stream\n%s\n", OpenNI::getExtendedError());
			return 4;
		}
	}

	// 5. create OpenCV Window
	cv::namedWindow("Depth Image", CV_WINDOW_AUTOSIZE);
	cv::namedWindow("Color Image", CV_WINDOW_AUTOSIZE);

	// 6. start
	rc = depth.start();
	if (rc != STATUS_OK)
	{
		printf("Couldn't start the depth stream\n%s\n", OpenNI::getExtendedError());
		return 5;
	}
	rc = color.start();
	if (rc != STATUS_OK){
		printf("Couldn't start the depth stream\n%s\n", OpenNI::getExtendedError());
		return 6;
	}
	VideoFrameRef colorframe;
	VideoFrameRef depthframe;
	int iMaxDepth = depth.getMaxPixelValue();

	cv::Mat colorimageRGB;
	cv::Mat colorimageBGR;
	cv::Mat depthimage;
	cv::Mat depthimageScaled;

	// 7. main loop, continue read
	while (!wasKeyboardHit())
	{
		// 8. check if the color stream is available
		if (color.isValid()){
			if (color.readFrame(&colorframe) == STATUS_OK){
				colorimageRGB = { colorframe.getHeight(), colorframe.getWidth(), CV_8UC3, (void*)colorframe.getData() };
				cv::cvtColor(colorimageRGB, colorimageBGR, CV_RGB2BGR);
			}
		}

		// 9. check if the depth stream is available
		if (depth.isValid()){
			if (depth.readFrame(&depthframe) == STATUS_OK){
				depthimage = { depthframe.getHeight(), depthframe.getWidth(), CV_16UC1, (void*)depthframe.getData() };
				depthimage.convertTo(depthimageScaled, CV_8U, 255.0 / iMaxDepth);
			}
		}

		cv::imshow("Color Image", colorimageBGR);
		cv::imshow("Depth Image", depthimageScaled);
		cv::waitKey(10);
	}

	color.stop();
	depth.stop();

	color.destroy();
	depth.destroy();

	device.close();
	OpenNI::shutdown();

	return 0;
}
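A common next step with the 16-bit depth Mat is reading individual ranges. A minimal sketch using the frame grabbed in the loop above (values are millimeters when the stream uses PIXEL_FORMAT_DEPTH_1_MM):

const DepthPixel* pixels = (const DepthPixel*)depthframe.getData();
int cx = depthframe.getWidth() / 2;
int cy = depthframe.getHeight() / 2;
printf("center depth: %u mm\n", pixels[cy * depthframe.getWidth() + cx]);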
Example #14
File: main.cpp  Project: eranws/OpenNI2
int main()
{
	Status rc = OpenNI::initialize();
	if (rc != STATUS_OK)
	{
		printf("Initialize failed\n%s\n", OpenNI::getExtendedError());
		return 1;
	}

	OpenNIDeviceListener devicePrinter;

	OpenNI::addDeviceConnectedListener(&devicePrinter);
	OpenNI::addDeviceDisconnectedListener(&devicePrinter);
	OpenNI::addDeviceStateChangedListener(&devicePrinter);

	openni::Array<openni::DeviceInfo> deviceList;
	openni::OpenNI::enumerateDevices(&deviceList);
	for (int i = 0; i < deviceList.getSize(); ++i)
	{
		printf("Device \"%s\" already connected\n", deviceList[i].getUri());
	}

	Device device;
	rc = device.open(ANY_DEVICE);
	if (rc != STATUS_OK)
	{
		printf("Couldn't open device\n%s\n", OpenNI::getExtendedError());
		return 2;
	}

	VideoStream depth;

	if (device.getSensorInfo(SENSOR_DEPTH) != NULL)
	{
		rc = depth.create(device, SENSOR_DEPTH);
		if (rc != STATUS_OK)
		{
			printf("Couldn't create depth stream\n%s\n", OpenNI::getExtendedError());
		}
	}
	rc = depth.start();
	if (rc != STATUS_OK)
	{
		printf("Couldn't start the depth stream\n%s\n", OpenNI::getExtendedError());
	}


	PrintCallback depthPrinter;

	// Register to new frame
	depth.addNewFrameListener(&depthPrinter);
	

	int i = 1;
	while(i > 0)
	{
		scanf_s("%d", i);
		printf("%d\n", i);
	}

	depth.removeNewFrameListener(&depthPrinter);


	depth.stop();
	depth.destroy();
	device.close();
	OpenNI::shutdown();

	return 0;
}
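OpenNIDeviceListener is the connect/disconnect/state printer from the stock OpenNI2 EventBasedRead sample, and PrintCallback is the frame listener sketched under Example #1. A condensed sketch of the device listener:

class OpenNIDeviceListener : public OpenNI::DeviceConnectedListener,
                             public OpenNI::DeviceDisconnectedListener,
                             public OpenNI::DeviceStateChangedListener
{
public:
	virtual void onDeviceStateChanged(const DeviceInfo* pInfo, DeviceState state)
	{
		printf("Device \"%s\" error state changed to %d\n", pInfo->getUri(), state);
	}
	virtual void onDeviceConnected(const DeviceInfo* pInfo)
	{
		printf("Device \"%s\" connected\n", pInfo->getUri());
	}
	virtual void onDeviceDisconnected(const DeviceInfo* pInfo)
	{
		printf("Device \"%s\" disconnected\n", pInfo->getUri());
	}
};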