Example #1
void OpenNIDevice::DepthDataThreadFunction () throw (OpenNIException)
{
  while (running_)
  {
    // lock before checking running flag
    unique_lock<mutex> depth_lock (depth_mutex_);
    if (!running_)
      return;
    depth_condition_.wait (depth_lock);
    if (!running_)
      return;

    depth_generator_.WaitAndUpdateData ();
    xn::DepthMetaData depth_data;
    depth_generator_.GetMetaData (depth_data);
    depth_lock.unlock ();

    DepthImage depth_image (depth_data, baseline_, getDepthFocalLength (), shadow_value_, no_sample_value_);

    for (map< OpenNIDevice::CallbackHandle, ActualDepthImageCallbackFunction >::iterator callbackIt = depth_callback_.begin ();
         callbackIt != depth_callback_.end (); ++callbackIt)
    {
      callbackIt->second.operator()(depth_image);
    }
  }
}
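The wait-then-recheck pattern above is how this thread guards against shutdown races: `running_` is tested under the lock both before and after the wait. With the C++11 predicate overload of `condition_variable::wait` the same guard can be written more compactly. A minimal standalone sketch follows; the `data_available_` flag and the `waitForDepthData` function are hypothetical and not part of the original OpenNIDevice class:

#include <condition_variable>
#include <mutex>

std::mutex depth_mutex_;
std::condition_variable depth_condition_;
bool running_ = true;
bool data_available_ = false;  // hypothetical flag, set by the producer thread

void waitForDepthData ()
{
  std::unique_lock<std::mutex> depth_lock (depth_mutex_);
  // The predicate is re-evaluated on every wakeup, so spurious wakeups and
  // the shutdown flag are handled in one place.
  depth_condition_.wait (depth_lock, [&] { return data_available_ || !running_; });
  if (!running_)
    return;
  data_available_ = false;
  // ... fetch and process the new depth frame here ...
}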
Example #2
int main(int argc, char **argv) {

    std::string test_filename = "/tmp/rgbd_test_image";

    {
        image_geometry::PinholeCameraModel cam_model;

        cv::Mat rgb_image(480, 640, CV_8UC3, cv::Scalar(0,0,255));
        cv::line(rgb_image, cv::Point2i(100, 100), cv::Point2i(200, 300), cv::Scalar(255, 0, 0), 3);
        cv::Mat depth_image(480, 640, CV_32FC1, cv::Scalar(3));
        cv::line(depth_image, cv::Point2i(100, 100), cv::Point2i(200, 300), cv::Scalar(1), 3);

        rgbd::Image image(rgb_image, depth_image, cam_model, "no_frame", 0);

        // write
        std::ofstream f_out;
        f_out.open(test_filename.c_str(), std::ofstream::binary);

        tue::serialization::OutputArchive a_out(f_out);

        if (!rgbd::serialize(image, a_out, rgbd::RGB_STORAGE_LOSSLESS, rgbd::DEPTH_STORAGE_LOSSLESS))
        {
            std::cout << "Could not store image to disk" << std::endl;
            return 1;
        }
    }
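    // Leaving this scope destroys a_out and f_out, which flushes and closes
    // the file before it is read back below.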

    std::cout << "Image stored to disk." << std::endl;

    rgbd::Image image;

    {
        // read
        std::ifstream f_in;
        f_in.open(test_filename.c_str(), std::ifstream::binary);

        tue::serialization::InputArchive a_in(f_in);

        rgbd::deserialize(a_in, image);
    }

    std::cout << "Image loaded from disk." << std::endl;
//    std::cout << "    size:  " << image.getWidth() << " x " << image.getHeight() << std::endl;
    std::cout << "    frame: " << image.getFrameId() << std::endl;
    std::cout << "    time:  " << ros::Time(image.getTimestamp()) << std::endl;

    if (image.getRGBImage().data)
        cv::imshow("rgb", image.getRGBImage());
    else
        std::cout << "File did not contain RGB data." << std::endl;

    if (image.getDepthImage().data)
        cv::imshow("depth", image.getDepthImage() / 8);
    else
        std::cout << "File did not contain Depth data" << std::endl;
    cv::waitKey();

    return 0;
}
Example #3
void Client::imageCallback(sensor_msgs::ImageConstPtr rgb_image_msg, sensor_msgs::ImageConstPtr depth_image_msg)
{
    if (!ros_image_sync_data_->cam_model_.initialized())
        return;

    cv_bridge::CvImagePtr img_ptr, depth_img_ptr;

    // Convert RGB image
    try
    {
        img_ptr = cv_bridge::toCvCopy(rgb_image_msg, sensor_msgs::image_encodings::BGR8);
    }
    catch (cv_bridge::Exception& e)
    {
        ROS_ERROR("Could not deserialize rgb image: %s", e.what());
        return;
    }

    // Convert depth image
    try
    {
        depth_img_ptr = cv_bridge::toCvCopy(depth_image_msg, depth_image_msg->encoding);

        if (depth_image_msg->encoding == "16UC1")
        {
            // depths are 16-bit unsigned ints, in mm. Convert to 32-bit float (meters)
            cv::Mat depth_image(depth_img_ptr->image.rows, depth_img_ptr->image.cols, CV_32FC1);
            for(int x = 0; x < depth_image.cols; ++x)
            {
                for(int y = 0; y < depth_image.rows; ++y)
                {
                    depth_image.at<float>(y, x) = ((float)depth_img_ptr->image.at<unsigned short>(y, x)) / 1000; // (mm to m)
                }
            }
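            // NOTE (not in the original source): cv::Mat::convertTo performs
            // the same mm-to-m conversion in a single call:
            //   depth_img_ptr->image.convertTo(depth_image, CV_32FC1, 0.001);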

            depth_img_ptr->image = depth_image;
        }

    }
    catch (cv_bridge::Exception& e)
    {
        ROS_ERROR("Could not deserialize depth image: %s", e.what());
        return;
    }

    if (!image_ptr_) {
        // in this case, the pointer will always be wrapped in a shared ptr, so no mem leaks (see nextImage() )
        image_ptr_ = new Image();
    }

    image_ptr_->rgb_image_ = img_ptr->image;
    image_ptr_->depth_image_ = depth_img_ptr->image;
    image_ptr_->frame_id_ = rgb_image_msg->header.frame_id;
    image_ptr_->timestamp_ = rgb_image_msg->header.stamp.toSec();
}
Example #4
void FaceDetector::writeFaceDetectionResult(const ed::Measurement& msr, const cv::Rect& rgb_roi,
                                            const std::vector<cv::Rect>& rgb_face_rois,
                                            int& face_counter, tue::Configuration& result) const
{
    // get color image
    const cv::Mat& color_image = msr.image()->getRGBImage();

    // get depth image
    const cv::Mat& depth_image = msr.image()->getDepthImage();

    // Calculate size factor between depth and rgb images
    double f_depth_rgb = (double)depth_image.cols / color_image.cols;

    // Create depth view
    rgbd::View depth_view(*msr.image(), depth_image.cols);

    for (uint j = 0; j < rgb_face_rois.size(); j++)
    {
        cv::Rect rgb_face_roi = rgb_face_rois[j];
        rgb_face_roi.x += rgb_roi.x;
        rgb_face_roi.y += rgb_roi.y;

        if (debug_mode_)
            ed::perception::saveDebugImage("face_detector-rgb", color_image(rgb_face_roi));

        result.addArrayItem();

        result.setValue("index", face_counter);

        // add 2D location of the face
        result.setValue("x", rgb_face_roi.x);
        result.setValue("y", rgb_face_roi.y);
        result.setValue("width", rgb_face_roi.width);
        result.setValue("height", rgb_face_roi.height);

        // Compute face roi for depth image
        cv::Rect depth_face_roi(f_depth_rgb * rgb_face_roi.x, f_depth_rgb * rgb_face_roi.y,
                                f_depth_rgb * rgb_face_roi.width, f_depth_rgb * rgb_face_roi.height);

        if (debug_mode_)
            ed::perception::saveDebugImage("face_detector-depth", depth_image(depth_face_roi));

        cv::Mat face_area = depth_image(depth_face_roi);
        float avg_depth = ed::perception::getMedianDepth(face_area);

        if (avg_depth > 0)
        {
            // calculate the center point of the face
            cv::Point2i p_2d(depth_face_roi.x + depth_face_roi.width/2,
                             depth_face_roi.y + depth_face_roi.height/2);

            geo::Vector3 projection = depth_view.getRasterizer().project2Dto3D(p_2d.x, p_2d.y) * avg_depth;
            geo::Vector3 point_map = msr.sensorPose() * projection;

            // add 3D location of the face
            result.setValue("map_x", point_map.x);
            result.setValue("map_y", point_map.y);
            result.setValue("map_z", point_map.z);

        }
        else
        {
            std::cout << "[ED FACE DETECTOR] Could not calculate face's average depth. Map coordinates might be incorrect!" << std::endl;
        }

        result.endArrayItem();
        face_counter++;
    }
}
Example #5
File: main.cpp Project: EricLYang/fovis
int main(int argc, char **argv)
{
  const char *optstring = "hc:v";
  int c;
  struct option long_opts[] = {
    { "help", no_argument, 0, 'h' },
    { "camera", required_argument, 0, 'c' },
    { "visualize", no_argument, 0, 'v' },
    { 0, 0, 0, 0 }
  };

  std::string camera_id;
  bool visualize = false;

  while ((c = getopt_long (argc, argv, optstring, long_opts, 0)) >= 0) {
    switch (c) {
      case 'c':
        camera_id = optarg;
        break;
      case 'v':
        visualize = true;
        break;
      default:
        usage();
        return 1;
    };
  }

  if(optind >= argc - 1) {
    usage();
    return 1;
  }

  std::string datadir = argv[optind];
  std::string outdir = argv[optind+1];

  if(camera_id.empty()) {
    err("Missing --camera parameter.  Run with -h for options.");
    return 1;
  }

  fovis::CameraIntrinsicsParameters camParams;
  camParams.width = 640;
  camParams.height = 480;

  // RGB camera parameters
  if(camera_id == "fr1") {
    camParams.fx = 517.306408;
    camParams.fy = 516.469215;
    camParams.cx = 318.643040;
    camParams.cy = 255.313989;
    camParams.k1 = 0.262383;
    camParams.k2 = -0.953104;
    camParams.p1 = -0.005358;
    camParams.p2 = 0.002628;
    camParams.k3 = 1.163314;
  } else if(camera_id == "fr2") {
    camParams.fx = 520.908620;
    camParams.fy = 521.007327;
    camParams.cx = 325.141442;
    camParams.cy = 249.701764;
    camParams.k1 = 0.231222;
    camParams.k2 = -0.784899;
    camParams.p1 = -0.003257;
    camParams.p2 = -0.000105;
    camParams.k3 =  0.917205;
  } else if(camera_id == "fr3") {
    camParams.fx = 537.960322;
    camParams.fy = 539.597659;
    camParams.cx = 319.183641;
    camParams.cy = 247.053820;
    camParams.k1 = 0.026370;
    camParams.k2 = -0.100086;
    camParams.p1 = 0.003138;
    camParams.p2 = 0.002421;
    camParams.k3 = 0.000000;
  } else {
    err("Unknown camera id [%s]", camera_id.c_str());
    return 1;
  }

  info("Loading data from: [%s]\n", datadir.c_str());

  fovis::Rectification rect(camParams);

  // If we wanted to play around with the different VO parameters, we could set
  // them here in the "options" variable.
  fovis::VisualOdometryOptions options =
      fovis::VisualOdometry::getDefaultOptions();

  // setup the visual odometry
  fovis::VisualOdometry odom(&rect, options);

  // create the output trajectory file
  std::string traj_fname = outdir + "/traj.txt";
  FILE* traj_fp = fopen(traj_fname.c_str(), "w");
  if(!traj_fp) {
    err("Unable to create %s - %s", traj_fname.c_str(), strerror(errno));
    return 1;
  }
  std::string gray_pgm_dir = outdir + "/gray";
  mkdir(gray_pgm_dir.c_str(), 0755);
  std::string depth_pgm_dir = outdir + "/depth";
  mkdir(depth_pgm_dir.c_str(), 0755);
  std::string vis_dir = outdir + "/vis";
  mkdir(vis_dir.c_str(), 0755);

  // read the RGB and depth index files
  std::vector<TimestampFilename> rgb_fnames =
    read_file_index(datadir + "/rgb.txt");
  std::vector<TimestampFilename> depth_fnames =
    read_file_index(datadir + "/depth.txt");
  int depth_fname_index = 0;

  DrawImage draw_img(camParams.width, camParams.height * 2);

  for(int rgb_fname_index=0;
      rgb_fname_index < rgb_fnames.size();
      rgb_fname_index++) {

    int64_t timestamp = rgb_fnames[rgb_fname_index].timestamp;
    std::string rgb_fname =
      datadir + "/" + rgb_fnames[rgb_fname_index].filename;
    long long ts_sec = timestamp / 1000000;
    long ts_usec = timestamp % 1000000;
    char ts_str[80];
    snprintf(ts_str, 80, "%lld.%06ld", ts_sec, ts_usec);

    // match the RGB image up with a depth image
    bool matched_depth_image = false;
    while(depth_fname_index < depth_fnames.size()) {
      int64_t depth_ts = depth_fnames[depth_fname_index].timestamp;

      // declare a depth image match if the depth image timestamp is within
      // 40ms of the RGB image.
      int64_t dt_usec = depth_ts - timestamp;
      if(dt_usec > 40000) {
        dbg("  stop %lld.%06ld (dt %f)",
            (long long)(depth_ts / 1000000),
            (long)(depth_ts % 1000000),
            dt_usec / 1000.);
        break;
      } else if(dt_usec < -40000) {
        dbg("  skip %lld.%06ld (dt %f)",
            (long long)(depth_ts / 1000000),
            (long)(depth_ts % 1000000),
            dt_usec / 1000.);
        depth_fname_index++;
      } else {
        matched_depth_image = true;
        dbg("  mtch %lld.%06ld (dt %f)",
            (long long)(depth_ts / 1000000),
            (long)(depth_ts % 1000000),
            dt_usec / 1000.);
        break;
      }
    }

    if(!matched_depth_image) {
      // didn't find a depth image with a close enough timestamp.  Skip this
      // RGB image.
      info("# skip %s", ts_str);
      continue;
    }

    std::string depth_fname =
      datadir + "/" + depth_fnames[depth_fname_index].filename;
    depth_fname_index++;

    // read RGB data
    std::vector<uint8_t> rgb_data;
    unsigned rgb_width;
    unsigned rgb_height;
    std::vector<uint8_t> rgb_png_data;
    lodepng::load_file(rgb_png_data, rgb_fname.c_str());
    if(rgb_png_data.empty()) {
      err("Failed to load %s", rgb_fname.c_str());
      continue;
    }
    if(0 != lodepng::decode(rgb_data, rgb_width, rgb_height,
        rgb_png_data, LCT_RGB, 8)) {
      err("Error decoding PNG %s", rgb_fname.c_str());
      continue;
    }

    // convert RGB data to grayscale
    std::vector<uint8_t> gray_data(rgb_width*rgb_height);
    const uint8_t* rgb_pixel = &rgb_data[0];
    for(int pixel_index=0; pixel_index<rgb_width*rgb_height; pixel_index++) {
      uint8_t r = rgb_pixel[0];
      uint8_t g = rgb_pixel[1];
      uint8_t b = rgb_pixel[2];
      gray_data[pixel_index] = (r + g + b) / 3;
      rgb_pixel += 3;
    }
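    // NOTE (not in the original): the unweighted average is a rough grayscale
    // conversion; a luma-weighted form such as 0.299*r + 0.587*g + 0.114*b
    // (ITU-R BT.601) is more common, though the average suffices for feature
    // detection here.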

    // write gray image out.
    write_pgm(outdir + "/gray/" + ts_str + "-gray.pgm",
        &gray_data[0], rgb_width, rgb_height, rgb_width);

    // read depth data
    std::vector<uint8_t> depth_data_u8;
    unsigned depth_width;
    unsigned depth_height;
    std::vector<uint8_t> depth_png_data;
    lodepng::load_file(depth_png_data, depth_fname.c_str());
    if(depth_png_data.empty()) {
      err("Failed to load %s", depth_fname.c_str());
      continue;
    }
    if(0 != lodepng::decode(depth_data_u8, depth_width, depth_height,
        depth_png_data, LCT_GREY, 16)) {
      err("Error decoding PNG %s", depth_fname.c_str());
      continue;
    }

    // convert depth data to a DepthImage object
    fovis::DepthImage depth_image(camParams, depth_width, depth_height);
    std::vector<float> depth_data(depth_width * depth_height);
    for(int i=0; i<depth_width*depth_height; i++) {
      // lodepng loads 16-bit PNGs into memory with big-endian ordering.
      // swizzle the bytes to get a uint16_t
      uint8_t high_byte = depth_data_u8[i*2];
      uint8_t low_byte = depth_data_u8[i*2+1];
      uint16_t data16 = (high_byte << 8) | low_byte;
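      // The 16-bit depth PNGs follow the TUM RGB-D benchmark convention:
      // values are in units of 1/5000 m, and 0 marks missing data.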
      if(0 == data16)
        depth_data[i] = NAN;
      else
        depth_data[i] = data16 / float(5000);
    }
    depth_image.setDepthImage(&depth_data[0]);

    // debug depth image
    std::vector<uint8_t> depth_pgm_data(depth_width * depth_height);
    float depth_pgm_max_depth = 5;
    for(int i=0; i<depth_width*depth_height; i++) {
      if(isnan(depth_data[i]))
        depth_pgm_data[i] = 0;
      else {
        float d = depth_data[i];
        if(d > depth_pgm_max_depth)
          d = depth_pgm_max_depth;
        depth_pgm_data[i] = 255 * d / depth_pgm_max_depth;
      }
    }
    write_pgm(outdir + "/depth/" + ts_str + "-depth.pgm",
        &depth_pgm_data[0], depth_width, depth_height, depth_width);

    // process the frame
    odom.processFrame(&gray_data[0], &depth_image);

    fovis::MotionEstimateStatusCode status = odom.getMotionEstimateStatus();
    switch(status) {
      case fovis::INSUFFICIENT_INLIERS:
        printf("# %s - insufficient inliers\n", ts_str);
        break;
      case fovis::OPTIMIZATION_FAILURE:
        printf("# %s - optimization failed\n", ts_str);
        break;
      case fovis::REPROJECTION_ERROR:
        printf("# %s - reprojection error too high\n", ts_str);
        break;
      case fovis::NO_DATA:
        printf("# %s - no data\n", ts_str);
        break;
      case fovis::SUCCESS:
      default:
        break;
    }

    // get the integrated pose estimate.
    Eigen::Isometry3d cam_to_local = odom.getPose();
    Eigen::Vector3d t = cam_to_local.translation();
    Eigen::Quaterniond q(cam_to_local.rotation());

    printf("%lld.%06ld %f %f %f %f %f %f %f\n",
        ts_sec, ts_usec, t.x(), t.y(), t.z(), q.x(), q.y(), q.z(), q.w());
    fprintf(traj_fp, "%lld.%06ld %f %f %f %f %f %f %f\n",
        ts_sec, ts_usec, t.x(), t.y(), t.z(), q.x(), q.y(), q.z(), q.w());

    // visualization
    if(visualize) {
      DrawColor status_colors[] = {
        DrawColor(255, 255, 0),
        DrawColor(0, 255, 0),
        DrawColor(255, 0, 0),
        DrawColor(255, 0, 255),
        DrawColor(127, 127, 0)
      };
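      // These colors are indexed by the fovis status code below, so their
      // order must match the numeric values of fovis::MotionEstimateStatusCode.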

      DrawColor red(255, 0, 0);
      DrawColor green(0, 255, 0);
      DrawColor blue(0, 0, 255);

      memset(&draw_img.data[0], 0, draw_img.data.size());

      const fovis::OdometryFrame* ref_frame = odom.getReferenceFrame();
      const fovis::OdometryFrame* tgt_frame = odom.getTargetFrame();

      const fovis::PyramidLevel* ref_pyr_base = ref_frame->getLevel(0);
      const uint8_t* ref_img_data = ref_pyr_base->getGrayscaleImage();
      int ref_img_stride = ref_pyr_base->getGrayscaleImageStride();

      const fovis::PyramidLevel* tgt_pyr_base = tgt_frame->getLevel(0);
      const uint8_t* tgt_img_data = tgt_pyr_base->getGrayscaleImage();
      int tgt_img_stride = tgt_pyr_base->getGrayscaleImageStride();

      int tgt_yoff = ref_pyr_base->getHeight();

      // draw the reference frame on top
      draw_gray_img_rgb(ref_img_data, ref_pyr_base->getWidth(),
          ref_pyr_base->getHeight(), ref_img_stride, 0, 0, &draw_img);

      // draw the target frame on bottom
      draw_gray_img_rgb(tgt_img_data, tgt_pyr_base->getWidth(),
          tgt_pyr_base->getHeight(), tgt_img_stride, 0, tgt_yoff, &draw_img);

      const fovis::MotionEstimator* mestimator = odom.getMotionEstimator();

      int num_levels = ref_frame->getNumLevels();
      for(int level_index=0; level_index<num_levels; level_index++) {
        // draw reference features
        const fovis::PyramidLevel* ref_level = ref_frame->getLevel(level_index);
        int num_ref_keypoints = ref_level->getNumKeypoints();
        for(int ref_kp_ind=0; ref_kp_ind<num_ref_keypoints; ref_kp_ind++) {
          const fovis::KeypointData* ref_kp = ref_level->getKeypointData(ref_kp_ind);
          int ref_u = (int)round(ref_kp->base_uv.x());
          int ref_v = (int)round(ref_kp->base_uv.y());
          draw_box_rgb(ref_u-1, ref_v-1, ref_u+1, ref_v+1, blue, &draw_img);
        }

        // draw target features
        const fovis::PyramidLevel* tgt_level = tgt_frame->getLevel(level_index);
        int num_tgt_keypoints = tgt_level->getNumKeypoints();
        for(int tgt_kp_ind=0; tgt_kp_ind<num_tgt_keypoints; tgt_kp_ind++) {
          const fovis::KeypointData* tgt_kp = tgt_level->getKeypointData(tgt_kp_ind);
          int tgt_u = (int)round(tgt_kp->base_uv.x());
          int tgt_v = (int)round(tgt_kp->base_uv.y());
          draw_box_rgb(tgt_u-1, tgt_v-1 + tgt_yoff, tgt_u+1, tgt_v+1 + tgt_yoff,
              blue, &draw_img);
        }
      }

      const fovis::FeatureMatch* matches = mestimator->getMatches();
      int num_matches = mestimator->getNumMatches();

      // draw non-inlier matches
      for(int match_index=0; match_index<num_matches; match_index++) {
        const fovis::FeatureMatch& match = matches[match_index];
        if(match.inlier)
          continue;
        const fovis::KeypointData* ref_keypoint = match.ref_keypoint;
        const fovis::KeypointData* tgt_keypoint = match.target_keypoint;

        int ref_u = (int)round(ref_keypoint->base_uv.x());
        int ref_v = (int)round(ref_keypoint->base_uv.y());

        int tgt_u = (int)round(tgt_keypoint->base_uv.x());
        int tgt_v = (int)round(tgt_keypoint->base_uv.y());

        draw_line_rgb(ref_u, ref_v,
            tgt_u, tgt_v + tgt_yoff,
            red, &draw_img);
      }

      // draw inlier matches
      for(int match_index=0; match_index<num_matches; match_index++) {
        const fovis::FeatureMatch& match = matches[match_index];
        if(!match.inlier)
          continue;
        const fovis::KeypointData* ref_keypoint = match.ref_keypoint;
        const fovis::KeypointData* tgt_keypoint = match.target_keypoint;

        int ref_u = (int)round(ref_keypoint->base_uv.x());
        int ref_v = (int)round(ref_keypoint->base_uv.y());

        int tgt_u = (int)round(tgt_keypoint->base_uv.x());
        int tgt_v = (int)round(tgt_keypoint->base_uv.y());

        draw_line_rgb(ref_u, ref_v,
            tgt_u, tgt_v + tgt_yoff,
            green, &draw_img);
      }

      // draw a couple lines indicating the VO status
      draw_box_rgb(0, tgt_yoff - 1, draw_img.width,
          tgt_yoff + 1, status_colors[status], &draw_img);

      // save visualization
      std::string vis_fname = vis_dir + "/" + ts_str + "-vis.png";
      lodepng::encode(vis_fname, &draw_img.data[0], draw_img.width,
          draw_img.height, LCT_RGB);
    }
  }

  fclose(traj_fp);

  return 0;
}
Example #6
int main(int argc, char** argv) {
	// (1) Open OpenNI2 device
	StructureGrabber grabber;
	grabber.setDebugMode();
	grabber.enableDepth();
	grabber.enableInfrared();
	grabber.disableColor();
	grabber.setDepthRange(0.3, 5.0);
	if(argc > 1 ? grabber.open(argv[1]) : grabber.open()) {
		std::cerr << "Faled to open OpenNI2 device" << std::endl;
		return -1;
	}

	// (2) Prepare for images
	cv::Size image_size = cv::Size(grabber.getWidth(), grabber.getHeight());
	cv::Mat depth_image(image_size, CV_8UC1, cv::Scalar::all(0));
	cv::Mat infrared_image(image_size, CV_8UC1, cv::Scalar::all(0));
	
	// (3) Iterate
	int key(0);
	while(key != 'q' && key != 27) {
		// (3-1) Acquire new frame
		grabber.acquire();

		// (3-2) Create depth image
		if(grabber.copyDepthImageTo(depth_image)) {
			std::cerr << "Failed to copy the depth image" << std::endl;
		}

		// (3-3) Create IR image
		if(grabber.copyInfraredImageTo(infrared_image)) {
			std::cerr << "Failed to copy the infrared image" << std::endl;
		}

		// (3-4) Flip images
		cv::flip(depth_image, depth_image, 1);
		cv::flip(infrared_image, infrared_image, 1);

		// (3-5) Visualize
		cv::imshow("Depth", depth_image);
		cv::imshow("Infrared", infrared_image);
		key = cv::waitKey(10);

		// (3-6) Save
		switch(key) {
		case 's':
			std::cout << "Saving...";
			cv::imwrite("capture_depth.png", depth_image);
			cv::imwrite("capture_infrared.png", infrared_image);
			std::cout << "done" << std::endl;
			break;
		case 'v':
			std::cout << "Started recording" << std::endl;
			grabber.startRecording("capture.oni");
			break;
		case 'n':
			std::cout << "Stopped recording" << std::endl;
			grabber.stopRecording();
			break;
		default:
			break;
		};
	}

	// (4) Close OpenNI2 device
	grabber.close();
	return 0;
}
Example #7
File: viewer.cpp Project: svddries/mwm
void Viewer::render(const WorldModel& wm)
{
    // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

    if (!cam_initialized_)
    {
        if (!wm.points().empty() || !wm.vertices().empty())
        {
            geo::Vec3 p_total(0, 0, 0);
            for(unsigned int i = 0; i < wm.points().size(); ++i)
                p_total += wm.points()[i];

            for(unsigned int i = 0; i < wm.vertices().size(); ++i)
                p_total += wm.vertices()[i];

            cam_control_.cam_lookat = p_total / (wm.points().size() + wm.vertices().size());
            cam_initialized_ = true;
        }
        else
        {
            cam_control_.cam_lookat = geo::Vec3(0, 0, 0);
        }

        cam_control_.cam_dist = 5;
        cam_control_.cam_pitch = 0.7;
        cam_control_.cam_yaw = 3.1415;
    }

    // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    // Calculate camera pose

    cam_control_.cam_pose.t = geo::Vector3(cos(cam_control_.cam_yaw), sin(cam_control_.cam_yaw), 0)
                                * cos(cam_control_.cam_pitch) * cam_control_.cam_dist;
    cam_control_.cam_pose.t.z = sin(cam_control_.cam_pitch) * cam_control_.cam_dist;
    cam_control_.cam_pose.t += cam_control_.cam_lookat;

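    // Build a look-at rotation: rz points from the target back toward the
    // camera, rx lies in the horizontal plane (perpendicular to world z and
    // rz), and ry completes the right-handed basis.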
    geo::Vec3 rz = -(cam_control_.cam_lookat - cam_control_.cam_pose.t).normalized();
    geo::Vec3 rx = geo::Vector3(0, 0, 1).cross(rz).normalized();
    geo::Vec3 ry = rz.cross(rx).normalized();

    cam_control_.cam_pose.R = geo::Mat3(rx.x, ry.x, rz.x,
                                        rx.y, ry.y, rz.y,
                                        rx.z, ry.z, rz.z);

    // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

    canvas_.setTo(cv::Vec3b(20, 20, 20));
    cv::Mat depth_image(canvas_.rows, canvas_.cols, CV_32FC1, 0.0);

    LightingRenderer res(depth_image, canvas_, cam_control_.cam_pose);
    mwm::render::renderDepth(wm, P_, cam_control_.cam_pose, res);

    // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

    geo::Pose3D sensor_pose_inv = cam_control_.cam_pose.inverse();

    for(unsigned int i = 0; i < wm.points().size(); ++i)
    {
        const geo::Vec3& p = wm.points()[i];
        const cv::Vec3b& color = wm.point_colors()[i];

        geo::Vec3 p_sensor = sensor_pose_inv * p;
        geo::Vec2i p_2d = P_.project3Dto2D(p_sensor);
        double z = -p_sensor.z;

//        if (z < 0)
//            continue;

        if (p_2d.x < 0 || p_2d.y < 0 || p_2d.x >= canvas_.cols || p_2d.y >= canvas_.rows)
            continue;

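        // Simple z-buffer test: draw the point only if the pixel is still
        // empty or the new point is closer than the one already drawn.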
        float& d = depth_image.at<float>(p_2d.y, p_2d.x);
        if (d == 0 || z < d)
        {
            d = z;
            canvas_.at<cv::Vec3b>(p_2d.y, p_2d.x) = color;
//            cv::circle(canvas_, cv::Point(p_2d.x, p_2d.y), 3, cv::Scalar(color[0], color[1], color[2]), CV_FILLED);
        }
    }

//    unsigned int size = canvas_.rows * canvas_.cols;
//    for(unsigned int i = 0; i < size; ++i)
//    {
//        float d = depth_image.at<float>(i);
//        if (d == 0)
//            canvas_.at<cv::Vec3b>(i) = cv::Vec3b(20, 20, 20);
//        else
//            canvas_.at<cv::Vec3b>(i) = (d / 10) * cv::Vec3b(255, 255, 255);
//    }
}
Example #8
void MappingNode::callback(const sensor_msgs::ImageConstPtr& depth_image_msg,
                           const sensor_msgs::ImageConstPtr& thermal_image_msg)
{

    cv_bridge::CvImagePtr cv_depth_image_ptr, cv_thermal_image_ptr;

    try{
        cv_depth_image_ptr   = cv_bridge::toCvCopy(depth_image_msg, "16UC1");
        cv_thermal_image_ptr = cv_bridge::toCvCopy(thermal_image_msg, "32FC1"); //16UC1 for thermal raw, bgr8 for colored
    }
    catch (cv_bridge::Exception& e)
    {
        ROS_ERROR("cv_bridge exception: %s", e.what());
        return;
    }

    cv::Mat depth_image, thermal_image;
    depth_image   = cv_depth_image_ptr->image.clone();
    thermal_image = cv_thermal_image_ptr->image.clone();

    // Shift the depth image 4 pixels right and down (presumably a fixed
    // registration offset between the depth and thermal sensors); the shifted
    // image keeps the original 16UC1 type.
    cv::Mat depth_image_shifted = cv::Mat::zeros(depth_image.size(), depth_image.type());

    depth_image(cv::Rect(0,0,depth_image.cols-4,depth_image.rows-4)).copyTo(depth_image_shifted(cv::Rect(4,4,depth_image.cols-4,depth_image.rows-4)));

    depth_image = depth_image_shifted.clone();

    cv::Mat thermalImageCast = cv::Mat::ones(thermal_image.rows,thermal_image.cols, CV_8UC1);

    float newMinCelsius = 20.0;
    float newMaxCelsius = 40.0;

    for (int y = 0; y < thermal_image.rows; y++)
    {
        for (int x = 0; x < thermal_image.cols; x++)
        {
            if(thermal_image.at<float>(y,x) <= newMinCelsius){
                thermalImageCast.at<unsigned char>(y,x) = 0;
            }else if (thermal_image.at<float>(y,x) >= newMaxCelsius){
                thermalImageCast.at<unsigned char>(y,x) = 255;
            }else{

                thermalImageCast.at<unsigned char>(y,x) = (thermal_image.at<float>(y,x) - newMinCelsius) / ( (newMaxCelsius - newMinCelsius) / 255.) ;
            }
        }
    }
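    // NOTE (not in the original): cv::Mat::convertTo can express essentially
    // the same clamped normalization without explicit loops, since it
    // saturates the result to [0, 255]:
    //   double scale = 255.0 / (newMaxCelsius - newMinCelsius);
    //   thermal_image.convertTo(thermalImageCast, CV_8UC1, scale, -newMinCelsius * scale);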

    thermal_image = thermalImageCast;

    //==============================================================
    // If you wish, you can apply a colormap here
    //==============================================================
    if(enable_colormap_){
        cv::applyColorMap(thermal_image, thermal_image, cv::COLORMAP_JET);
    }

    //cv::imshow("thermal_image colored", thermal_image);

    resizeImg(thermal_image, thermal_image, 640);


    cv::Mat depth_image_rect, thermal_image_rect;
    mapAndPublish(thermal_image, depth_image, thermal_image_rect, depth_image_rect);

    cv::waitKey(30);

}