// Example 1: render a mesh from sampled viewpoints and save the views to disk.
/// Renders the mesh in `file_name` from viewpoints sampled on a view sphere and
/// writes each view's color image, depth map and object mask to the current
/// directory as image_%05d.png / depth_%05d.png / mask_%05d.png.
///
/// @param file_name path of the mesh file loaded by Renderer3d
/// @param width     rendered image width, in pixels
/// @param height    rendered image height, in pixels
void render3d(std::string file_name, size_t width, size_t height) {
  Renderer3d renderer = Renderer3d(file_name);

  // Fixed example intrinsics: Kinect-like focal length, generous clip planes.
  double near = 0.1, far = 1000;
  double focal_length_x = 525, focal_length_y = 525;

  renderer.set_parameters(width, height, focal_length_x, focal_length_y, near, far);

  // 150 viewpoints sampled by the iterator over the view sphere.
  RendererIterator renderer_iterator = RendererIterator(&renderer, 150);

  cv::Rect rect;
  cv::Mat image, depth, mask;
  for (size_t i = 0; !renderer_iterator.isDone(); ++i, ++renderer_iterator) {
    try {
      renderer_iterator.render(image, depth, mask, rect);
      cv::imwrite(boost::str(boost::format("depth_%05d.png") % (i)), depth);
      cv::imwrite(boost::str(boost::format("image_%05d.png") % (i)), image);
      cv::imwrite(boost::str(boost::format("mask_%05d.png") % (i)), mask);
    } catch (...) {
      // Best-effort: a failing view is skipped, but report it — the previous
      // empty catch silently swallowed every failure, including imwrite
      // errors (e.g. disk full / unwritable directory).
      std::cerr << "render3d: skipping view " << i
                << " (render or imwrite failed)" << std::endl;
    }
  }
}
// Example 2: render reference/model views and align their point clouds with ICP.
/// Renders a reference view and a model view of the mesh in `file_name`, loads
/// the corresponding 3D point mats from disk, and aligns model to reference
/// with a coarse-to-fine ICP (3 passes). The final pose is rendered and the
/// resulting R / T / distance are printed to stdout.
///
/// @param file_name                mesh file loaded by Renderer3d
/// @param points_3d_ref_filename   file holding the reference Mat_<Vec3f> (read_mat)
/// @param points_3d_model_filename file holding the model Mat_<Vec3f> (read_mat)
/// @param width                    rendered image width, in pixels
/// @param height                   rendered image height, in pixels
void render_and_perform_icp(std::string file_name, std::string points_3d_ref_filename, 
                            std::string points_3d_model_filename, size_t width, size_t height) {
  Renderer3d renderer = Renderer3d(file_name);

  // Fixed example intrinsics and clip planes.
  double near = 0.3, far = 3500;
  double focal_length_x = 530, focal_length_y = 530;

  renderer.set_parameters(width, height, focal_length_x, focal_length_y, near, far);
  // A single-template iterator: we only render explicitly requested poses.
  RendererIterator renderer_iterator = RendererIterator(&renderer, 1);
  
  // Hard-coded poses the reference/model clouds were captured from.
  Matx33f R_object_ref(-0.7366852281578607, -0.4999999600934258, -0.4552965127480917,
                      0.4253253695228831, -0.8660254268245088, 0.2628655362227054,
                      -0.5257311144056662, 0, 0.8506508069388852);
  Vec3d T_object_ref(-0.36801174, 0.0, 0.5954555);
  
  Matx33f R_object_model(-0.8506508159122358, 0, -0.5257310998864799,
                        -0, -1, 0,
                        -0.5257310998864799, 0, 0.8506508159122358);
  Vec3d T_object_model(-0.368012, 0, 0.595456);
  
  // extract the UP vector from R (negated second column of R^-1)
  cv::Vec3d UP_ref(-(R_object_ref.inv()(0,1)),
                  -(R_object_ref.inv()(1,1)), 
                  -(R_object_ref.inv()(2,1)));
  cv::Vec3d UP_model(-(R_object_model.inv()(0,1)),
                    -(R_object_model.inv()(1,1)),
                    -(R_object_model.inv()(2,1)));
  
  cv::Rect rect;
  cv::Mat image, depth, mask;
  // render reference and model, just for visualization
  try {
    renderer_iterator.render(image, depth, mask, rect, T_object_ref, UP_ref);
    imwrite("rendered_reference.png", image);
    renderer_iterator.render(image, depth, mask, rect, T_object_model, UP_model);
    imwrite("rendered_model.png", image);
  } catch (...) {
      std::cout << "RENDER EXCEPTION" << endl;
  }
  
  // retrieve the mats of 3d points
  Mat_<cv::Vec3f> points_3d_ref = read_mat(points_3d_ref_filename);
  Mat_<cv::Vec3f> points_3d_model = read_mat(points_3d_model_filename);
  
  // show them --warning-- QT could show them a bit deformed, they are fine if saved with imwrite
  visualize_3d_points(points_3d_ref, "reference 3d points");
  visualize_3d_points(points_3d_model, "model 3d points");
  
  // NOW PERFORM ICP
  
  // initialize the translation from the reference cloud's central point.
  // Mat_::operator() takes int indices: use integer division instead of the
  // previous float literals (rows / 2.0f), which narrowed implicitly.
  cv::Vec3f T_icp = points_3d_ref(points_3d_ref.rows / 2, points_3d_ref.cols / 2);
  // add the object's depth (center point lies on the surface, not the centroid)
  T_icp(2) += 0.065;
  // assert directly on the range check so a failure names the real condition
  CV_Assert(cv::checkRange(T_icp));
      
  // initialize the rotation based on model data
  CV_Assert(cv::checkRange(R_object_model));
  cv::Matx33f R_icp(R_object_model);
  
  //get the point clouds (for both reference and model)
  std::vector<cv::Vec3f> pts_real_model_temp;
  std::vector<cv::Vec3f> pts_real_ref_temp;
  float px_ratio_missing = matToVec(points_3d_model, points_3d_ref, pts_real_model_temp, pts_real_ref_temp);
  // abort when too many pixels have no valid correspondence
  CV_Assert(px_ratio_missing <= 0.25f);
    
  //perform the first approximate ICP (mode 1 = coarse)
  float px_ratio_match_inliers = 0.0f;
  float icp_dist = icpCloudToCloud(pts_real_ref_temp, pts_real_model_temp, R_icp, T_icp, px_ratio_match_inliers, 1);
  
  cout <<"Distance after first ICP: "<<icp_dist<<endl; 
  
  //perform a finer ICP (mode 2)
  icp_dist = icpCloudToCloud(pts_real_ref_temp, pts_real_model_temp, R_icp, T_icp, px_ratio_match_inliers, 2);
  
  cout <<"Distance after second ICP: "<<icp_dist<<endl;  
  
  //perform the final precise icp (mode 0)
  float icp_px_match = 0.0f;
  icp_dist = icpCloudToCloud(pts_real_ref_temp, pts_real_model_temp, R_icp, T_icp, icp_px_match, 0);
  
  // extract the UP vector from updated R
  cv::Vec3d UP_icp(-(R_icp.inv()(0,1)),
                  -(R_icp.inv()(1,1)), 
                  -(R_icp.inv()(2,1)));
  
  // render result
  try {
    renderer_iterator.render(image, depth, mask, rect, T_icp, UP_icp);
    imwrite("rendered_icp_result.png", image);
  } catch (...) {
      std::cout << "RENDER EXCEPTION" << endl;
  }
  
  cout << "------ICP output--------" << endl << endl;
  
  cout << "R: "<< R_icp << endl <<endl;
  cout << "T: "<< T_icp << endl <<endl;
  cout <<"Final distance after last ICP: "<<icp_dist<<endl; 
  cout <<"icp_px_match: "<<icp_px_match<<endl; 
  
  // keep the visualization windows open until a key is pressed
  waitKey(0);
}
// Example 3: ecto cell that trains a LINEMOD detector from rendered mesh views.
  /// Fetches the mesh attached to *object_id_ from the object-recognition DB,
  /// renders it from viewpoints sampled by RendererIterator, and adds each
  /// rendered (image, depth) pair as a LINEMOD template to *detector_, while
  /// recording the per-template pose (Rs_), translation (Ts_), distance
  /// (distances_) and intrinsics (Ks_).
  /// @return ecto::OK in all cases (also when no mesh is found, after a warning).
  int process(const tendrils& inputs, const tendrils& outputs) {
    // Get the document for the object_id_ from the DB
    object_recognition_core::db::ObjectDbPtr db =
        object_recognition_core::db::ObjectDbParameters(*json_db_).generateDb();
    object_recognition_core::db::Documents documents =
        object_recognition_core::db::ModelDocuments(db,
            std::vector<object_recognition_core::db::ObjectId>(1, *object_id_),
            "mesh");
    if (documents.empty()) {
      std::cerr << "Skipping object id \"" << *object_id_ << "\" : no mesh in the DB" << std::endl;
      return ecto::OK;
    }

    // Get the list of _attachments and figure out the original one.
    // Attachments whose name starts with "original" are preferred over "mesh".
    object_recognition_core::db::Document document = documents[0];
    std::vector<std::string> attachments_names = document.attachment_names();
    std::string mesh_path;
    std::vector<std::string> possible_names(2);
    possible_names[0] = "original";
    possible_names[1] = "mesh";
    for (size_t i = 0; i < possible_names.size() && mesh_path.empty(); ++i) {
      BOOST_FOREACH(const std::string& attachment_name, attachments_names) {
        // skip attachments that do not start with the wanted prefix
        if (attachment_name.find(possible_names[i]) != 0)
          continue;
        // Create a temporary file
        // NOTE(review): tmpnam() is racy (another process can create the file
        // between name generation and open) — consider mkstemp()/boost temp files.
        char mesh_path_tmp[L_tmpnam];
        tmpnam(mesh_path_tmp);
        // keep the attachment's extension so the mesh loader recognizes the format
        mesh_path = std::string(mesh_path_tmp) + attachment_name.substr(possible_names[i].size());

        // Load the mesh and save it to the temporary file
        std::ofstream mesh_file;
        mesh_file.open(mesh_path.c_str());
        document.get_attachment_stream(attachment_name, mesh_file);
        mesh_file.close();
      }
    }

    // Fresh LINEMOD detector (color gradient + depth normal modalities).
    cv::Ptr<cv::linemod::Detector> detector_ptr = cv::linemod::getDefaultLINEMOD();
    *detector_ = *detector_ptr;

    // Define the display
    //assign the parameters of the renderer (copy the input params into the outputs)
    *renderer_n_points_ = *param_n_points_;
    *renderer_angle_step_ = *param_angle_step_;
    *renderer_radius_min_ = *param_radius_min_;
    *renderer_radius_max_ = *param_radius_max_;
    *renderer_radius_step_ = *param_radius_step_;
    *renderer_width_ = *param_width_;
    *renderer_height_ = *param_height_;
    *renderer_near_ = *param_near_;
    *renderer_far_ = *param_far_;
    *renderer_focal_length_x_ = *param_focal_length_x_;
    *renderer_focal_length_y_ = *param_focal_length_y_;

    // the model name can be specified on the command line.
    Renderer3d renderer = Renderer3d(mesh_path);
    renderer.set_parameters(*renderer_width_, *renderer_height_, *renderer_focal_length_x_,
                            *renderer_focal_length_y_, *renderer_near_, *renderer_far_);

    // the renderer has loaded the mesh; the temporary file is no longer needed
    std::remove(mesh_path.c_str());

    RendererIterator renderer_iterator = RendererIterator(&renderer, *renderer_n_points_);
    //set the RendererIterator parameters
    renderer_iterator.angle_step_ = *renderer_angle_step_;
    renderer_iterator.radius_min_ = float(*renderer_radius_min_);
    renderer_iterator.radius_max_ = float(*renderer_radius_max_);
    renderer_iterator.radius_step_ = float(*renderer_radius_step_);

    cv::Mat image, depth, mask;
    cv::Matx33d R;
    cv::Vec3d T;
    cv::Matx33f K;
    for (size_t i = 0; !renderer_iterator.isDone(); ++i, ++renderer_iterator)
    {
      // progress indicator, erased below with backspaces so it updates in place
      std::stringstream status;
      status << "Loading images " << (i+1) << "/"
          << renderer_iterator.n_templates();
      std::cout << status.str();

      cv::Rect rect;
      renderer_iterator.render(image, depth, mask, rect);

      R = renderer_iterator.R_obj();
      T = renderer_iterator.T();
      // distance = |sampled camera distance - measured depth at the image center|
      // (depth is in millimeters, hence the /1000 to meters)
      // NOTE(review): float literals used as cv::Mat::at<> int indices — works
      // via truncation but should be depth.rows/2, depth.cols/2.
      float distance = fabs(renderer_iterator.D_obj() - float(depth.at<ushort>(depth.rows/2.0f, depth.cols/2.0f)/1000.0f));
      // intrinsics for this template: principal point at the crop-rect center
      K = cv::Matx33f(float(*renderer_focal_length_x_), 0.0f, float(rect.width)/2.0f, 0.0f, float(*renderer_focal_length_y_), float(rect.height)/2.0f, 0.0f, 0.0f, 1.0f);

      // LINEMOD modalities: [0] = color image, [1] = depth map
      std::vector<cv::Mat> sources(2);
      sources[0] = image;
      sources[1] = depth;

#if LINEMOD_VIZ_IMG
      // Display the rendered image
      if (*visualize_)
      {
        cv::namedWindow("Rendering");
        if (!image.empty()) {
          cv::imshow("Rendering", image);
          cv::waitKey(1);
        }
      }
#endif

      int template_in = detector_->addTemplate(sources, "object1", mask);
      // addTemplate returns -1 when the template is rejected (e.g. too few features)
      if (template_in == -1)
      {
        // Delete the status
        for (size_t j = 0; j < status.str().size(); ++j)
          std::cout << '\b';
        continue;
      }

      // Also store the pose of each template
      Rs_->push_back(cv::Mat(R));
      Ts_->push_back(cv::Mat(T));
      distances_->push_back(distance);
      Ks_->push_back(cv::Mat(K));

      // Delete the status
      for (size_t j = 0; j < status.str().size(); ++j)
        std::cout << '\b';
    }

      return ecto::OK;
    }