Example #1
void CalibrationMeshViewer::onCameraPositionUpdate(const cv::Vec3f &translation, const cv::Vec3f &rotation)
{
    if (!m_calibration_mode)
    {
        MeshViewer::onCameraPositionUpdate(translation, rotation);
        return;
    }

    GLdouble m[16];
    GLdouble deltam[16];

    const float rotation_scale = 0.2;
    const float translation_scale = 0.2;

    // Get the delta transformation in the visualization frame.
    makeCurrent();
    glMatrixMode(GL_MODELVIEW);
    glGetDoublev(GL_MODELVIEW_MATRIX, m);
    glLoadIdentity();
    glTranslatef(translation_scale * translation[0], translation_scale * translation[1], translation_scale * translation[2]);
    glTranslatef(m_display_center.x, m_display_center.y, m_display_center.z);
    glRotatef(rotation_scale * rotation[0], 0, 1, 0);
    glRotatef(rotation_scale * rotation[1], 1, 0, 0);
    glTranslatef(-m_display_center.x, -m_display_center.y, -m_display_center.z);
    glGetDoublev(GL_MODELVIEW_MATRIX, deltam);
    glLoadMatrixd(m);

    cv::Vec3f t,r;
    window->getCalibration(t, r);
    Pose3D p_old;
    p_old.applyTransformBefore(t, r);

    cv::Mat1d H_old = p_old.cvCameraTransformd();
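    // OpenGL returns matrices in column-major order; the transposes below
    // convert them to the row-major layout cv::Mat expects.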
    cv::Mat1d H(4,4,(double*)deltam); H = H.t(); // delta rotation AFTER model view matrix
    cv::Mat1d M(4,4,(double*)m); M = M.t(); // model view matrix

    cv::Mat1d Hp = (M.inv() * H * M * H_old.inv()).inv(); // delta rotation BEFORE model view matrix

    Pose3D p;
    p.setCameraTransform(Hp);

    window->updateFromCalibration(p.cvTranslation(), p.cvEulerRotation());
    window->updateToCalibration();
}
Example #2
template <class PointT>
bool RelativePoseEstimatorICP<PointT> ::
computeRegistration(Pose3D& relative_pose,
                    PointCloudConstPtr source_cloud,
                    PointCloudConstPtr target_cloud,
                    PointCloudType& aligned_cloud)
{
    pcl::IterativeClosestPoint<PointT, PointT> reg;
    reg.setMaximumIterations (m_max_iterations);
    reg.setTransformationEpsilon (1e-10);
    reg.setRANSACOutlierRejectionThreshold(m_ransac_outlier_threshold);
    reg.setMaxCorrespondenceDistance (m_distance_threshold);
    reg.setInputCloud (source_cloud);
    reg.setInputTarget (target_cloud);
    reg.align (aligned_cloud);

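    // Debug block: switch to if (1) to dump the aligned and target clouds as PLY meshes.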
    if (0)
    {
        ntk::Mesh mesh1, mesh2;
        pointCloudToMesh(mesh1, aligned_cloud);
        pointCloudToMesh(mesh2, *target_cloud);
        mesh1.saveToPlyFile("debug_icp_1.ply");
        mesh2.saveToPlyFile("debug_icp_2.ply");
    }

    if (!reg.hasConverged())
    {
      ntk_dbg(1) << "ICP did not converge, ignoring.";
      return false;
    }

    ntk_dbg_print(reg.getFitnessScore(), 1);

    Eigen::Matrix4f t = reg.getFinalTransformation ();
    cv::Mat1f T(4,4);
    // Copy the Eigen 4x4 transform into a cv::Mat1f element by element.
    for (int r = 0; r < 4; ++r)
        for (int c = 0; c < 4; ++c)
            T(r,c) = t(r,c);

    relative_pose.setCameraTransform(T);
    return true;
}
Example #3
void calibrate_kinect_depth(std::vector< std::vector<Point2f> >& stereo_corners,
                            std::vector< DepthCalibrationPoint >& depth_values)
{
  std::vector< std::vector<Point2f> > good_corners;
  stereo_corners.resize(global::images_list.size());
  for (int i_image = 0; i_image < global::images_list.size(); ++i_image)
  {
    QString filename = global::images_list[i_image];
    QDir cur_image_dir (global::images_dir.absoluteFilePath(filename));

    std::string full_filename;
    cv::Mat3b image;
    load_intensity_file(cur_image_dir.path().toStdString(), full_filename, image);
    ntk_ensure(image.data, "Could not load color image");

    kinect_shift_ir_to_depth(image);

    std::vector<Point2f> current_view_corners;
    calibrationCorners(full_filename, "corners",
                       global::opt_pattern_width(), global::opt_pattern_height(),
                       current_view_corners, image, 1);

    if (current_view_corners.size() == global::opt_pattern_height()*global::opt_pattern_width())
    {
      good_corners.push_back(current_view_corners);
      stereo_corners[i_image] = current_view_corners;
      showCheckerboardCorners(image, current_view_corners, 1);
    }
    else
    {
      ntk_dbg(0) << "Warning: corners not detected";
      stereo_corners[i_image].resize(0);
    }
  }

  std::vector< std::vector<Point3f> > pattern_points;
  calibrationPattern(pattern_points,
                     global::opt_pattern_width(),  global::opt_pattern_height(), global::opt_square_size(),
                     good_corners.size());

  ntk_assert(pattern_points.size() == good_corners.size(), "Invalid points size");

  std::vector<Mat> rvecs, tvecs;

  int flags = 0;
  if (global::opt_ignore_distortions())
    flags = CV_CALIB_ZERO_TANGENT_DIST;

  double error = calibrateCamera(pattern_points, good_corners, global::image_size,
                                 global::depth_intrinsics, global::depth_distortion,
                                 rvecs, tvecs, flags);

  if (global::opt_ignore_distortions())
    global::depth_distortion = 0.f;

  ntk_dbg_print(error, 1);
  int good_i = 0;
  foreach_idx(stereo_i, stereo_corners)
  {
    if (stereo_corners[stereo_i].size() > 0)
    {
      QString filename = global::images_list[stereo_i];
      QDir cur_image_dir (global::images_dir.absoluteFilePath(filename));
      std::string full_filename;
      cv::Mat3b image;
      load_intensity_file(cur_image_dir.path().toStdString(), full_filename, image);
      ntk_ensure(image.data, "Could not load intensity image");
      kinect_shift_ir_to_depth(image);

      cv::Mat1f depth_image;

      if (is_file(cur_image_dir.absoluteFilePath("raw/depth.yml").toStdString()))
      {
        full_filename = cur_image_dir.absoluteFilePath("raw/depth.yml").toStdString();
        depth_image = imread_yml(full_filename);
      }
      else if (is_file(cur_image_dir.absoluteFilePath("raw/depth.raw").toStdString()))
      {
        full_filename = cur_image_dir.absoluteFilePath("raw/depth.raw").toStdString();
        depth_image = imread_Mat1f_raw(full_filename);
      }
      ntk_ensure(depth_image.data, "Could not load depth image");

      cv::Mat3b undistorted_image;
      if (global::opt_ignore_distortions())
        image.copyTo(undistorted_image);
      else
        undistort(image, undistorted_image, global::depth_intrinsics, global::depth_distortion);

      std::vector<Point2f> current_view_corners;
      calibrationCorners(full_filename, "corners",
                         global::opt_pattern_width(), global::opt_pattern_height(),
                         current_view_corners, undistorted_image, 1);

      if (current_view_corners.size() == (global::opt_pattern_width()*global::opt_pattern_height()))
      {
        stereo_corners[stereo_i] = current_view_corners;
        showCheckerboardCorners(undistorted_image, stereo_corners[stereo_i], 200);
      }
      else
      {
        stereo_corners[stereo_i].resize(0);
        continue;
      }

      // Generate calibration points
      {
        // FIXME: why do the rvecs and tvecs from calibrateCamera seem to be nonsense?
        // Call findExtrinsics again to get good chessboard pose estimates.

        cv::Mat1f H;
        estimate_checkerboard_pose(pattern_points[0],
                                   current_view_corners,
                                   global::depth_intrinsics,
                                   H);
        Pose3D pose;
        pose.setCameraParametersFromOpencv(global::depth_intrinsics);
        ntk_dbg_print(pose, 1);
        pose.setCameraTransform(H);

        foreach_idx(pattern_i, pattern_points[0])
        {
          ntk_dbg_print(pattern_points[0][pattern_i], 1);
          Point3f p = pose.projectToImage(pattern_points[0][pattern_i]);
          ntk_dbg_print(p, 1);
          double kinect_raw = depth_image(p.y, p.x);
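          // Raw disparity 2047 marks an invalid Kinect reading; skip such pixels.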
          if (!(kinect_raw < 2047)) continue;
          ntk_dbg_print(kinect_raw, 1);
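          // Linear inverse model converting raw disparity to metric depth.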
          double linear_depth = 1.0 / (kinect_raw * -0.0030711016 + 3.3309495161);
          const float k1 = 1.1863;
          const float k2 = 2842.5;
          const float k3 = 0.1236;
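          // Alternative tangent-based conversion model (k1, k2, k3 are empirical constants).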
          double tan_depth = k3 * tanf(kinect_raw/k2 + k1);
          ntk_dbg_print(linear_depth, 1);
          ntk_dbg_print(tan_depth, 1);
          depth_values.push_back(DepthCalibrationPoint(kinect_raw, p.z));
        }
      }

      ++good_i;
    }
  }
}
Example #4
void calibrate_kinect_depth(std::vector< DepthCalibrationPoint >& depth_values)
{
    std::vector< std::vector<Point3f> > pattern_points;
    calibrationPattern(pattern_points,
                       global::opt_pattern_width(),  global::opt_pattern_height(), global::opt_square_size(),
                       global::images_list.size());

    for (int i_image = 0; i_image < global::images_list.size(); ++i_image)
    {
        // Generate depth calibration points
        QString filename = global::images_list[i_image];
        QDir cur_image_dir (global::images_dir.absoluteFilePath(filename));
        std::string full_filename = cur_image_dir.absoluteFilePath("raw/color.png").toStdString();
        RGBDImage image;
        OpenniRGBDProcessor processor;
        processor.setFilterFlag(RGBDProcessorFlags::ComputeMapping, true);
        image.loadFromDir(cur_image_dir.absolutePath().toStdString(), &global::calibration, &processor);

        imshow_normalized("mapped depth", image.mappedDepth());
        imshow("color", image.rgb());

        std::vector<Point2f> current_view_corners;
        calibrationCorners(full_filename, "corners",
                           global::opt_pattern_width(), global::opt_pattern_height(),
                           current_view_corners, image.rgb(), 1, global::pattern_type);

        if (current_view_corners.size() != (global::opt_pattern_width()*global::opt_pattern_height()))
        {
            ntk_dbg(1) << "Corners not detected in " << cur_image_dir.absolutePath().toStdString();
            continue;
        }

        // FIXME: why do the rvecs and tvecs from calibrateCamera seem to be nonsense?
        // Call findExtrinsics again to get good chessboard pose estimates.

        cv::Mat1f H;
        estimate_checkerboard_pose(pattern_points[0],
                                   current_view_corners,
                                   global::calibration.rgb_intrinsics,
                                   H);
        Pose3D pose;
        pose.setCameraParametersFromOpencv(global::calibration.rgb_intrinsics);
        pose.setCameraTransform(H);
        ntk_dbg_print(pose, 1);

        cv::Mat3b debug_img;
        image.rgb().copyTo(debug_img);
        foreach_idx(pattern_i, pattern_points[0])
        {
            Point3f p = pose.projectToImage(pattern_points[0][pattern_i]);
            ntk_dbg_print(p, 1);
            float kinect_raw = image.mappedDepth()(p.y, p.x);
            ntk_dbg_print(kinect_raw, 1);
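            // A mapped depth near zero means no reading at this pixel; skip it.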
            if (kinect_raw < 1e-5) continue;
            float err = kinect_raw-p.z;
            cv::putText(debug_img, format("%d", (int)(err*1000)), Point(p.x, p.y), CV_FONT_NORMAL, 0.4, Scalar(255,0,0));
            ntk_dbg_print(pattern_points[0][pattern_i], 1);
            ntk_dbg_print(p, 1);
            ntk_dbg_print(kinect_raw, 1);
            depth_values.push_back(DepthCalibrationPoint(kinect_raw, p.z));
        }
        imshow("errors", debug_img);
        cv::waitKey(0);
    }
}
Example #5
template <class PointT>
bool RelativePoseEstimatorICPWithNormals<PointT> ::
computeRegistration(Pose3D& relative_pose,
                    PointCloudConstPtr source_cloud,
                    PointCloudConstPtr target_cloud,
                    PointCloudType& aligned_cloud)
{
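    // Point-to-plane estimation needs a recent PCL; otherwise fall back to the
    // base point-to-point ICP implementation.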
#ifndef HAVE_PCL_GREATER_THAN_1_2_0
    return super::computeRegistration(relative_pose, source_cloud, target_cloud, aligned_cloud);
#else
    pcl::IterativeClosestPoint<PointT, PointT> reg;
    typedef pcl::registration::TransformationEstimationPointToPlaneLLS<PointT, PointT> PointToPlane;
    boost::shared_ptr<PointToPlane> point_to_plane (new PointToPlane);
    reg.setTransformationEstimation (point_to_plane);

    // typedef TransformationEstimationRGBD<PointT, PointT> TransformRGBD;
    // boost::shared_ptr<TransformRGBD> transform_rgbd (new TransformRGBD);
    // reg.setTransformationEstimation (transform_rgbd);

#ifdef HAVE_PCL_GREATER_THAN_1_6_0 // rejectors are not well supported before 1.7

    boost::shared_ptr<pcl::registration::CorrespondenceRejectorDistance> rejector_distance (new pcl::registration::CorrespondenceRejectorDistance);
    rejector_distance->setInputSource<PointT>(source_cloud);
    rejector_distance->setInputTarget<PointT>(target_cloud);
    rejector_distance->setMaximumDistance(m_distance_threshold);
    reg.addCorrespondenceRejector(rejector_distance);

    boost::shared_ptr<pcl::registration::CorrespondenceRejectorSurfaceNormal> rejector_normal (new pcl::registration::CorrespondenceRejectorSurfaceNormal);
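    // Reject correspondences whose normals differ by more than 45 degrees
    // (dot product below cos(pi/4)).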
    rejector_normal->setThreshold(cos(M_PI/4.f));
    rejector_normal->initializeDataContainer<PointT, PointT>();
    rejector_normal->setInputSource<PointT>(source_cloud);
    rejector_normal->setInputTarget<PointT>(target_cloud);
    rejector_normal->setInputNormals<PointT, PointT>(source_cloud);
    rejector_normal->setTargetNormals<PointT, PointT>(target_cloud);
    reg.addCorrespondenceRejector(rejector_normal);

    boost::shared_ptr<pcl::registration::CorrespondenceRejectorOneToOne> rejector_one_to_one (new pcl::registration::CorrespondenceRejectorOneToOne);
    reg.addCorrespondenceRejector(rejector_one_to_one);

    typedef pcl::registration::CorrespondenceRejectorSampleConsensus<PointT> RejectorConsensusT;
    boost::shared_ptr<RejectorConsensusT> rejector_ransac (new RejectorConsensusT());
    rejector_ransac->setInputSource(source_cloud);
    rejector_ransac->setInputTarget(target_cloud);
    rejector_ransac->setInlierThreshold(m_ransac_outlier_threshold);
    rejector_ransac->setMaxIterations(100);
    reg.addCorrespondenceRejector(rejector_ransac);

    boost::shared_ptr<pcl::registration::CorrespondenceRejectorVarTrimmed> rejector_var_trimmed (new pcl::registration::CorrespondenceRejectorVarTrimmed());
    rejector_var_trimmed->setInputSource<PointT>(source_cloud);
    rejector_var_trimmed->setInputTarget<PointT>(target_cloud);
    rejector_var_trimmed->setMinRatio(0.1f);
    rejector_var_trimmed->setMaxRatio(0.75f);
    reg.addCorrespondenceRejector(rejector_var_trimmed);

    boost::shared_ptr<pcl::registration::CorrespondenceRejectorTrimmed> rejector_trimmed (new pcl::registration::CorrespondenceRejectorTrimmed());
    rejector_trimmed->setMinCorrespondences(static_cast<int>(0.1f * source_cloud->size()));
    rejector_trimmed->setOverlapRatio(0.5f);
    reg.addCorrespondenceRejector(rejector_trimmed);
#endif

#if 0
    ntk::Mesh target_mesh;
    pointCloudToMesh(target_mesh, *target_cloud);
    target_mesh.saveToPlyFile("debug_target.ply");

    ntk::Mesh source_mesh;
    pointCloudToMesh(source_mesh, *source_cloud);
    source_mesh.saveToPlyFile("debug_source.ply");
#endif

    reg.setMaximumIterations (m_max_iterations);
    reg.setTransformationEpsilon (1e-10);
    reg.setMaxCorrespondenceDistance (m_distance_threshold);
    reg.setRANSACOutlierRejectionThreshold(m_ransac_outlier_threshold);
#ifdef HAVE_PCL_GREATER_THAN_1_6_0
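    // setInputCloud was renamed to setInputSource in newer PCL releases.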
    reg.setInputSource (source_cloud);
#else
    reg.setInputCloud (source_cloud);
#endif
    reg.setInputTarget (target_cloud);
    reg.align (aligned_cloud);

    if (!reg.hasConverged())
    {
      ntk_dbg(1) << "ICP did not converge, ignoring.";
      return false;
    }

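    // Same debug dump as in the base estimator; enable by switching to if (1).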
    if (0)
    {
        ntk::Mesh mesh1, mesh2;
        pointCloudToMesh(mesh1, aligned_cloud);
        pointCloudToMesh(mesh2, *target_cloud);
        mesh1.saveToPlyFile("debug_icp_1.ply");
        mesh2.saveToPlyFile("debug_icp_2.ply");
    }

#if 0
    ntk::Mesh debug_mesh;
    pointCloudToMesh(debug_mesh, aligned_cloud);
    debug_mesh.saveToPlyFile("debug_aligned.ply");
#endif

    ntk_dbg_print(reg.getFitnessScore(), 1);

    Eigen::Matrix4f t = reg.getFinalTransformation ();
    cv::Mat1f T(4,4);
    // Copy the Eigen 4x4 transform into a cv::Mat1f element by element.
    for (int r = 0; r < 4; ++r)
        for (int c = 0; c < 4; ++c)
            T(r,c) = t(r,c);

    relative_pose.setCameraTransform(T);
#endif
    return true;
}