Example #1
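Default construction of RGBDCalibration, followed by the intrinsics estimation routines of OpenniGrabber and NiteRGBDGrabber, which recover fx, fy, cx and cy by probing OpenNI's projective/real-world conversion.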
RGBDCalibration::RGBDCalibration() :
    zero_rgb_distortion(true),
    zero_depth_distortion(true),
    depth_pose(0),
    rgb_pose(0),
    depth_baseline(7.5e-02),
    depth_offset(1090),
    depth_multiplicative_correction_factor(1.0),
    depth_additive_correction_factor(0.0),
    raw_rgb_size(0,0),
    rgb_size(0,0),
    raw_depth_size(0,0),
    depth_size(0,0)
{
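    // All rotation matrices default to identity and all translations to zero.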
    R_extrinsics = Mat1d(3,3);
    setIdentity(R_extrinsics);
    T_extrinsics = Mat1d(3,1);
    T_extrinsics = 0.0;

    R = Mat1d(3,3);
    setIdentity(R);
    T = Mat1d(3,1);
    T = 0.0;
}
void OpenniGrabber :: estimateCalibration()
{
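    // Recover the depth intrinsics by probing OpenNI's projective <-> real-world
    // conversion; the result of the first conversion below is not used.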
    XnPoint3D p;
    p.X = 0; p.Y = 0; p.Z = -1;
    m_ni_depth_generator.ConvertProjectiveToRealWorld(1, &p, &p);

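    // Projecting the real-world point (0, 0, -1) back to pixel coordinates
    // yields the principal point (cx, cy).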
    p.X = 0; p.Y = 0; p.Z = -1;
    m_ni_depth_generator.ConvertRealWorldToProjective(1, &p, &p);
    double cx = p.X;
    double cy = p.Y;

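    // A one-unit real-world offset at Z = -1 projects one focal length away from
    // the principal point; the sign on fx reflects the projection's x-axis convention.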
    p.X = 1; p.Y = 1; p.Z = -1;
    m_ni_depth_generator.ConvertRealWorldToProjective(1, &p, &p);

    double fx = -(p.X-cx);
    double fy = p.Y-cy;

    // These factors were estimated using chessboard calibration.
    // They seem to accurately correct the bias in object sizes output by
    // the default parameters.
    const double f_correction_factor = 528.0/570.34;
    fx *= f_correction_factor;
    fy *= f_correction_factor;

    // FIXME: this bias has not been observed in recent experiments.
    // const double cy_correction_factor = 267.0/240.0;
    const double cy_correction_factor = 1.0;
    cy *= cy_correction_factor;

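    // Scale the intrinsics down to the subsampled depth resolution.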
    fx /= m_subsampling_factor;
    fy /= m_subsampling_factor;
    cx /= m_subsampling_factor;
    cy /= m_subsampling_factor;

    m_calib_data = new RGBDCalibration();

    xn::DepthMetaData depthMD;
    m_ni_depth_generator.GetMetaData(depthMD);

    int rgb_width = 0;
    int rgb_height = 0;
    if (m_has_rgb)
    {
        xn::ImageMetaData rgbMD;
        m_ni_rgb_generator.GetMetaData(rgbMD);
        rgb_width = rgbMD.XRes();
        rgb_height = rgbMD.YRes();
    }

    int depth_width = depthMD.XRes() / m_subsampling_factor;
    int depth_height = depthMD.YRes() / m_subsampling_factor;

    m_calib_data->setRawRgbSize(cv::Size(rgb_width, rgb_height));
    m_calib_data->setRgbSize(cv::Size(rgb_width, rgb_height));
    m_calib_data->raw_depth_size = cv::Size(depth_width, depth_height);
    m_calib_data->depth_size = cv::Size(depth_width, depth_height);

    float width_ratio = float(rgb_width)/depth_width;
    float height_ratio = float(rgb_height)/depth_height;

    float rgb_fx = fx * width_ratio;
    // Pixels are square on a Kinect.
    // Image height gets cropped when going from 1280x1024 to 640x480,
    // so the scaling ratio remains 2 in both dimensions and rgb_fy equals rgb_fx.
    float rgb_fy = rgb_fx;
    float rgb_cx = cx * width_ratio;
    float rgb_cy = cy * width_ratio;

    m_calib_data->rgb_intrinsics = cv::Mat1d(3,3);
    setIdentity(m_calib_data->rgb_intrinsics);
    m_calib_data->rgb_intrinsics(0,0) = rgb_fx;
    m_calib_data->rgb_intrinsics(1,1) = rgb_fy;
    m_calib_data->rgb_intrinsics(0,2) = rgb_cx;
    m_calib_data->rgb_intrinsics(1,2) = rgb_cy;
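    // Resulting matrix layout:
    //   [ rgb_fx      0  rgb_cx ]
    //   [      0 rgb_fy  rgb_cy ]
    //   [      0      0       1 ]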

    m_calib_data->rgb_distortion = Mat1d(1,5);
    m_calib_data->rgb_distortion = 0.;
    m_calib_data->zero_rgb_distortion = true;

    // After getAlternativeViewpoint, both cameras have the same parameters.

    m_calib_data->depth_intrinsics = cv::Mat1d(3,3);
    setIdentity(m_calib_data->depth_intrinsics);
    m_calib_data->depth_intrinsics(0,0) = fx;
    m_calib_data->depth_intrinsics(1,1) = fy;
    m_calib_data->depth_intrinsics(0,2) = cx;
    m_calib_data->depth_intrinsics(1,2) = cy;

    m_calib_data->depth_distortion = Mat1d(1,5);
    m_calib_data->depth_distortion = 0.;
    m_calib_data->zero_depth_distortion = true;

    m_calib_data->R = Mat1d(3,3);
    setIdentity(m_calib_data->R);

    m_calib_data->T = Mat1d(3,1);
    m_calib_data->T = 0.;

    m_calib_data->depth_pose = new Pose3D();
    m_calib_data->depth_pose->setCameraParametersFromOpencv(m_calib_data->depth_intrinsics);

    m_calib_data->rgb_pose = new Pose3D();
    m_calib_data->rgb_pose->toRightCamera(m_calib_data->rgb_intrinsics,
                                          m_calib_data->R,
                                          m_calib_data->T);

    m_calib_data->camera_type = "kinect-ni";
}
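For reference, here is a minimal self-contained sketch of the same probing trick, run against a simulated pinhole projection instead of a live OpenNI depth generator. The FakeDepthCamera type, its axis conventions and its sample values are illustrative assumptions, not part of the code above.

#include <cstdio>

// Hypothetical stand-in for ConvertRealWorldToProjective, using an ideal
// pinhole model whose axis conventions match the sign handling above.
struct FakeDepthCamera
{
    double fx, fy, cx, cy;

    void realWorldToProjective(double X, double Y, double Z,
                               double& u, double& v) const
    {
        u = cx + fx * X / Z;  // with Z = -1, a unit X offset lands fx pixels left of cx
        v = cy - fy * Y / Z;  // with Z = -1, a unit Y offset lands fy pixels below cy
    }
};

int main()
{
    FakeDepthCamera cam = { 570.0, 570.0, 320.0, 240.0 };  // ground truth to recover
    double u = 0.0, v = 0.0;

    // A point on the optical axis projects onto the principal point.
    cam.realWorldToProjective(0.0, 0.0, -1.0, u, v);
    const double cx = u;
    const double cy = v;

    // A one-unit offset at Z = -1 shifts the projection by one focal length.
    cam.realWorldToProjective(1.0, 1.0, -1.0, u, v);
    const double fx = -(u - cx);
    const double fy = v - cy;

    std::printf("fx=%g fy=%g cx=%g cy=%g\n", fx, fy, cx, cy);  // 570 570 320 240
    return 0;
}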
void NiteRGBDGrabber :: estimateCalibration()
{
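  // Same intrinsics probe as OpenniGrabber::estimateCalibration(), here at the
  // full depth resolution (no subsampling factor is applied).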
  XnPoint3D p;
  p.X = 0; p.Y = 0; p.Z = -1;
  m_ni_depth_generator.ConvertProjectiveToRealWorld(1, &p, &p);

  p.X = 0; p.Y = 0; p.Z = -1;
  m_ni_depth_generator.ConvertRealWorldToProjective(1, &p, &p);
  double cx = p.X;
  double cy = p.Y;  

  p.X = 1; p.Y = 1; p.Z = -1;
  m_ni_depth_generator.ConvertRealWorldToProjective(1, &p, &p);

  double fx = -(p.X-cx);
  double fy = p.Y-cy;

  const double f_correction_factor = 528.0/575.8;
  fx *= f_correction_factor;
  fy *= f_correction_factor;

  const double cy_correction_factor = 267.0/240.0;
  cy *= cy_correction_factor;

  m_calib_data = new RGBDCalibration();

  xn::DepthMetaData depthMD;
  m_ni_depth_generator.GetMetaData(depthMD);

  xn::ImageMetaData rgbMD;
  m_ni_rgb_generator.GetMetaData(rgbMD);

  int depth_width = depthMD.XRes();
  int depth_height = depthMD.YRes();

  int rgb_width = rgbMD.XRes();
  int rgb_height = rgbMD.YRes();

  m_calib_data->setRawRgbSize(cv::Size(rgb_width, rgb_height));
  m_calib_data->setRgbSize(cv::Size(rgb_width, rgb_height));
  m_calib_data->raw_depth_size = cv::Size(depth_width, depth_height);
  m_calib_data->depth_size = cv::Size(depth_width, depth_height);

  float width_ratio = float(rgb_width)/depth_width;
  float height_ratio = float(rgb_height)/depth_height;

  float rgb_fx = fx * width_ratio;
  // Pixels are square on a Kinect.
  // Image height gets cropped when going from 1280x1024 to 640x480,
  // so the scaling ratio remains 2 in both dimensions and rgb_fy equals rgb_fx.
  float rgb_fy = rgb_fx;
  float rgb_cx = cx * width_ratio;
  float rgb_cy = cy * width_ratio;

  m_calib_data->rgb_intrinsics = cv::Mat1d(3,3);
  setIdentity(m_calib_data->rgb_intrinsics);
  m_calib_data->rgb_intrinsics(0,0) = rgb_fx;
  m_calib_data->rgb_intrinsics(1,1) = rgb_fy;
  m_calib_data->rgb_intrinsics(0,2) = rgb_cx;
  m_calib_data->rgb_intrinsics(1,2) = rgb_cy;

  m_calib_data->rgb_distortion = Mat1d(1,5);
  m_calib_data->rgb_distortion = 0.;
  m_calib_data->zero_rgb_distortion = true;

  // After getAlternativeViewpoint, both cameras have the same parameters.

  m_calib_data->depth_intrinsics = cv::Mat1d(3,3);
  setIdentity(m_calib_data->depth_intrinsics);
  m_calib_data->depth_intrinsics(0,0) = fx;
  m_calib_data->depth_intrinsics(1,1) = fy;
  m_calib_data->depth_intrinsics(0,2) = cx;
  m_calib_data->depth_intrinsics(1,2) = cy;

  m_calib_data->depth_distortion = Mat1d(1,5);
  m_calib_data->depth_distortion = 0.;
  m_calib_data->zero_depth_distortion = true;

  m_calib_data->R = Mat1d(3,3);
  setIdentity(m_calib_data->R);

  m_calib_data->T = Mat1d(3,1);
  m_calib_data->T = 0.;

  m_calib_data->depth_pose = new Pose3D();
  m_calib_data->depth_pose->setCameraParametersFromOpencv(m_calib_data->depth_intrinsics);

  m_calib_data->rgb_pose = new Pose3D();
  m_calib_data->rgb_pose->toRightCamera(m_calib_data->rgb_intrinsics,
                                        m_calib_data->R,
                                        m_calib_data->T);
}
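Finally, a brief sketch of how intrinsics estimated this way are typically consumed: back-projecting a depth pixel into camera coordinates with the standard pinhole equations. The Intrinsics struct and backProject helper below are illustrative only, not part of the RGBDCalibration or Pose3D classes shown above.

#include <cstdio>

// Standard pinhole back-projection of pixel (u, v) with depth z (in meters)
// into camera-space coordinates, using intrinsics such as those estimated above.
struct Intrinsics { double fx, fy, cx, cy; };

static void backProject(const Intrinsics& k,
                        double u, double v, double z,
                        double& X, double& Y, double& Z)
{
    X = (u - k.cx) * z / k.fx;
    Y = (v - k.cy) * z / k.fy;
    Z = z;
}

int main()
{
    // Sample values close to what the grabbers produce for a 640x480 depth image.
    Intrinsics k = { 528.0, 528.0, 320.0, 240.0 };

    double X = 0.0, Y = 0.0, Z = 0.0;
    backProject(k, 400.0, 300.0, 1.5, X, Y, Z);
    std::printf("X=%.3f Y=%.3f Z=%.3f\n", X, Y, Z);
    return 0;
}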