//! [Main function]
int main()
//! [Main function]
{
  //! [Create data structures]
  std::vector< cv::Point3d > wX;
  std::vector< cv::Point2d >  x;
  //! [Create data structures]

  //! [Simulation]
  // Ground truth pose used to generate the data
  cv::Mat ctw_truth = (cv::Mat_<double>(3,1) << -0.1, 0.1, 1.2); // Translation vector
  cv::Mat crw_truth = (cv::Mat_<double>(3,1) << CV_PI/180*(5), CV_PI/180*(0), CV_PI/180*(45)); // Rotation vector
  cv::Mat cRw_truth(3,3,cv::DataType<double>::type); // Rotation matrix
  cv::Rodrigues(crw_truth, cRw_truth);

  // Input data: 3D coordinates of at least 6 non-coplanar points
  double L = 0.2;
  wX.push_back( cv::Point3d(  -L, -L, 0  ) ); // wX_0 ( -L, -L, 0  )^T
  wX.push_back( cv::Point3d( 2*L, -L, 0.2) ); // wX_1 ( 2L, -L, 0.2)^T
  wX.push_back( cv::Point3d(   L,  L, 0.2) ); // wX_2 (  L,  L, 0.2)^T
  wX.push_back( cv::Point3d(  -L,  L, 0  ) ); // wX_3 ( -L,  L, 0  )^T
  wX.push_back( cv::Point3d(-2*L,  L, 0  ) ); // wX_4 (-2L,  L, 0  )^T
  wX.push_back( cv::Point3d(   0,  0, 0.5) ); // wX_5 (  0,  0, 0.5)^T

  // Input data: 2D coordinates of the points on the image plane
  for (size_t i = 0; i < wX.size(); i++) {
    cv::Mat cX = cRw_truth*cv::Mat(wX[i]) + ctw_truth; // Update cX, cY, cZ
    x.push_back( cv::Point2d( cX.at<double>(0, 0)/cX.at<double>(2, 0),
                              cX.at<double>(1, 0)/cX.at<double>(2, 0) ) ); // x = (cX/cZ, cY/cZ)
  }
  //! [Simulation]

  //! [Call function]
  cv::Mat ctw(3, 1, CV_64F); // Translation vector
  cv::Mat cRw(3, 3, CV_64F); // Rotation matrix

  pose_dlt(wX, x, ctw, cRw);
  //! [Call function]

  std::cout << "ctw (ground truth):\n" << ctw_truth << std::endl;
  std::cout << "ctw (computed with DLT):\n" << ctw << std::endl;
  std::cout << "cRw (ground truth):\n" << cRw_truth << std::endl;
  std::cout << "cRw (computed with DLT):\n" << cRw << std::endl;

  return 0;
}
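// The pose_dlt() routine called above is not part of this listing. The sketch
// below is one possible implementation of the Direct Linear Transform it is
// assumed to perform: stack the projection constraints of the n >= 6 point
// correspondences into a 2n x 12 matrix A, take the right singular vector of A
// associated with the smallest singular value, and unpack it into cRw and ctw
// after scaling so that the third row of cRw has unit norm.
void pose_dlt(const std::vector<cv::Point3d> &wX, const std::vector<cv::Point2d> &x,
              cv::Mat &ctw, cv::Mat &cRw)
{
  int npoints = (int)wX.size();
  cv::Mat A(2 * npoints, 12, CV_64F, cv::Scalar(0));
  for (int i = 0; i < npoints; i++) {
    // Row 2i encodes x_i * (r3.wX + tz) = r1.wX + tx
    A.at<double>(2 * i, 0) = wX[i].x;
    A.at<double>(2 * i, 1) = wX[i].y;
    A.at<double>(2 * i, 2) = wX[i].z;
    A.at<double>(2 * i, 3) = 1;
    A.at<double>(2 * i, 8)  = -x[i].x * wX[i].x;
    A.at<double>(2 * i, 9)  = -x[i].x * wX[i].y;
    A.at<double>(2 * i, 10) = -x[i].x * wX[i].z;
    A.at<double>(2 * i, 11) = -x[i].x;
    // Row 2i+1 encodes y_i * (r3.wX + tz) = r2.wX + ty
    A.at<double>(2 * i + 1, 4) = wX[i].x;
    A.at<double>(2 * i + 1, 5) = wX[i].y;
    A.at<double>(2 * i + 1, 6) = wX[i].z;
    A.at<double>(2 * i + 1, 7) = 1;
    A.at<double>(2 * i + 1, 8)  = -x[i].y * wX[i].x;
    A.at<double>(2 * i + 1, 9)  = -x[i].y * wX[i].y;
    A.at<double>(2 * i + 1, 10) = -x[i].y * wX[i].z;
    A.at<double>(2 * i + 1, 11) = -x[i].y;
  }

  // Null space of A: right singular vector associated with the smallest
  // singular value (OpenCV sorts singular values in decreasing order)
  cv::Mat w, u, vt;
  cv::SVD::compute(A, w, u, vt);
  cv::Mat h = vt.row(vt.rows - 1).clone();

  if (h.at<double>(0, 11) < 0) // enforce tz > 0 (points in front of the camera)
    h *= -1;

  // Scale so that the third row of the rotation has unit norm
  double norm = std::sqrt(h.at<double>(0, 8) * h.at<double>(0, 8) +
                          h.at<double>(0, 9) * h.at<double>(0, 9) +
                          h.at<double>(0, 10) * h.at<double>(0, 10));
  h /= norm;

  // Unpack [r11 r12 r13 tx | r21 r22 r23 ty | r31 r32 r33 tz]
  for (int i = 0; i < 3; i++) {
    ctw.at<double>(i, 0) = h.at<double>(0, 4 * i + 3);
    for (int j = 0; j < 3; j++)
      cRw.at<double>(i, j) = h.at<double>(0, 4 * i + j);
  }
}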
void get_pose_and_image(const std::vector<AprilTags::TagDetection> &detections,
                        const cv_bridge::CvImagePtr &cv_ptr,
                        const std_msgs::Header &header) {

  cv::Mat image_rgb;
  cv::cvtColor(cv_ptr->image, image_rgb, cv::COLOR_GRAY2RGB);

  if (!detections.empty()) {
    std::vector<Point2> pi; // Points in image
    std::vector<Point3> pw; // Points in world
    for (auto it = detections.begin(); it != detections.end(); it++) {
      const int id = it->id;
      const Point2 c2 = Point2(it->cxy.first, it->cxy.second);

      for (int j = 0; j < 4; j++) {
        const Point2 p2 = Point2(it->p[j].first, it->p[j].second);
        pi.push_back(p2);
        Point3 p3(tagsWorld[id].p[j].x, tagsWorld[id].p[j].y, 0.0);
        pw.push_back(p3);

        // Display tag corners
        cv::circle(image_rgb, p2, 6, colors[j], 2);
      }
      // Display tag id
      std::ostringstream ss;
      ss << id;
      auto color = cv::Scalar(0, 255, 255);
      if (tagsWorld.find(id) != tagsWorld.end()) {
        color = cv::Scalar(255,255,0);
      }
      cv::putText(image_rgb, ss.str(), Point2(c2.x - 5, c2.y + 5),
                  cv::FONT_HERSHEY_PLAIN, 2, color, 2);
    }

    // Get pose: solvePnP returns the rotation (Rodrigues vector r) and the
    // translation cTw of the world origin expressed in the camera frame
    static cv::Mat r = cv::Mat::zeros(cv::Size(1, 3), CV_64F);
    static cv::Mat cTw = cv::Mat::zeros(cv::Size(1, 3), CV_64F);
    cv::Mat wTc(cv::Size(1, 3), CV_64F); // camera position in the world frame
    cv::Mat cRw(cv::Size(3, 3), CV_64F), wRc(cv::Size(3, 3), CV_64F);
    cv::solvePnP(pw, pi, K, D, r, cTw, false);
    cv::Rodrigues(r, cRw);
    // Invert the transform: wRc = cRw^T, wTc = -wRc * cTw
    wRc = cRw.inv();
    wTc = -wRc * cTw;
    // ROS_INFO("%f, %f, %f", r.at<double>(0,0), r.at<double>(1,0), r.at<double>(2,0));
    cv::Mat q = rodriguesToQuat(r);

    // Publish
    geometry_msgs::PoseStamped pose_cam;
    pose_cam.header.stamp = header.stamp;
    pose_cam.header.frame_id = "0";

    double *pt = wTc.ptr<double>();
    pose_cam.pose.position.x = pt[0];
    pose_cam.pose.position.y = pt[1];
    pose_cam.pose.position.z = pt[2];

    double *pq = q.ptr<double>();
    pose_cam.pose.orientation.w = pq[0];
    pose_cam.pose.orientation.x = pq[1];
    pose_cam.pose.orientation.y = pq[2];
    pose_cam.pose.orientation.z = pq[3];

    pose_pub.publish(pose_cam);

  }

  // Publish image
  cv_bridge::CvImage cv_image(header, sensor_msgs::image_encodings::BGR8,
                              image_rgb);
  image_pub.publish(cv_image.toImageMsg());
  // cv::imshow("image", image_rgb);
  // cv::waitKey(1);
}
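// rodriguesToQuat() is used above but not defined in this listing. A minimal
// sketch, under the assumption that it takes the 3x1 CV_64F rotation vector r
// and returns a 4x1 CV_64F quaternion ordered (w, x, y, z), which is how
// pq[0..3] is read when filling pose_cam.pose.orientation:
cv::Mat rodriguesToQuat(const cv::Mat &r) {
  cv::Mat q = cv::Mat::zeros(4, 1, CV_64F);
  const double angle = cv::norm(r); // rotation angle is the norm of the Rodrigues vector
  if (angle < 1e-12) {
    q.at<double>(0, 0) = 1.0; // identity rotation
    return q;
  }
  const double s = std::sin(0.5 * angle) / angle;
  q.at<double>(0, 0) = std::cos(0.5 * angle);   // w
  q.at<double>(1, 0) = r.at<double>(0, 0) * s;  // x
  q.at<double>(2, 0) = r.at<double>(1, 0) * s;  // y
  q.at<double>(3, 0) = r.at<double>(2, 0) * s;  // z
  return q;
}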
void cam_callback(const sensor_msgs::ImageConstPtr &image,
                  const sensor_msgs::CameraInfoConstPtr &cinfo) {
  // Get camera info
  static bool init_cam = false;
  static cv::Mat K = cv::Mat::zeros(cv::Size(3, 3), CV_64F);
  static cv::Mat D = cv::Mat::zeros(cv::Size(1, 5), CV_64F);

  // Stop if camera not calibrated
  if (cinfo->K[0] == 0.0) throw std::runtime_error("Camera not calibrated.");

  // TODO: convert to function later
  // Assign camera info only once
  if (!init_cam) {
    for (int i = 0; i < 3; ++i) {
      double *pk = K.ptr<double>(i);
      for (int j = 0; j < 3; ++j) {
        pk[j] = cinfo->K[3 * i + j];
      }
    }
    double *pd = D.ptr<double>();
    for (int k = 0; k < 5; k++) {
      pd[k] = cinfo->D[k];
    }
    init_cam = true;
  }

  // Use cv_bridge to convert to a grayscale image
  cv_bridge::CvImagePtr cv_ptr;
  // use toCvCopy because we will modify the image
  cv_ptr = cv_bridge::toCvCopy(image, sensor_msgs::image_encodings::MONO8);

  cv::Mat image_rgb;
  cv::cvtColor(cv_ptr->image, image_rgb, cv::COLOR_GRAY2RGB);

#if defined(BUILD_UMICH)
  // Use apriltag_umich
  // Currently not using this version
  static april_tag_family_t *tf = tag36h11_create();
  static april_tag_detector_t *td = april_tag_detector_create(tf);

  image_u8_t *im = image_u8_create_from_gray(
      cv_ptr->image.cols, cv_ptr->image.rows, cv_ptr->image.data);
  zarray_t *detections = april_tag_detector_detect(td, im);

  ROS_INFO("Tags detected: %d", zarray_size(detections));

  for (int i = 0; i < zarray_size(detections); i++) {
    april_tag_detection_t *det;
    zarray_get(detections, i, &det);

    for (int j = 0; j < 4; j++) {
      const Point2 p = Point2(det->p[j][0], det->p[j][1]);
    }
    april_tag_detection_destroy(det);
  }

  zarray_destroy(detections);
  image_u8_destroy(im);

#elif defined(BUILD_MIT)
  // Use apriltag_mit
  static AprilTags::TagDetector tag_detector(AprilTags::tagCodes36h11);
  std::vector<AprilTags::TagDetection> detections =
      tag_detector.extractTags(cv_ptr->image);

  // Only do work if at least one tag was detected
  if (!detections.empty()) {
    std::vector<Point2> pi;  // Points in image
    std::vector<Point3> pw;  // Points in world
    for (auto it = detections.begin(); it != detections.end(); it++) {
      const int id = it->id;
      const Point2 c2 = Point2(it->cxy.first, it->cxy.second);

      for (int j = 0; j < 4; j++) {
        const Point2 p2 = Point2(it->p[j].first, it->p[j].second);
        pi.push_back(p2);
        Point3 p3(tagsWorld[id].p[j].x, tagsWorld[id].p[j].y, 0.0);
        pw.push_back(p3);

        // Display tag corners
        cv::circle(image_rgb, p2, 6, colors[j], 2);
      }
      // Display tag id
      std::ostringstream ss;
      ss << id;
      auto color = cv::Scalar(0, 255, 255);
      if (tagsWorld.find(id) != tagsWorld.end()) {
        color = cv::Scalar(255, 255, 0);
      }
      cv::putText(image_rgb, ss.str(), Point2(c2.x - 5, c2.y + 5),
                  cv::FONT_HERSHEY_PLAIN, 2, color, 2);
    }

    // Get pose: solvePnP returns the rotation (Rodrigues vector r) and the
    // translation cTw of the world origin expressed in the camera frame
    static cv::Mat r = cv::Mat::zeros(cv::Size(1, 3), CV_64F);
    static cv::Mat cTw = cv::Mat::zeros(cv::Size(1, 3), CV_64F);
    cv::Mat wTc(cv::Size(1, 3), CV_64F); // camera position in the world frame
    cv::Mat cRw(cv::Size(3, 3), CV_64F), wRc(cv::Size(3, 3), CV_64F);
    cv::solvePnP(pw, pi, K, D, r, cTw, false);
    cv::Rodrigues(r, cRw);
    // Invert the transform: wRc = cRw^T, wTc = -wRc * cTw
    wRc = cRw.inv();
    wTc = -wRc * cTw;
    // ROS_INFO("%f, %f, %f", r.at<double>(0,0), r.at<double>(1,0),
    // r.at<double>(2,0));
    cv::Mat q = rodriguesToQuat(r);

    // Publish
    geometry_msgs::PoseStamped pose_cam;
    pose_cam.header.stamp = image->header.stamp;
    pose_cam.header.frame_id = "0";

    double *pt = wTc.ptr<double>();
    pose_cam.pose.position.x = pt[0];
    pose_cam.pose.position.y = pt[1];
    pose_cam.pose.position.z = pt[2];

    double *pq = q.ptr<double>();
    pose_cam.pose.orientation.w = pq[0];
    pose_cam.pose.orientation.x = pq[1];
    pose_cam.pose.orientation.y = pq[2];
    pose_cam.pose.orientation.z = pq[3];

    pose_pub.publish(pose_cam);
  }
#endif

  // Publish image
  cv_bridge::CvImage cv_image(image->header, sensor_msgs::image_encodings::BGR8,
                              image_rgb);
  image_pub.publish(cv_image.toImageMsg());
  // cv::imshow("image", image_rgb);
  // cv::waitKey(1);
}