// Builds a CalibrationParams from an OpenCV FileStorage node for one camera.
//
// Reads the 3x3 camera matrix, 5x1 distortion coefficients, and the 3x3/3x1
// rotation/translation, then packs the extrinsics into a 4x4 Eigen transform
// and stores its inverse (camera -> world, presumably — confirm against callers).
CalibrationParams CalibrationParams::createCalibrationParams(std::string serial, cv::FileNode node) {
    cv::Mat camera_matrix(cv::Mat::eye(3, 3, CV_64FC1));
    cv::Mat dist_coeffs(5, 1, CV_64FC1, cv::Scalar::all(0));
    cv::Mat r_mat(3, 3, CV_64FC1, cv::Scalar::all(0));
    cv::Mat t(3, 1, CV_64FC1, cv::Scalar::all(0));

    node["camera_matrix"] >> camera_matrix;
    node["distortion_coefficients"] >> dist_coeffs;
    node["rotation"] >> r_mat;
    node["translation"] >> t;

    // Index intrinsics with at<double>(row, col) rather than the previous
    // flat ptr<double>(0)[k] form: the flat index read past row 0 and only
    // worked because the Mat happened to be continuous ([4] == (1,1),
    // [5] == (1,2)). The explicit form is safe for any layout and makes the
    // intended elements (fx, fy, cx, cy) obvious.
    float fx = static_cast<float>(camera_matrix.at<double>(0, 0));
    float fy = static_cast<float>(camera_matrix.at<double>(1, 1));
    float cx = static_cast<float>(camera_matrix.at<double>(0, 2));
    float cy = static_cast<float>(camera_matrix.at<double>(1, 2));

    // Assemble [R | t] into a homogeneous 4x4 (double -> float narrowing is
    // intentional; downstream consumers use Matrix4f).
    Eigen::Matrix4f tmp_mat = Eigen::Matrix4f::Identity();
    for (int i = 0; i < 3; i++) {
        for (int j = 0; j < 3; j++) {
            tmp_mat(i, j) = r_mat.at<double>(i, j);
        }
    }
    for (int i = 0; i < 3; i++) {
        tmp_mat(i, 3) = t.at<double>(i);
    }

    return { serial, camera_matrix, dist_coeffs, r_mat, t, tmp_mat.inverse(), fx, fy, cx, cy };
}
// Per-frame update + render callback.
//
// Lazily creates the terrain on first call, animates the camera along a
// scripted path clamped to the terrain height, renders the world, and pumps
// the putki live-update connection if one is active.
void frame(laxion::appwindow::input_batch *input, float deltatime) {
    // Lazy init: terrain is created on the first frame, not at startup.
    if (!s_terrain) {
        s_terrain = terrain::create(s_terrain_config);
    }

    int x0, y0, x1, y1;
    laxion::appwindow::get_client_rect(window, &x0, &y0, &x1, &y1);

    // BUG FIX: height was previously computed as (y1 - x0), mixing the x and
    // y client-rect coordinates; the viewport height is (y1 - y0).
    kosmos::render::begin(x1 - x0, y1 - y0, true, true, 0xff00ff);

    float out[16];

    // Scripted fly-through: advance time, weave in x, march forward in z,
    // and keep the camera 1 unit above the terrain surface.
    gtime += 0.1f * deltatime;
    camera_pos[0] = sinf(0.3f * gtime) * 100;
    camera_pos[2] = -80.0f * gtime + cosf(0.05f * gtime) * 100;
    camera_pos[1] = terrain::get_terrain_height(s_terrain, camera_pos[0], camera_pos[2]) + 1.0f;
    camera_matrix(out);

    glEnable(GL_CULL_FACE);
    glEnable(GL_DEPTH_TEST);
    draw_world(out);

    kosmos::render::end();

    // Live-update housekeeping: drop a dead connection, otherwise pump it.
    if (liveupdate) {
        if (!putki::liveupdate::connected(liveupdate)) {
            putki::liveupdate::disconnect(liveupdate);
            liveupdate = 0;
        } else {
            putki::liveupdate::update(liveupdate);
        }
    }
}
int main(int argc, char *argv[]) { auto options = std::unique_ptr<getopt_t, void(*)(getopt_t*)> (getopt_create(), getopt_destroy); // getopt_t *options = getopt_create(); getopt_add_bool(options.get(), 'h', "help", 0, "Show this help"); getopt_add_bool(options.get(), 'd', "debug", 0, "Enable debugging output (slow)"); getopt_add_bool(options.get(), 'w', "window", 1, "Show the detected tags in a window"); getopt_add_bool(options.get(), 'q', "quiet", 0, "Reduce output"); getopt_add_int(options.get(), '\0', "border", "1", "Set tag family border size"); getopt_add_int(options.get(), 't', "threads", "4", "Use this many CPU threads"); getopt_add_double(options.get(), 'x', "decimate", "1.0", "Decimate input image by this factor"); getopt_add_double(options.get(), 'b', "blur", "0.0", "Apply low-pass blur to input"); getopt_add_bool(options.get(), '0', "refine-edges", 1, "Spend more time trying to align edges of tags"); getopt_add_bool(options.get(), '1', "refine-decode", 0, "Spend more time trying to decode tags"); getopt_add_bool(options.get(), '2', "refine-pose", 0, "Spend more time trying to precisely localize tags"); getopt_add_double(options.get(), 's', "size", "0.04047", "Physical side-length of the tag (meters)"); getopt_add_int(options.get(), 'c', "camera", "0", "Camera ID"); getopt_add_int(options.get(), 'i', "tag_id", "-1", "Tag ID (-1 for all tags in family)"); if (!getopt_parse(options.get(), argc, argv, 1) || getopt_get_bool(options.get(), "help")) { printf("Usage: %s [options]\n", argv[0]); getopt_do_usage(options.get()); exit(0); } AprilTagDetector tag_detector(options); auto lcm = std::make_shared<lcm::LCM>(); Eigen::Matrix3d camera_matrix = Eigen::Matrix3d::Identity(); // camera_matrix(0,0) = bot_camtrans_get_focal_length_x(mCamTransLeft); // camera_matrix(1,1) = bot_camtrans_get_focal_length_y(mCamTransLeft); // camera_matrix(0,2) = bot_camtrans_get_principal_x(mCamTransLeft); // camera_matrix(1,2) = bot_camtrans_get_principal_y(mCamTransLeft); 
camera_matrix(0,0) = 535.04778754; camera_matrix(1,1) = 533.37100256; camera_matrix(0,2) = 302.83654976; camera_matrix(1,2) = 237.69023961; Eigen::Vector4d distortion_coefficients(-7.74010810e-02, -1.97835565e-01, -4.47956948e-03, -5.42361499e-04); // camera matrix: // [[ 535.04778754 0. 302.83654976] // [ 0. 533.37100256 237.69023961] // [ 0. 0. 1. ]] // distortion coefficients: [ -7.74010810e-02 -1.97835565e-01 -4.47956948e-03 -5.42361499e-04 // 9.30985112e-01] cv::VideoCapture capture(getopt_get_int(options.get(), "camera")); if (!capture.isOpened()) { std::cout << "Cannot open the video cam" << std::endl; return -1; } cv::Mat frame; Eigen::Isometry3d tag_to_camera = Eigen::Isometry3d::Identity(); crazyflie_t::webcam_pos_t tag_to_camera_msg; while (capture.read(frame)) { std::vector<TagMatch> tags = tag_detector.detectTags(frame); if (tags.size() > 0) { tag_to_camera = getRelativeTransform(tags[0], camera_matrix, distortion_coefficients, tag_detector.getTagSize()); tag_to_camera_msg = encodeWebcamPos(tag_to_camera); tag_to_camera_msg.frame_id = 1; } else { tag_to_camera_msg = encodeWebcamPos(tag_to_camera); tag_to_camera_msg.frame_id = -1; } tag_to_camera_msg.timestamp = timestamp_now(); lcm->publish("WEBCAM_POS", &tag_to_camera_msg); } return 0; }
void SolvePNP_method(vector <Point3f> points3d, vector <Point2f> imagePoints, MatrixXf &R,Vector3f &t, MatrixXf &xcam_, MatrixXf K, const PointCloud<PointXYZRGB>::Ptr& cloudRGB,//projected data const PointCloud<PointXYZ>::Ptr& cloud_laser_cord,//laser coordinates MatrixXf& points_projected,Mat image, PointCloud<PointXYZ>::ConstPtr cloud) { Mat_<float> camera_matrix (3, 3); camera_matrix(0,0)=K(0,0); camera_matrix(0,1)=K(0,1); camera_matrix(0,2)=K(0,2); camera_matrix(1,0)=K(1,0); camera_matrix(1,1)=K(1,1); camera_matrix(1,2)=K(1,2); camera_matrix(2,0)=K(2,0); camera_matrix(2,1)=K(2,1); camera_matrix(2,2)=K(2,2); vector<double> rv(3), tv(3); Mat rvec(rv),tvec(tv); Mat_<float> dist_coef (5, 1); dist_coef = 0; cout <<camera_matrix<<endl; cout<< imagePoints<<endl; cout<< points3d <<endl; solvePnP( points3d, imagePoints, camera_matrix, dist_coef, rvec, tvec); //////////////////////////////////////////////////7 //convert data //////////////////////////////////////////////////7 double rot[9] = {0}; Mat R_2(3,3,CV_64FC1,rot); //change results only debugging Rodrigues(rvec, R_2); R=Matrix3f::Zero(3,3); t=Vector3f(0,0,0); double* _r = R_2.ptr<double>(); double* _t = tvec.ptr<double>(); t(0)=_t[0]; t(1)=_t[1]; t(2)=_t[2]; R(0,0)=_r[0]; R(0,1)=_r[1]; R(0,2)=_r[2]; R(1,0)=_r[3]; R(1,1)=_r[4]; R(1,2)=_r[5]; R(2,0)=_r[6]; R(2,1)=_r[7]; R(2,2)=_r[8]; points_projected=MatrixXf::Zero(2,cloud->points.size ()); for (int j=0;j<cloud->points.size ();j++){ PointCloud<PointXYZRGB> PointAuxRGB; PointAuxRGB.points.resize(1); Vector3f xw=Vector3f(cloud->points[j].x, cloud->points[j].y, cloud->points[j].z); xw=K*( R*xw+t); xw= xw/xw(2); int col,row; col=(int)xw(0); row=(int)xw(1); points_projected(0,j)=(int)xw(0); points_projected(1,j)=(int)xw(1); int b,r,g; if ((col<0 || row<0) || (col>image.cols || row>image.rows)){ r=0; g=0; b=0; }else{ // b = img->imageData[img->widthStep * row+ col * 3]; // g = img->imageData[img->widthStep * row+ col * 3 + 1]; //r = img->imageData[img->widthStep * row+ 
col * 3 + 2]; r=image.at<cv::Vec3b>(row,col)[0]; g=image.at<cv::Vec3b>(row,col)[1]; b=image.at<cv::Vec3b>(row,col)[2]; //std::cout << image.at<cv::Vec3b>(row,col)[0] << " " << image.at<cv::Vec3b>(row,col)[1] << " " << image.at<cv::Vec3b>(row,col)[2] << std::endl; } //std::cout << "( "<<r <<","<<g<<","<<b<<")"<<std::endl; uint8_t r_i = r; uint8_t g_i = g; uint8_t b_i = b; uint32_t rgb = ((uint32_t)r << 16 | (uint32_t)g << 8 | (uint32_t)b); //int32_t rgb = (r_i << 16) | (g_i << 8) | b_i; PointAuxRGB.points[0].x=cloud->points[j].x; PointAuxRGB.points[0].y=cloud->points[j].y; PointAuxRGB.points[0].z=cloud->points[j].z; PointAuxRGB.points[0].rgb=*reinterpret_cast<float*>(&rgb); cloudRGB->points.push_back (PointAuxRGB.points[0]); //project point to the image } }