// Per-frame callback: detect ALVAR markers in `image` and register one
// colored drawable per marker with the GlutViewer overlay.
// NOTE(review): relies on file-level globals declared elsewhere in this file
// (init, cam, d, marker_size, calibrationFilename).
void videocallback(IplImage *image) {
    static IplImage *rgba; // RGBA scratch image allocated on the first frame (active code path does not use it)

    // Work on an upright image; the flip is undone before returning.
    const bool upside_down = (image->origin ? true : false);
    if (upside_down) {
        cvFlip(image);
        image->origin = !image->origin;
    }

    if (init) {
        init = false;

        // Load the camera calibration; on failure fall back to a default
        // camera model at the current image resolution.
        cout << "Loading calibration: " << calibrationFilename.str();
        const bool calib_ok =
            cam.SetCalib(calibrationFilename.str().c_str(), image->width, image->height);
        if (calib_ok) {
            cout << " [Ok]" << endl;
        } else {
            cam.SetRes(image->width, image->height);
            cout << " [Fail]" << endl;
        }

        // Hand the camera's OpenGL projection matrix to the viewer.
        double proj[16];
        cam.GetOpenglProjectionMatrix(proj, image->width, image->height);
        GlutViewer::SetGlProjectionMatrix(proj);

        // Pre-scale all 32 drawable slots to the configured marker size.
        for (int slot = 0; slot < 32; ++slot)
            d[slot].SetScale(marker_size);

        rgba = CvTestbed::Instance().CreateImageWithProto("RGBA", image, 0, 4);
    }

    static MarkerDetector<MarkerData> marker_detector;
    marker_detector.SetMarkerSize(marker_size); // for marker ids larger than 255, set the content resolution accordingly

    //static MarkerDetector<MarkerArtoolkit> marker_detector;
    //marker_detector.SetMarkerSize(2.8, 3, 1.5);

    // Here we try to use RGBA just to make sure that also it works...
    //cvCvtColor(image, rgba, CV_RGB2RGBA);
    marker_detector.Detect(image, &cam, true, true);

    // Rebuild the drawable list from this frame's detections (capped at 32).
    GlutViewer::DrawableClear();
    const size_t n_found = marker_detector.markers->size();
    for (size_t idx = 0; idx < n_found; ++idx) {
        if (idx >= 32) break;

        Pose marker_pose = (*(marker_detector.markers))[idx].pose;
        marker_pose.GetMatrixGL(d[idx].gl_mat);

        // Deterministic per-id RGB color so each marker keeps its color
        // across frames.
        const int id = (*(marker_detector.markers))[idx].GetId();
        const double red   = 1.0 - double(id + 1) / 32.0;
        const double green = 1.0 - double(id * 3 % 32 + 1) / 32.0;
        const double blue  = 1.0 - double(id * 7 % 32 + 1) / 32.0;
        d[idx].SetColor(red, green, blue);

        GlutViewer::DrawableAdd(&(d[idx]));
    }

    // Restore the caller's original image orientation.
    if (upside_down) {
        cvFlip(image);
        image->origin = !image->origin;
    }
}
int main(int, char **) { VideoCapture cap(0); static MarkerDetector<MarkerData> marker_detector; marker_detector.SetMarkerSize(15); // for marker ids larger than 255, set the content resolution accordingly if (cap.isOpened()) { frame; namedWindow("portal"); while(true) { cap >> frame; IplImage image = frame; marker_detector.Detect(&image, &cam, true, true); if (marker_detector.markers->size() > 0) cout << "Detected " << marker_detector.markers->size() << endl; for (size_t i=0; i<marker_detector.markers->size(); i++) { if (i >= 32) break; Pose p = (*(marker_detector.markers))[i].pose; vector<PointDouble> corners = (*(marker_detector.markers))[i].marker_corners_img; //cout << "corners size(4) == " << corners.size() << endl; /* line(frame, bestSquares[i][0], bestSquares[i][1], Scalar(0,255,0), 2); line(frame, bestSquares[i][1], bestSquares[i][2], Scalar(0,255,0), 2); line(frame, bestSquares[i][2], bestSquares[i][3], Scalar(0,255,0), 2); line(frame, bestSquares[i][3], bestSquares[i][0], Scalar(0,255,0), 2);*/ } imshow("portal", frame); if(waitKey(30) >= 0) break; } }
int main(int argc, char *argv[]) { ros::init (argc, argv, "marker_detect"); ros::NodeHandle n; if(argc < 7){ std::cout << std::endl; cout << "Not enough arguments provided." << endl; cout << "Usage: ./individualMarkers <marker size in cm> <max new marker error> <max track error> <cam image topic> <cam info topic> <output frame>" << endl; std::cout << std::endl; return 0; } // Get params from command line marker_size = atof(argv[1]); max_new_marker_error = atof(argv[2]); max_track_error = atof(argv[3]); cam_image_topic = argv[4]; cam_info_topic = argv[5]; output_frame = argv[6]; marker_detector.SetMarkerSize(marker_size); cam = new Camera(n, cam_info_topic); tf_listener = new tf::TransformListener(n); tf_broadcaster = new tf::TransformBroadcaster(); arMarkerPub_ = n.advertise < ar_track_alvar::AlvarMarkers > ("ar_pose_marker", 0); rvizMarkerPub_ = n.advertise < visualization_msgs::Marker > ("visualization_marker", 0); //Give tf a chance to catch up before the camera callback starts asking for transforms ros::Duration(1.0).sleep(); ros::spinOnce(); ROS_INFO ("Subscribing to image topic"); image_transport::ImageTransport it_(n); cam_sub_ = it_.subscribe (cam_image_topic, 1, &getCapCallback); ros::spin (); return 0; }
// Node entry point for the bundle-based detector: parses the fixed
// arguments, loads one MultiMarkerBundle per XML file given on the
// command line, then subscribes to the camera topic and spins.
// NOTE(review): the per-bundle arrays allocated with `new[]` below are
// never freed — they live for the process lifetime (typical for a ROS
// node's main).
int main(int argc, char *argv[])
{
    ros::init (argc, argv, "marker_detect");
    ros::NodeHandle n;

    // Seven fixed arguments plus at least one bundle XML file.
    if(argc < 8){
        std::cout << std::endl;
        cout << "Not enough arguments provided." << endl;
        cout << "Usage: ./findMarkerBundles <marker size in cm> <max new marker error> <max track error> <cam image topic> <cam info topic> <output frame> <list of bundle XML files...>" << endl;
        std::cout << std::endl;
        return 0;
    }

    // Get params from command line
    marker_size = atof(argv[1]);
    max_new_marker_error = atof(argv[2]);
    max_track_error = atof(argv[3]);
    cam_image_topic = argv[4];
    cam_info_topic = argv[5];
    output_frame = argv[6];
    int n_args_before_list = 7;                 // argv index where the XML file list starts
    n_bundles = argc - n_args_before_list;      // one bundle per remaining argument
    marker_detector.SetMarkerSize(marker_size);

    // Per-bundle state, indexed 0..n_bundles-1.
    multi_marker_bundles = new MultiMarkerBundle*[n_bundles];
    bundlePoses = new Pose[n_bundles];
    master_id = new int[n_bundles];
    bundle_indices = new std::vector<int>[n_bundles];
    bundles_seen = new bool[n_bundles];

    // Load the marker bundle XML files
    for(int i=0; i<n_bundles; i++){
        bundlePoses[i].Reset();
        // loadHelper is used only to extract the marker-id list; the bundle
        // itself is then constructed with those ids and loaded again.
        MultiMarker loadHelper;
        if(loadHelper.Load(argv[i + n_args_before_list], FILE_FORMAT_XML)){
            vector<int> id_vector = loadHelper.getIndices();
            multi_marker_bundles[i] = new MultiMarkerBundle(id_vector);
            multi_marker_bundles[i]->Load(argv[i + n_args_before_list], FILE_FORMAT_XML);
            master_id[i] = multi_marker_bundles[i]->getMasterId();
            bundle_indices[i] = multi_marker_bundles[i]->getIndices();
        }
        else{
            // Abort on the first bundle that fails to load.
            cout<<"Cannot load file "<< argv[i + n_args_before_list] << endl;
            return 0;
        }
    }

    // Set up camera, listeners, and broadcasters
    cam = new Camera(n, cam_info_topic);
    tf_listener = new tf::TransformListener(n);
    tf_broadcaster = new tf::TransformBroadcaster();
    arMarkerPub_ = n.advertise < ar_track_alvar::AlvarMarkers > ("ar_pose_marker", 0);
    rvizMarkerPub_ = n.advertise < visualization_msgs::Marker > ("visualization_marker", 0);

    //Give tf a chance to catch up before the camera callback starts asking for transforms
    ros::Duration(1.0).sleep();
    ros::spinOnce();

    //Subscribe to topics and set up callbacks
    ROS_INFO ("Subscribing to image topic");
    image_transport::ImageTransport it_(n);
    cam_sub_ = it_.subscribe (cam_image_topic, 1, &getCapCallback);
    ros::spin();
    return 0;
}
// Estimate the multi-marker bundle pose for the current frame.
//
// Two phases, switched by the global `optimize_done`:
//   - initialization phase: detections feed a MultiMarkerInitializer;
//   - tracking phase: the optimized MultiMarkerBundle updates `pose`.
// The globals `add_measurement` and `optimize` act as one-shot commands
// (set elsewhere, presumably by keyboard handling — confirm against the
// full source) and are cleared here once handled.
//
// Returns the update error from the active multi-marker object, or -1
// when no markers were detected this frame.
double GetMultiMarkerPose(IplImage *image, Pose &pose) {
    static bool init=true;
    if (init) {
        init=false;
        // Marker ids 0..nof_markers-1 belong to the bundle.
        vector<int> id_vector;
        for(int i = 0; i < nof_markers; ++i)
            id_vector.push_back(i);
        // We make the initialization for MultiMarkerBundle using MultiMarkerInitializer
        // Each marker needs to be visible in at least two images and at most 64 image are used.
        multi_marker_init = new MultiMarkerInitializer(id_vector, 2, 64);
        pose.Reset();
        // Anchor the first marker at the origin so the bundle has a reference frame.
        multi_marker_init->PointCloudAdd(id_vector[0], marker_size, pose);
        multi_marker_bundle = new MultiMarkerBundle(id_vector);
        marker_detector.SetMarkerSize(marker_size);
    }

    double error = -1;
    if (!optimize_done) {
        // Initialization phase: accumulate detections into the initializer.
        if (marker_detector.Detect(image, cam, true, false, max_new_marker_error, max_track_error, CVSEQ, true)) {
            error = multi_marker_init->Update(marker_detector.markers, cam, pose);
        }
    }
    else {
        // Tracking phase: update the optimized bundle; if projecting the
        // bundle back into the image yields extra trackable markers, run a
        // second detection pass and refine the pose with them.
        if (marker_detector.Detect(image, cam, true, false, max_new_marker_error, max_track_error, CVSEQ, true)) {
            error = multi_marker_bundle->Update(marker_detector.markers, cam, pose);
            if ((multi_marker_bundle->SetTrackMarkers(marker_detector, cam, pose, image) > 0) && (marker_detector.DetectAdditional(image, cam, false) > 0)) {
                error = multi_marker_bundle->Update(marker_detector.markers, cam, pose);
            }
        }
    }

    // One-shot: capture the current detections as a measurement for the
    // initializer (requires at least two visible markers).
    if (add_measurement) {
        cout << "Markers seen: " << marker_detector.markers->size() << "\n";
        if (marker_detector.markers->size() >= 2) {
            cout<<"Adding measurement..."<<endl;
            multi_marker_init->MeasurementsAdd(marker_detector.markers);
        }
        else{
            cout << "Not enough markers to capture measurement\n";
        }
        add_measurement=false;
    }

    // One-shot: run the bundle adjustment over all captured measurements.
    if (optimize) {
        cout<<"Initializing..."<<endl;
        if (!multi_marker_init->Initialize(cam)) {
            cout<<"Initialization failed, add some more measurements."<<endl;
        } else {
            // Reset the bundle adjuster.
            multi_marker_bundle->Reset();
            multi_marker_bundle->MeasurementsReset();
            // Copy all measurements into the bundle adjuster.
            for (int i = 0; i < multi_marker_init->getMeasurementCount(); ++i) {
                Pose p2;
                multi_marker_init->getMeasurementPose(i, cam, p2);
                const std::vector<MultiMarkerInitializer::MarkerMeasurement, Eigen::aligned_allocator<MultiMarkerInitializer::MarkerMeasurement> > markers = multi_marker_init->getMeasurementMarkers(i);
                multi_marker_bundle->MeasurementsAdd(&markers, p2);
            }
            // Initialize the bundle adjuster with initial marker poses.
            multi_marker_bundle->PointCloudCopy(multi_marker_init);
            cout<<"Optimizing..."<<endl;
            // 0.01: convergence threshold, 20: max iterations — confirm
            // against the ALVAR Optimize() documentation.
            if (multi_marker_bundle->Optimize(cam, 0.01, 20)) {
                cout<<"Optimizing done"<<endl;
                // Switch permanently to the tracking phase.
                optimize_done=true;
            } else {
                cout<<"Optimizing FAILED!"<<endl;
            }
        }
        optimize=false;
    }
    return error;
}
int main(int argc, char *argv[]) { ros::init (argc, argv, "marker_detect"); ros::NodeHandle n; if(argc < 8){ std::cout << std::endl; cout << "Not enough arguments provided." << endl; cout << "Usage: ./trainMarkerBundle <num of markers> <marker size in cm> <max new marker error> <max track error> <cam image topic> <cam info topic> <output frame>" << endl; std::cout << std::endl; return 0; } // Get params from command line nof_markers = atoi(argv[1]); marker_size = atof(argv[2]); max_new_marker_error = atof(argv[3]); max_track_error = atof(argv[4]); cam_image_topic = argv[5]; cam_info_topic = argv[6]; output_frame = argv[7]; marker_detector.SetMarkerSize(marker_size); cam = new Camera(n, cam_info_topic); tf_listener = new tf::TransformListener(n); tf_broadcaster = new tf::TransformBroadcaster(); arMarkerPub_ = n.advertise < ar_track_alvar_msgs::AlvarMarkers > ("ar_pose_marker", 0); rvizMarkerPub_ = n.advertise < visualization_msgs::Marker > ("visualization_marker", 0); //Give tf a chance to catch up before the camera callback starts asking for transforms ros::Duration(1.0).sleep(); ros::spinOnce(); //Subscribe to camera message ROS_INFO ("Subscribing to image topic"); image_transport::ImageTransport it_(n); cam_sub_ = it_.subscribe (cam_image_topic, 1, &getCapCallback); // Output usage message std::cout << std::endl; std::cout << "Keyboard Shortcuts:" << std::endl; std::cout << " l: load marker configuration from mmarker.txt" << std::endl; std::cout << " s: save marker configuration to mmarker.txt" << std::endl; std::cout << " r: reset marker configuration" << std::endl; std::cout << " p: add measurement" << std::endl; std::cout << " a: auto add measurements (captures once a second for 5 seconds)" << std::endl; std::cout << " o: optimize bundle" << std::endl; std::cout << " q: quit" << std::endl; std::cout << std::endl; std::cout << "Please type commands with the openCV window selected" << std::endl; std::cout << std::endl; cvNamedWindow("Command input window", 
CV_WINDOW_AUTOSIZE); while(1){ int key = cvWaitKey(20); if(key >= 0) keyProcess(key); ros::spinOnce(); } return 0; }
// Node entry point for the no-Kinect individual-marker detector: parses
// arguments (with an optional max-frequency), exposes them via dynamic
// reconfigure, then runs a rate-controlled spin loop that can change its
// frequency and (un)subscribe from the image topic on the fly.
int main(int argc, char *argv[])
{
    ros::init (argc, argv, "marker_detect");
    ros::NodeHandle n, pn("~"); // pn: private namespace for the reconfigurable params

    if(argc < 7){
        std::cout << std::endl;
        cout << "Not enough arguments provided." << endl;
        cout << "Usage: ./individualMarkersNoKinect <marker size in cm> <max new marker error> " << "<max track error> <cam image topic> <cam info topic> <output frame> [ <max frequency> ]";
        std::cout << std::endl;
        return 0;
    }

    // Get params from command line
    marker_size = atof(argv[1]);
    max_new_marker_error = atof(argv[2]);
    max_track_error = atof(argv[3]);
    cam_image_topic = argv[4];
    cam_info_topic = argv[5];
    output_frame = argv[6];
    marker_detector.SetMarkerSize(marker_size);

    // Optional 7th argument overrides the default max_frequency global.
    if (argc > 7)
        max_frequency = atof(argv[7]);

    // Set dynamically configurable parameters so they don't get replaced by default values
    pn.setParam("marker_size", marker_size);
    pn.setParam("max_new_marker_error", max_new_marker_error);
    pn.setParam("max_track_error", max_track_error);
    if (argc > 7)
        pn.setParam("max_frequency", max_frequency);

    cam = new Camera(n, cam_info_topic);
    tf_listener = new tf::TransformListener(n);
    tf_broadcaster = new tf::TransformBroadcaster();
    arMarkerPub_ = n.advertise < ar_track_alvar_msgs::AlvarMarkers > ("ar_pose_marker", 0);
    rvizMarkerPub_ = n.advertise < visualization_msgs::Marker > ("visualization_marker", 0);

    // Prepare dynamic reconfiguration
    dynamic_reconfigure::Server < ar_track_alvar::ParamsConfig > server;
    dynamic_reconfigure::Server<ar_track_alvar::ParamsConfig>::CallbackType f;
    f = boost::bind(&configCallback, _1, _2);
    server.setCallback(f);

    //Give tf a chance to catch up before the camera callback starts asking for transforms
    // It will also reconfigure parameters for the first time, setting the default values
    ros::Duration(1.0).sleep();
    ros::spinOnce();

    image_transport::ImageTransport it_(n);
    if (enabled == true) {
        // This always happens, as enable is true by default
        ROS_INFO("Subscribing to image topic");
        cam_sub_ = it_.subscribe (cam_image_topic, 1, &getCapCallback);
    }

    // Run at the configured rate, discarding pointcloud msgs if necessary
    ros::Rate rate(max_frequency);
    while (ros::ok()) {
        ros::spinOnce();
        rate.sleep();
        // If the reconfigure callback changed max_frequency, rebuild the
        // Rate object (compare against the current cycle time with a 1 ms
        // tolerance to avoid rebuilding every iteration).
        if (std::abs((rate.expectedCycleTime() - ros::Duration(1.0 / max_frequency)).toSec()) > 0.001) {
            // Change rate dynamically; if must be above 0, as 0 will provoke a segfault on next spinOnce
            ROS_DEBUG("Changing frequency from %.2f to %.2f", 1.0 / rate.expectedCycleTime().toSec(), max_frequency);
            rate = ros::Rate(max_frequency);
        }
        if (enableSwitched == true) {
            // Enable/disable switch: subscribe/unsubscribe to make use of pointcloud processing nodelet
            // lazy publishing policy; in CPU-scarce computer as TurtleBot's laptop this is a huge saving
            if (enabled == false)
                cam_sub_.shutdown();
            else
                cam_sub_ = it_.subscribe(cam_image_topic, 1, &getCapCallback);
            enableSwitched = false;
        }
    }

    return 0;
}
// Per-frame callback for the fixed multi-marker field sample: on the first
// frame it builds a hard-coded 5-marker field (marker 0 at the origin at
// double size, markers 1-4 at the field corners), then tracks the field's
// pose each frame and visualizes it as a large marker overlay.
// NOTE(review): uses file-level globals (nof_markers, marker_size,
// marker_detector, multi_marker, visualize, detect_additional,
// calibrationFilename) declared elsewhere in this file.
void videocallback(IplImage *image)
{
    static Camera cam; // persists across frames; calibrated once below
    Pose pose;
    // Work on an upright image; the flip is undone before returning.
    bool flip_image = (image->origin?true:false);
    if (flip_image) {
        cvFlip(image);
        image->origin = !image->origin;
    }

    static bool init = true;
    if (init) {
        init = false;

        // Initialize camera
        cout<<"Loading calibration: "<<calibrationFilename.str();
        if (cam.SetCalib(calibrationFilename.str().c_str(), image->width, image->height)) {
            cout<<" [Ok]"<<endl;
        } else {
            // Fall back to a default camera model at the image resolution.
            cam.SetRes(image->width, image->height);
            cout<<" [Fail]"<<endl;
        }

        vector<int> id_vector;
        for(int i = 0; i < nof_markers; ++i)
            id_vector.push_back(i);

        // We make the initialization for MultiMarkerBundle using a fixed marker field (can be printed from ALVAR.pdf)
        marker_detector.SetMarkerSize(marker_size);
        marker_detector.SetMarkerSizeForId(0, marker_size*2); // center marker is printed at double size
        multi_marker = new MultiMarker(id_vector);
        pose.Reset();
        // Marker 0 at the field origin; markers 1-4 at the four corners,
        // offset by +/-2.5 and +/-1.5 marker sizes from the center.
        multi_marker->PointCloudAdd(0, marker_size*2, pose);
        pose.SetTranslation(-marker_size*2.5, +marker_size*1.5, 0);
        multi_marker->PointCloudAdd(1, marker_size, pose);
        pose.SetTranslation(+marker_size*2.5, +marker_size*1.5, 0);
        multi_marker->PointCloudAdd(2, marker_size, pose);
        pose.SetTranslation(-marker_size*2.5, -marker_size*1.5, 0);
        multi_marker->PointCloudAdd(3, marker_size, pose);
        pose.SetTranslation(+marker_size*2.5, -marker_size*1.5, 0);
        multi_marker->PointCloudAdd(4, marker_size, pose);
    }

    double error=-1;
    if (marker_detector.Detect(image, &cam, true, (visualize == 1), 0.0)) {
        if (detect_additional) {
            // Use the current pose estimate to look for markers missed by
            // the first pass, then re-detect with those hints.
            error = multi_marker->Update(marker_detector.markers, &cam, pose);
            multi_marker->SetTrackMarkers(marker_detector, &cam, pose, visualize ? image : NULL);
            marker_detector.DetectAdditional(image, &cam, (visualize == 1));
        }
        // visualize == 2 draws the multi-marker update into the image.
        if (visualize == 2)
            error = multi_marker->Update(marker_detector.markers, &cam, pose, image);
        else
            error = multi_marker->Update(marker_detector.markers, &cam, pose);
    }

    // Overlay a large marker at the field pose when the update error is
    // acceptable (< 5); otherwise `foo` keeps its last good pose.
    static Marker foo;
    foo.SetMarkerSize(marker_size*4);
    if ((error >= 0) && (error < 5)) {
        foo.pose = pose;
    }
    foo.Visualize(image, &cam);

    // Restore the caller's original image orientation.
    if (flip_image) {
        cvFlip(image);
        image->origin = !image->origin;
    }
}