void cvTackBarEvents(int pos,void*)
{
    if (iThresParam1<3) iThresParam1=3;
    if (iThresParam1%2!=1) iThresParam1++;
    if (iThresParam2<1) iThresParam2=1;
    ThresParam1=iThresParam1;
    ThresParam2=iThresParam2;
    MDetector.setThresholdParams(ThresParam1,ThresParam2);
//recompute
    MDetector.detect(TheInputImage,TheMarkers,TheCameraParameters);
    TheInputImage.copyTo(TheInputImageCopy);
    for (unsigned int i=0;i<TheMarkers.size();i++)
        TheMarkers[i].draw(TheInputImageCopy,Scalar(0,0,255),1);
    //print other rectangles that contain no valid markers
    /*for (unsigned int i=0;i<MDetector.getCandidates().size();i++) {
        aruco::Marker m( MDetector.getCandidates()[i],999);
        m.draw(TheInputImageCopy,cv::Scalar(255,0,0));
    }*/

//draw a 3d cube in each marker if there is 3d info
    if (TheCameraParameters.isValid())
        for (unsigned int i=0;i<TheMarkers.size();i++)
            CvDrawingUtils::draw3dCube(TheInputImageCopy,TheMarkers[i],TheCameraParameters);

    cv::imshow("in",TheInputImageCopy);
    cv::imshow("thres",MDetector.getThresholdedImage());
}
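// The callback above relies on file-scope globals and GUI wiring that this
// snippet does not show. A minimal sketch of what it assumes, with names
// inferred from the fuller examples later in this listing (not copied from
// the original file):
aruco::MarkerDetector MDetector;
std::vector<aruco::Marker> TheMarkers;
aruco::CameraParameters TheCameraParameters;
cv::Mat TheInputImage, TheInputImageCopy;
int iThresParam1 = 7, iThresParam2 = 7;
double ThresParam1 = 7, ThresParam2 = 7;

void createGui()
{
    cv::namedWindow("in");
    cv::namedWindow("thres");
    cv::createTrackbar("ThresParam1", "in", &iThresParam1, 13, cvTackBarEvents);
    cv::createTrackbar("ThresParam2", "in", &iThresParam2, 13, cvTackBarEvents);
}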
JNIEXPORT void JNICALL
Java_cz_email_michalchomo_cardboardkeyboard_MainActivity_FindFeatures(JNIEnv *env, jobject instance,
                                                                      jlong matAddrGr,
                                                                      jlong matAddrRgba) {

    Mat &mGr = *(Mat *) matAddrGr;
    Mat &mRgb = *(Mat *) matAddrRgba;
    //vector<KeyPoint> v;
    MarkerDetector markerDetector;
    vector<Marker> markers;

    markerDetector.detect(mGr, markers);
    for (unsigned int i=0; i < markers.size(); i++) {
        cout << markers[i] << endl;
        markers[i].draw(mRgb,Scalar(0,0,255),2);
    }

    /*Ptr<FeatureDetector> detector = FastFeatureDetector::create(50);
    detector->detect(mGr, v);
    for (unsigned int i = 0; i < v.size(); i++) {
        const KeyPoint &kp = v[i];
        circle(mRgb, Point(kp.pt.x, kp.pt.y), 10, Scalar(255, 0, 0, 255));
    }*/

}
void cvTackBarEvents(int pos, void*)
{
    (void)(pos);
    if (iThresParam1 < 3)
        iThresParam1 = 3;
    if (iThresParam1 % 2 != 1)
        iThresParam1++;
    if (ThresParam2 < 1)
        ThresParam2 = 1;
    ThresParam1 = iThresParam1;
    ThresParam2 = iThresParam2;
    TheMarkerDetector.setThresholdParams(ThresParam1, ThresParam2);

    // detect, print, get pose, and print

    // detect
    vector<aruco::Marker> detected_markers = TheMarkerDetector.detect(TheInputImage);
    // print the markers detected that belong to the markerset
    for (auto idx : TheMarkerMapConfig.getIndices(detected_markers))
        detected_markers[idx].draw(TheInputImageCopy, Scalar(0, 0, 255), 2);
    // detect 3d info if possible
    if (TheMSPoseTracker.isValid())
    {
        TheMSPoseTracker.estimatePose(detected_markers);
        aruco::CvDrawingUtils::draw3dAxis(TheInputImageCopy, TheCameraParameters, TheMSPoseTracker.getRvec(),
                                          TheMSPoseTracker.getTvec(), TheMarkerMapConfig[0].getMarkerSize() * 2);
    }

    cv::imshow("in", TheInputImageCopy);
    cv::imshow("thres", TheMarkerDetector.getThresholdedImage());
}
Example #4
int main(int argc,char **argv)
{
    try
    {
        if (argc!=2) {
            cerr<<"Usage: in.jpg "<<endl;
            return -1;
        }
        MarkerDetector MDetector;
        vector<Marker> Markers;
        //read the input image
        cv::Mat InImage;
        InImage=cv::imread(argv[1]);
        //Ok, let's detect
        MDetector.detect(InImage,Markers);
        //for each marker, draw info and its boundaries in the image
        for (unsigned int i=0; i<Markers.size(); i++) {
            cout<<Markers[i]<<endl;
            cout<<Markers[i][0].x<<endl;  //x coordinate of the marker's first corner
            cout<<Markers[i].Tvec.at<float>(0,0)<<endl;  //x value of Tvec; print Markers[i].Tvec to see the whole vector
            Markers[i].draw(InImage,Scalar(0,0,255),2);
        }
        cv::imshow("in",InImage);
        cv::waitKey(0);//wait for key to be pressed
    } catch (std::exception &ex)
    {
        cout<<"Exception :"<<ex.what()<<endl;
    }

    return 0;
}
JNIEXPORT jboolean JNICALL Java_wrapper_MarkerDetector_Jwarp(JNIEnv * env, jobject obj, jobject inAddr, jobject outAddr, jdouble width, jdouble height, jobject points){
    MarkerDetector *inst = getHandle<MarkerDetector>(env, obj);
    cv::Mat* in = (cv::Mat*)inAddr;
    cv::Mat* out = (cv::Mat*)outAddr;
    Size size = Size((int)width, (int)height);
    // warp() takes Mat references, so dereference the pointers; converting the
    // Java 'points' object to std::vector<cv::Point2f> is assumed to happen in
    // an overload or helper not shown in this listing
    return inst->warp(*in, *out, size, points);
}
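// The JNI wrappers in this listing call a getHandle<T>() helper that is not
// shown. A typical implementation (a sketch; the field name "nativeHandle"
// is an assumption, not taken from the original wrapper) reads the native
// pointer stored in a long field of the Java object:
template <typename T>
T* getHandle(JNIEnv* env, jobject obj)
{
    jclass cls = env->GetObjectClass(obj);
    jfieldID fid = env->GetFieldID(cls, "nativeHandle", "J");
    return reinterpret_cast<T*>(env->GetLongField(obj, fid));
}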
int main(int argc,char **argv)
{
	try
	{
		if(argc<3) {cerr<<"Usage: image  boardConfig.yml [cameraParams.yml] [markerSize]  [outImage]"<<endl;exit(0);}
		aruco::CameraParameters CamParam;
		MarkerDetector MDetector;
		vector<aruco::Marker> Markers;
		float MarkerSize=-1;
		BoardConfiguration TheBoardConfig;
		BoardDetector TheBoardDetector;
		Board TheBoardDetected;
		
		cv::Mat InImage=cv::imread(argv[1]);
		TheBoardConfig.readFromFile(argv[2]);
		if (argc>=4) {
		  CamParam.readFromXMLFile(argv[3]);
		  //resizes the parameters to fit the size of the input image
		  CamParam.resize( InImage.size());
		}

		if (argc>=5) 
		  MarkerSize=atof(argv[4]);
		
		cv::namedWindow("in",1);
		MDetector.detect(InImage,Markers);//detect markers without computing R and T information
		//Detection of the board
		float probDetect=TheBoardDetector.detect( Markers, TheBoardConfig,TheBoardDetected, CamParam,MarkerSize);
		
		//for each marker, draw info and its boundaries in the image
		for(unsigned int i=0;i<Markers.size();i++){
			cout<<Markers[i]<<endl;
			Markers[i].draw(InImage,Scalar(0,0,255),2);
		}

		//draw a 3d cube in each marker if there is 3d info
		if (  CamParam.isValid()){
 		  for(unsigned int i=0;i<Markers.size();i++){
 		    CvDrawingUtils::draw3dCube(InImage,Markers[i],CamParam);
 		    CvDrawingUtils::draw3dAxis(InImage,Markers[i],CamParam);
 		  }
		  CvDrawingUtils::draw3dAxis(InImage,TheBoardDetected,CamParam);
		  cout<<TheBoardDetected.Rvec<<" "<<TheBoardDetected.Tvec<<endl;
		}
		//draw board axis
		
		//show input with augmented information
		cv::imshow("in",InImage);
		cv::waitKey(0);//wait for key to be pressed
		if(argc>=6) cv::imwrite(argv[5],InImage);
		
	}catch(std::exception &ex)

	{
		cout<<"Exception :"<<ex.what()<<endl;
	}

}
Example #7
void videocallback(IplImage *image)
{
    static IplImage *rgba;
    bool flip_image = (image->origin?true:false);
    if (flip_image) {
        cvFlip(image);
        image->origin = !image->origin;
    }

    if (init) {
        init = false;
        cout<<"Loading calibration: "<<calibrationFilename.str();
        if (cam.SetCalib(calibrationFilename.str().c_str(), image->width, image->height)) {
            cout<<" [Ok]"<<endl;
        } else {
            cam.SetRes(image->width, image->height);
            cout<<" [Fail]"<<endl;
        }
        double p[16];
        cam.GetOpenglProjectionMatrix(p,image->width,image->height);
        GlutViewer::SetGlProjectionMatrix(p);
        for (int i=0; i<32; i++) {
            d[i].SetScale(marker_size);
        }
        rgba = CvTestbed::Instance().CreateImageWithProto("RGBA", image, 0, 4);
    }
    static MarkerDetector<MarkerData> marker_detector;
    marker_detector.SetMarkerSize(marker_size); // for marker ids larger than 255, set the content resolution accordingly
    //static MarkerDetector<MarkerArtoolkit> marker_detector;
    //marker_detector.SetMarkerSize(2.8, 3, 1.5);

    // Here we try to use RGBA just to make sure that it also works...
    //cvCvtColor(image, rgba, CV_RGB2RGBA);
    marker_detector.Detect(image, &cam, true, true);
    GlutViewer::DrawableClear();
    for (size_t i=0; i<marker_detector.markers->size(); i++) {
        if (i >= 32) break;
        
        Pose p = (*(marker_detector.markers))[i].pose;
        p.GetMatrixGL(d[i].gl_mat);

        int id = (*(marker_detector.markers))[i].GetId();
        double r = 1.0 - double(id+1)/32.0;
        double g = 1.0 - double(id*3%32+1)/32.0;
        double b = 1.0 - double(id*7%32+1)/32.0;
        d[i].SetColor(r, g, b);

        GlutViewer::DrawableAdd(&(d[i]));
    }

    if (flip_image) {
        cvFlip(image);
        image->origin = !image->origin;
    }
}
// Updates the bundlePoses of the multi_marker_bundles by detecting markers and using all markers in a bundle to infer the master tag's position
void GetMultiMarkerPoses(IplImage *image) {

  if (marker_detector.Detect(image, cam, true, false, max_new_marker_error, max_track_error, CVSEQ, true)){
    for(int i=0; i<n_bundles; i++)
      multi_marker_bundles[i]->Update(marker_detector.markers, cam, bundlePoses[i]);
    
    if(marker_detector.DetectAdditional(image, cam, false) > 0){
      for(int i=0; i<n_bundles; i++){
	if ((multi_marker_bundles[i]->SetTrackMarkers(marker_detector, cam, bundlePoses[i], image) > 0))
	  multi_marker_bundles[i]->Update(marker_detector.markers, cam, bundlePoses[i]);
      }
    }
  }
}
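// GetMultiMarkerPoses() above depends on globals that are set up by the
// findMarkerBundles main() further down in this listing; a sketch of their
// declarations, matching how they are used here:
MarkerDetector<MarkerData> marker_detector;
Camera* cam;
MultiMarkerBundle** multi_marker_bundles;
Pose* bundlePoses;
int n_bundles;
double max_new_marker_error, max_track_error;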
Example #9
//This function is called every time a new image is published
void imageCallback(const sensor_msgs::ImageConstPtr& original_image)
{
    //Convert from the ROS image message to a CvImage suitable for working with OpenCV for processing
    cv_bridge::CvImageConstPtr cv_ptr;
    try
    {
        //Always copy, returning a mutable CvImage
        //OpenCV expects color images to use BGR channel order.
        //cv_ptr = cv_bridge::toCvShare(original_image, "");
        cv_ptr = cv_bridge::toCvCopy(original_image, enc::RGB8);
    }
    catch (cv_bridge::Exception& e)
    {
        //if there is an error during conversion, display it
        ROS_ERROR("main.cpp::cv_bridge exception: %s", e.what());
        return;
    }
    
    try
    {
        MarkerDetector MDetector;
        vector<Marker> Markers;
        //read the input image
        cv::Mat InImage;
        InImage = cv_ptr->image;
        //Ok, let's detect
        MDetector.detect(InImage,Markers);
        //for each marker, draw info and its boundaries in the image
        for (unsigned int i=0;i<Markers.size();i++) {
            //cout<<Markers[i]<<endl;
            Markers[i].draw(InImage,Scalar(0,0,255),2);
        }
        cv::imshow(WINDOW,InImage);
        //Add some delay in milliseconds
        cv::waitKey(3);
    } catch (std::exception &ex)
    {
        ROS_ERROR("main.cpp::aruco exception: %s", ex.what());
    }
    /**
    * The publish() function is how you send messages. The parameter
    * is the message object. The type of this object must agree with the type
    * given as a template parameter to the advertise<>() call, as was done
    * in the constructor in main().
    */
    //Convert the CvImage to a ROS image message and publish it on the "camera/image_processed" topic.
    pub.publish(cv_ptr->toImageMsg());
 }
void vIdle() {
    if (TheCaptureFlag) {
        // capture image
        TheVideoCapturer.grab();
        TheVideoCapturer.retrieve(TheInputImage);
        TheUndInputImage.create(TheInputImage.size(), CV_8UC3);
        // by default, opencv works in BGR, so we must convert to RGB because OpenGL on Windows prefers RGB
        cv::cvtColor(TheInputImage, TheInputImage, CV_BGR2RGB);
        // remove distortion in image
        cv::undistort(TheInputImage, TheUndInputImage, TheCameraParams.CameraMatrix,
                      TheCameraParams.Distorsion);
        // detect markers
        MDetector.detect(TheUndInputImage, TheMarkers);
        // Detection of the board
        TheBoardDetected.second = TheBoardDetector.detect(
            TheMarkers, TheBoardConfig, TheBoardDetected.first, TheCameraParams, TheMarkerSize);
        // check the speed by calculating the mean speed of all iterations
        // resize the image to the size of the GL window
        cv::resize(TheUndInputImage, TheResizedImage, TheGlWindowSize);
        // create mask. It is a synthetic mask consisting of a simple rectangle, just to show an example of
        // opengl with mask
        TheMask = createSyntheticMask(TheResizedImage); // let's create it with the same size as the resized
                                                        // image, i.e. the size of the opengl window
    }
    glutPostRedisplay();
}
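// createSyntheticMask() is not included in this listing. Going by the comment
// above ("a simple rectangle, just to show an example of opengl with mask"),
// a plausible sketch is a filled rectangle over the center of the image:
cv::Mat createSyntheticMask(const cv::Mat& img)
{
    cv::Mat mask = cv::Mat::zeros(img.size(), CV_8UC1);
    cv::rectangle(mask,
                  cv::Rect(img.cols / 4, img.rows / 4, img.cols / 2, img.rows / 2),
                  cv::Scalar(255), -1); // thickness -1 = filled
    return mask;
}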
Example #11
JNIEXPORT void JNICALL Java_com_polysfactory_n3_jni_NativeMarkerDetector_nativeFindMarkers(
		JNIEnv * jenv, jobject, jlong thiz, jlong imageBgra, jlong transMat, jfloat scale) {
	try {
		MarkerDetector* markerDetector = (MarkerDetector*) thiz;
		Mat* image = (Mat*) imageBgra;
		Mat* transMatObj = (Mat*) transMat;
		markerDetector->processFrame(*image, (float) scale);
		std::vector<Transformation> transformations =
				markerDetector->getTransformations();
		vector_Transformation_to_Mat(transformations, *transMatObj);
	} catch (...) {
		LOGD("nativeDetect caught unknown exception");
		jclass je = jenv->FindClass("java/lang/Exception");
		jenv->ThrowNew(je,
				"Unknown exception in JNI code {highgui::VideoCapture_n_1VideoCapture__()}");
	}
}
int main(int, char **)
{
  VideoCapture cap(0);
  static MarkerDetector<MarkerData> marker_detector;
  marker_detector.SetMarkerSize(15); // for marker ids larger than 255, set the content resolution accordingly

      
  if (cap.isOpened())
  {
    Mat frame;
    namedWindow("portal");
    while(true)
    {
      cap >> frame;
      
      
      IplImage image = frame;
      // 'cam' is assumed to be a global alvar::Camera, as in the other ALVAR examples in this listing
      marker_detector.Detect(&image, &cam, true, true);
      if (marker_detector.markers->size() > 0)
	cout << "Detected " << marker_detector.markers->size() << endl;
      
      for (size_t i=0; i<marker_detector.markers->size(); i++)
      {
        if (i >= 32) break;
        
        Pose p = (*(marker_detector.markers))[i].pose;
	
	vector<PointDouble> corners = (*(marker_detector.markers))[i].marker_corners_img;
	
	//cout << "corners size(4) == " << corners.size() << endl;
	/*
	line(frame, bestSquares[i][0], bestSquares[i][1], Scalar(0,255,0), 2);
	line(frame, bestSquares[i][1], bestSquares[i][2], Scalar(0,255,0), 2);
	line(frame, bestSquares[i][2], bestSquares[i][3], Scalar(0,255,0), 2);
	line(frame, bestSquares[i][3], bestSquares[i][0], Scalar(0,255,0), 2);*/
      }
      
      imshow("portal", frame);
      
      
      if(waitKey(30) >= 0) break;
    }
  }

  return 0;
}
Example #13
void cvTackBarEvents(int pos, void*)
{
    (void)(pos);
    if (iThresParam1 < 3)
        iThresParam1 = 3;
    if (iThresParam1 % 2 != 1)
        iThresParam1++;
    if (iThresParam1 < 1)
        iThresParam1 = 1;
    MDetector.setThresholdParams(iThresParam1, iThresParam2);

    if (iEnclosedMarkers){
        auto params=MDetector.getParams();
        params._doErosion=true;
         params._cornerMethod=aruco::MarkerDetector::SUBPIX;
        MDetector.setParams(params);
    }
    else{
        auto params=MDetector.getParams();
        params._doErosion=false;
         params._cornerMethod=aruco::MarkerDetector::LINES;
        MDetector.setParams(params);
    }

    MDetector.setDictionary(dictionaryString,float(iCorrectionRate)/10. );  // sets the dictionary to be employed (ARUCO,APRILTAGS,ARTOOLKIT,etc)

    // recompute
    MDetector.detect(TheInputImage, TheMarkers, TheCameraParameters);
    TheInputImage.copyTo(TheInputImageCopy);
    if (iShowAllCandidates){
        auto candidates=MDetector.getCandidates();
        for(auto cand:candidates)
            Marker(cand,-1).draw(TheInputImageCopy, Scalar(255, 0, 255));
    }

    for (unsigned int i = 0; i < TheMarkers.size(); i++)
        TheMarkers[i].draw(TheInputImageCopy, Scalar(0, 0, 255));

    // draw a 3d cube in each marker if there is 3d info
    if (TheCameraParameters.isValid())
        for (unsigned int i = 0; i < TheMarkers.size(); i++)
            CvDrawingUtils::draw3dCube(TheInputImageCopy, TheMarkers[i], TheCameraParameters);

    cv::imshow("in", resize(TheInputImageCopy, 1280));
    cv::imshow("thres", resize(MDetector.getThresholdedImage(), 1280));
}
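// resize(img, 1280) above is not cv::resize: the sample uses a small helper
// that downscales to a fixed width while preserving the aspect ratio. A
// sketch of such a helper (assumed, not copied from the original file):
cv::Mat resize(const cv::Mat& in, int width)
{
    if (in.cols <= width)
        return in;
    double scale = double(width) / in.cols;
    cv::Mat out;
    cv::resize(in, out, cv::Size(width, int(in.rows * scale)));
    return out;
}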
JNIEXPORT void JNICALL Java_wrapper_MarkerDetector_Jdetect(JNIEnv * env, jobject obj, jlong img, jobject markers, jobject camParams, jfloat markerSizeMeters){
    MarkerDetector *inst = getHandle<MarkerDetector>(env, obj);
        
    // use the Array list
    jclass ArrayList_class = env->FindClass( "java/util/ArrayList" );
    
    jclass Marker_class = env->FindClass("wrapper/Marker");
    
    jmethodID Add_method = env->GetMethodID(ArrayList_class,"add", "(Ljava/lang/Object;)Z");
    jmethodID Get_method = env->GetMethodID(ArrayList_class,"get", "(I)Ljava/lang/Object;");
    
    jmethodID Marker_Constructor_method = env->GetMethodID(Marker_class,"<init>","(J)V");
    jmethodID Marker_CopyConstructor_method = env->GetMethodID(Marker_class,"<init>","(Lwrapper/Marker;)V");
    jmethodID Marker_EmptyConstructor_method = env->GetMethodID(Marker_class,"<init>","()V");
    
    jmethodID Marker_SetNativeHandle_method = env->GetMethodID(Marker_class,"setNativeHandle","(J)V");

    cv::Mat* inMat = (cv::Mat*)img;
    aruco::CameraParameters *camParam = getHandle<CameraParameters>(env, camParams);
    
    std::vector <Marker> vMarkers;

    inst->detect(*inMat, vMarkers, *camParam, markerSizeMeters);
    
    // avoid a variable-length array, which is not standard C++
    std::vector<jobject> newMarkers(vMarkers.size());

    for (size_t i = 0; i < vMarkers.size(); i++) {
        Marker* markPtr = &vMarkers[i];
        newMarkers[i] = env->NewObject(Marker_class, Marker_Constructor_method, (jlong) markPtr);
        env->CallBooleanMethod(markers, Add_method, newMarkers[i]);
        env->DeleteLocalRef(newMarkers[i]);
    }
    
    env->DeleteLocalRef(ArrayList_class);
    env->DeleteLocalRef(Marker_class);
}
void GetMarkerPoses(IplImage *image, ARCloud &cloud) {

  //Detect and track the markers
  if (marker_detector.Detect(image, cam, true, false, max_new_marker_error,
			     max_track_error, CVSEQ, true)) 
    {
      printf("\n--------------------------\n\n");
      for (size_t i=0; i<marker_detector.markers->size(); i++)
     	{
	  vector<cv::Point, Eigen::aligned_allocator<cv::Point> > pixels;
	  Marker *m = &((*marker_detector.markers)[i]);
	  int id = m->GetId();
	  cout << "******* ID: " << id << endl;

	  int resol = m->GetRes();
	  int ori = m->ros_orientation;
      
	  PointDouble pt1, pt2, pt3, pt4;
	  pt4 = m->ros_marker_points_img[0];
	  pt3 = m->ros_marker_points_img[resol-1];
	  pt1 = m->ros_marker_points_img[(resol*resol)-resol];
	  pt2 = m->ros_marker_points_img[(resol*resol)-1];
	  
	  m->ros_corners_3D[0] = cloud(pt1.x, pt1.y);
	  m->ros_corners_3D[1] = cloud(pt2.x, pt2.y);
	  m->ros_corners_3D[2] = cloud(pt3.x, pt3.y);
	  m->ros_corners_3D[3] = cloud(pt4.x, pt4.y);
	  
	  if(ori >= 0 && ori < 4){
	    if(ori != 0){
	      std::rotate(m->ros_corners_3D.begin(), m->ros_corners_3D.begin() + ori, m->ros_corners_3D.end());
	    }
	  }
	  else
	    ROS_ERROR("FindMarkerBundles: Bad Orientation: %i for ID: %i", ori, id);

	  //Get the 3D marker points
	  BOOST_FOREACH (const PointDouble& p, m->ros_marker_points_img)
	    pixels.push_back(cv::Point(p.x, p.y));	  
	  ARCloud::Ptr selected_points = ata::filterCloud(cloud, pixels);

	  //Use the kinect data to find a plane and pose for the marker
	  int ret = PlaneFitPoseImprovement(i, m->ros_corners_3D, selected_points, cloud, m->pose);	
	}
    }
}
void vIdle()
{
    if (TheCaptureFlag) {
        //capture image
        TheVideoCapturer.grab();
        TheVideoCapturer.retrieve( TheInputImage);
        TheUndInputImage.create(TheInputImage.size(),CV_8UC3);
        //transform the color (BGR by default) to RGB because Windows systems do not read BGR images properly with OpenGL
        cv::cvtColor(TheInputImage,TheInputImage,CV_BGR2RGB);
        //remove distortion in image
        cv::undistort(TheInputImage,TheUndInputImage, TheCameraParams.CameraMatrix, TheCameraParams.Distorsion);
        //detect markers
        PPDetector.detect(TheUndInputImage,TheMarkers, TheCameraParams.CameraMatrix,Mat(),TheMarkerSize,false);
        //resize the image to the size of the GL window
        cv::resize(TheUndInputImage,TheResizedImage,TheGlWindowSize);
    }
    glutPostRedisplay();
}
Example #17
/*!
 *  
 */
static void vIdle()
{
  if (capture)
  {
    //capture image
    vCapturer.grab();
    vCapturer.retrieve( inImg);
    undImg.create(inImg.size(),CV_8UC3);
    //transform the color (BGR by default) to RGB because Windows systems do not read
    //BGR images properly with OpenGL
//    cv::cvtColor(inImg,inImg,CV_BGR2RGB);
    //remove distortion in image
    cv::undistort(inImg,undImg, camParams.getCamMatrix(), camParams.getDistor());
    //detect markers
    mDetector.detect(undImg,markers, camParams.getCamMatrix(),Mat(),msiz->dval[0]);
    //resize the image to the size of the GL window
    cv::resize(undImg,resImg,glSize);
  }
  glutPostRedisplay();
}
void vIdle()
{
    if (TheCaptureFlag) {
        //capture image
        TheVideoCapturer.grab();
        TheVideoCapturer.retrieve( TheInputImage);
        TheUndInputImage.create(TheInputImage.size(),CV_8UC3);
        //by default, opencv works in BGR, so we must convert to RGB because OpenGL on Windows prefers RGB
        cv::cvtColor(TheInputImage,TheInputImage,CV_BGR2RGB);
        //remove distortion in image
        cv::undistort(TheInputImage,TheUndInputImage, TheCameraParams.CameraMatrix,TheCameraParams.Distorsion);
        //detect markers
        MDetector.detect(TheUndInputImage,TheMarkers,TheCameraParams.CameraMatrix,Mat(),TheMarkerSize);
        //Detection of the board
        TheBoardDetected.second=TheBoardDetector.detect( TheMarkers, TheBoardConfig,TheBoardDetected.first, TheCameraParams,TheMarkerSize);
        //check the speed by calculating the mean speed of all iterations
        //resize the image to the size of the GL window
        cv::resize(TheUndInputImage,TheResizedImage,TheGlWindowSize);
    }
    glutPostRedisplay();
}
int main(int argc, char *argv[])
{
	ros::init (argc, argv, "marker_detect");
	ros::NodeHandle n;
	
	if(argc < 7){
		std::cout << std::endl;
		cout << "Not enough arguments provided." << endl;
		cout << "Usage: ./individualMarkers <marker size in cm> <max new marker error> <max track error> <cam image topic> <cam info topic> <output frame>" << endl;
		std::cout << std::endl;
		return 0;
	}

	// Get params from command line
	marker_size = atof(argv[1]);
	max_new_marker_error = atof(argv[2]);
	max_track_error = atof(argv[3]);
	cam_image_topic = argv[4];
	cam_info_topic = argv[5];
	output_frame = argv[6];
	marker_detector.SetMarkerSize(marker_size);

	cam = new Camera(n, cam_info_topic);
	tf_listener = new tf::TransformListener(n);
	tf_broadcaster = new tf::TransformBroadcaster();
	arMarkerPub_ = n.advertise < ar_track_alvar::AlvarMarkers > ("ar_pose_marker", 0);
	rvizMarkerPub_ = n.advertise < visualization_msgs::Marker > ("visualization_marker", 0);
	
	//Give tf a chance to catch up before the camera callback starts asking for transforms
	ros::Duration(1.0).sleep();
	ros::spinOnce();	
	 
	ROS_INFO ("Subscribing to image topic");
	image_transport::ImageTransport it_(n);
	cam_sub_ = it_.subscribe (cam_image_topic, 1, &getCapCallback);

	ros::spin ();

    return 0;
}
/*!
 *  
 */
static void vIdle()
{
  if (capture)
  {
    //capture image
    vCapturer >> inImg;
    assert(inImg.empty()==false);
    undImg.create(inImg.size(),CV_8UC3);
    //by default, opencv works in BGR, so we must convert to RGB because OpenGL on Windows prefers RGB
//    cv::cvtColor(inImg,inImg,CV_BGR2RGB);
    //remove distortion in image
    cv::undistort(inImg,undImg, camParams.getCamMatrix(),camParams.getDistor());
    //detect markers
    mDetector.detect(undImg,markers,camParams.getCamMatrix(),Mat(),msiz->dval[0]);
    //Detection of the board
    board.second=bDetector.detect(markers, boardConfig,board.first, camParams,msiz->dval[0]);
    //check the speed by calculating the mean speed of all iterations
    //resize the image to the size of the GL window
    cv::resize(undImg, resImg, glSize);
  }
  glutPostRedisplay();
}
Example #21
int main(int argc, char** argv)
{
    try
    {
        CmdLineParser cml(argc, argv);
        if (argc < 2 || cml["-h"])
        {
            cerr << "Invalid number of arguments" << endl;
            cerr << "Usage: (in.avi|live[:camera_index(e.g 0 or 1)]) [-c camera_params.yml] [-s  marker_size_in_meters] [-d "
                    "dictionary:ARUCO by default] [-h]"
                 << endl;
            cerr << "\tDictionaries: ";
            for (auto dict : aruco::Dictionary::getDicTypes())
                cerr << dict << " ";
            cerr << endl;
            cerr << "\t Instead of these, you can directly indicate the path to a file with your own generated "
                    "dictionary"
                 << endl;
            return false;
        }

        ///////////  PARSE ARGUMENTS
        string TheInputVideo = argv[1];
        // read camera parameters if passed
        if (cml["-c"])
            TheCameraParameters.readFromXMLFile(cml("-c"));
        float TheMarkerSize = std::stof(cml("-s", "-1"));
        // aruco::Dictionary::DICT_TYPES  TheDictionary= Dictionary::getTypeFromString( cml("-d","ARUCO") );

        ///////////  OPEN VIDEO
        // read from camera or from  file
        if (TheInputVideo.find("live") != string::npos)
        {
            int vIdx = 0;
            // check if the :idx is here
            char cad[100];
            if (TheInputVideo.find(":") != string::npos)
            {
                std::replace(TheInputVideo.begin(), TheInputVideo.end(), ':', ' ');
                sscanf(TheInputVideo.c_str(), "%s %d", cad, &vIdx);
            }
            cout << "Opening camera index " << vIdx << endl;
            TheVideoCapturer.open(vIdx);
            waitTime = 10;
        }
        else
            TheVideoCapturer.open(TheInputVideo);
        // check video is open
        if (!TheVideoCapturer.isOpened())
            throw std::runtime_error("Could not open video");

        ///// CONFIGURE DATA
        // read first image to get the dimensions
        TheVideoCapturer >> TheInputImage;
        if (TheCameraParameters.isValid())
            TheCameraParameters.resize(TheInputImage.size());
        dictionaryString=cml("-d", "ARUCO");
        MDetector.setDictionary(dictionaryString,float(iCorrectionRate)/10. );  // sets the dictionary to be employed (ARUCO,APRILTAGS,ARTOOLKIT,etc)
        MDetector.setThresholdParams(7, 7);
        MDetector.setThresholdParamRange(2, 0);

        // gui requirements : the trackbars to change this parameters
        iThresParam1 = static_cast<int>(MDetector.getParams()._thresParam1);
        iThresParam2 = static_cast<int>(MDetector.getParams()._thresParam2);
        cv::namedWindow("in");
        cv::createTrackbar("ThresParam1", "in", &iThresParam1, 25, cvTackBarEvents);
        cv::createTrackbar("ThresParam2", "in", &iThresParam2, 13, cvTackBarEvents);
        cv::createTrackbar("correction_rate", "in", &iCorrectionRate, 10, cvTackBarEvents);
        cv::createTrackbar("EnclosedMarkers", "in", &iEnclosedMarkers, 1, cvTackBarEvents);
        cv::createTrackbar("ShowAllCandidates", "in", &iShowAllCandidates, 1, cvTackBarEvents);

        // go!
        char key = 0;
        int index = 0,indexSave=0;
        // capture until press ESC or until the end of the video
        do
        {
            TheVideoCapturer.retrieve(TheInputImage);
            // copy image
            double tick = (double)getTickCount();  // for checking the speed
            // Detection of markers in the image passed
            TheMarkers = MDetector.detect(TheInputImage, TheCameraParameters, TheMarkerSize);
            // check the speed by calculating the mean speed of all iterations
            AvrgTime.first += ((double)getTickCount() - tick) / getTickFrequency();
            AvrgTime.second++;
            cout << "\rTime detection=" << 1000 * AvrgTime.first / AvrgTime.second
                 << " milliseconds nmarkers=" << TheMarkers.size() << std::endl;

            // print marker info and draw the markers in image
            TheInputImage.copyTo(TheInputImageCopy);

            if (iShowAllCandidates){
                auto candidates=MDetector.getCandidates();
                for(auto cand:candidates)
                    Marker(cand,-1).draw(TheInputImageCopy, Scalar(255, 0, 255));
            }

            for (unsigned int i = 0; i < TheMarkers.size(); i++)
            {
                cout << TheMarkers[i] << endl;
                TheMarkers[i].draw(TheInputImageCopy, Scalar(0, 0, 255));
            }

            // draw a 3d cube in each marker if there is 3d info
            if (TheCameraParameters.isValid() && TheMarkerSize > 0)
                for (unsigned int i = 0; i < TheMarkers.size(); i++)
                {
                    CvDrawingUtils::draw3dCube(TheInputImageCopy, TheMarkers[i], TheCameraParameters);
                    CvDrawingUtils::draw3dAxis(TheInputImageCopy, TheMarkers[i], TheCameraParameters);
                }

            // DONE! Easy, right?
            // show input with augmented information and  the thresholded image
            cv::imshow("in", resize(TheInputImageCopy, 1280));
            cv::imshow("thres", resize(MDetector.getThresholdedImage(), 1280));

            key = cv::waitKey(waitTime);  // wait for key to be pressed
            if (key == 's')
                waitTime = waitTime == 0 ? 10 : 0;
            if (key == 'w'){//writes current input image
                string number=std::to_string(indexSave++);
                while(number.size()!=3)number+="0";
                string imname="arucoimage"+number+".png";
                cv::imwrite(imname,TheInputImage);
                cout<<"saved "<<imname<<endl;
            }
            index++;  // number of images captured

        } while (key != 27 && (TheVideoCapturer.grab()));
    }
    catch (std::exception& ex)

    {
        cout << "Exception :" << ex.what() << endl;
    }
}
int main(int argc, char *argv[])
{
  ros::init (argc, argv, "marker_detect");
  ros::NodeHandle n;

  if(argc < 8){
    std::cout << std::endl;
    cout << "Not enough arguments provided." << endl;
    cout << "Usage: ./findMarkerBundles <marker size in cm> <max new marker error> <max track error> <cam image topic> <cam info topic> <output frame> <list of bundle XML files...>" << endl;
    std::cout << std::endl;
    return 0;
  }

  // Get params from command line
  marker_size = atof(argv[1]);
  max_new_marker_error = atof(argv[2]);
  max_track_error = atof(argv[3]);
  cam_image_topic = argv[4];
  cam_info_topic = argv[5];
  output_frame = argv[6];
  int n_args_before_list = 7;
  n_bundles = argc - n_args_before_list;

  marker_detector.SetMarkerSize(marker_size);
  multi_marker_bundles = new MultiMarkerBundle*[n_bundles];	
  bundlePoses = new Pose[n_bundles];
  master_id = new int[n_bundles]; 
  bundle_indices = new std::vector<int>[n_bundles]; 
  bundles_seen = new bool[n_bundles]; 	

  // Load the marker bundle XML files
  for(int i=0; i<n_bundles; i++){	
    bundlePoses[i].Reset();		
    MultiMarker loadHelper;
    if(loadHelper.Load(argv[i + n_args_before_list], FILE_FORMAT_XML)){
      vector<int> id_vector = loadHelper.getIndices();
      multi_marker_bundles[i] = new MultiMarkerBundle(id_vector);	
      multi_marker_bundles[i]->Load(argv[i + n_args_before_list], FILE_FORMAT_XML);
      master_id[i] = multi_marker_bundles[i]->getMasterId();
      bundle_indices[i] = multi_marker_bundles[i]->getIndices();
    }
    else{
      cout<<"Cannot load file "<< argv[i + n_args_before_list] << endl;	
      return 0;
    }		
  }  

  // Set up camera, listeners, and broadcasters
  cam = new Camera(n, cam_info_topic);
  tf_listener = new tf::TransformListener(n);
  tf_broadcaster = new tf::TransformBroadcaster();
  arMarkerPub_ = n.advertise < ar_track_alvar::AlvarMarkers > ("ar_pose_marker", 0);
  rvizMarkerPub_ = n.advertise < visualization_msgs::Marker > ("visualization_marker", 0);
	
  //Give tf a chance to catch up before the camera callback starts asking for transforms
  ros::Duration(1.0).sleep();
  ros::spinOnce();			
	 
  //Subscribe to topics and set up callbacks
  ROS_INFO ("Subscribing to image topic");
  image_transport::ImageTransport it_(n);
  cam_sub_ = it_.subscribe (cam_image_topic, 1, &getCapCallback);

  ros::spin();

  return 0;
}
Example #23
int main(int argc, char **argv) {
    try {
        if (argc < 2) {
            cerr << "Usage: (in.jpg|in.avi) [cameraParams.yml] [markerSize] [outImage]" << endl;
            exit(0);
        }


        aruco::CameraParameters CamParam;
        MarkerDetector MDetector;
        vector< Marker > Markers;
        float MarkerSize = -1;
        // read the input image
        cv::Mat InImage;
        // try opening first as video
        VideoCapture vreader(argv[1]);
        if (vreader.isOpened()) {
            vreader.grab();
            vreader.retrieve(InImage);
        } else {
            InImage = cv::imread(argv[1]);
        }
        // at this point, we should have the image in InImage
        // if empty, exit
        if (InImage.total() == 0) {
            cerr << "Could not open input" << endl;
            return 0;
        }

        // read camera parameters if specified
        if (argc >= 3) {
            CamParam.readFromXMLFile(argv[2]);
            // resizes the parameters to fit the size of the input image
            CamParam.resize(InImage.size());
        }
        // read marker size if specified
        if (argc >= 4)
            MarkerSize = atof(argv[3]);
        cv::namedWindow("in", 1);


        // Ok, let's detect
        MDetector.detect(InImage, Markers, CamParam, MarkerSize);
        // for each marker, draw info and its boundaries in the image
        for (unsigned int i = 0; i < Markers.size(); i++) {
            cout << Markers[i] << endl;
            Markers[i].draw(InImage, Scalar(0, 0, 255), 2);
        }
        // draw a 3d cube in each marker if there is 3d info
        if (CamParam.isValid() && MarkerSize != -1)
            for (unsigned int i = 0; i < Markers.size(); i++) {
                CvDrawingUtils::draw3dCube(InImage, Markers[i], CamParam);
            }
        // show input with augmented information
        cv::imshow("in", InImage);
        // show also the internal image resulting from the threshold operation
        cv::imshow("thes", MDetector.getThresholdedImage());
        cv::waitKey(0); // wait for key to be pressed


        if (argc >= 5)
            cv::imwrite(argv[4], InImage);
    } catch (std::exception &ex)

    {
        cout << "Exception :" << ex.what() << endl;
    }
}
int main(int argc, char** argv)
{
    try
    {
        CmdLineParser cml(argc, argv);
        if (argc < 3 || cml["-h"])
        {
            cerr << "Invalid number of arguments" << endl;
            cerr << "Usage: (in.avi|live) marksetconfig.yml  [optional_arguments] \n\t[-c camera_intrinsics.yml] "
                    "\n\t[-s marker_size] \n\t[-pcd out_pcd_file_with_camera_poses] \n\t[-poses out_file_with_poses] "
                    "\n\t[-corner <corner_refinement_method> (0: LINES(default),1 SUBPIX) ][-h]"
                 << endl;
            return false;
        }
        TheMarkerMapConfig.readFromFile(argv[2]);

        TheMarkerMapConfigFile = argv[2];
        TheMarkerSize = stof(cml("-s", "1"));
        // read from camera or from  file
        if (string(argv[1]) == "live")
        {
            TheVideoCapturer.open(0);
        }
        else
            TheVideoCapturer.open(argv[1]);
        // check video is open
        if (!TheVideoCapturer.isOpened())
            throw std::runtime_error("Could not open video");

        // read first image to get the dimensions
        TheVideoCapturer >> TheInputImage;

        // read camera parameters if passed
        if (cml["-c"])
        {
            TheCameraParameters.readFromXMLFile(cml("-c"));
            TheCameraParameters.resize(TheInputImage.size());
        }
        // prepare the detector
        string dict =
            TheMarkerMapConfig
                .getDictionary();  // see if the dictionary is already indicated in the configuration file. It should be!
        if (dict.empty())
            dict = "ARUCO";
        TheMarkerDetector.setDictionary(
            dict);  /// DO NOT FORGET THAT!!! Otherwise, the ARUCO dictionary will be used by default!
        if (stoi(cml("-corner", "0")) == 0)
            TheMarkerDetector.setCornerRefinementMethod(MarkerDetector::LINES);
        else
        {
            MarkerDetector::Params params = TheMarkerDetector.getParams();
            params._cornerMethod = MarkerDetector::SUBPIX;
            // search corner subpix in a 5x5 widow area
            params._subpix_wsize = static_cast<int>((15.f / 2000.f) * float(TheInputImage.cols));
            TheMarkerDetector.setParams(params);
        }

        // prepare the pose tracker if possible
        // if the camera parameters are available, and the markerset can be expressed in meters, then go

        if (TheMarkerMapConfig.isExpressedInPixels() && TheMarkerSize > 0)
            TheMarkerMapConfig = TheMarkerMapConfig.convertToMeters(TheMarkerSize);
        cout << "TheCameraParameters.isValid()=" << TheCameraParameters.isValid() << " "
             << TheMarkerMapConfig.isExpressedInMeters() << endl;
        if (TheCameraParameters.isValid() && TheMarkerMapConfig.isExpressedInMeters())
            TheMSPoseTracker.setParams(TheCameraParameters, TheMarkerMapConfig);

        // Create gui

        cv::namedWindow("thres", 1);
        cv::namedWindow("in", 1);

        TheMarkerDetector.getThresholdParams(ThresParam1, ThresParam2);
        iThresParam1 = static_cast<int>(ThresParam1);
        iThresParam2 = static_cast<int>(ThresParam2);
        cv::createTrackbar("ThresParam1", "in", &iThresParam1, 13, cvTackBarEvents);
        cv::createTrackbar("ThresParam2", "in", &iThresParam2, 13, cvTackBarEvents);
        char key = 0;
        int index = 0;
        // capture until press ESC or until the end of the video
        cout << "Press 's' to start/stop video" << endl;
        do
        {
            TheVideoCapturer.retrieve(TheInputImage);
            TheInputImage.copyTo(TheInputImageCopy);
            index++;  // number of images captured
            // Detection of the board
            vector<aruco::Marker> detected_markers = TheMarkerDetector.detect(TheInputImage);
            // print the markers detected that belong to the markerset
            for (auto idx : TheMarkerMapConfig.getIndices(detected_markers))
                detected_markers[idx].draw(TheInputImageCopy, Scalar(0, 0, 255), 2);
            // detect 3d info if possible
            if (TheMSPoseTracker.isValid())
            {
                if (TheMSPoseTracker.estimatePose(detected_markers))
                {
                    aruco::CvDrawingUtils::draw3dAxis(TheInputImageCopy, TheCameraParameters,
                                                      TheMSPoseTracker.getRvec(), TheMSPoseTracker.getTvec(),
                                                      TheMarkerMapConfig[0].getMarkerSize() * 2);
                    frame_pose_map.insert(make_pair(index, TheMSPoseTracker.getRTMatrix()));
                    cout << "pose rt=" << TheMSPoseTracker.getRvec() << " " << TheMSPoseTracker.getTvec() << endl;
                }
            }

            // show input with augmented information and  the thresholded image
            cv::imshow("in", TheInputImageCopy);
            cv::imshow("thres", TheMarkerDetector.getThresholdedImage());

            key = cv::waitKey(waitTime);  // wait for key to be pressed
            processKey(key);

        } while (key != 27 && TheVideoCapturer.grab());

        // save a beautiful pcd file (pcl library) showing the results (you can use pcl_viewer to see it)
        if (cml["-pcd"])
        {
            savePCDFile(cml("-pcd"), TheMarkerMapConfig, frame_pose_map);
        }

        // save the poses to a file in tum rgbd data format
        if (cml["-poses"])
        {
            savePosesToFile(cml("-poses"), frame_pose_map);
        }
    }
    catch (std::exception& ex)

    {
        cout << "Exception :" << ex.what() << endl;
    }
}
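// processKey() is called in the capture loop above but not defined in this
// listing. Given the "Press 's' to start/stop video" prompt and the inline
// handling in the earlier video example, a minimal sketch:
void processKey(char k)
{
    if (k == 's')
        waitTime = (waitTime == 0) ? 10 : 0; // toggle pause/play
}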
void getJointPositions(Mat imgOrg, Arm *arm_left, Arm *arm_right, Chest *chest)
{
	// reset found flag from all joints and both arms
	arm_left->resetJ1Found();
	arm_left->resetJ2Found();
	arm_left->resetJ3Found();
	arm_left->resetArmFound();

	arm_right->resetJ1Found();
	arm_right->resetJ2Found();
	arm_right->resetJ3Found();
	arm_right->resetArmFound();

	// Create camera parameters object
	CameraParameters TheCameraParameters;

	// set camera parameters
	Mat dist(1,5,CV_32FC1);
	dist.at<float>(0,0) = -0.0648763971625288;
	dist.at<float>(0,1) = 0.0612520196884308;
	dist.at<float>(0,2) = 0.0038281538281731;
	dist.at<float>(0,3) = -0.00551104078371959;
	dist.at<float>(0,4) = 0.000000;

	Mat cameraP(3,3,CV_32FC1);
	cameraP.at<float>(0,0) = 558.570339530768;
	cameraP.at<float>(0,1) = 0.000000;
	cameraP.at<float>(0,2) = 308.885375457296;
	cameraP.at<float>(1,0) = 0.000000;
	cameraP.at<float>(1,1) = 556.122943034837;
	cameraP.at<float>(1,2) = 247.600724811385;
	cameraP.at<float>(2,0) = 0.000000;
	cameraP.at<float>(2,1) = 0.000000;
	cameraP.at<float>(2,2) = 1.000000;

	TheCameraParameters.setParams(cameraP, dist, CAMERA_RESOLUTION);
	TheCameraParameters.resize(CAMERA_RESOLUTION);

	// create vectors for joints
  Vector3d j1Left, j2Left, j3Left;
	Vector3d j1Right, j2Right, j3Right;

	// create objects for marker
	MarkerDetector MDetector;
  vector<Marker> Markers;

	// set marker settings
	MDetector.setWarpSize(100);
	MDetector.enableLockedCornersMethod(true);
	MDetector.setMinMaxSize(0.01, 0.5);

	// convert image to gray
  Mat imgGray = imgOrg.clone();
  cvtColor(imgGray, imgGray, CV_BGR2GRAY);

	// detect all markers in the picture
  MDetector.detect(imgGray, Markers, TheCameraParameters);

	// go through all found markers
  for(unsigned int i = 0; i < Markers.size(); i++)
  {
			Markers[i].calculateExtrinsics(0.09, TheCameraParameters);
			Markers[i].draw(imgOrg, Scalar(0,0,255), 2);

			// get abs position in picture for every joint
			switch(Markers[i].id)
			{
			case SHOULDER_LEFT:
					// switch x and y axis
					j1Left(0) = Markers[i].Tvec.at<float>(1, 0);
					j1Left(1) = Markers[i].Tvec.at<float>(0, 0);
					j1Left(2) = Markers[i].Tvec.at<float>(2, 0);

					arm_left->setJ1Found();

					if(COUT_JOINT_ABS_POS == ON)
					{
							cout << "abs pos SHOULDER_LEFT: " << j1Left(0) << "\t" << j1Left(1) << "\t" << j1Left(2) << endl;
					}
					break;

			case ELBOW_LEFT:
					// switch x and y axis
					j2Left(0) = Markers[i].Tvec.at<float>(1, 0);
					j2Left(1) = Markers[i].Tvec.at<float>(0, 0);
					j2Left(2) = Markers[i].Tvec.at<float>(2, 0);

					arm_left->setJ2Found();

					if(COUT_JOINT_ABS_POS == ON)
					{
							cout << "abs pos ELBOW_LEFT: " << j2Left(0) << "\t" << j2Left(1) << "\t" << j2Left(2) << endl;
					}
					break;

			case WRIST_LEFT:
					// switch x and y axis
					j3Left(0) = Markers[i].Tvec.at<float>(1, 0);
					j3Left(1) = Markers[i].Tvec.at<float>(0, 0);
					j3Left(2) = Markers[i].Tvec.at<float>(2, 0);

					arm_left->setJ3Found();

					if(COUT_JOINT_ABS_POS == ON)
					{
							cout << "abs pos WRIST_LEFT: " << j3Left(0) << "\t" << j3Left(1) << "\t" << j3Left(2) << endl;
					}
					break;

				case SHOULDER_RIGHT:
						// switch x and y axis
						j1Right(0) = Markers[i].Tvec.at<float>(1, 0);
						j1Right(1) = Markers[i].Tvec.at<float>(0, 0);
						j1Right(2) = Markers[i].Tvec.at<float>(2, 0);

						arm_right->setJ1Found();

						if(COUT_JOINT_ABS_POS == ON)
						{
								cout << "abs pos SHOULDER_RIGHT: " << j1Right(0) << "\t" << j1Right(1) << "\t" << j1Right(2) << endl;
						}
						break;

				case ELBOW_RIGHT:
						// switch x and y axis
						j2Right(0) = Markers[i].Tvec.at<float>(1, 0);
						j2Right(1) = Markers[i].Tvec.at<float>(0, 0);
						j2Right(2) = Markers[i].Tvec.at<float>(2, 0);

						arm_right->setJ2Found();

						if(COUT_JOINT_ABS_POS == ON)
						{
								cout << "abs pos ELBOW_RIGHT: " << j2Right(0) << "\t" << j2Right(1) << "\t" << j2Right(2) << endl;
						}
						break;

				case WRIST_RIGHT:
						// switch x and y axis
						j3Right(0) = Markers[i].Tvec.at<float>(1, 0);
						j3Right(1) = Markers[i].Tvec.at<float>(0, 0);
						j3Right(2) = Markers[i].Tvec.at<float>(2, 0);

						arm_right->setJ3Found();

						if(COUT_JOINT_ABS_POS == ON)
						{
								cout << "abs pos WRIST_RIGHT: " << j3Right(0) << "\t" << j3Right(1) << "\t" << j3Right(2) << endl;
						}
						break;
				}
		}

		// Display camera image with detected markers
		if(SHOW_ARUCO_FOUND_IMG == ON)
	  		cv::imshow("aruco", imgOrg);


		// set initial position for upper body
		if((chest->getInit() == false) && arm_left->getJ1Found() && arm_right->getJ1Found())
		{
				// set bool to true
				chest->setInit();

				// set initial position between shoulders -> both shoulders are needed
				if(arm_left->getJ1Found() && arm_right->getJ1Found())
					chest->setInitPos((-j1Left(2) -j1Right(2))/2);

				if(COUT_CHEST_INIT_POS == ON)
						cout << "chest init pos: " << chest->getInitPos() << endl;
		}


		// check if upper body moved
		if(chest->getInit() && (arm_left->getJ1Found() || arm_right->getJ1Found()))
		{
				// reset bool
				chest->resetTorsoMoved();

				// calculate current chest position
				double chestCurPos;
				if(arm_left->getJ1Found() && arm_right->getJ1Found())
						chestCurPos = (-j1Left(2) -j1Right(2))/2;
				else if(arm_left->getJ1Found())
						chestCurPos = -j1Left(2);
				else if(arm_right->getJ1Found())
						chestCurPos = -j1Right(2);

				if(COUT_CHEST_CUR_POS == ON)
						cout << "current pos: "	<< chestCurPos << endl;


				// calculate distance
				double dist = chest->getInitPos() - chestCurPos;

				if(COUT_CHEST_DIST == ON)
					cout << "chest dist: " << dist << endl;

				// check if distance is greater than threshold
				if(abs(dist) > CHEST_DIST_THRESH)
				{
						// set bool: thresh moved
						chest->setTorsoMoved();

						// calculate angle for torso: sin(angle) = dist / TORSO_LENGTH
						chest->setAngle(asin(dist/TORSO_LENGTH));

						if(COUT_CHEST_ANGLE == ON)
							cout << "angle" << chest->getAngle() << endl;
				}
		}


		// calculate relative position for the left arm
		if(arm_left->getJ1Found() && arm_left->getJ2Found() && arm_left->getJ3Found())
		{
				// calculate distance
				Vector3d d1 = j2Left - j1Left;
				Vector3d d2 = j3Left - j1Left;

				// check min dist
				for(int i = 0; i < 3; i++)
				{
						if(abs(d1(i)) < MIN_DIST)
								d1(i) = 0.0;

						if(abs(d2(i)) < MIN_DIST)
								d2(i) = 0.0;
				}

//				cout << "d1 " << d1(0) << "\t" << d1(1) << "\t" << d1(2) << endl;
//				cout << "d2 " << d2(0) << "\t" << d2(1) << "\t" << d2(2) << endl;

				// set relative coordinates
				j1Left(0) = 0;
				j1Left(1) = 0;
				j1Left(2) = 0;

				j2Left(0) = -d1(2);
				j2Left(1) = d1(1);
				j2Left(2) = -d1(0);

				j3Left(0) = -d2(2);
				j3Left(1) = d2(1);
				j3Left(2) = -d2(0);

				// write into arm
				arm_left->setJ1Coord(j1Left);
				arm_left->setJ2Coord(j2Left);
				arm_left->setJ3Coord(j3Left);

				// set bool that all markers for the left arm have been found
				arm_left->setArmFound();

				if(COUT_JOINT_REL_POS == ON)
				{
						cout << "rel pos SHOULDER_LEFT: " 	<< j1Left(0) << "\t\t" 	<< j1Left(1) << "\t\t" 	<< j1Left(2) << endl;
						cout << "rel pos ELBOW_LEFT:    " 	<< j2Left(0) << "\t" 		<< j2Left(1) << "\t" 		<< j2Left(2) << endl;
						cout << "rel pos WRIST_LEFT:    " 	<< j3Left(0) << "\t" 		<< j3Left(1) << "\t" 		<< j3Left(2) << endl;

						cout << endl << endl;
				}
		}


		// calculate relative position for the right arm
		if(arm_right->getJ1Found() && arm_right->getJ2Found() && arm_right->getJ3Found())
		{
				// calculate distance
				Vector3d d1 = j2Right - j1Right;
				Vector3d d2 = j3Right - j1Right;

				// check min dist
				for(int i = 0; i < 3; i++)
				{
						if(abs(d1(i)) < MIN_DIST)
								d1(i) = 0.0;

						if(abs(d2(i)) < MIN_DIST)
								d2(i) = 0.0;
				}

//				cout << "d1 " << d1(0) << "\t" << d1(1) << "\t" << d1(2) << endl;
//				cout << "d2 " << d2(0) << "\t" << d2(1) << "\t" << d2(2) << endl;

				// set relative coordinates
				j1Right(0) = 0;
				j1Right(1) = 0;
				j1Right(2) = 0;

				j2Right(0) = -d1(2);
				j2Right(1) = d1(1);
				j2Right(2) = -d1(0);

				j3Right(0) = -d2(2);
				j3Right(1) = d2(1);
				j3Right(2) = -d2(0);

				// write into arm
				arm_right->setJ1Coord(j1Right);
				arm_right->setJ2Coord(j2Right);
				arm_right->setJ3Coord(j3Right);

				// set bool that all markers for the right arm have been found
				arm_right->setArmFound();

				if(COUT_JOINT_REL_POS == ON)
				{
						cout << "rel pos SHOULDER_RIGHT: " 	<< j1Right(0) << "\t\t" 	<< j1Right(1) << "\t\t" 	<< j1Right(2) << endl;
						cout << "rel pos ELBOW_RIGHT:    " 	<< j2Right(0) << "\t" 		<< j2Right(1) << "\t" 		<< j2Right(2) << endl;
						cout << "rel pos WRIST_RIGHT:    " 	<< j3Right(0) << "\t" 		<< j3Right(1) << "\t" 		<< j3Right(2) << endl;

						cout << endl << endl;
				}
		}
}
int main(int argc,char **argv)
{
    try
    {
        if (readArguments (argc,argv)==false) {
            return 0;
        }
        //parse arguments
        //read from camera or from  file
        if (TheInputVideo=="live") {
            TheVideoCapturer.open(0);
            waitTime=10;
        }
        else  TheVideoCapturer.open(TheInputVideo);
        //check video is open
        if (!TheVideoCapturer.isOpened()) {
            cerr<<"Could not open video"<<endl;
            return -1;

        }
        

        //read first image to get the dimensions
        TheVideoCapturer>>TheInputImage;

        //read camera parameters if passed
        if (TheIntrinsicFile!="") {
            TheCameraParameters.readFromXMLFile(TheIntrinsicFile);
            TheCameraParameters.resize(TheInputImage.size());
        }
        //Configure other parameters
        if (ThePyrDownLevel>0)
            MDetector.pyrDown(ThePyrDownLevel);

        //begin copy-paste from http://stackoverflow.com/questions/11550021/converting-a-mat-file-from-matlab-into-cvmat-matrix-in-opencv
		Mat oneVect;
		Mat useVecLat;
		Mat someVects;
		Mat zeroYzero;

		string demoFile  = "demo.yml";

		FileStorage fsDemo( demoFile, FileStorage::READ);
		fsDemo["oneVect"] >> oneVect;
		fsDemo["oneVect"] >> useVecLat;
		fsDemo["oneVect"] >> zeroYzero;
		fsDemo["someVects"] >> someVects;

		cout << "Print the contents of oneVect:" << endl;
		cout << oneVect << endl;

		fsDemo.release(); //close the file

		// Declare what you need
		// FileStorage fileOutt("reading_positions.yml", FileStorage::WRITE);

		//end copy-paste from http://stackoverflow.com/questions/11550021/converting-a-mat-file-from-matlab-into-cvmat-matrix-in-opencv

		cout << "an element oneVect:" << endl;
		cout << oneVect.at<float>(0,1) << endl;
		
		// to access the 42 in this YAML:
		
		//oneVect: !!opencv-matrix
		//   rows: 1
		//   cols: 4
		//   dt: f
		//   data: [ 4, 3, 42, 55]
		
		//  do oneVect.at<float>(0,2)


		//end data input



        //Create gui

        cv::namedWindow("thres",1);
        cv::namedWindow("in",1);
        MDetector.getThresholdParams( ThresParam1,ThresParam2);
        MDetector.setCornerRefinementMethod(MarkerDetector::LINES);
        iThresParam1=ThresParam1;
        iThresParam2=ThresParam2;
        cv::createTrackbar("ThresParam1", "in",&iThresParam1, 13, cvTackBarEvents);
        cv::createTrackbar("ThresParam2", "in",&iThresParam2, 13, cvTackBarEvents);

        char key=0;
        int index=0;
        //capture until press ESC or until the end of the video
        while ( key!=27 && TheVideoCapturer.grab())
        {
            TheVideoCapturer.retrieve( TheInputImage);
            //copy image

            index++; //number of images captured
            double tick = (double)getTickCount();//for checking the speed
            //Detection of markers in the image passed
			cout << "q" ;
            MDetector.detect(TheInputImage,TheMarkers,TheCameraParameters,TheMarkerSize);
            //note that this function outputs the marker info, for some reason.
            
            
            //check the speed by calculating the mean speed of all iterations
            AvrgTime.first+=((double)getTickCount()-tick)/getTickFrequency();
            AvrgTime.second++;
            
            //cout<<"Time detection="<<1000*AvrgTime.first/AvrgTime.second<<" milliseconds"<<endl;
            
            
            //print marker info and draw the markers in image
            TheInputImage.copyTo(TheInputImageCopy);
            for (unsigned int i=0;i<TheMarkers.size();i++) {
				cout<<TheMarkers[i].Tvec.at<float>(0,0)<<","<<
					TheMarkers[i].Tvec.at<float>(0,1)<<","<<
					TheMarkers[i].Tvec.at<float>(0,2)<<",";
				
//                cout<<TheMarkers[i]<<endl;
                if (TheMarkers[i].id == 605 && 1 == 0) { // THIS WILL NEVER HAPPEN!!! 1 is not zero.   
					Mat R33;
					cv::Rodrigues(TheMarkers[i].Rvec,R33);
					cout << R33 << endl;
					cout << TheMarkers[i].id << endl;
					
					for (unsigned int maytr=0;maytr<TheMarkers.size();maytr++) {  //take 0 , 1 , 0, inverse transform first, then transform.
						if (TheMarkers[maytr].id == 500) {
							
							
							zeroYzero.at<float>(0,0) = 0;
							zeroYzero.at<float>(0,1) = 2;
							zeroYzero.at<float>(0,2) = 0;
							zeroYzero.at<float>(0,3) = 0;
							zeroYzero.at<float>(0,4) = 0;
							zeroYzero.at<float>(0,5) = 0;
							
							cout << zeroYzero << endl;
							Mat R33for500;
							cout << TheMarkers[maytr].id<< "food"<< endl;
							cv::Rodrigues(TheMarkers[maytr].Rvec,R33for500);
							cout << R33for500 << endl;
							//cout << TheMarkers[i].id << endl;
							
							//R33for500 * R33.t() * zeroYzero; //transpose method
							
							//Mat afterDouble; 
							//cout << TheMarkers[maytr].id<< "water"<< endl;
							//afterDouble = R33for500 * (R33.inv() * zeroYzero); //inversion method
							//R33for500 * (R33.inv() * zeroYzero); //inversion method
							//cout << "shelll"<< endl;
							//cout << afterDouble << endl;
							//useVecLat.at<float>(0,0) = 0;
							//useVecLat.at<float>(0,1) = 0;
							//useVecLat.at<float>(0,2) = 0;
							//useVecLat.at<float>(0,3) = afterDouble.at<float>(0,0);
							//useVecLat.at<float>(0,4) = afterDouble.at<float>(0,1);
							//useVecLat.at<float>(0,5) = afterDouble.at<float>(0,2);
							
							//drawVecAtPos(TheInputImageCopy,TheMarkers[maytr],TheCameraParameters,afterDouble); //oneVect);
						}
					}
				}
                
                TheMarkers[i].draw(TheInputImageCopy,Scalar(0,0,255),1);
                
                //time/date stuff
				std::time_t result = std::time(nullptr);
				std::cout << result; // seconds since the Epoch

				// fileOutt << "time" << result;

				cout << endl;
            }
            
            //print other rectangles that contain no valid markers
       /**     for (unsigned int i=0;i<MDetector.getCandidates().size();i++) {
                aruco::Marker m( MDetector.getCandidates()[i],999);
                m.draw(TheInputImageCopy,cv::Scalar(255,0,0));
            }*/



            //draw a 3d cube in each marker if there is 3d info
            if (  TheCameraParameters.isValid())
                for (unsigned int i=0;i<TheMarkers.size();i++) {
                    
                    CvDrawingUtils::draw3dCube(TheInputImageCopy,TheMarkers[i],TheCameraParameters);
                    //Never use this; just reference // CvDrawingUtils::draw3dAxis(TheInputImageCopy,TheMarkers[i],TheCameraParameters);
                    
                    draw3dAxisj(TheInputImageCopy,TheMarkers[i],TheCameraParameters);
                    drawVecAtPos(TheInputImageCopy,TheMarkers[i],TheCameraParameters,oneVect);
                    drawVecsAtPosTesting(TheInputImageCopy,TheMarkers[i],TheCameraParameters,someVects);
                    						
                }
            //DONE! Easy, right?
            
            
            //show input with augmented information and  the thresholded image
            cv::imshow("in",TheInputImageCopy);
            cv::imshow("thres",MDetector.getThresholdedImage());

            key=cv::waitKey(waitTime);//wait for key to be pressed
        }
        // fileOutt.release();

    } catch (std::exception &ex)

    {
        cout<<"Exception :"<<ex.what()<<endl;
    }

}
JNIEXPORT void JNICALL Java_wrapper_MarkerDetector_JgetThresholdedImage(JNIEnv * env, jobject obj, jlong imAddr){
    MarkerDetector *inst = getHandle<MarkerDetector>(env, obj);
    cv::Mat* img = (cv::Mat*)imAddr;
    inst->getThresholdedImage(*img);
}
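// getHandle is not part of ArUco; it is the usual JNI pattern of stashing a
// native pointer in a Java "long" field. A minimal sketch, assuming the Java
// wrapper stores the pointer in a field named "nativeObj" (both the helper
// and the field name are assumptions):
#include <jni.h>

template <typename T>
T* getHandle(JNIEnv* env, jobject obj)
{
    jclass cls = env->GetObjectClass(obj);
    jfieldID fid = env->GetFieldID(cls, "nativeObj", "J"); // "J" = Java long
    jlong handle = env->GetLongField(obj, fid);
    return reinterpret_cast<T*>(handle);
}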
  void image_callback(const sensor_msgs::ImageConstPtr& msg)
  {
    //        double ticksBefore = cv::getTickCount();
    static tf::TransformBroadcaster br;
    if(cam_info_received)
    {
      cv_bridge::CvImagePtr cv_ptr;
      try
      {
        cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::RGB8);
        inImage = cv_ptr->image;

        //detection results will go into "markers"
        markers.clear();
        //Ok, let's detect
        mDetector.detect(inImage, markers, camParam, marker_size, false);
        //for each marker, draw info and its boundaries in the image
        for(size_t i=0; i<markers.size(); ++i)
        {
          // only publishing the selected marker
          if(markers[i].id == marker_id)
          {
            tf::Transform transform = aruco_ros::arucoMarker2Tf(markers[i]);
            tf::StampedTransform stampedTransform(transform, ros::Time::now(),
                                                  parent_name, child_name);
            br.sendTransform(stampedTransform);
            geometry_msgs::PoseStamped poseMsg;
            tf::poseTFToMsg(transform, poseMsg.pose);
            poseMsg.header.frame_id = parent_name;
            poseMsg.header.stamp = ros::Time::now();
            pose_pub.publish(poseMsg);
          }
          // but drawing all the detected markers
          markers[i].draw(inImage,Scalar(0,0,255),2);
        }

        //draw a 3d cube in each marker if there is 3d info
        if(camParam.isValid() && marker_size!=-1)
        {
          for(size_t i=0; i<markers.size(); ++i)
          {
            //CvDrawingUtils::draw3dCube(inImage, markers[i], camParam);
            CvDrawingUtils::draw3dAxis(inImage, markers[i], camParam);
          }
        }

        if(image_pub.getNumSubscribers() > 0)
        {
          //show input with augmented information
          cv_bridge::CvImage out_msg;
          out_msg.header.stamp = ros::Time::now();
          out_msg.encoding = sensor_msgs::image_encodings::TYPE_8UC3;
          out_msg.image = inImage;
          image_pub.publish(out_msg.toImageMsg());
        }

        if(debug_pub.getNumSubscribers() > 0)
        {
          //show also the internal image resulting from the threshold operation
          cv_bridge::CvImage debug_msg;
          debug_msg.header.stamp = ros::Time::now();
          debug_msg.encoding = sensor_msgs::image_encodings::TYPE_8UC1;
          debug_msg.image = mDetector.getThresholdedImage();
          debug_pub.publish(debug_msg.toImageMsg());
        }

        //            ROS_INFO("runtime: %f ms",
        //                     1000*(cv::getTickCount() - ticksBefore)/cv::getTickFrequency());
      }
      catch (cv_bridge::Exception& e)
      {
        ROS_ERROR("cv_bridge exception: %s", e.what());
        return;
      }
    }
  }
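  // aruco_ros::arucoMarker2Tf converts the OpenCV-style pose stored in an
  // aruco::Marker (Rvec/Tvec) into a tf::Transform. A simplified sketch of
  // that conversion (the real aruco_ros helper may additionally re-orient
  // between the OpenCV optical frame and the ROS camera convention):
  #include <opencv2/calib3d.hpp>
  #include <tf/transform_datatypes.h>
  #include <aruco/aruco.h>

  tf::Transform markerToTf(const aruco::Marker& marker)
  {
      cv::Mat rvec64, tvec64, rot64;
      marker.Rvec.convertTo(rvec64, CV_64FC1);
      marker.Tvec.convertTo(tvec64, CV_64FC1);
      cv::Rodrigues(rvec64, rot64); // 3x1 rotation vector -> 3x3 matrix

      tf::Matrix3x3 tfRot(rot64.at<double>(0,0), rot64.at<double>(0,1), rot64.at<double>(0,2),
                          rot64.at<double>(1,0), rot64.at<double>(1,1), rot64.at<double>(1,2),
                          rot64.at<double>(2,0), rot64.at<double>(2,1), rot64.at<double>(2,2));
      tf::Vector3 tfTrans(tvec64.at<double>(0), tvec64.at<double>(1), tvec64.at<double>(2));
      return tf::Transform(tfRot, tfTrans);
  }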
  void image_callback(const sensor_msgs::ImageConstPtr& msg)
  {
    static tf::TransformBroadcaster br;

    
    if(cam_info_received)
    {
      ros::Time curr_stamp(ros::Time::now());
      cv_bridge::CvImagePtr cv_ptr;
      try
      {
        cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::RGB8);
        inImage = cv_ptr->image;

        //detection results will go into "markers"
        markers.clear();
        //Ok, let's detect
        mDetector.detect(inImage, markers, camParam, marker_size, false);

        //ROS_INFO("x: %0.3f y: %0.3f z: %0.3f", markers[0].Tvec.at<float>(0,0), markers[0].Tvec.at<float>(0,1), markers[0].Tvec.at<float>(0,2));
        //for each marker, draw info and its boundaries in the image
        for(size_t i=0; i<markers.size(); ++i)
        {
          // originally only the selected marker was published:
          //if(markers[i].id == marker_id)
          // instead, publish pose messages for every marker on this whitelist
          if(markers[i].id == 26 || markers[i].id == 35 || markers[i].id == 58 ||
             markers[i].id == 163 || markers[i].id == 640 || markers[i].id == 512 ||
             markers[i].id == 43 || markers[i].id == 291 || markers[i].id == 355)
          {
            // by Weiwei
            //ROS_INFO("x: %0.3f y: %0.3f z: %0.3f", markers[i].Tvec.at<float>(0,0), markers[i].Tvec.at<float>(0,1), markers[i].Tvec.at<float>(0,2));

            tf::Transform transform = aruco_ros::arucoMarker2Tf(markers[i]);

            tf::StampedTransform cameraToReference;
            cameraToReference.setIdentity();

            if ( reference_frame != camera_frame )
            {
              getTransform(reference_frame,
                           camera_frame,
                           cameraToReference);
            }

            transform = 
              static_cast<tf::Transform>(cameraToReference) 
              * static_cast<tf::Transform>(rightToLeft) 
              * transform;

            tf::StampedTransform stampedTransform(transform, curr_stamp,
                                                  reference_frame, marker_frame);
            br.sendTransform(stampedTransform);
            geometry_msgs::PoseStamped poseMsg;
            tf::poseTFToMsg(transform, poseMsg.pose);
            poseMsg.header.frame_id = reference_frame;
            poseMsg.header.stamp = curr_stamp;


            double aruco_roll_, aruco_pitch_, aruco_yaw_;
            tf::Quaternion aruco_quat_;
            tf::quaternionMsgToTF(poseMsg.pose.orientation, aruco_quat_);
            tf::Matrix3x3(aruco_quat_).getRPY(aruco_roll_, aruco_pitch_, aruco_yaw_);

            //ROS_INFO("April Tag RPY: [%0.3f, %0.3f, %0.3f]", aruco_roll_*180/3.1415926, aruco_pitch_*180/3.1415926, aruco_yaw_*180/3.1415926);
//-------------------------unified coordinate systems of pose----------------------------
            aruco_yaw_ = aruco_yaw_*180/3.141592;
            printf("Original [x, y, yaw] = [%0.3f, %0.3f, %0.3f]\n", poseMsg.pose.position.x, poseMsg.pose.position.y, aruco_yaw_);

            if (markers[i].id == 26 || markers[i].id == 58)
            {
              const double ang = M_PI * 3 / 4;  // fixed yaw offset for these two markers
              const double x_0 = -0.045;        // previously -0.015
              const double y_0 = -0.015;        // previously 0.045

              // pure translation into the common frame (a rotation term
              // x*cos(-ang) - y*sin(-ang) etc. was tried and left disabled)
              poseMsg.pose.position.x = x_0 + poseMsg.pose.position.x;
              poseMsg.pose.position.y = y_0 + poseMsg.pose.position.y;

              aruco_yaw_ = aruco_yaw_ + ang * 180.0 / M_PI;  // still in degrees
              printf("[x, y, yaw] = [%0.3f, %0.3f, %0.3f]\n", poseMsg.pose.position.x, poseMsg.pose.position.y, aruco_yaw_);
            }

            //printf("------------------------------------------------------------------\n-----------------aruco_yaw_ = %0.3f\n", aruco_yaw_);
            //printf("------------------------------------------------------------------\n");

            double temp_x = poseMsg.pose.position.x;
            double temp_y = poseMsg.pose.position.y;

            poseMsg.pose.position.x = -temp_y;
            poseMsg.pose.position.y = -temp_x;

            tf::Quaternion quat = tf::createQuaternionFromRPY(aruco_roll_, aruco_pitch_, aruco_yaw_);
            
            poseMsg.pose.orientation.x = quat.x();
            poseMsg.pose.orientation.y = quat.y();
            poseMsg.pose.orientation.z = quat.z();
            poseMsg.pose.orientation.w = quat.w();   

            pose_pub.publish(poseMsg);


            geometry_msgs::TransformStamped transformMsg;
            tf::transformStampedTFToMsg(stampedTransform, transformMsg);
            transform_pub.publish(transformMsg);

            geometry_msgs::Vector3Stamped positionMsg;
            positionMsg.header = transformMsg.header;
            positionMsg.vector = transformMsg.transform.translation;
            position_pub.publish(positionMsg);
          }
          // but drawing all the detected markers
          markers[i].draw(inImage,cv::Scalar(0,0,255),2);
        }

        //draw a 3d cube in each marker if there is 3d info
        if(camParam.isValid() && marker_size!=-1)
        {
          for(size_t i=0; i<markers.size(); ++i)
          {
            CvDrawingUtils::draw3dAxis(inImage, markers[i], camParam);
          }
        }

        if(image_pub.getNumSubscribers() > 0)
        {
          //show input with augmented information
          cv_bridge::CvImage out_msg;
          out_msg.header.stamp = curr_stamp;
          out_msg.encoding = sensor_msgs::image_encodings::RGB8;
          out_msg.image = inImage;
          image_pub.publish(out_msg.toImageMsg());
        }

        if(debug_pub.getNumSubscribers() > 0)
        {
          //show also the internal image resulting from the threshold operation
          cv_bridge::CvImage debug_msg;
          debug_msg.header.stamp = curr_stamp;
          debug_msg.encoding = sensor_msgs::image_encodings::MONO8;
          debug_msg.image = mDetector.getThresholdedImage();
          debug_pub.publish(debug_msg.toImageMsg());
        }
      }
      catch (cv_bridge::Exception& e)
      {
        ROS_ERROR("cv_bridge exception: %s", e.what());
        return;
      }
    }
  }
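  // The unification step above mixes degrees (for printing) and radians (for
  // tf); a compact sketch of the same per-marker re-mapping, kept in radians
  // throughout (Pose2D and unifyMarkerPose are hypothetical names, not part
  // of aruco_ros):
  #include <cmath>

  struct Pose2D { double x, y, yaw; };  // yaw in radians

  Pose2D unifyMarkerPose(const Pose2D& raw, double x0, double y0, double yawOffset)
  {
      Pose2D out;
      // apply the per-marker translation offset, then swap and negate the axes,
      // exactly as the callback above does for markers 26 and 58
      out.x = -(raw.y + y0);
      out.y = -(raw.x + x0);
      out.yaw = raw.yaw + yawOffset;  // e.g. 3*M_PI/4 for markers 26 and 58
      return out;
  }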
    void image_callback(const sensor_msgs::ImageConstPtr& msg)
    {
        static tf::TransformBroadcaster br;
        if(cam_info_received)
        {
            cv_bridge::CvImagePtr cv_ptr;
            try
            {
                cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::RGB8);
                inImage = cv_ptr->image;

                //detection results will go into "markers"
                markers.clear();
                //Ok, let's detect
                mDetector.detect(inImage, markers, camParam, marker_size, false);
                //for each marker, draw info and its boundaries in the image
                for(size_t i=0; i<markers.size(); ++i)
                {
                    // only publishing the selected marker
                    if(markers[i].id == marker_id)
                    {
                        tf::Transform transform = aruco_ros::arucoMarker2Tf(markers[i]);
                        tf::StampedTransform cameraToReference;
                        cameraToReference.setIdentity();

                        if ( reference_frame != camera_frame )
                        {
                            getTransform(reference_frame,
                                         camera_frame,
                                         cameraToReference);
                        }

                        transform =
                            static_cast<tf::Transform>(cameraToReference)
                            * static_cast<tf::Transform>(rightToLeft)
                            * transform;

                        tf::StampedTransform stampedTransform(transform, ros::Time::now(),
                                                              reference_frame, marker_frame);
                        br.sendTransform(stampedTransform);
                        geometry_msgs::PoseStamped poseMsg;
                        tf::poseTFToMsg(transform, poseMsg.pose);
                        poseMsg.header.frame_id = reference_frame;
                        poseMsg.header.stamp = ros::Time::now();
                        pose_pub.publish(poseMsg);

                        geometry_msgs::TransformStamped transformMsg;
                        tf::transformStampedTFToMsg(stampedTransform, transformMsg);
                        transform_pub.publish(transformMsg);

                        geometry_msgs::Vector3Stamped positionMsg;
                        positionMsg.header = transformMsg.header;
                        positionMsg.vector = transformMsg.transform.translation;
                        position_pub.publish(positionMsg);
                    }
                    // but drawing all the detected markers
                    markers[i].draw(inImage,cv::Scalar(0,0,255),2);
                }

                //draw a 3d cube in each marker if there is 3d info
                if(camParam.isValid() && marker_size!=-1)
                {
                    for(size_t i=0; i<markers.size(); ++i)
                    {
                        CvDrawingUtils::draw3dAxis(inImage, markers[i], camParam);
                    }
                }

                if(image_pub.getNumSubscribers() > 0)
                {
                    //show input with augmented information
                    cv_bridge::CvImage out_msg;
                    out_msg.header.stamp = ros::Time::now();
                    out_msg.encoding = sensor_msgs::image_encodings::RGB8;
                    out_msg.image = inImage;
                    image_pub.publish(out_msg.toImageMsg());
                }

                if(debug_pub.getNumSubscribers() > 0)
                {
                    //show also the internal image resulting from the threshold operation
                    cv_bridge::CvImage debug_msg;
                    debug_msg.header.stamp = ros::Time::now();
                    debug_msg.encoding = sensor_msgs::image_encodings::MONO8;
                    debug_msg.image = mDetector.getThresholdedImage();
                    debug_pub.publish(debug_msg.toImageMsg());
                }
            }
            catch (cv_bridge::Exception& e)
            {
                ROS_ERROR("cv_bridge exception: %s", e.what());
                return;
            }
        }
    }
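    // The getTransform helper used by the callbacks above is not shown; a
    // plausible sketch using a tf listener (the listener variable and the
    // exact timeout values are assumptions):
    #include <tf/transform_listener.h>

    static tf::TransformListener* _tfListener; // assumed initialized elsewhere

    bool getTransform(const std::string& refFrame, const std::string& childFrame,
                      tf::StampedTransform& transform)
    {
        std::string errMsg;
        // wait briefly for the transform to become available, then look it up
        if (!_tfListener->waitForTransform(refFrame, childFrame, ros::Time(0),
                                           ros::Duration(0.5), ros::Duration(0.01), &errMsg))
        {
            ROS_ERROR_STREAM("Unable to get transform: " << errMsg);
            return false;
        }
        try
        {
            _tfListener->lookupTransform(refFrame, childFrame, ros::Time(0), transform);
        }
        catch (const tf::TransformException& e)
        {
            ROS_ERROR_STREAM("lookupTransform failed: " << e.what());
            return false;
        }
        return true;
    }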