Example #1
int main(int argc,char **argv)
{
    try
    {
        if (argc!=2) {
            cerr<<"Usage: in.jpg "<<endl;
            return -1;
        }
        MarkerDetector MDetector;
        vector<Marker> Markers;
        //read the input image
        cv::Mat InImage;
        InImage=cv::imread(argv[1]);
        //Ok, let's detect
        MDetector.detect(InImage,Markers);
        //for each marker, draw info and its boundaries in the image
        for (unsigned int i=0; i<Markers.size(); i++) {
            cout<<Markers[i]<<endl;
            cout<<Markers[i][0].x<<endl; //x coordinate of the marker's first corner
            //Tvec is only filled in when camera parameters and a marker size are passed to detect()
            if (Markers[i].Tvec.total()==3)
                cout<<Markers[i].Tvec.at<float>(0,0)<<endl; //x value of Tvec; printing Markers[i].Tvec streams the whole vector
            Markers[i].draw(InImage,Scalar(0,0,255),2);
        }
        cv::imshow("in",InImage);
        cv::waitKey(0);//wait for key to be pressed
    } catch (std::exception &ex)
    {
        cout<<"Exception :"<<ex.what()<<endl;
    }

    return 0;
}
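Note that in Example #1 detect() is called without camera parameters, so Rvec/Tvec are never computed. A minimal sketch of the pose-enabled variant, assuming a calibration file "camera.yml" and a 0.05 m marker side (both placeholder values):

#include <iostream>
#include <opencv2/opencv.hpp>
#include <aruco/aruco.h>
using namespace std;

int main()
{
    cv::Mat InImage = cv::imread("in.jpg");
    aruco::CameraParameters CamParam;
    CamParam.readFromXMLFile("camera.yml"); // intrinsics from aruco calibration (placeholder file)
    CamParam.resize(InImage.size());        // match the input resolution
    aruco::MarkerDetector MDetector;
    vector<aruco::Marker> Markers;
    MDetector.detect(InImage, Markers, CamParam, 0.05f /*marker side in meters, placeholder*/);
    for (unsigned int i = 0; i < Markers.size(); i++)
        cout << Markers[i].id << " Tvec=" << Markers[i].Tvec.t() << endl;
    return 0;
}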
JNIEXPORT void JNICALL
Java_cz_email_michalchomo_cardboardkeyboard_MainActivity_FindFeatures(JNIEnv *env, jobject instance,
                                                                      jlong matAddrGr,
                                                                      jlong matAddrRgba) {

    Mat &mGr = *(Mat *) matAddrGr;
    Mat &mRgb = *(Mat *) matAddrRgba;
    //vector<KeyPoint> v;
    MarkerDetector markerDetector;
    vector<Marker> markers;

    markerDetector.detect(mGr, markers);
    for (unsigned int i=0; i < markers.size(); i++) {
        cout << markers[i] << endl;
        markers[i].draw(mRgb,Scalar(0,0,255),2);
    }

    /*Ptr<FeatureDetector> detector = FastFeatureDetector::create(50);
    detector->detect(mGr, v);
    for (unsigned int i = 0; i < v.size(); i++) {
        const KeyPoint &kp = v[i];
        circle(mRgb, Point(kp.pt.x, kp.pt.y), 10, Scalar(255, 0, 0, 255));
    }*/

}
void cvTackBarEvents(int pos,void*)
{
    if (iThresParam1<3) iThresParam1=3;
    if (iThresParam1%2!=1) iThresParam1++;
    if (iThresParam2<1) iThresParam2=1;
    ThresParam1=iThresParam1;
    ThresParam2=iThresParam2;
    MDetector.setThresholdParams(ThresParam1,ThresParam2);
//recompute
    MDetector.detect(TheInputImage,TheMarkers,TheCameraParameters);
    TheInputImage.copyTo(TheInputImageCopy);
    for (unsigned int i=0;i<TheMarkers.size();i++)	TheMarkers[i].draw(TheInputImageCopy,Scalar(0,0,255),1);
    //print other rectangles that contain no valid markers
    /*for (unsigned int i=0;i<MDetector.getCandidates().size();i++) {
        aruco::Marker m( MDetector.getCandidates()[i],999);
        m.draw(TheInputImageCopy,cv::Scalar(255,0,0));
    }*/

//draw a 3d cube in each marker if there is 3d info
    if (TheCameraParameters.isValid())
        for (unsigned int i=0;i<TheMarkers.size();i++)
            CvDrawingUtils::draw3dCube(TheInputImageCopy,TheMarkers[i],TheCameraParameters);

    cv::imshow("in",TheInputImageCopy);
    cv::imshow("thres",MDetector.getThresholdedImage());
}
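This callback reads and writes file-scope globals and is meant to be registered on OpenCV trackbars; the wiring below is the GUI setup used later on this page (Example #15 and the marker-map main):

cv::namedWindow("thres",1);
cv::namedWindow("in",1);
MDetector.getThresholdParams(ThresParam1, ThresParam2);
iThresParam1 = (int)ThresParam1;
iThresParam2 = (int)ThresParam2;
cv::createTrackbar("ThresParam1", "in", &iThresParam1, 13, cvTackBarEvents);
cv::createTrackbar("ThresParam2", "in", &iThresParam2, 13, cvTackBarEvents);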
void cvTackBarEvents(int pos, void*)
{
    (void)(pos);
    if (iThresParam1 < 3)
        iThresParam1 = 3;
    if (iThresParam1 % 2 != 1)
        iThresParam1++;
    if (ThresParam2 < 1)
        ThresParam2 = 1;
    ThresParam1 = iThresParam1;
    ThresParam2 = iThresParam2;
    TheMarkerDetector.setThresholdParams(ThresParam1, ThresParam2);

    // detect, print, get pose, and print

    // detect
    vector<aruco::Marker> detected_markers = TheMarkerDetector.detect(TheInputImage);
    // print the markers detected that belong to the markerset
    for (auto idx : TheMarkerMapConfig.getIndices(detected_markers))
        detected_markers[idx].draw(TheInputImageCopy, Scalar(0, 0, 255), 2);
    // detect 3d info if possible
    if (TheMSPoseTracker.isValid())
    {
        TheMSPoseTracker.estimatePose(detected_markers);
        aruco::CvDrawingUtils::draw3dAxis(TheInputImageCopy, TheCameraParameters, TheMSPoseTracker.getRvec(),
                                          TheMSPoseTracker.getTvec(), TheMarkerMapConfig[0].getMarkerSize() * 2);
    }

    cv::imshow("in", TheInputImageCopy);
    cv::imshow("thres", TheMarkerDetector.getThresholdedImage());
}
void vIdle() {
    if (TheCaptureFlag) {
        // capture image
        TheVideoCapturer.grab();
        TheVideoCapturer.retrieve(TheInputImage);
        TheUndInputImage.create(TheInputImage.size(), CV_8UC3);
        // by default, OpenCV works in BGR, so we must convert to RGB because OpenGL on Windows prefers it
        cv::cvtColor(TheInputImage, TheInputImage, CV_BGR2RGB);
        // remove distortion in image
        cv::undistort(TheInputImage, TheUndInputImage, TheCameraParams.CameraMatrix,
                      TheCameraParams.Distorsion);
        // detect markers
        MDetector.detect(TheUndInputImage, TheMarkers);
        // Detection of the board
        TheBoardDetected.second = TheBoardDetector.detect(
            TheMarkers, TheBoardConfig, TheBoardDetected.first, TheCameraParams, TheMarkerSize);
        // check the speed by calculating the mean speed of all iterations
        // resize the image to the size of the GL window
        cv::resize(TheUndInputImage, TheResizedImage, TheGlWindowSize);
        // create mask. It is a syntetic mask consisting of a simple rectangle, just to show a example of
        // opengl with mask
        TheMask = createSyntheticMask(TheResizedImage); // lets create with the same size of the resized
                                                        // image, i.e. the size of the opengl window
    }
    glutPostRedisplay();
}
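vIdle is a GLUT idle callback; a sketch of the setup that would drive it (the vDrawScene display callback and the window title are assumed names, not part of the sample):

glutInit(&argc, argv);
glutInitDisplayMode(GLUT_RGB | GLUT_DEPTH | GLUT_DOUBLE);
glutInitWindowSize(TheGlWindowSize.width, TheGlWindowSize.height);
glutCreateWindow("aruco");
glutDisplayFunc(vDrawScene); // assumed render callback
glutIdleFunc(vIdle);         // grab + detect between frames
glutMainLoop();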
int main(int argc,char **argv)
{
	try
	{
		if(argc<3) {cerr<<"Usage: image  boardConfig.yml [cameraParams.yml] [markerSize]  [outImage]"<<endl;exit(0);}
		aruco::CameraParameters CamParam;
		MarkerDetector MDetector;
		vector<aruco::Marker> Markers;
		float MarkerSize=-1;
		BoardConfiguration TheBoardConfig;
		BoardDetector TheBoardDetector;
		Board TheBoardDetected;
		
		cv::Mat InImage=cv::imread(argv[1]);
		TheBoardConfig.readFromFile(argv[2]);
		if (argc>=4) {
		  CamParam.readFromXMLFile(argv[3]);
		  //resizes the parameters to fit the size of the input image
		  CamParam.resize( InImage.size());
		}

		if (argc>=5) 
		  MarkerSize=atof(argv[4]);
		
		cv::namedWindow("in",1);
		MDetector.detect(InImage,Markers);//detect markers without computing R and T information
		//Detection of the board
		float probDetect=TheBoardDetector.detect( Markers, TheBoardConfig,TheBoardDetected, CamParam,MarkerSize);
		
		//for each marker, draw info and its boundaries in the image
		for(unsigned int i=0;i<Markers.size();i++){
			cout<<Markers[i]<<endl;
			Markers[i].draw(InImage,Scalar(0,0,255),2);
		}

		//draw a 3d cube in each marker if there is 3d info
		if (  CamParam.isValid()){
 		  for(unsigned int i=0;i<Markers.size();i++){
 		    CvDrawingUtils::draw3dCube(InImage,Markers[i],CamParam);
 		    CvDrawingUtils::draw3dAxis(InImage,Markers[i],CamParam);
 		  }
		  CvDrawingUtils::draw3dAxis(InImage,TheBoardDetected,CamParam);
		  cout<<TheBoardDetected.Rvec<<" "<<TheBoardDetected.Tvec<<endl;
		}
		//draw board axis
		
		//show input with augmented information
		cv::imshow("in",InImage);
		cv::waitKey(0);//wait for key to be pressed
		if(argc>=6) cv::imwrite(argv[5],InImage);
		
	} catch (std::exception &ex) {
		cout<<"Exception :"<<ex.what()<<endl;
	}

}
Example #7
//This function is called every time a new image is published
void imageCallback(const sensor_msgs::ImageConstPtr& original_image)
{
    //Convert from the ROS image message to a CvImage suitable for working with OpenCV for processing
    cv_bridge::CvImageConstPtr cv_ptr;
    try
    {
        //Always copy, returning a mutable CvImage
        //OpenCV routines expect BGR channel order by default; this example requests RGB8 instead.
        //cv_ptr = cv_bridge::toCvShare(original_image, "");
        cv_ptr = cv_bridge::toCvCopy(original_image, enc::RGB8);
    }
    catch (cv_bridge::Exception& e)
    {
        //if there is an error during conversion, display it
        ROS_ERROR("main.cpp::cv_bridge exception: %s", e.what());
        return;
    }
    
    try
    {
        MarkerDetector MDetector;
        vector<Marker> Markers;
        //read the input image
        cv::Mat InImage;
        InImage = cv_ptr->image;
		//Ok, let's detect
        MDetector.detect(InImage,Markers);
        //for each marker, draw info and its boundaries in the image
        for (unsigned int i=0;i<Markers.size();i++) {
            //cout<<Markers[i]<<endl;
            Markers[i].draw(InImage,Scalar(0,0,255),2);
        }
        cv::imshow(WINDOW,InImage);
        //Add some delay in milliseconds
        cv::waitKey(3);
    } catch (std::exception &ex)
    {
        ROS_ERROR("main.cpp::aruco exception: %s", ex.what());
    }
    /**
    * The publish() function is how you send messages. The parameter
    * is the message object. The type of this object must agree with the type
    * given as a template parameter to the advertise<>() call, as was done
    * in the constructor in main().
    */
    //Convert the CvImage to a ROS image message and publish it on the "camera/image_processed" topic.
    pub.publish(cv_ptr->toImageMsg());
 }
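A sketch of the node setup that would feed imageCallback, assuming pub is a file-scope image_transport::Publisher as in the ROS image-processing tutorial this example follows (node and topic names are placeholders):

int main(int argc, char** argv)
{
    ros::init(argc, argv, "image_processor");
    ros::NodeHandle nh;
    cv::namedWindow(WINDOW);
    image_transport::ImageTransport it(nh);
    image_transport::Subscriber sub = it.subscribe("camera/image_raw", 1, imageCallback);
    pub = it.advertise("camera/image_processed", 1);
    ros::spin();
    cv::destroyWindow(WINDOW);
    return 0;
}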
Example #8
void cvTackBarEvents(int pos, void*)
{
    (void)(pos);
    if (iThresParam1 < 3)
        iThresParam1 = 3;
    if (iThresParam1 % 2 != 1)
        iThresParam1++;
    if (iThresParam2 < 1)
        iThresParam2 = 1;
    MDetector.setThresholdParams(iThresParam1, iThresParam2);

    if (iEnclosedMarkers){
        auto params=MDetector.getParams();
        params._doErosion=true;
         params._cornerMethod=aruco::MarkerDetector::SUBPIX;
        MDetector.setParams(params);
    }
    else{
        auto params=MDetector.getParams();
        params._doErosion=false;
         params._cornerMethod=aruco::MarkerDetector::LINES;
        MDetector.setParams(params);
    }

    MDetector.setDictionary(dictionaryString,float(iCorrectionRate)/10. );  // sets the dictionary to be employed (ARUCO,APRILTAGS,ARTOOLKIT,etc)

    // recompute
    MDetector.detect(TheInputImage, TheMarkers, TheCameraParameters);
    TheInputImage.copyTo(TheInputImageCopy);
    if (iShowAllCandidates){
        auto candidates=MDetector.getCandidates();
        for(auto cand:candidates)
            Marker(cand,-1).draw(TheInputImageCopy, Scalar(255, 0, 255));
    }

    for (unsigned int i = 0; i < TheMarkers.size(); i++)
        TheMarkers[i].draw(TheInputImageCopy, Scalar(0, 0, 255));

    // draw a 3d cube in each marker if there is 3d info
    if (TheCameraParameters.isValid())
        for (unsigned int i = 0; i < TheMarkers.size(); i++)
            CvDrawingUtils::draw3dCube(TheInputImageCopy, TheMarkers[i], TheCameraParameters);

    cv::imshow("in", resize(TheInputImageCopy, 1280));
    cv::imshow("thres", resize(MDetector.getThresholdedImage(), 1280));
}
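The resize() in the two imshow calls above is not cv::resize (the signature differs); a sketch of the helper it appears to be, assuming an aspect-preserving clamp to a maximum width:

cv::Mat resize(const cv::Mat& in, int width)
{
    if (in.cols <= width)
        return in; // never upscale
    double scale = double(width) / in.cols;
    cv::Mat out;
    cv::resize(in, out, cv::Size(), scale, scale); // keep aspect ratio
    return out;
}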
void vIdle()
{
    if (TheCaptureFlag) {
        //capture image
        TheVideoCapturer.grab();
        TheVideoCapturer.retrieve( TheInputImage);
        TheUndInputImage.create(TheInputImage.size(),CV_8UC3);
        //convert the default BGR color order to RGB, since OpenGL on Windows does not read BGR images properly
        cv::cvtColor(TheInputImage,TheInputImage,CV_BGR2RGB);
        //remove distortion in image
        cv::undistort(TheInputImage,TheUndInputImage, TheCameraParams.CameraMatrix, TheCameraParams.Distorsion);
        //detect markers
        PPDetector.detect(TheUndInputImage,TheMarkers, TheCameraParams.CameraMatrix,Mat(),TheMarkerSize,false);
        //resize the image to the size of the GL window
        cv::resize(TheUndInputImage,TheResizedImage,TheGlWindowSize);
    }
    glutPostRedisplay();
}
Example #10
/*!
 *  
 */
static void vIdle()
{
  if (capture)
  {
    //capture image
    vCapturer.grab();
    vCapturer.retrieve( inImg);
    undImg.create(inImg.size(),CV_8UC3);
    //convert the default BGR color order to RGB, since OpenGL on Windows
    //does not read BGR images properly
//    cv::cvtColor(inImg,inImg,CV_BGR2RGB);
    //remove distortion in image
    cv::undistort(inImg,undImg, camParams.getCamMatrix(), camParams.getDistor());
    //detect markers
    mDetector.detect(undImg,markers, camParams.getCamMatrix(),Mat(),msiz->dval[0]);
    //resize the image to the size of the GL window
    cv::resize(undImg,resImg,glSize);
  }
  glutPostRedisplay();
}
void vIdle()
{
    if (TheCaptureFlag) {
        //capture image
        TheVideoCapturer.grab();
        TheVideoCapturer.retrieve( TheInputImage);
        TheUndInputImage.create(TheInputImage.size(),CV_8UC3);
        //by default, OpenCV works in BGR, so we must convert to RGB because OpenGL on Windows prefers it
        cv::cvtColor(TheInputImage,TheInputImage,CV_BGR2RGB);
        //remove distortion in image
        cv::undistort(TheInputImage,TheUndInputImage, TheCameraParams.CameraMatrix,TheCameraParams.Distorsion);
        //detect markers
        MDetector.detect(TheUndInputImage,TheMarkers,TheCameraParams.CameraMatrix,Mat(),TheMarkerSize);
        //Detection of the board
        TheBoardDetected.second=TheBoardDetector.detect( TheMarkers, TheBoardConfig,TheBoardDetected.first, TheCameraParams,TheMarkerSize);
        //check the speed by calculating the mean speed of all iterations
        //resize the image to the size of the GL window
        cv::resize(TheUndInputImage,TheResizedImage,TheGlWindowSize);
    }
    glutPostRedisplay();
}
/*!
 *  
 */
static void vIdle()
{
  if (capture)
  {
    //capture image
    vCapturer >> inImg;
    assert(inImg.empty()==false);
    undImg.create(inImg.size(),CV_8UC3);
    //by default, OpenCV works in BGR, so we must convert to RGB because OpenGL on Windows prefers it
//    cv::cvtColor(inImg,inImg,CV_BGR2RGB);
    //remove distortion in image
    cv::undistort(inImg,undImg, camParams.getCamMatrix(),camParams.getDistor());
    //detect markers
    mDetector.detect(undImg,markers,camParams.getCamMatrix(),Mat(),msiz->dval[0]);
    //Detection of the board
    board.second=bDetector.detect(markers, boardConfig,board.first, camParams,msiz->dval[0]);
    //check the speed by calculating the mean speed of all iterations
    //resize the image to the size of the GL window
    cv::resize(undImg, resImg, glSize);
  }
  glutPostRedisplay();
}
JNIEXPORT void JNICALL Java_wrapper_MarkerDetector_Jdetect(JNIEnv * env, jobject obj, jlong img, jobject markers, jobject camParams, jfloat markerSizeMeters){
    MarkerDetector *inst = getHandle<MarkerDetector>(env, obj);
        
    // use the Array list
    jclass ArrayList_class = env->FindClass( "java/util/ArrayList" );
    
    jclass Marker_class = env->FindClass("wrapper/Marker");
    
    jmethodID Add_method = env->GetMethodID(ArrayList_class,"add", "(Ljava/lang/Object;)Z");
    jmethodID Get_method = env->GetMethodID(ArrayList_class,"get", "(I)Ljava/lang/Object;");
    
    jmethodID Marker_Constructor_method = env->GetMethodID(Marker_class,"<init>","(J)V");
    jmethodID Marker_CopyConstructor_method = env->GetMethodID(Marker_class,"<init>","(Lwrapper/Marker;)V");
    jmethodID Marker_EmptyConstructor_method = env->GetMethodID(Marker_class,"<init>","()V");
    
    jmethodID Marker_SetNativeHandle_method = env->GetMethodID(Marker_class,"setNativeHandle","(J)V");

    cv::Mat* inMat = (cv::Mat*)img;
    aruco::CameraParameters *camParam = getHandle<CameraParameters>(env, camParams);
    
    std::vector <Marker> vMarkers;

    inst->detect(*inMat, vMarkers, *camParam, markerSizeMeters);
    
    std::vector<jobject> newMarkers(vMarkers.size());

    for (size_t i = 0; i < vMarkers.size(); i++) {
        // A pointer into the local vector would dangle once this function
        // returns, so a heap copy is handed to the Java object instead; the
        // Java wrapper is assumed to own and eventually free that handle.
        Marker* markPtr = new Marker(vMarkers[i]);
        newMarkers[i] = env->NewObject(Marker_class, Marker_Constructor_method, (jlong)markPtr);
        env->CallBooleanMethod(markers, Add_method, newMarkers[i]);
        env->DeleteLocalRef(newMarkers[i]);
    }
    
    env->DeleteLocalRef(ArrayList_class);
    env->DeleteLocalRef(Marker_class);
}
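Jdetect relies on a getHandle<T> helper that is not shown; a common JNI pattern, sketched here under the assumption that the Java object stores the native pointer in a long field named "nativeHandle":

template <typename T>
T* getHandle(JNIEnv* env, jobject obj)
{
    jclass cls = env->GetObjectClass(obj);
    jfieldID fid = env->GetFieldID(cls, "nativeHandle", "J"); // field name assumed
    jlong handle = env->GetLongField(obj, fid);
    env->DeleteLocalRef(cls);
    return reinterpret_cast<T*>(handle);
}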
Example #14
int main(int argc, char **argv) {
    try {
        if (argc < 2) {
            cerr << "Usage: (in.jpg|in.avi) [cameraParams.yml] [markerSize] [outImage]" << endl;
            exit(0);
        }


        aruco::CameraParameters CamParam;
        MarkerDetector MDetector;
        vector< Marker > Markers;
        float MarkerSize = -1;
        // read the input image
        cv::Mat InImage;
        // try opening first as video
        VideoCapture vreader(argv[1]);
        if (vreader.isOpened()) {
            vreader.grab();
            vreader.retrieve(InImage);
        } else {
            InImage = cv::imread(argv[1]);
        }
        // at this point, we should have the image in InImage
        // if empty, exit
        if (InImage.total() == 0) {
            cerr << "Could not open input" << endl;
            return 0;
        }

        // read camera parameters if specified
        if (argc >= 3) {
            CamParam.readFromXMLFile(argv[2]);
            // resizes the parameters to fit the size of the input image
            CamParam.resize(InImage.size());
        }
        // read marker size if specified
        if (argc >= 4)
            MarkerSize = atof(argv[3]);
        cv::namedWindow("in", 1);


        // Ok, let's detect
        MDetector.detect(InImage, Markers, CamParam, MarkerSize);
        // for each marker, draw info and its boundaries in the image
        for (unsigned int i = 0; i < Markers.size(); i++) {
            cout << Markers[i] << endl;
            Markers[i].draw(InImage, Scalar(0, 0, 255), 2);
        }
        // draw a 3d cube in each marker if there is 3d info
        if (CamParam.isValid() && MarkerSize != -1)
            for (unsigned int i = 0; i < Markers.size(); i++) {
                CvDrawingUtils::draw3dCube(InImage, Markers[i], CamParam);
            }
        // show input with augmented information
        cv::imshow("in", InImage);
        // show also the internal image resulting from the threshold operation
        cv::imshow("thes", MDetector.getThresholdedImage());
        cv::waitKey(0); // wait for key to be pressed


        if (argc >= 5)
            cv::imwrite(argv[4], InImage);
    } catch (std::exception &ex) {
        cout << "Exception :" << ex.what() << endl;
    }
}
Example #15
int main(int argc,char **argv)
{
    try
    {
        if (readArguments (argc,argv)==false) {
            return 0;
        }
        //read from camera or from  file
        if (TheInputVideo=="live") {
            TheVideoCapturer.open(0);
            waitTime=10;
        }
        else  TheVideoCapturer.open(TheInputVideo);
        //check video is open
        if (!TheVideoCapturer.isOpened()) {
            cerr<<"Could not open video"<<endl;
            return -1;

        }

        //read first image to get the dimensions
        TheVideoCapturer>>TheInputImage;

        //read camera parameters if passed
        if (TheIntrinsicFile!="") {
            TheCameraParameters.readFromXMLFile(TheIntrinsicFile);
            TheCameraParameters.resize(TheInputImage.size());
        }
        //Configure other parameters
        if (ThePyrDownLevel>0)
            MDetector.pyrDown(ThePyrDownLevel);


        //Create gui

        MDetector.getThresholdParams( ThresParam1,ThresParam2);
        MDetector.setCornerRefinementMethod(MarkerDetector::LINES);

	/*
        cv::namedWindow("thres",1);
        cv::namedWindow("in",1);
        iThresParam1=ThresParam1;
        iThresParam2=ThresParam2;
        cv::createTrackbar("ThresParam1", "in",&iThresParam1, 13, cvTackBarEvents);
        cv::createTrackbar("ThresParam2", "in",&iThresParam2, 13, cvTackBarEvents);
	*/
	
        char key=0;
        int index=0;
        //capture until press ESC or until the end of the video
        while ( key!=27 && TheVideoCapturer.grab() ) // && index <= 50)
        {
            TheVideoCapturer.retrieve( TheInputImage);
            //copy image

            index++; //number of images captured

            double tick = (double)getTickCount();//for checking the speed
            //Detection of markers in the image passed
            MDetector.detect(TheInputImage,TheMarkers,TheCameraParameters,TheMarkerSize);
            //check the speed by calculating the mean speed of all iterations
            AvrgTime.first+=((double)getTickCount()-tick)/getTickFrequency();
            AvrgTime.second++;
            //cout<<"Time detection="<<1000*AvrgTime.first/AvrgTime.second<<" milliseconds"<<endl;
	    
            //print marker info and draw the markers in image
            TheInputImage.copyTo(TheInputImageCopy);
            for (unsigned int i=0;i<TheMarkers.size();i++) {
                if (AllMarkers.count( TheMarkers[i].id ) == 0)
                    AllMarkers[TheMarkers[i].id] = map<int,Marker>();
                AllMarkers[TheMarkers[i].id][index] = TheMarkers[i];

                cout<<index<<endl;
                cout<<TheMarkers[i]<<endl;
                TheMarkers[i].draw(TheInputImageCopy,Scalar(0,0,255),1);
            }
            //print other rectangles that contain no valid markers
       /**     for (unsigned int i=0;i<MDetector.getCandidates().size();i++) {
                aruco::Marker m( MDetector.getCandidates()[i],999);
                m.draw(TheInputImageCopy,cv::Scalar(255,0,0));
            }*/



            //draw a 3d cube in each marker if there is 3d info
            if (  TheCameraParameters.isValid())
                for (unsigned int i=0;i<TheMarkers.size();i++) {
                    CvDrawingUtils::draw3dCube(TheInputImageCopy,TheMarkers[i],TheCameraParameters);
                    CvDrawingUtils::draw3dAxis(TheInputImageCopy,TheMarkers[i],TheCameraParameters);
                }
            //DONE! Easy, right?
            cout<<endl<<endl<<endl;
            //show input with augmented information and  the thresholded image
            //cv::imshow("in",TheInputImageCopy);
            //cv::imshow("thres",MDetector.getThresholdedImage());

            //key=cv::waitKey(waitTime);//wait for key to be pressed
        }

	lastFrame = index;

    } catch (std::exception &ex) {
        cout<<"Exception :"<<ex.what()<<endl;
    }

    cout << "All done."<< endl;

    map<int, map<int,Marker> >::const_iterator i;
    for( i = AllMarkers.begin(); i != AllMarkers.end(); ++i ) {
      int markerId = (*i).first;
      map<int, Marker> markers = (*i).second;

      int frameCount = markers.size();

      cout << "frameCount = " << frameCount << endl;

      std::vector<double> x(frameCount);
      std::vector<double> m0x(frameCount);
      std::vector<double> m0y(frameCount);
      std::vector<double> m1x(frameCount);
      std::vector<double> m1y(frameCount);
      std::vector<double> m2x(frameCount);
      std::vector<double> m2y(frameCount);
      std::vector<double> m3x(frameCount);
      std::vector<double> m3y(frameCount);
      std::vector<double> tx(frameCount);
      std::vector<double> ty(frameCount);
      std::vector<double> tz(frameCount);
      std::vector<double> rx(frameCount);
      std::vector<double> ry(frameCount);
      std::vector<double> rz(frameCount);

      map<int, Marker>::const_iterator j;
      int index = 0;
      for( j = markers.begin(); j != markers.end(); ++j, index++ ) {
	int frameIndex = (*j).first;
	Marker marker = (*j).second;

	x[index] = frameIndex;
	m0x[index] = marker[0].x;
	m0y[index] = marker[0].y;
	m1x[index] = marker[1].x;
	m1y[index] = marker[1].y;
	m2x[index] = marker[2].x;
	m2y[index] = marker[2].y;
	m3x[index] = marker[3].x;
	m3y[index] = marker[3].y;
	tx[index] = marker.Tvec.ptr<float>(0)[0];
	ty[index] = marker.Tvec.ptr<float>(0)[1];
	tz[index] = marker.Tvec.ptr<float>(0)[2];
	rx[index] = marker.Rvec.ptr<float>(0)[0];
	ry[index] = marker.Rvec.ptr<float>(0)[1];
	rz[index] = marker.Rvec.ptr<float>(0)[2];
	
	cout << frameIndex << endl;
      }

#define SPLINE(VAR)                                                                  \
      gsl_spline *spline_ ## VAR = gsl_spline_alloc(gsl_interp_cspline, frameCount); \
      gsl_spline_init(spline_ ## VAR, &x[0], &VAR[0], frameCount)

      SPLINE(m0x);
      SPLINE(m0y);
      SPLINE(m1x);
      SPLINE(m1y);
      SPLINE(m2x);
      SPLINE(m2y);
      SPLINE(m3x);
      SPLINE(m3y);
      SPLINE(tx);
      SPLINE(ty);
      SPLINE(tz);
      SPLINE(rx);
      SPLINE(ry);
      SPLINE(rz);

      for( index = 0; index < lastFrame; index++ ) {

	double m0x = gsl_spline_eval (spline_m0x, index, NULL);
	double m0y = gsl_spline_eval (spline_m0y, index, NULL);
	double m1x = gsl_spline_eval (spline_m1x, index, NULL);
	double m1y = gsl_spline_eval (spline_m1y, index, NULL);
	double m2x = gsl_spline_eval (spline_m2x, index, NULL);
	double m2y = gsl_spline_eval (spline_m2y, index, NULL);
	double m3x = gsl_spline_eval (spline_m3x, index, NULL);
	double m3y = gsl_spline_eval (spline_m3y, index, NULL);
	double tx = gsl_spline_eval (spline_tx, index, NULL);
	double ty = gsl_spline_eval (spline_ty, index, NULL);
	double tz = gsl_spline_eval (spline_tz, index, NULL);
	double rx = gsl_spline_eval (spline_rx, index, NULL);
	double ry = gsl_spline_eval (spline_ry, index, NULL);
	double rz = gsl_spline_eval (spline_rz, index, NULL);

	cv::Point2f m0 = cv::Point2f(m0x,m0y);
	cv::Point2f m1 = cv::Point2f(m1x,m1y);
	cv::Point2f m2 = cv::Point2f(m2x,m2y);
	cv::Point2f m3 = cv::Point2f(m3x,m3y);

	std::vector<cv::Point2f> corners(4);
	corners[0] = m0;
	corners[1] = m1;
	corners[2] = m2;
	corners[3] = m3;
	
	Marker interpolated = Marker(corners, markerId);

	interpolated.Rvec.create(3,1,CV_32FC1);
        interpolated.Tvec.create(3,1,CV_32FC1);
	interpolated.Tvec.at<float>(0,0) = tx;
	interpolated.Tvec.at<float>(1,0) = ty;
	interpolated.Tvec.at<float>(2,0) = tz;
	interpolated.Rvec.at<float>(0,0) = rx;
	interpolated.Rvec.at<float>(1,0) = ry;
	interpolated.Rvec.at<float>(2,0) = rz;

	cout << index << endl;
	cout << interpolated << endl;
      }
      

      
      gsl_spline_free (spline_m0x);
      gsl_spline_free (spline_m0y);
      gsl_spline_free (spline_m1x);
      gsl_spline_free (spline_m1y);
      gsl_spline_free (spline_m2x);
      gsl_spline_free (spline_m2y);
      gsl_spline_free (spline_m3x);
      gsl_spline_free (spline_m3y);
      gsl_spline_free (spline_tx);
      gsl_spline_free (spline_ty);
      gsl_spline_free (spline_tz);
      gsl_spline_free (spline_rx);
      gsl_spline_free (spline_ry);
      gsl_spline_free (spline_rz);

      
      //map<int, Marker>::const_iterator j;
      
      
      
      //cout << "id = " << markerId << endl;

      
    }
    
    //cout << TheFrames << endl;
}
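Two caveats for the spline pass above: gsl_interp_cspline needs at least three sample points per marker, and gsl_spline_eval raises a GSL error outside the fitted x-range. A guard sketch (needs <algorithm>):

// before the SPLINE() block: skip markers with too few detections
if ((unsigned)frameCount < gsl_interp_cspline->min_size)
    continue; // cubic splines need at least 3 points

// inside the evaluation loop: clamp the query to the fitted range
double xi = std::min(std::max((double)index, x.front()), x.back());
double m0x = gsl_spline_eval(spline_m0x, xi, NULL);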
		void image_callback(const sensor_msgs::ImageConstPtr& msg)
		{
			if(!cam_info_received) return;

			cv_bridge::CvImagePtr cv_ptr;
			try
			{
				cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::RGB8);
				inImage = cv_ptr->image;
				resultImg = cv_ptr->image.clone();

				//detection results will go into "markers"
				markers.clear();

				//Ok, let's detect
				double min_size = boards[0].marker_size;
				for (int board_index = 1; board_index < boards.size(); board_index++)
					if (min_size > boards[board_index].marker_size) min_size = boards[board_index].marker_size;
				mDetector.detect(inImage, markers, camParam, min_size, false);


				for (int board_index = 0; board_index < boards.size(); board_index++)
				{
					Board board_detected;

					//Detection of the board
					float probDetect = the_board_detector.detect(markers, boards[board_index].config, board_detected, camParam, boards[board_index].marker_size);
					if (probDetect > 0.0)
					{
						tf::Transform transform = ar_sys::getTf(board_detected.Rvec, board_detected.Tvec);

						tf::StampedTransform stampedTransform(transform, ros::Time::now(), "apollon_cam_right", boards[board_index].name +"_right");

						//_tfBraodcaster.sendTransform(stampedTransform);		// from phillip

						/*geometry_msgs::PoseStamped poseMsg;
						tf::poseTFToMsg(transform, poseMsg.pose);
						poseMsg.header.frame_id = msg->header.frame_id;
						poseMsg.header.stamp = msg->header.stamp;
						pose_pub.publish(poseMsg);*/

						geometry_msgs::TransformStamped transformMsg;
						tf::transformStampedTFToMsg(stampedTransform, transformMsg);
						transform_pub.publish(transformMsg);

						/*geometry_msgs::Vector3Stamped positionMsg;
						positionMsg.header = transformMsg.header;
						positionMsg.vector = transformMsg.transform.translation;
						position_pub.publish(positionMsg);*/

						if(camParam.isValid())
						{
							//draw board axis
							CvDrawingUtils::draw3dAxis(resultImg, board_detected, camParam);
						}
					}
				}

				//for each marker, draw info and its boundaries in the image
				for(size_t i=0; draw_markers && i < markers.size(); ++i)
				{
					markers[i].draw(resultImg,cv::Scalar(0,0,255),2);
				}

				if(camParam.isValid())
				{
					//draw a 3d cube in each marker if there is 3d info
					for(size_t i=0; i<markers.size(); ++i)
					{
						if (draw_markers_cube) CvDrawingUtils::draw3dCube(resultImg, markers[i], camParam);
						if (draw_markers_axis) CvDrawingUtils::draw3dAxis(resultImg, markers[i], camParam);
					}
				}

				if(image_pub.getNumSubscribers() > 0)
				{
					//show input with augmented information
					cv_bridge::CvImage out_msg;
					out_msg.header.frame_id = msg->header.frame_id;
					out_msg.header.stamp = msg->header.stamp;
					out_msg.encoding = sensor_msgs::image_encodings::RGB8;
					out_msg.image = resultImg;
					image_pub.publish(out_msg.toImageMsg());
				}

				if(debug_pub.getNumSubscribers() > 0)
				{
					//show also the internal image resulting from the threshold operation
					cv_bridge::CvImage debug_msg;
					debug_msg.header.frame_id = msg->header.frame_id;
					debug_msg.header.stamp = msg->header.stamp;
					debug_msg.encoding = sensor_msgs::image_encodings::MONO8;
					debug_msg.image = mDetector.getThresholdedImage();
					debug_pub.publish(debug_msg.toImageMsg());
				}
			}
			catch (cv_bridge::Exception& e)
			{
				ROS_ERROR("cv_bridge exception: %s", e.what());
				return;
			}
		}
    void image_callback(const sensor_msgs::ImageConstPtr& msg)
    {
        static tf::TransformBroadcaster br;
        if(cam_info_received)
        {
            cv_bridge::CvImagePtr cv_ptr;
            try
            {
                cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::RGB8);
                inImage = cv_ptr->image;

                //detection results will go into "markers"
                markers.clear();
                //Ok, let's detect
                mDetector.detect(inImage, markers, camParam, marker_size, false);
                //for each marker, draw info and its boundaries in the image
                for(size_t i=0; i<markers.size(); ++i)
                {
                    // only publishing the selected marker
                    if(markers[i].id == marker_id)
                    {
                        tf::Transform transform = aruco_ros::arucoMarker2Tf(markers[i]);
                        tf::StampedTransform cameraToReference;
                        cameraToReference.setIdentity();

                        if ( reference_frame != camera_frame )
                        {
                            getTransform(reference_frame,
                                         camera_frame,
                                         cameraToReference);
                        }

                        transform =
                            static_cast<tf::Transform>(cameraToReference)
                            * static_cast<tf::Transform>(rightToLeft)
                            * transform;

                        tf::StampedTransform stampedTransform(transform, ros::Time::now(),
                                                              reference_frame, marker_frame);
                        br.sendTransform(stampedTransform);
                        geometry_msgs::PoseStamped poseMsg;
                        tf::poseTFToMsg(transform, poseMsg.pose);
                        poseMsg.header.frame_id = reference_frame;
                        poseMsg.header.stamp = ros::Time::now();
                        pose_pub.publish(poseMsg);

                        geometry_msgs::TransformStamped transformMsg;
                        tf::transformStampedTFToMsg(stampedTransform, transformMsg);
                        transform_pub.publish(transformMsg);

                        geometry_msgs::Vector3Stamped positionMsg;
                        positionMsg.header = transformMsg.header;
                        positionMsg.vector = transformMsg.transform.translation;
                        position_pub.publish(positionMsg);
                    }
                    // but drawing all the detected markers
                    markers[i].draw(inImage,cv::Scalar(0,0,255),2);
                }

                //draw a 3d axis on each marker if there is 3d info
                if(camParam.isValid() && marker_size!=-1)
                {
                    for(size_t i=0; i<markers.size(); ++i)
                    {
                        CvDrawingUtils::draw3dAxis(inImage, markers[i], camParam);
                    }
                }

                if(image_pub.getNumSubscribers() > 0)
                {
                    //show input with augmented information
                    cv_bridge::CvImage out_msg;
                    out_msg.header.stamp = ros::Time::now();
                    out_msg.encoding = sensor_msgs::image_encodings::RGB8;
                    out_msg.image = inImage;
                    image_pub.publish(out_msg.toImageMsg());
                }

                if(debug_pub.getNumSubscribers() > 0)
                {
                    //show also the internal image resulting from the threshold operation
                    cv_bridge::CvImage debug_msg;
                    debug_msg.header.stamp = ros::Time::now();
                    debug_msg.encoding = sensor_msgs::image_encodings::MONO8;
                    debug_msg.image = mDetector.getThresholdedImage();
                    debug_pub.publish(debug_msg.toImageMsg());
                }
            }
            catch (cv_bridge::Exception& e)
            {
                ROS_ERROR("cv_bridge exception: %s", e.what());
                return;
            }
        }
    }
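aruco_ros::arucoMarker2Tf converts the marker's Rvec/Tvec into a tf transform; a sketch of the bare conversion (the real helper also applies a camera-to-ROS axis rotation, omitted here):

tf::Transform markerToTf(const aruco::Marker& marker)
{
    cv::Mat rot(3, 3, CV_64FC1), rvec64, tvec64;
    marker.Rvec.convertTo(rvec64, CV_64FC1);
    marker.Tvec.convertTo(tvec64, CV_64FC1);
    cv::Rodrigues(rvec64, rot); // rotation vector -> 3x3 matrix

    tf::Matrix3x3 tf_rot(rot.at<double>(0,0), rot.at<double>(0,1), rot.at<double>(0,2),
                         rot.at<double>(1,0), rot.at<double>(1,1), rot.at<double>(1,2),
                         rot.at<double>(2,0), rot.at<double>(2,1), rot.at<double>(2,2));
    tf::Vector3 tf_orig(tvec64.at<double>(0,0), tvec64.at<double>(1,0), tvec64.at<double>(2,0));
    return tf::Transform(tf_rot, tf_orig);
}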
void getJointPositions(Mat imgOrg, Arm *arm_left, Arm *arm_right, Chest *chest)
{
	// reset found flag from all joints and both arms
	arm_left->resetJ1Found();
	arm_left->resetJ2Found();
	arm_left->resetJ3Found();
	arm_left->resetArmFound();

	arm_right->resetJ1Found();
	arm_right->resetJ2Found();
	arm_right->resetJ3Found();
	arm_right->resetArmFound();

	// create the camera parameters object
	CameraParameters TheCameraParameters;

	// set camera parameters
	Mat dist(1,5,CV_32FC1);
	dist.at<float>(0,0) = -0.0648763971625288;
	dist.at<float>(0,1) = 0.0612520196884308;
	dist.at<float>(0,2) = 0.0038281538281731;
	dist.at<float>(0,3) = -0.00551104078371959;
	dist.at<float>(0,4) = 0.000000;

	Mat cameraP(3,3,CV_32FC1);
	cameraP.at<float>(0,0) = 558.570339530768;
	cameraP.at<float>(0,1) = 0.000000;
	cameraP.at<float>(0,2) = 308.885375457296;
	cameraP.at<float>(1,0) = 0.000000;
	cameraP.at<float>(1,1) = 556.122943034837;
	cameraP.at<float>(1,2) = 247.600724811385;
	cameraP.at<float>(2,0) = 0.000000;
	cameraP.at<float>(2,1) = 0.000000;
	cameraP.at<float>(2,2) = 1.000000;

	TheCameraParameters.setParams(cameraP, dist, CAMERA_RESOLUTION);
	TheCameraParameters.resize(CAMERA_RESOLUTION);

	// create vectors for joints
	Vector3d j1Left, j2Left, j3Left;
	Vector3d j1Right, j2Right, j3Right;

	// create objects for marker
	MarkerDetector MDetector;
	vector<Marker> Markers;

	// set marker settings
	MDetector.setWarpSize(100);
	MDetector.enableLockedCornersMethod(true);
	MDetector.setMinMaxSize(0.01, 0.5);

	// convert image to gray
	Mat imgGray = imgOrg.clone();
	cvtColor(imgGray, imgGray, CV_BGR2GRAY);

	// detect all markers in the picture
	MDetector.detect(imgGray, Markers, TheCameraParameters);

	// go through all found markers
	for(unsigned int i = 0; i < Markers.size(); i++)
	{
			Markers[i].calculateExtrinsics(0.09, TheCameraParameters);
			Markers[i].draw(imgOrg, Scalar(0,0,255), 2);

			// get abs position in picture for every joint
			switch(Markers[i].id)
			{
			case SHOULDER_LEFT:
					// switch x and y axis
					j1Left(0) = Markers[i].Tvec.at<float>(1, 0);
					j1Left(1) = Markers[i].Tvec.at<float>(0, 0);
					j1Left(2) = Markers[i].Tvec.at<float>(2, 0);

					arm_left->setJ1Found();

					if(COUT_JOINT_ABS_POS == ON)
					{
							cout << "abs pos SHOULDER_LEFT: " << j1Left(0) << "\t" << j1Left(1) << "\t" << j1Left(2) << endl;
					}
					break;

			case ELBOW_LEFT:
					// switch x and y axis
					j2Left(0) = Markers[i].Tvec.at<float>(1, 0);
					j2Left(1) = Markers[i].Tvec.at<float>(0, 0);
					j2Left(2) = Markers[i].Tvec.at<float>(2, 0);

					arm_left->setJ2Found();

					if(COUT_JOINT_ABS_POS == ON)
					{
							cout << "abs pos ELBOW_LEFT: " << j2Left(0) << "\t" << j2Left(1) << "\t" << j2Left(2) << endl;
					}
					break;

			case WRIST_LEFT:
					// switch x and y axis
					j3Left(0) = Markers[i].Tvec.at<float>(1, 0);
					j3Left(1) = Markers[i].Tvec.at<float>(0, 0);
					j3Left(2) = Markers[i].Tvec.at<float>(2, 0);

					arm_left->setJ3Found();

					if(COUT_JOINT_ABS_POS == ON)
					{
							cout << "abs pos WRIST_LEFT: " << j3Left(0) << "\t" << j3Left(1) << "\t" << j3Left(2) << endl;
					}
					break;

				case SHOULDER_RIGHT:
						// switch x and y axis
						j1Right(0) = Markers[i].Tvec.at<float>(1, 0);
						j1Right(1) = Markers[i].Tvec.at<float>(0, 0);
						j1Right(2) = Markers[i].Tvec.at<float>(2, 0);

						arm_right->setJ1Found();

						if(COUT_JOINT_ABS_POS == ON)
						{
								cout << "abs pos SHOULDER_RIGHT: " << j1Right(0) << "\t" << j1Right(1) << "\t" << j1Right(2) << endl;
						}
						break;

				case ELBOW_RIGHT:
						// switch x and y axis
						j2Right(0) = Markers[i].Tvec.at<float>(1, 0);
						j2Right(1) = Markers[i].Tvec.at<float>(0, 0);
						j2Right(2) = Markers[i].Tvec.at<float>(2, 0);

						arm_right->setJ2Found();

						if(COUT_JOINT_ABS_POS == ON)
						{
								cout << "abs pos ELBOW_RIGHT: " << j2Right(0) << "\t" << j2Right(1) << "\t" << j2Right(2) << endl;
						}
						break;

				case WRIST_RIGHT:
						// switch x and y axis
						j3Right(0) = Markers[i].Tvec.at<float>(1, 0);
						j3Right(1) = Markers[i].Tvec.at<float>(0, 0);
						j3Right(2) = Markers[i].Tvec.at<float>(2, 0);

						arm_right->setJ3Found();

						if(COUT_JOINT_ABS_POS == ON)
						{
								cout << "abs pos WRIST_RIGHT: " << j3Right(0) << "\t" << j3Right(1) << "\t" << j3Right(2) << endl;
						}
						break;
				}
		}

		// Display camera image with detected markers
		if(SHOW_ARUCO_FOUND_IMG == ON)
			cv::imshow("aruco", imgOrg);


		// set initial position for upper body
		if((chest->getInit() == false) && arm_left->getJ1Found() && arm_right->getJ1Found())
		{
				// set bool to true
				chest->setInit();

				// set initial position between shoulders -> both shoulders are needed
				if(arm_left->getJ1Found() && arm_right->getJ1Found())
					chest->setInitPos((-j1Left(2) -j1Right(2))/2);

				if(COUT_CHEST_INIT_POS == ON)
						cout << "chest init pos: " << chest->getInitPos() << endl;
		}


		// check if upper body moved
		if(chest->getInit() && (arm_left->getJ1Found() || arm_right->getJ1Found()))
		{
				// reset bool
				chest->resetTorsoMoved();

				// calculate current chest position
				double chestCurPos;
				if(arm_left->getJ1Found() && arm_right->getJ1Found())
						chestCurPos = (-j1Left(2) -j1Right(2))/2;
				else if(arm_left->getJ1Found())
						chestCurPos = -j1Left(2);
				else if(arm_right->getJ1Found())
						chestCurPos = -j1Right(2);

				if(COUT_CHEST_CUR_POS == ON)
						cout << "current pos: "	<< chestCurPos << endl;


				// calculate distance
				double dist = chest->getInitPos() - chestCurPos;

				if(COUT_CHEST_DIST == ON)
					cout << "chest dist: " << dist << endl;

				// check if distance is greater than threshold
				if(abs(dist) > CHEST_DIST_THRESH)
				{
						// set bool: thresh moved
						chest->setTorsoMoved();

						// calculate angle for torso
						chest->setAngle(asin(dist/TORSO_LENGTH));

						if(COUT_CHEST_ANGLE == ON)
							cout << "angle" << chest->getAngle() << endl;
				}
		}


		// calculate relative position for the left arm
		if(arm_left->getJ1Found() && arm_left->getJ2Found() && arm_left->getJ3Found())
		{
				// calculate distance
				Vector3d d1 = j2Left - j1Left;
				Vector3d d2 = j3Left - j1Left;

				// check min dist
				for(int i = 0; i < 3; i++)
				{
						if(abs(d1(i)) < MIN_DIST)
								d1(i) = 0.0;

						if(abs(d2(i)) < MIN_DIST)
								d2(i) = 0.0;
				}

//				cout << "d1 " << d1(0) << "\t" << d1(1) << "\t" << d1(2) << endl;
//				cout << "d2 " << d2(0) << "\t" << d2(1) << "\t" << d2(2) << endl;

				// set relative coordinates
				j1Left(0) = 0;
				j1Left(1) = 0;
				j1Left(2) = 0;

				j2Left(0) = -d1(2);
				j2Left(1) = d1(1);
				j2Left(2) = -d1(0);

				j3Left(0) = -d2(2);
				j3Left(1) = d2(1);
				j3Left(2) = -d2(0);

				// write into arm
				arm_left->setJ1Coord(j1Left);
				arm_left->setJ2Coord(j2Left);
				arm_left->setJ3Coord(j3Left);

				// set bool that all markers for the left arm have been found
				arm_left->setArmFound();

				if(COUT_JOINT_REL_POS == ON)
				{
						cout << "rel pos SHOULDER_LEFT: " 	<< j1Left(0) << "\t\t" 	<< j1Left(1) << "\t\t" 	<< j1Left(2) << endl;
						cout << "rel pos ELBOW_LEFT:    " 	<< j2Left(0) << "\t" 		<< j2Left(1) << "\t" 		<< j2Left(2) << endl;
						cout << "rel pos WRIST_LEFT:    " 	<< j3Left(0) << "\t" 		<< j3Left(1) << "\t" 		<< j3Left(2) << endl;

						cout << endl << endl;
				}
		}


		// calculate relative position for the right arm
		if(arm_right->getJ1Found() && arm_right->getJ2Found() && arm_right->getJ3Found())
		{
				// calculate distance
				Vector3d d1 = j2Right - j1Right;
				Vector3d d2 = j3Right - j1Right;

				// check min dist
				for(int i = 0; i < 3; i++)
				{
						if(abs(d1(i)) < MIN_DIST)
								d1(i) = 0.0;

						if(abs(d2(i)) < MIN_DIST)
								d2(i) = 0.0;
				}

//				cout << "d1 " << d1(0) << "\t" << d1(1) << "\t" << d1(2) << endl;
//				cout << "d2 " << d2(0) << "\t" << d2(1) << "\t" << d2(2) << endl;

				// set relative coordinates
				j1Right(0) = 0;
				j1Right(1) = 0;
				j1Right(2) = 0;

				j2Right(0) = -d1(2);
				j2Right(1) = d1(1);
				j2Right(2) = -d1(0);

				j3Right(0) = -d2(2);
				j3Right(1) = d2(1);
				j3Right(2) = -d2(0);

				// write into arm
				arm_right->setJ1Coord(j1Right);
				arm_right->setJ2Coord(j2Right);
				arm_right->setJ3Coord(j3Right);

				// set bool that all markers for the right arm have been found
				arm_right->setArmFound();

				if(COUT_JOINT_REL_POS == ON)
				{
						cout << "rel pos SHOULDER_RIGHT: " 	<< j1Right(0) << "\t\t" 	<< j1Right(1) << "\t\t" 	<< j1Right(2) << endl;
						cout << "rel pos ELBOW_RIGHT:    " 	<< j2Right(0) << "\t" 		<< j2Right(1) << "\t" 		<< j2Right(2) << endl;
						cout << "rel pos WRIST_RIGHT:    " 	<< j3Right(0) << "\t" 		<< j3Right(1) << "\t" 		<< j3Right(2) << endl;

						cout << endl << endl;
				}
		}
}
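The six switch cases above repeat one pattern: read Tvec and swap the x and y axes. A sketch of a helper that would collapse them (Vector3d is assumed to be Eigen::Vector3d, as the arithmetic suggests):

static Vector3d jointFromMarker(const Marker& m)
{
    Vector3d j;
    j(0) = m.Tvec.at<float>(1, 0); // camera y -> joint x
    j(1) = m.Tvec.at<float>(0, 0); // camera x -> joint y
    j(2) = m.Tvec.at<float>(2, 0); // camera z unchanged
    return j;
}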
int main(int argc, char** argv)
{
    try
    {
        CmdLineParser cml(argc, argv);
        if (argc < 3 || cml["-h"])
        {
            cerr << "Invalid number of arguments" << endl;
            cerr << "Usage: (in.avi|live) marksetconfig.yml  [optional_arguments] \n\t[-c camera_intrinsics.yml] "
                    "\n\t[-s marker_size] \n\t[-pcd out_pcd_file_with_camera_poses] \n\t[-poses out_file_with_poses] "
                    "\n\t[-corner <corner_refinement_method> (0: LINES(default),1 SUBPIX) ][-h]"
                 << endl;
            return -1;
        }
        TheMarkerMapConfig.readFromFile(argv[2]);

        TheMarkerMapConfigFile = argv[2];
        TheMarkerSize = stof(cml("-s", "1"));
        // read from camera or from  file
        if (string(argv[1]) == "live")
        {
            TheVideoCapturer.open(0);
        }
        else
            TheVideoCapturer.open(argv[1]);
        // check video is open
        if (!TheVideoCapturer.isOpened())
            throw std::runtime_error("Could not open video");

        // read first image to get the dimensions
        TheVideoCapturer >> TheInputImage;

        // read camera parameters if passed
        if (cml["-c"])
        {
            TheCameraParameters.readFromXMLFile(cml("-c"));
            TheCameraParameters.resize(TheInputImage.size());
        }
        // prepare the detector
        string dict =
            TheMarkerMapConfig
                .getDictionary();  // see if the dictionary is already indicated in the configuration file. It should be!
        if (dict.empty())
            dict = "ARUCO";
        TheMarkerDetector.setDictionary(
            dict);  /// DO NOT FORGET THAT!!! Otherwise, the ARUCO dictionary will be used by default!
        if (stoi(cml("-corner", "0")) == 0)
            TheMarkerDetector.setCornerRefinementMethod(MarkerDetector::LINES);
        else
        {
            MarkerDetector::Params params = TheMarkerDetector.getParams();
            params._cornerMethod = MarkerDetector::SUBPIX;
            // search for the corner subpix in a window area proportional to the image width
            params._subpix_wsize = static_cast<int>((15.f / 2000.f) * float(TheInputImage.cols));
            TheMarkerDetector.setParams(params);
        }

        // prepare the pose tracker if possible
        // if the camera parameters are available, and the markerset can be expressed in meters, then go

        if (TheMarkerMapConfig.isExpressedInPixels() && TheMarkerSize > 0)
            TheMarkerMapConfig = TheMarkerMapConfig.convertToMeters(TheMarkerSize);
        cout << "TheCameraParameters.isValid()=" << TheCameraParameters.isValid() << " "
             << TheMarkerMapConfig.isExpressedInMeters() << endl;
        if (TheCameraParameters.isValid() && TheMarkerMapConfig.isExpressedInMeters())
            TheMSPoseTracker.setParams(TheCameraParameters, TheMarkerMapConfig);

        // Create gui

        cv::namedWindow("thres", 1);
        cv::namedWindow("in", 1);

        TheMarkerDetector.getThresholdParams(ThresParam1, ThresParam2);
        iThresParam1 = static_cast<int>(ThresParam1);
        iThresParam2 = static_cast<int>(ThresParam2);
        cv::createTrackbar("ThresParam1", "in", &iThresParam1, 13, cvTackBarEvents);
        cv::createTrackbar("ThresParam2", "in", &iThresParam2, 13, cvTackBarEvents);
        char key = 0;
        int index = 0;
        // capture until press ESC or until the end of the video
        cout << "Press 's' to start/stop video" << endl;
        do
        {
            TheVideoCapturer.retrieve(TheInputImage);
            TheInputImage.copyTo(TheInputImageCopy);
            index++;  // number of images captured
            // Detection of the board
            vector<aruco::Marker> detected_markers = TheMarkerDetector.detect(TheInputImage);
            // print the markers detected that belong to the markerset
            for (auto idx : TheMarkerMapConfig.getIndices(detected_markers))
                detected_markers[idx].draw(TheInputImageCopy, Scalar(0, 0, 255), 2);
            // detect 3d info if possible
            if (TheMSPoseTracker.isValid())
            {
                if (TheMSPoseTracker.estimatePose(detected_markers))
                {
                    aruco::CvDrawingUtils::draw3dAxis(TheInputImageCopy, TheCameraParameters,
                                                      TheMSPoseTracker.getRvec(), TheMSPoseTracker.getTvec(),
                                                      TheMarkerMapConfig[0].getMarkerSize() * 2);
                    frame_pose_map.insert(make_pair(index, TheMSPoseTracker.getRTMatrix()));
                    cout << "pose rt=" << TheMSPoseTracker.getRvec() << " " << TheMSPoseTracker.getTvec() << endl;
                }
            }

            // show input with augmented information and  the thresholded image
            cv::imshow("in", TheInputImageCopy);
            cv::imshow("thres", TheMarkerDetector.getThresholdedImage());

            key = cv::waitKey(waitTime);  // wait for key to be pressed
            processKey(key);

        } while (key != 27 && TheVideoCapturer.grab());

        // save a PCD file (PCL library) showing the results (you can use pcl_viewer to see it)
        if (cml["-pcd"])
        {
            savePCDFile(cml("-pcd"), TheMarkerMapConfig, frame_pose_map);
        }

        // save the poses to a file in tum rgbd data format
        if (cml["-poses"])
        {
            savePosesToFile(cml("-poses"), frame_pose_map);
        }
    }
    catch (std::exception& ex)
    {
        cout << "Exception :" << ex.what() << endl;
    }
}
int main(int argc,char **argv)
{
	
    
    try
    {
        if (readArguments (argc,argv)==false) {
            return 0;
        }
        //read from camera or from  file
        if (TheInputVideo=="live") {
            TheVideoCapturer.open(0);
            waitTime=10;
        }
        else  TheVideoCapturer.open(TheInputVideo);
        //check video is open
        if (!TheVideoCapturer.isOpened()) {
            cerr<<"Could not open video"<<endl;
            return -1;

        }
        

        //read first image to get the dimensions
        TheVideoCapturer>>TheInputImage;

        //read camera parameters if passed
        if (TheIntrinsicFile!="") {
            TheCameraParameters.readFromXMLFile(TheIntrinsicFile);
            TheCameraParameters.resize(TheInputImage.size());
        }
        //Configure other parameters
        if (ThePyrDownLevel>0)
            MDetector.pyrDown(ThePyrDownLevel);

        //begin copy-paste from http://stackoverflow.com/questions/11550021/converting-a-mat-file-from-matlab-into-cvmat-matrix-in-opencv
		Mat oneVect;
		Mat useVecLat;
		Mat someVects;
		Mat zeroYzero;

		string demoFile  = "demo.yml";

		FileStorage fsDemo( demoFile, FileStorage::READ);
		fsDemo["oneVect"] >> oneVect;
		
		fsDemo["oneVect"] >> useVecLat;
		
		
		fsDemo["oneVect"] >> zeroYzero;

		fsDemo["someVects"] >> someVects;

		
		cout << "Print the contents of oneVect:" << endl;
		cout << oneVect << endl;
		
		
		
		fsDemo.release(); //close the file
		
		
		
		// Declare what you need
		// FileStorage fileOutt("reading_positions.yml", FileStorage::WRITE);
		
		//end copy-paste from http://stackoverflow.com/questions/11550021/converting-a-mat-file-from-matlab-into-cvmat-matrix-in-opencv
		
		
		cout << "an element oneVect:" << endl;
		cout << oneVect.at<float>(0,1) << endl;
		
		// to access the 42 in this YAML:
		
		//oneVect: !!opencv-matrix
		//   rows: 1
		//   cols: 3
		//   dt: f
		//   data: [ 4, 3, 42, 55]
		
		//  do oneVect.at<float>(0,2)


		//end data input



        //Create gui

        cv::namedWindow("thres",1);
        cv::namedWindow("in",1);
        MDetector.getThresholdParams( ThresParam1,ThresParam2);
        MDetector.setCornerRefinementMethod(MarkerDetector::LINES);
        iThresParam1=ThresParam1;
        iThresParam2=ThresParam2;
        cv::createTrackbar("ThresParam1", "in",&iThresParam1, 13, cvTackBarEvents);
        cv::createTrackbar("ThresParam2", "in",&iThresParam2, 13, cvTackBarEvents);

        char key=0;
        int index=0;
        //capture until press ESC or until the end of the video
        while ( key!=27 && TheVideoCapturer.grab())
        {
            TheVideoCapturer.retrieve( TheInputImage);
            //copy image

            index++; //number of images captured
            double tick = (double)getTickCount();//for checking the speed
            //Detection of markers in the image passed
			cout << "q" ;
            MDetector.detect(TheInputImage,TheMarkers,TheCameraParameters,TheMarkerSize);
            //note that this function outputs the marker info, for some reason.
            
            
            //check the speed by calculating the mean speed of all iterations
            AvrgTime.first+=((double)getTickCount()-tick)/getTickFrequency();
            AvrgTime.second++;
            
            //cout<<"Time detection="<<1000*AvrgTime.first/AvrgTime.second<<" milliseconds"<<endl;
            
            
            //print marker info and draw the markers in image
            TheInputImage.copyTo(TheInputImageCopy);
            for (unsigned int i=0;i<TheMarkers.size();i++) {
				cout<<TheMarkers[i].Tvec.at<float>(0,0)<<","<<
					TheMarkers[i].Tvec.at<float>(1,0)<<","<<
					TheMarkers[i].Tvec.at<float>(2,0)<<",";
				
//                cout<<TheMarkers[i]<<endl;
                if (TheMarkers[i].id == 605 && 1 == 0) { // THIS WILL NEVER HAPPEN!!! 1 is not zero.   
					Mat R33;
					cv::Rodrigues(TheMarkers[i].Rvec,R33);
					cout << R33 << endl;
					cout << TheMarkers[i].id << endl;
					
					for (unsigned int maytr=0;maytr<TheMarkers.size();maytr++) {  //take 0 , 1 , 0, inverse transform first, then transform.
						if (TheMarkers[maytr].id == 500) {
							
							
							zeroYzero.at<float>(0,0) = 0;
							zeroYzero.at<float>(0,1) = 2;
							zeroYzero.at<float>(0,2) = 0;
							zeroYzero.at<float>(0,3) = 0;
							zeroYzero.at<float>(0,4) = 0;
							zeroYzero.at<float>(0,5) = 0;
							
							cout << zeroYzero << endl;
							Mat R33for500;
							cout << TheMarkers[maytr].id<< "food"<< endl;
							cv::Rodrigues(TheMarkers[maytr].Rvec,R33for500);
							cout << R33for500 << endl;
							//cout << TheMarkers[i].id << endl;
							
							//R33for500 * R33.t() * zeroYzero; //transpose method
							
							//Mat afterDouble; 
							//cout << TheMarkers[maytr].id<< "water"<< endl;
							//afterDouble = R33for500 * (R33.inv() * zeroYzero); //inversion method
							//R33for500 * (R33.inv() * zeroYzero); //inversion method
							//cout << "shelll"<< endl;
							//cout << afterDouble << endl;
							//useVecLat.at<float>(0,0) = 0;
							//useVecLat.at<float>(0,1) = 0;
							//useVecLat.at<float>(0,2) = 0;
							//useVecLat.at<float>(0,3) = afterDouble.at<float>(0,0);
							//useVecLat.at<float>(0,4) = afterDouble.at<float>(0,1);
							//useVecLat.at<float>(0,5) = afterDouble.at<float>(0,2);
							
							//drawVecAtPos(TheInputImageCopy,TheMarkers[maytr],TheCameraParameters,afterDouble); //oneVect);
						}
					}
				}
                
                TheMarkers[i].draw(TheInputImageCopy,Scalar(0,0,255),1);
                
                //time date stuff
				std::time_t result = std::time(NULL); //nullptr);
				std::cout // << std::asctime(std::localtime(&result))
						  << result; // <<  " seconds since the Epoch\n";
						  
				// fileOutt << "time" << result ; //<< endl;

				cout<<endl; // <<endl<<endl;
            }
            
            //print other rectangles that contain no valid markers
       /**     for (unsigned int i=0;i<MDetector.getCandidates().size();i++) {
                aruco::Marker m( MDetector.getCandidates()[i],999);
                m.draw(TheInputImageCopy,cv::Scalar(255,0,0));
            }*/



            //draw a 3d cube in each marker if there is 3d info
            if (  TheCameraParameters.isValid())
                for (unsigned int i=0;i<TheMarkers.size();i++) {
                    
                    CvDrawingUtils::draw3dCube(TheInputImageCopy,TheMarkers[i],TheCameraParameters);
                    //Never use this; just reference // CvDrawingUtils::draw3dAxis(TheInputImageCopy,TheMarkers[i],TheCameraParameters);
                    
                    draw3dAxisj(TheInputImageCopy,TheMarkers[i],TheCameraParameters);
                    drawVecAtPos(TheInputImageCopy,TheMarkers[i],TheCameraParameters,oneVect);
                    drawVecsAtPosTesting(TheInputImageCopy,TheMarkers[i],TheCameraParameters,someVects);
                    						
                }
            //DONE! Easy, right?
            
            
            //show input with augmented information and  the thresholded image
            cv::imshow("in",TheInputImageCopy);
            cv::imshow("thres",MDetector.getThresholdedImage());

            key=cv::waitKey(waitTime);//wait for key to be pressed
        }
        // fileOutt.release();

    } catch (std::exception &ex) {
        cout<<"Exception :"<<ex.what()<<endl;
    }

}
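For reference, a sketch that writes a demo.yml compatible with the reads above (values are the placeholders from the inline comment; the comment's rows/cols did not match its four data values, so cols is 4 here and someVects is pure filler):

cv::FileStorage fsOut("demo.yml", cv::FileStorage::WRITE);
cv::Mat oneVect = (cv::Mat_<float>(1, 4) << 4, 3, 42, 55);
cv::Mat someVects = (cv::Mat_<float>(2, 4) << 4, 3, 42, 55, 1, 2, 3, 4); // filler values
fsOut << "oneVect" << oneVect;
fsOut << "someVects" << someVects;
fsOut.release();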
int main(int argc,char **argv)
{
    MarkerDetector MDetector;
    vector<Marker> Markers;
    VideoCapture cap(1); // open video camera no. 1

    if (!cap.isOpened())  // if not success, exit program
    {
        cout << "Cannot open the video cam" << endl;
        return -1;
    }

    double dWidth = cap.get(CV_CAP_PROP_FRAME_WIDTH); //get the width of frames of the video
    double dHeight = cap.get(CV_CAP_PROP_FRAME_HEIGHT); //get the height of frames of the video

    cout << "Frame size : " << dWidth << " x " << dHeight << endl;

    namedWindow("MyVideo",CV_WINDOW_AUTOSIZE); //create a window called "MyVideo"

    while (1)
    {
        Mat frame;

        bool bSuccess = cap.read(frame); // read a new frame from video

        if (!bSuccess) //if not success, break loop
        {
            cout << "Cannot read a frame from video stream" << endl;
            break;
        }

        MDetector.detect(frame,Markers);

        for (unsigned int i=0;i<Markers.size();i++) {
            cout<<Markers[i]<<endl;
            Markers[i].draw(frame,Scalar(0,0,255),2);
        }
        // a variable-length array is not standard C++; use a vector instead
        vector<Point2f> center(Markers.size());
        for (unsigned int i=0;i<Markers.size();i++)
        {
            float a = (Markers[i][0].x+Markers[i][1].x+Markers[i][2].x+Markers[i][3].x)/4;
            float b = (Markers[i][0].y+Markers[i][1].y+Markers[i][2].y+Markers[i][3].y)/4;
            center[i].x = a;
            center[i].y = b;
            cout<<"center ";
            cout<<a<<","<<b<<endl;
        }

        imshow("MyVideo", frame); //show the frame in "MyVideo" window

        if (waitKey(30) == 27) //wait 30ms for the 'esc' key; if pressed, break loop
        {
            cout << "esc key is pressed by user" << endl;
            break;
        }
    }
    return 0;
}
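Note: aruco::Marker also provides getCenter() (it is used in Example #25 below), which averages the four corners for you. Assuming that helper is available in your ArUco version, the manual center computation above collapses to a few lines:

// Equivalent center computation using the library helper (sketch):
for (unsigned int i = 0; i < Markers.size(); i++) {
    Point2f c = Markers[i].getCenter(); // mean of the four corner points
    cout << "center " << c.x << "," << c.y << endl;
}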
		void image_callback(const sensor_msgs::ImageConstPtr& msg)
		{
            static tf::TransformBroadcaster br;
            
			if(!cam_info_received) return;

			cv_bridge::CvImagePtr cv_ptr;
			try
			{
				cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::RGB8);
				inImage = cv_ptr->image;
				resultImg = cv_ptr->image.clone();

				//detection results will go into "markers"
				markers.clear();
				//Ok, let's detect
				mDetector.detect(inImage, markers, camParam, marker_size, false);
				//Detection of the board
				float probDetect=the_board_detector.detect(markers, the_board_config, the_board_detected, camParam, marker_size);
				if (probDetect > 0.0)
				{
					foundBoard = true; // added: remember that the board has been seen at least once
					tf::Transform transform = ar_sys::getTf(the_board_detected.Rvec, the_board_detected.Tvec);

					tf::StampedTransform stampedTransform(transform, msg->header.stamp, msg->header.frame_id, board_frame);
                    
					if (publish_tf)
						br.sendTransform(stampedTransform);

					geometry_msgs::PoseStamped poseMsg;
					tf::poseTFToMsg(transform, poseMsg.pose);
					poseMsg.header.frame_id = msg->header.frame_id;
					poseMsg.header.stamp = msg->header.stamp;
					pose_pub.publish(poseMsg);

					geometry_msgs::TransformStamped transformMsg;
					tf::transformStampedTFToMsg(stampedTransform, transformMsg);
					transform_pub.publish(transformMsg);

					geometry_msgs::Vector3Stamped positionMsg;
					positionMsg.header = transformMsg.header;
					positionMsg.vector = transformMsg.transform.translation;
					position_pub.publish(positionMsg);
					
					std_msgs::Float32 boardSizeMsg;
					boardSizeMsg.data=the_board_detected[0].ssize;
					boardSize_pub.publish(boardSizeMsg);

				}
				// added: republish the raw board pose (Rvec/Tvec) for downstream nodes
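				// Rvec/Tvec are 3x1 CV_32FC1 matrices, so they can be shipped over ROS
				// as single-channel float "images" and decoded on the subscriber side
				// with cv_bridge.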
				if(rvec_pub.getNumSubscribers() > 0 && foundBoard)
				{
					cv_bridge::CvImage rvecMsg;
					rvecMsg.header.frame_id = msg->header.frame_id;
					rvecMsg.header.stamp = msg->header.stamp;
					rvecMsg.encoding = sensor_msgs::image_encodings::TYPE_32FC1;
					rvecMsg.image = the_board_detected[0].Rvec;
					rvec_pub.publish(rvecMsg.toImageMsg());
				}
	
				if(tvec_pub.getNumSubscribers() > 0 && foundBoard)
				{
					cv_bridge::CvImage tvecMsg;
					tvecMsg.header.frame_id = msg->header.frame_id;
					tvecMsg.header.stamp = msg->header.stamp;
					tvecMsg.encoding = sensor_msgs::image_encodings::TYPE_32FC1;
					tvecMsg.image = the_board_detected[0].Tvec;
					tvec_pub.publish(tvecMsg.toImageMsg());
				}
				// end of added block
				
				//for each marker, draw info and its boundaries in the image
				for(size_t i=0; draw_markers && i < markers.size(); ++i)
				{
					markers[i].draw(resultImg,cv::Scalar(0,0,255),2);
				}


				if(camParam.isValid() && marker_size != -1)
				{
					//draw a 3d cube in each marker if there is 3d info
					for(size_t i=0; i<markers.size(); ++i)
					{
						if (draw_markers_cube) CvDrawingUtils::draw3dCube(resultImg, markers[i], camParam);
						if (draw_markers_axis) CvDrawingUtils::draw3dAxis(resultImg, markers[i], camParam);
					}
					//draw board axis
					if (probDetect > 0.0)
					{ 
						CvDrawingUtils::draw3dAxis(resultImg, the_board_detected, camParam);
						
					}
				}

				if(image_pub.getNumSubscribers() > 0)
				{
					//show input with augmented information
					cv_bridge::CvImage out_msg;
					out_msg.header.frame_id = msg->header.frame_id;
					out_msg.header.stamp = msg->header.stamp;
					out_msg.encoding = sensor_msgs::image_encodings::RGB8;
					out_msg.image = resultImg;
					image_pub.publish(out_msg.toImageMsg());
				}

				if(debug_pub.getNumSubscribers() > 0)
				{
					//show also the internal image resulting from the threshold operation
					cv_bridge::CvImage debug_msg;
					debug_msg.header.frame_id = msg->header.frame_id;
					debug_msg.header.stamp = msg->header.stamp;
					debug_msg.encoding = sensor_msgs::image_encodings::MONO8;
					debug_msg.image = mDetector.getThresholdedImage();
					debug_pub.publish(debug_msg.toImageMsg());
				}
			}
			catch (cv_bridge::Exception& e)
			{
				ROS_ERROR("cv_bridge exception: %s", e.what());
				return;
			}
		}
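For context, a helper such as ar_sys::getTf used above typically converts the OpenCV pose pair (Rvec, Tvec) into a tf::Transform. The sketch below shows one plausible implementation; the function name cvPoseToTf and the CV_32FC1 layout are assumptions here, and the real ar_sys code may differ in axis conventions:

#include <opencv2/calib3d/calib3d.hpp>
#include <tf/transform_datatypes.h>

// Sketch: build a tf::Transform from a 3x1 rotation vector and a 3x1
// translation vector (both CV_32FC1), as produced by ArUco pose estimation.
tf::Transform cvPoseToTf(const cv::Mat& rvec, const cv::Mat& tvec)
{
    cv::Mat rot;
    cv::Rodrigues(rvec, rot); // expand the axis-angle vector into a 3x3 matrix

    tf::Matrix3x3 tf_rot(rot.at<float>(0,0), rot.at<float>(0,1), rot.at<float>(0,2),
                         rot.at<float>(1,0), rot.at<float>(1,1), rot.at<float>(1,2),
                         rot.at<float>(2,0), rot.at<float>(2,1), rot.at<float>(2,2));
    tf::Vector3 tf_trans(tvec.at<float>(0,0), tvec.at<float>(1,0), tvec.at<float>(2,0));
    return tf::Transform(tf_rot, tf_trans);
}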
  void image_callback(const sensor_msgs::ImageConstPtr& msg)
  {
    static tf::TransformBroadcaster br;

    
    if(cam_info_received)
    {
      ros::Time curr_stamp(ros::Time::now());
      cv_bridge::CvImagePtr cv_ptr;
      try
      {
        cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::RGB8);
        inImage = cv_ptr->image;

        //detection results will go into "markers"
        markers.clear();
        //Ok, let's detect

        mDetector.detect(inImage, markers, camParam, marker_size, false);
		
		//ROS_INFO("x: %0.3f y: %0.3f z: %0.3f", markers[0].Tvec.at<float>(0,0), markers[0].Tvec.at<float>(0,1), markers[0].Tvec.at<float>(0,2));	
		
        //for each marker, draw info and its boundaries in the image
        for(size_t i=0; i<markers.size(); ++i)
        {
          // only publishing the selected marker

          //----------------------add all pose messages of markers detected --------------------------------------
          //if(markers[i].id == marker_id)


          // whitelist of marker ids whose pose is published
          if(markers[i].id == 26  || markers[i].id == 35  || markers[i].id == 58  ||
             markers[i].id == 163 || markers[i].id == 640 || markers[i].id == 512 ||
             markers[i].id == 43  || markers[i].id == 291 || markers[i].id == 355)
          {
            // by Weiwei
            //ROS_INFO("x: %0.3f y: %0.3f z: %0.3f", markers[i].Tvec.at<float>(0,0), markers[i].Tvec.at<float>(0,1), markers[i].Tvec.at<float>(0,2));

            tf::Transform transform = aruco_ros::arucoMarker2Tf(markers[i]);

            tf::StampedTransform cameraToReference;
            cameraToReference.setIdentity();

            if ( reference_frame != camera_frame )
            {
              getTransform(reference_frame,
                           camera_frame,
                           cameraToReference);
            }

            transform = 
              static_cast<tf::Transform>(cameraToReference) 
              * static_cast<tf::Transform>(rightToLeft) 
              * transform;

            tf::StampedTransform stampedTransform(transform, curr_stamp,
                                                  reference_frame, marker_frame);
            br.sendTransform(stampedTransform);
            geometry_msgs::PoseStamped poseMsg;
			
            tf::poseTFToMsg(transform, poseMsg.pose);
            poseMsg.header.frame_id = reference_frame;
            poseMsg.header.stamp = curr_stamp;


            double aruco_roll_, aruco_pitch_, aruco_yaw_;
            tf::Quaternion aruco_quat_;
            tf::quaternionMsgToTF(poseMsg.pose.orientation, aruco_quat_);
            tf::Matrix3x3(aruco_quat_).getRPY(aruco_roll_, aruco_pitch_, aruco_yaw_);

            //ROS_INFO("April Tag RPY: [%0.3f, %0.3f, %0.3f]", aruco_roll_*180/3.1415926, aruco_pitch_*180/3.1415926, aruco_yaw_*180/3.1415926);
//-------------------------unified coordinate systems of pose----------------------------
            aruco_yaw_ = aruco_yaw_*180/3.1415926; // radians -> degrees (for printing and the offset below)
            printf("Original [x, y, yaw] = [%0.3f, %0.3f, %0.3f]\n", poseMsg.pose.position.x, poseMsg.pose.position.y, aruco_yaw_);

            if (markers[i].id == 26 || markers[i].id == 58)
            {
              float PI = 3.1415926;
              float ang = PI*3/4;
              float x_0 = -0.045;//-0.015;
              float y_0 = -0.015;//0.045;
              

              poseMsg.pose.position.x = x_0 + poseMsg.pose.position.x;// + poseMsg.pose.position.x * cos(-ang) - poseMsg.pose.position.y * sin(-ang); 
              poseMsg.pose.position.y = y_0 + poseMsg.pose.position.y;// + poseMsg.pose.position.x * sin(-ang) + poseMsg.pose.position.y * cos(-ang);

              //printf("[x, y] = [%0.3f, %0.3f]\n",poseMsg.pose.position.x, poseMsg.pose.position.y);

              aruco_yaw_ = aruco_yaw_ + ang*180/PI; 
              printf("[x, y, yaw] = [%0.3f, %0.3f, %0.3f]\n",poseMsg.pose.position.x, poseMsg.pose.position.y, aruco_yaw_);

              //printf("-----------unify the coordinate system ---------------------\n"); 
            }

            //printf("------------------------------------------------------------------\n-----------------aruco_yaw_ = %0.3f\n", aruco_yaw_);
            //printf("------------------------------------------------------------------\n");

            double temp_x = poseMsg.pose.position.x;
            double temp_y = poseMsg.pose.position.y;

            poseMsg.pose.position.x = -temp_y;
            poseMsg.pose.position.y = -temp_x;

            // tf::createQuaternionFromRPY expects radians; aruco_yaw_ is in
            // degrees at this point, so convert it back
            tf::Quaternion quat = tf::createQuaternionFromRPY(aruco_roll_, aruco_pitch_, aruco_yaw_*3.1415926/180.0);
            
            poseMsg.pose.orientation.x = quat.x();
            poseMsg.pose.orientation.y = quat.y();
            poseMsg.pose.orientation.z = quat.z();
            poseMsg.pose.orientation.w = quat.w();   

            pose_pub.publish(poseMsg);


            geometry_msgs::TransformStamped transformMsg;
            tf::transformStampedTFToMsg(stampedTransform, transformMsg);
            transform_pub.publish(transformMsg);

            geometry_msgs::Vector3Stamped positionMsg;
            positionMsg.header = transformMsg.header;
            positionMsg.vector = transformMsg.transform.translation;
            position_pub.publish(positionMsg);
          }
          // but drawing all the detected markers
          markers[i].draw(inImage,cv::Scalar(0,0,255),2);
        }

        //draw a 3d cube in each marker if there is 3d info
        if(camParam.isValid() && marker_size!=-1)
        {
          for(size_t i=0; i<markers.size(); ++i)
          {
            CvDrawingUtils::draw3dAxis(inImage, markers[i], camParam);
          }
        }

        if(image_pub.getNumSubscribers() > 0)
        {
          //show input with augmented information
          cv_bridge::CvImage out_msg;
          out_msg.header.stamp = curr_stamp;
          out_msg.encoding = sensor_msgs::image_encodings::RGB8;
          out_msg.image = inImage;
          image_pub.publish(out_msg.toImageMsg());
        }

        if(debug_pub.getNumSubscribers() > 0)
        {
          //show also the internal image resulting from the threshold operation
          cv_bridge::CvImage debug_msg;
          debug_msg.header.stamp = curr_stamp;
          debug_msg.encoding = sensor_msgs::image_encodings::MONO8;
          debug_msg.image = mDetector.getThresholdedImage();
          debug_pub.publish(debug_msg.toImageMsg());
        }
      }
      catch (cv_bridge::Exception& e)
      {
        ROS_ERROR("cv_bridge exception: %s", e.what());
        return;
      }
    }
  }
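One detail the yaw-offset code above glosses over: after adding the 135-degree correction (ang = PI*3/4), the yaw can leave the (-180, 180] range. A small helper like the following (hypothetical, not part of aruco_ros) renormalizes it:

// Hypothetical helper: wrap an angle in degrees into (-180, 180].
static double wrapDegrees(double deg)
{
    while (deg > 180.0)   deg -= 360.0;
    while (deg <= -180.0) deg += 360.0;
    return deg;
}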
Example #24
0
int main(int argc, char **argv) {
    try {
        if (readArguments(argc, argv) == false) {
            return 0;
        }
        // parse arguments

        // read from camera or from  file
        if (TheInputVideo.find("live") != string::npos) {
            int vIdx = 0;
            // check if the :idx is here
            char cad[100];
            if (TheInputVideo.find(":") != string::npos) {
                std::replace(TheInputVideo.begin(), TheInputVideo.end(), ':', ' ');
                sscanf(TheInputVideo.c_str(), "%s %d", cad, &vIdx);
            }
            cout << "Opening camera index " << vIdx << endl;
            TheVideoCapturer.open(vIdx);
            waitTime = 10;
        } else
            TheVideoCapturer.open(TheInputVideo);
        // check video is open
        if (!TheVideoCapturer.isOpened()) {
            cerr << "Could not open video" << endl;
            return -1;
        }
        bool isVideoFile = false;
        if (TheInputVideo.find(".avi") != std::string::npos || TheInputVideo.find("live") != string::npos)
            isVideoFile = true;
        // read first image to get the dimensions
        TheVideoCapturer >> TheInputImage;

        // read camera parameters if passed
        if (TheIntrinsicFile != "") {
            TheCameraParameters.readFromXMLFile(TheIntrinsicFile);
            TheCameraParameters.resize(TheInputImage.size());
        }
        // Configure other parameters
        if (ThePyrDownLevel > 0)
            MDetector.pyrDown(ThePyrDownLevel);


        // Create gui

        cv::namedWindow("thres", 1);
        cv::namedWindow("in", 1);

        MDetector.setThresholdParams(7, 7);
        MDetector.setThresholdParamRange(2, 0);
        // 	MDetector.enableLockedCornersMethod(true);
        //         MDetector.setCornerRefinementMethod ( MarkerDetector::SUBPIX );
        MDetector.getThresholdParams(ThresParam1, ThresParam2);
        iThresParam1 = ThresParam1;
        iThresParam2 = ThresParam2;
        //cv::createTrackbar("ThresParam1", "in", &iThresParam1, 25, cvTackBarEvents);
        //cv::createTrackbar("ThresParam2", "in", &iThresParam2, 13, cvTackBarEvents);

        char key = 0;
        int index = 0;
        // capture until press ESC or until the end of the video
        TheVideoCapturer.retrieve(TheInputImage);

        cv::Size sz = TheInputImage.size();
        MDetector.createCudaBuffers(sz.width, sz.height);

        do {

            // copy image

            index++; // number of images captured
            double tick = (double)getTickCount(); // for checking the speed
            // Detection of markers in the image passed
            MDetector.detect(TheInputImage, TheMarkers, TheCameraParameters, TheMarkerSize);
            // check the speed by calculating the mean detection time over all iterations
            AvrgTime.first += ((double)getTickCount() - tick) / getTickFrequency();
            AvrgTime.second++;
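            // AvrgTime accumulates (total elapsed seconds, iteration count), so
            // first/second below is the mean per-frame detection time in seconds.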
            cout << "\rTime detection=" << 1000 * AvrgTime.first / AvrgTime.second << " milliseconds nmarkers=" << TheMarkers.size() << std::flush;

            // print marker info and draw the markers in image
            TheInputImage.copyTo(TheInputImageCopy);

            for (unsigned int i = 0; i < TheMarkers.size(); i++) {
                cout << endl << TheMarkers[i];
                TheMarkers[i].draw(TheInputImageCopy, Scalar(0, 0, 255), 1);
            }
            if (TheMarkers.size() != 0)
                cout << endl;
            // print other rectangles that contain no valid markers
            /**     for (unsigned int i=0;i<MDetector.getCandidates().size();i++) {
                     aruco::Marker m( MDetector.getCandidates()[i],999);
                     m.draw(TheInputImageCopy,cv::Scalar(255,0,0));
                 }*/



            // draw a 3d cube in each marker if there is 3d info
            if (TheCameraParameters.isValid())
                for (unsigned int i = 0; i < TheMarkers.size(); i++) {
                    CvDrawingUtils::draw3dCube(TheInputImageCopy, TheMarkers[i], TheCameraParameters);
                    CvDrawingUtils::draw3dAxis(TheInputImageCopy, TheMarkers[i], TheCameraParameters);
                }
            // DONE! Easy, right?
            // show input with augmented information and  the thresholded image
            cv::imshow("in", TheInputImageCopy);
            cv::imshow("thres", MDetector.getThresholdedImage());
            //cv::imshow("thres_gpu", MDetector.getThresholdedImageGPU());

            key = cv::waitKey(waitTime); // wait for key to be pressed
            if (isVideoFile)
                TheVideoCapturer.retrieve(TheInputImage);

        } while (key != 27 && (TheVideoCapturer.grab() || !isVideoFile));

    } catch (std::exception &ex)

    {
        cout << "Exception :" << ex.what() << endl;
    }
}
Example #25
0
int main( int argc, char** argv )
{
	MarkerDetector MDetector;
	vector<Marker> markers;
	Point action(-1,-1);

	namedWindow("Settings",CV_WINDOW_NORMAL);
	createTrackbar("Threshold","Settings",&three,200);
	createTrackbar("Variable","Settings",&zmienna,100);
	createTrackbar("White balance","Settings",&white,10000);
	createTrackbar("Min area","Settings",&minArea,100);
	createTrackbar("Max area","Settings",&maxArea,100);

	Game game("lukasz",985,"daniel",838,"25.172.199.151",6121,1280,720);
//	game.server.addPlayer(1,"lukasz");
	//game.server.addPlayer(2,"daniel");
	vector<CardB> bkarty;
	cout<<"Loading cards"<<endl;
	fstream plik("cards.txt", ios::in );
	string dane;
	while(getline(plik,dane)) // looping on getline avoids the classic eof() off-by-one
	{
		int id=0,t=0,cost=0,att=0,def=0;
		char name[10];
		string name1;
		sscanf(dane.c_str(),"%d %9s %d %d %d %d",&id,name,&t,&cost,&att,&def); // %9s guards the 10-byte buffer
		name1=name;
		Type tt;
		if(t==0) tt=CREATURE;
		else tt=LAND;
		Mat a=imread("C:/umk/"+name1+".jpg");
		if(!a.data) {cout<<"Card image not found: "<< name<<".jpg"<<endl; continue;}
		bkarty.push_back(CardB(a,id,name,Red,tt,att,def,cost));
	}
	cout<<"Cards loaded"<<endl;

	VideoCapture capture(0);
	Mat frame;
	vector<Card> karty;
	capture.set(CV_CAP_PROP_FRAME_WIDTH, 1280 );
	capture.set(CV_CAP_PROP_FRAME_HEIGHT, 720 );
	capture.set(CV_CAP_PROP_FOCUS, 13 );

	capture.read(frame);
	cout<<"Capture ready"<<endl;
	while(1)
	{
		capture.read(frame);
		markers.clear();
		MDetector.detect(frame,markers);
		draw_s(markers,frame,game);
		if(frame.data)
			Wykryj_karty(frame,three,karty,bkarty,game);
		if(cv::waitKey(30)==27) break;
	}
	cout<<"Capture loop finished"<<endl;
	capture.release();
	cv::waitKey(0);
	return 0;
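	// NOTE: everything below this return is unreachable; it appears to be an
	// earlier (or alternative) version of the main loop kept by the author.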
	while(1)
	{
		markers.clear();

		capture.read(frame);
		MDetector.detect(frame,markers);
		for(unsigned int i=0;i<markers.size();i++)
		{
			markers[i].draw(frame,Scalar(0,0,255),3);
			if(markers[i].id==ACTION)
			{
				if(action.x==-1)
				{
					action=markers[i].getCenter();
				}
				else
				{
					if(odleglos(action,markers[i].getCenter())>=100)
					{
						action=markers[i].getCenter();
						game.nextPhase();
						if(game.phase==ATAK)
						{
							for(unsigned int j=0;j<karty.size();j++)
							{
								if(karty[j].owner==game.getCurrentPlayer())
								{
									karty[j].prepareToAttack();
								}
							}
						}
						else if(game.phase==OBRONA)
						{
							for(unsigned int j=0;j<karty.size();j++)
							{
								if(!(karty[j].owner==game.getCurrentPlayer()))
								{
									karty[j].prepareToBlock();
								}
							}
						}
						else if(game.phase==WYMIANA)
						{
							for(unsigned int j=0;j<karty.size();j++)
							{
								karty[j].Clear();
							}
						}
					}
				}
			}

			if(game.player1.markerId==markers[i].id && game.aPlayer!=game.player1.markerId) {game.aPlayer=markers[i].id; cout<<"Player changed"<<endl; break;}
			if(game.player2.markerId==markers[i].id && game.aPlayer!=game.player2.markerId) {game.aPlayer=markers[i].id; cout<<"Player changed"<<endl; break;}

			draw_s(markers,frame,game);
		}

		Wykryj_karty(frame,three,karty,bkarty,game);

		game.Draw();
		if(cv::waitKey(30)==27) break;
	}
	capture.release();
	cv::waitKey(0);
	return 0;
}
int main(int argc,char **argv)
{
    try
    {
        if (readArguments (argc,argv)==false) {
            return 0;
        }
        //parse arguments
        //read from camera or from  file
        if (TheInputVideo=="live") {
            TheVideoCapturer.open(0);
            waitTime=10;
        }
        else  TheVideoCapturer.open(TheInputVideo);
        //check video is open
        if (!TheVideoCapturer.isOpened()) {
            cerr<<"Could not open video"<<endl;
            return -1;

        }

        //read first image to get the dimensions
        TheVideoCapturer>>TheInputImage;

        //read camera parameters if passed
        if (TheIntrinsicFile!="") {
            TheCameraParameters.readFromXMLFile(TheIntrinsicFile);
            TheCameraParameters.resize(TheInputImage.size());
        }
        //Configure other parameters
        if (ThePyrDownLevel>0)
            MDetector.pyrDown(ThePyrDownLevel);


        //Create gui

        cv::namedWindow("thres",1);
        cv::namedWindow("in",1);
        MDetector.getThresholdParams( ThresParam1,ThresParam2);
        MDetector.setCornerRefinementMethod(MarkerDetector::LINES);
        iThresParam1=ThresParam1;
        iThresParam2=ThresParam2;
        cv::createTrackbar("ThresParam1", "in",&iThresParam1, 13, cvTackBarEvents);
        cv::createTrackbar("ThresParam2", "in",&iThresParam2, 13, cvTackBarEvents);

        char key=0;
        int index=0;
        //capture until press ESC or until the end of the video
        while ( key!=27 && TheVideoCapturer.grab())
        {
            TheVideoCapturer.retrieve( TheInputImage);
            //copy image

            index++; //number of images captured
            double tick = (double)getTickCount();//for checking the speed
            //Detection of markers in the image passed
            MDetector.detect(TheInputImage,TheMarkers,TheCameraParameters,TheMarkerSize);
            //check the speed by calculating the mean detection time over all iterations
            AvrgTime.first+=((double)getTickCount()-tick)/getTickFrequency();
            AvrgTime.second++;
            cout<<"Time detection="<<1000*AvrgTime.first/AvrgTime.second<<" milliseconds"<<endl;

            //print marker info and draw the markers in image
            TheInputImage.copyTo(TheInputImageCopy);
            for (unsigned int i=0;i<TheMarkers.size();i++) {
                cout<<TheMarkers[i]<<endl;
                TheMarkers[i].draw(TheInputImageCopy,Scalar(0,0,255),1);
            }
            //print other rectangles that contain no valid markers
       /**     for (unsigned int i=0;i<MDetector.getCandidates().size();i++) {
                aruco::Marker m( MDetector.getCandidates()[i],999);
                m.draw(TheInputImageCopy,cv::Scalar(255,0,0));
            }*/



            //draw a 3d cube in each marker if there is 3d info
            if (  TheCameraParameters.isValid())
                for (unsigned int i=0;i<TheMarkers.size();i++) {
                    CvDrawingUtils::draw3dCube(TheInputImageCopy,TheMarkers[i],TheCameraParameters);
                    CvDrawingUtils::draw3dAxis(TheInputImageCopy,TheMarkers[i],TheCameraParameters);
                }
            //DONE! Easy, right?
            cout<<endl<<endl<<endl;
            //show input with augmented information and  the thresholded image
            cv::imshow("in",TheInputImageCopy);
            cv::imshow("thres",MDetector.getThresholdedImage());

            key=cv::waitKey(waitTime);//wait for key to be pressed
        }

    } catch (std::exception &ex)

    {
        cout<<"Exception :"<<ex.what()<<endl;
    }

}
Example #27
0
int main(int argc, char** argv)
{
    try
    {
        CmdLineParser cml(argc, argv);
        if (argc < 2 || cml["-h"])
        {
            cerr << "Invalid number of arguments" << endl;
            cerr << "Usage: (in.avi|live[:camera_index(e.g 0 or 1)]) [-c camera_params.yml] [-s  marker_size_in_meters] [-d "
                    "dictionary:ARUCO by default] [-h]"
                 << endl;
            cerr << "\tDictionaries: ";
            for (auto dict : aruco::Dictionary::getDicTypes())
                cerr << dict << " ";
            cerr << endl;
            cerr << "\t Instead of these, you can directly indicate the path to a file with your own generated "
                    "dictionary"
                 << endl;
            return false;
        }

        ///////////  PARSE ARGUMENTS
        string TheInputVideo = argv[1];
        // read camera parameters if passed
        if (cml["-c"])
            TheCameraParameters.readFromXMLFile(cml("-c"));
        float TheMarkerSize = std::stof(cml("-s", "-1"));
        // aruco::Dictionary::DICT_TYPES  TheDictionary= Dictionary::getTypeFromString( cml("-d","ARUCO") );

        ///////////  OPEN VIDEO
        // read from camera or from  file
        if (TheInputVideo.find("live") != string::npos)
        {
            int vIdx = 0;
            // check if the :idx is here
            char cad[100];
            if (TheInputVideo.find(":") != string::npos)
            {
                std::replace(TheInputVideo.begin(), TheInputVideo.end(), ':', ' ');
                sscanf(TheInputVideo.c_str(), "%s %d", cad, &vIdx);
            }
            cout << "Opening camera index " << vIdx << endl;
            TheVideoCapturer.open(vIdx);
            waitTime = 10;
        }
        else
            TheVideoCapturer.open(TheInputVideo);
        // check video is open
        if (!TheVideoCapturer.isOpened())
            throw std::runtime_error("Could not open video");

        ///// CONFIGURE DATA
        // read first image to get the dimensions
        TheVideoCapturer >> TheInputImage;
        if (TheCameraParameters.isValid())
            TheCameraParameters.resize(TheInputImage.size());
        dictionaryString=cml("-d", "ARUCO");
        MDetector.setDictionary(dictionaryString,float(iCorrectionRate)/10. );  // sets the dictionary to be employed (ARUCO,APRILTAGS,ARTOOLKIT,etc)
        MDetector.setThresholdParams(7, 7);
        MDetector.setThresholdParamRange(2, 0);

        // gui requirements : the trackbars to change this parameters
        iThresParam1 = static_cast<int>(MDetector.getParams()._thresParam1);
        iThresParam2 = static_cast<int>(MDetector.getParams()._thresParam2);
        cv::namedWindow("in");
        cv::createTrackbar("ThresParam1", "in", &iThresParam1, 25, cvTackBarEvents);
        cv::createTrackbar("ThresParam2", "in", &iThresParam2, 13, cvTackBarEvents);
        cv::createTrackbar("correction_rate", "in", &iCorrectionRate, 10, cvTackBarEvents);
        cv::createTrackbar("EnclosedMarkers", "in", &iEnclosedMarkers, 1, cvTackBarEvents);
        cv::createTrackbar("ShowAllCandidates", "in", &iShowAllCandidates, 1, cvTackBarEvents);

        // go!
        char key = 0;
        int index = 0,indexSave=0;
        // capture until press ESC or until the end of the video
        do
        {
            TheVideoCapturer.retrieve(TheInputImage);
            // copy image
            double tick = (double)getTickCount();  // for checking the speed
            // Detection of markers in the image passed
            TheMarkers = MDetector.detect(TheInputImage, TheCameraParameters, TheMarkerSize);
            // check the speed by calculating the mean detection time over all iterations
            AvrgTime.first += ((double)getTickCount() - tick) / getTickFrequency();
            AvrgTime.second++;
            cout << "\rTime detection=" << 1000 * AvrgTime.first / AvrgTime.second
                 << " milliseconds nmarkers=" << TheMarkers.size() << std::endl;

            // print marker info and draw the markers in image
            TheInputImage.copyTo(TheInputImageCopy);

            if (iShowAllCandidates){
                auto candidates=MDetector.getCandidates();
                for(auto cand:candidates)
                    Marker(cand,-1).draw(TheInputImageCopy, Scalar(255, 0, 255));
            }

            for (unsigned int i = 0; i < TheMarkers.size(); i++)
            {
                cout << TheMarkers[i] << endl;
                TheMarkers[i].draw(TheInputImageCopy, Scalar(0, 0, 255));
            }

            // draw a 3d cube in each marker if there is 3d info
            if (TheCameraParameters.isValid() && TheMarkerSize > 0)
                for (unsigned int i = 0; i < TheMarkers.size(); i++)
                {
                    CvDrawingUtils::draw3dCube(TheInputImageCopy, TheMarkers[i], TheCameraParameters);
                    CvDrawingUtils::draw3dAxis(TheInputImageCopy, TheMarkers[i], TheCameraParameters);
                }

            // DONE! Easy, right?
            // show input with augmented information and  the thresholded image
            cv::imshow("in", resize(TheInputImageCopy, 1280));
            cv::imshow("thres", resize(MDetector.getThresholdedImage(), 1280));

            key = cv::waitKey(waitTime);  // wait for key to be pressed
            if (key == 's')
                waitTime = waitTime == 0 ? 10 : 0;
            if (key == 'w'){ // write current input image to disk
                string number = std::to_string(indexSave++);
                while (number.size() != 3) number = "0" + number; // zero-pad on the left (001, 002, ...)
                string imname = "arucoimage" + number + ".png";
                cv::imwrite(imname, TheInputImage);
                cout << "saved " << imname << endl;
            }
            index++;  // number of images captured

        } while (key != 27 && (TheVideoCapturer.grab()));
    }
    catch (std::exception& ex)

    {
        cout << "Exception :" << ex.what() << endl;
    }
}
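As a side note, the manual zero-padding of the save index above can also be written with stream manipulators. A minimal sketch, reusing the indexSave counter and TheInputImage from the loop above:

#include <iomanip>
#include <sstream>

std::ostringstream name;
name << "arucoimage" << std::setw(3) << std::setfill('0') << indexSave++ << ".png";
cv::imwrite(name.str(), TheInputImage); // e.g. arucoimage001.png
cout << "saved " << name.str() << endl;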
void imageCallback(const sensor_msgs::ImageConstPtr& msg)
{
    cout<< "get a Image."<<endl;
    Mat Image;
    geometry_msgs::Vector3 outMsg;

    cv_bridge::CvImagePtr cv_ptr;
    cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::BGR8);
    Image = cv_ptr -> image;



    MarkerDetector MDetector;
    vector<Marker> Markers;

    //read the input image
    cv::Mat InImage;
    InImage = Image;

    //Ok, let's detect
    MDetector.detect(InImage, Markers, camParam, MKsize);

    //for each marker, draw info and its boundaries in the image
    for (unsigned int i=0;i<Markers.size();i++)
    {

        //cout<<Markers[i]<<endl;
        //cout<<Markers[i].Rvec<<endl;
        //cout<<Markers[i].id<<endl;
        cout<<Markers[i].Tvec<<endl;
        //cout<<Markers[i].ssize<<endl;
        //cout<<Markers[i][0].x<<" "<<Markers[i][0].y<<endl;
        //cout<<Markers[i][1].x<<" "<<Markers[i][1].y<<endl;
        //cout<<Markers[i][2].x<<" "<<Markers[i][2].y<<endl;
        //cout<<Markers[i][3].x<<" "<<Markers[i][3].y<<endl;

        Markers[i].draw(InImage,Scalar(0,255,255),2);
        CvDrawingUtils::draw3dCube(InImage, Markers[i], camParam);
    }

    numOfMarkersDetected = Markers.size();
    if (numOfMarkersDetected == 0)
    {
        outMsg.x = 0;
        outMsg.y = 0;
        outMsg.z = 0;
        pub.publish(outMsg);
        //return;
    }
    else
    {
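        // The detector reports the marker pose in the camera frame:
        //   p_cam = R * p_marker + Tvec.
        // Putting the camera origin (p_cam = 0) into that relation and using
        // R^-1 = R^T for a rotation matrix gives the camera position expressed
        // in the marker frame: p_marker = -R^T * Tvec, computed below.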
        Rodrigues(Markers[0].Rvec, Rmtx);
        pos_tmp = -Rmtx.t()*Markers[0].Tvec;
        posX = pos_tmp.at<float>(0,0);
        posY = pos_tmp.at<float>(1,0);
        posZ = pos_tmp.at<float>(2,0);
        cout<<"posXYZ| "<<posX<<" | "<<posY<<" | "<<posZ<<endl;

        center_x = ( Markers[0][0].x + Markers[0][1].x + Markers[0][2].x + Markers[0][3].x )/4;
        center_y = ( Markers[0][0].y + Markers[0][1].y + Markers[0][2].y + Markers[0][3].y )/4;
        circle(InImage,Point2f(center_x,center_y),3,Scalar(0,255,255),4,8);
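        // The 320/240 offsets assume a 640x480 stream: the published vector is
        // the marker-center error relative to the image center, in pixels.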

        outMsg.x = center_x - 320;
        outMsg.y = center_y - 240;
        outMsg.z = 1.0;
        pub.publish(outMsg);

        cout<<center_x<<" "<<center_y<<endl;

        //return;
    }


    cv::imshow("debug",InImage);

    //wait briefly for a key press; a single waitKey(1) call is enough
    cv::waitKey(1);
    cout << "ALL done." << endl << endl;
}
  void image_callback(const sensor_msgs::ImageConstPtr& msg)
  {
    //        double ticksBefore = cv::getTickCount();
    static tf::TransformBroadcaster br;
    if(cam_info_received)
    {
      cv_bridge::CvImagePtr cv_ptr;
      try
      {
        cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::RGB8);
        inImage = cv_ptr->image;

        //detection results will go into "markers"
        markers.clear();
        //Ok, let's detect
        mDetector.detect(inImage, markers, camParam, marker_size, false);
        //for each marker, draw info and its boundaries in the image
        for(size_t i=0; i<markers.size(); ++i)
        {
          // only publishing the selected marker
          if(markers[i].id == marker_id)
          {
            tf::Transform transform = aruco_ros::arucoMarker2Tf(markers[i]);
            tf::StampedTransform stampedTransform(transform, ros::Time::now(),
                                                  parent_name, child_name);
            br.sendTransform(stampedTransform);
            geometry_msgs::PoseStamped poseMsg;
            tf::poseTFToMsg(transform, poseMsg.pose);
            poseMsg.header.frame_id = parent_name;
            poseMsg.header.stamp = ros::Time::now();
            pose_pub.publish(poseMsg);
          }
          // but drawing all the detected markers
          markers[i].draw(inImage,Scalar(0,0,255),2);
        }

        //draw a 3d cube in each marker if there is 3d info
        if(camParam.isValid() && marker_size!=-1)
        {
          for(size_t i=0; i<markers.size(); ++i)
          {
            //CvDrawingUtils::draw3dCube(inImage, markers[i], camParam);
            CvDrawingUtils::draw3dAxis(inImage, markers[i], camParam);
          }
        }

        if(image_pub.getNumSubscribers() > 0)
        {
          //show input with augmented information
          cv_bridge::CvImage out_msg;
          out_msg.header.stamp = ros::Time::now();
          out_msg.encoding = sensor_msgs::image_encodings::TYPE_8UC3;
          out_msg.image = inImage;
          image_pub.publish(out_msg.toImageMsg());
        }

        if(debug_pub.getNumSubscribers() > 0)
        {
          //show also the internal image resulting from the threshold operation
          cv_bridge::CvImage debug_msg;
          debug_msg.header.stamp = ros::Time::now();
          debug_msg.encoding = sensor_msgs::image_encodings::TYPE_8UC1;
          debug_msg.image = mDetector.getThresholdedImage();
          debug_pub.publish(debug_msg.toImageMsg());
        }

        //            ROS_INFO("runtime: %f ms",
        //                     1000*(cv::getTickCount() - ticksBefore)/cv::getTickFrequency());
      }
      catch (cv_bridge::Exception& e)
      {
        ROS_ERROR("cv_bridge exception: %s", e.what());
        return;
      }
    }
  }
void image_callback(const sensor_msgs::ImageConstPtr& msg)
{
  double ticksBefore = cv::getTickCount();
  static tf::TransformBroadcaster br;
  if(cam_info_received)
  {
    cv_bridge::CvImagePtr cv_ptr;
    try
    {
      cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::RGB8);
      inImage = cv_ptr->image;

      if(normalizeImageIllumination)
      {
        cv::Mat inImageNorm;
        pal_vision_util::dctNormalization(inImage, inImageNorm, dctComponentsToRemove);
        inImage = inImageNorm;
      }

      //detection results will go into "markers"
      markers.clear();
      //Ok, let's detect
      mDetector.detect(inImage, markers, camParam, marker_size);
      //for each marker, draw info and its boundaries in the image
      for(unsigned int i=0; i<markers.size(); ++i)
      {
        // only publishing the selected marker
        if ( markers[i].id == marker_id1 )
        {
          tf::Transform transform = aruco_ros::arucoMarker2Tf(markers[i]);
          br.sendTransform(tf::StampedTransform(transform, ros::Time::now(),
                                                parent_name, child_name1));
          geometry_msgs::Pose poseMsg;
          tf::poseTFToMsg(transform, poseMsg);
          pose_pub1.publish(poseMsg);
        }
        else if ( markers[i].id == marker_id2 )
        {
          tf::Transform transform = aruco_ros::arucoMarker2Tf(markers[i]);
          br.sendTransform(tf::StampedTransform(transform, ros::Time::now(),
                                                parent_name, child_name2));
          geometry_msgs::Pose poseMsg;
          tf::poseTFToMsg(transform, poseMsg);
          pose_pub2.publish(poseMsg);
        }

        // but drawing all the detected markers
        markers[i].draw(inImage,Scalar(0,0,255),2);
      }

      //paint a circle in the center of the image
      cv::circle(inImage, cv::Point(inImage.cols/2, inImage.rows/2), 4, cv::Scalar(0,255,0), 1);

      if ( markers.size() == 2 )
      {
        float x[2], y[2], u[2], v[2];
        for (unsigned int i = 0; i < 2; ++i)
        {
          ROS_DEBUG_STREAM("Marker(" << i << ") at camera coordinates = ("
                           << markers[i].Tvec.at<float>(0,0) << ", "
                           << markers[i].Tvec.at<float>(1,0) << ", "
                           << markers[i].Tvec.at<float>(2,0));
          //normalized coordinates of the marker
          x[i] = markers[i].Tvec.at<float>(0,0)/markers[i].Tvec.at<float>(2,0);
          y[i] = markers[i].Tvec.at<float>(1,0)/markers[i].Tvec.at<float>(2,0);
          //undistorted pixel
          u[i] = x[i]*camParam.CameraMatrix.at<float>(0,0) +
              camParam.CameraMatrix.at<float>(0,2);
          v[i] = y[i]*camParam.CameraMatrix.at<float>(1,1) +
              camParam.CameraMatrix.at<float>(1,2);
        }

        ROS_DEBUG_STREAM("Mid point between the two markers in the image = ("
                         << (x[0]+x[1])/2 << ", " << (y[0]+y[1])/2 << ")");

        //              //paint a circle in the mid point of the normalized coordinates of both markers
        //              cv::circle(inImage,
        //                         cv::Point((u[0]+u[1])/2, (v[0]+v[1])/2),
        //                         3, cv::Scalar(0,0,255), CV_FILLED);


        //compute the midpoint in 3D:
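        // Projecting the 3D midpoint is not the same as averaging the two
        // projected pixels: perspective division by Z is nonlinear, so the two
        // results differ whenever the markers sit at different depths.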
        float midPoint3D[3]; //3D point
        for (unsigned int i = 0; i < 3; ++i )
          midPoint3D[i] = ( markers[0].Tvec.at<float>(i,0) +
                            markers[1].Tvec.at<float>(i,0) ) / 2;
        //now project the 3D mid point to normalized coordinates
        float midPointNormalized[2];
        midPointNormalized[0] = midPoint3D[0]/midPoint3D[2]; //x
        midPointNormalized[1] = midPoint3D[1]/midPoint3D[2]; //y
        u[0] = midPointNormalized[0]*camParam.CameraMatrix.at<float>(0,0) +
            camParam.CameraMatrix.at<float>(0,2);
        v[0] = midPointNormalized[1]*camParam.CameraMatrix.at<float>(1,1) +
            camParam.CameraMatrix.at<float>(1,2);

        ROS_DEBUG_STREAM("3D Mid point between the two markers in undistorted pixel coordinates = ("
                         << u[0] << ", " << v[0] << ")");

        //paint a circle in the mid point of the normalized coordinates of both markers
        cv::circle(inImage,
                   cv::Point(u[0], v[0]),
                   3, cv::Scalar(0,0,255), CV_FILLED);

      }

      //draw a 3d cube in each marker if there is 3d info
      if(camParam.isValid() && marker_size!=-1)
      {
        for(unsigned int i=0; i<markers.size(); ++i)
        {
          CvDrawingUtils::draw3dCube(inImage, markers[i], camParam);
        }
      }

      if(image_pub.getNumSubscribers() > 0)
      {
        //show input with augmented information
        cv_bridge::CvImage out_msg;
        out_msg.header.stamp = ros::Time::now();
        out_msg.encoding = sensor_msgs::image_encodings::RGB8;
        out_msg.image = inImage;
        image_pub.publish(out_msg.toImageMsg());
      }

      if(debug_pub.getNumSubscribers() > 0)
      {
        //show also the internal image resulting from the threshold operation
        cv_bridge::CvImage debug_msg;
        debug_msg.header.stamp = ros::Time::now();
        debug_msg.encoding = sensor_msgs::image_encodings::MONO8;
        debug_msg.image = mDetector.getThresholdedImage();
        debug_pub.publish(debug_msg.toImageMsg());
      }

      ROS_DEBUG("runtime: %f ms",
                1000*(cv::getTickCount() - ticksBefore)/cv::getTickFrequency());
    }
    catch (cv_bridge::Exception& e)
    {
      ROS_ERROR("cv_bridge exception: %s", e.what());
      return;
    }
  }
}