Example #1
int main(int argc, char** argv) {
    try {
        if (readArguments(argc, argv) == false)
            return 0;

        // read board configuration
        TheBoardConfig.readFromFile(TheBoardConfigFile);

        // Open video input source
        if (TheInputVideo == "") // read from camera
            TheVideoCapturer.open(0);
        else
            TheVideoCapturer.open(TheInputVideo);
        if (!TheVideoCapturer.isOpened()) {
            cerr << "Could not open video" << endl;
            return -1;
        }

        // read first image
        TheVideoCapturer >> TheInputImage;
        // read camera parameters if passed
        TheCameraParams.readFromXMLFile(TheIntrinsicFile);
        TheCameraParams.resize(TheInputImage.size());

        TheBoardDetector.getMarkerDetector().setThresholdParams(25, 7);

        glutInit(&argc, argv);
        glutInitWindowPosition(0, 0);
        glutInitWindowSize(TheInputImage.size().width, TheInputImage.size().height);
        glutInitDisplayMode(GLUT_RGB | GLUT_DEPTH | GLUT_DOUBLE);
        glutCreateWindow("ArUco");
        glutDisplayFunc(vDrawScene);
        glutIdleFunc(vIdle);
        glutReshapeFunc(vResize);
        glutMouseFunc(vMouse);
        glClearColor(0.0, 0.0, 0.0, 1.0);
        glClearDepth(1.0);

        // these two are necessary for the mask effect
        glEnable(GL_ALPHA_TEST);
        glAlphaFunc(GL_GREATER, 0.5);
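        // (With GL_GREATER and a reference value of 0.5, fragments whose alpha is
        //  <= 0.5 are discarded; an RGBA image whose alpha channel is zero outside
        //  the region of interest therefore acts as a mask over the virtual content.)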

        TheGlWindowSize = TheInputImage.size();
        vResize(TheGlWindowSize.width, TheGlWindowSize.height);
        glutMainLoop();

    } catch (std::exception& ex) {
        cout << "Exception :" << ex.what() << endl;
    }
}
Example #2
int main(int argc,char **argv)
{
    try
    {
        if (readArguments (argc,argv)==false) return 0;

        //read board configuration
        TheBoardConfig.readFromFile(TheBoardConfigFile);

        //Open video input source
        if (TheInputVideo=="live")  //read from camera
            TheVideoCapturer.open(0);
        else TheVideoCapturer.open(TheInputVideo);
        if (!TheVideoCapturer.isOpened())
        {
            cerr<<"Could not open video"<<endl;
            return -1;

        }

        //read first image
        TheVideoCapturer>>TheInputImage;
        //read camera parameters if passed
        TheCameraParams.readFromXMLFile(TheIntrinsicFile);
        TheCameraParams.resize( TheInputImage.size());

        //init glut: create the window and register the callbacks
        glutInit(&argc, argv);
        glutInitWindowPosition( 0, 0);
        glutInitWindowSize(TheInputImage.size().width,TheInputImage.size().height);
        glutInitDisplayMode( GLUT_RGB | GLUT_DEPTH | GLUT_DOUBLE );
        glutCreateWindow( "AruCo" );
        glutDisplayFunc( vDrawScene );
        glutIdleFunc( vIdle );
        glutReshapeFunc( vResize );
        glutMouseFunc(vMouse);
        glClearColor( 0.0, 0.0, 0.0, 1.0 );
        glClearDepth( 1.0 );
        TheGlWindowSize=TheInputImage.size();
        vResize(TheGlWindowSize.width,TheGlWindowSize.height);
        glutMainLoop();

    } catch (std::exception &ex)
    {
        cout<<"Exception :"<<ex.what()<<endl;
    }

}
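The helper readArguments used by these examples is not shown on this page. A minimal sketch of what it might do for the example above, assuming the globals it fills (TheInputVideo, TheBoardConfigFile, TheIntrinsicFile) and a simple positional interface; the exact options of the real helper are unknown:

bool readArguments(int argc, char** argv)
{
    // Hypothetical sketch only; the real helper may parse named options instead.
    if (argc < 3)
    {
        cerr << "Usage: (in.avi|live) boardConfig.yml [intrinsics.yml]" << endl;
        return false;
    }
    TheInputVideo      = argv[1];
    TheBoardConfigFile = argv[2];
    if (argc >= 4)
        TheIntrinsicFile = argv[3];
    return true;
}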
Example #3
int main(int argc,char **argv)
{
	try
	{
		if(argc==1) usage();
		readArguments (argc,argv);
		if (TheIntrinsicFile==""){cerr<<"-f option required"<<endl;return -1;}
		if (TheMarkerSize==-1){cerr<<"-s option required"<<endl;return -1;}
		//read from camera
		if (TheInputVideo=="") TheVideoCapturer.open(0);
		else TheVideoCapturer.open(TheInputVideo);
		if (!TheVideoCapturer.isOpened())
		{
			cerr<<"Could not open video"<<endl;
			return -1;

		}

		//read first image
		TheVideoCapturer>>TheInputImage;
		//read camera parameters if passed
		if (isIntrinsicFileYAML)
		  TheCameraParams.readFromXMLFile(TheIntrinsicFile);
		else
		  TheCameraParams.readFromFile(TheIntrinsicFile);
		TheCameraParams.resize(TheInputImage.size());

		glutInit(&argc, argv);
		glutInitWindowPosition( 0, 0);
		glutInitWindowSize(TheInputImage.size().width,TheInputImage.size().height);
		glutInitDisplayMode( GLUT_RGB | GLUT_DEPTH | GLUT_DOUBLE );
		glutCreateWindow( "AruCo" );
		glutDisplayFunc( vDrawScene );
		glutIdleFunc( vIdle );
		glutReshapeFunc( vResize );
		glutMouseFunc(vMouse);
		glClearColor( 0.0, 0.0, 0.0, 1.0 );
		glClearDepth( 1.0 );
		TheGlWindowSize=TheInputImage.size();
		vResize(TheGlWindowSize.width,TheGlWindowSize.height);
		glutMainLoop();

	} catch (std::exception &ex)
	{
		cout<<"Exception :"<<ex.what()<<endl;
	}

}
Example #4
/*!
 *  
 */
int main(int argc,char **argv)
{
  readArguments (argc,argv);

  //read board configuration
  boardConfig.readFromFile(bcon->sval[0]);

  //Open video input source
  if (inp->count==0 || strcmp(inp->sval[0], "live")==0)
  {
    //read from camera
    vCapturer.open(0);

    vCapturer.set(CV_CAP_PROP_FRAME_WIDTH, wid->ival[0]);
    vCapturer.set(CV_CAP_PROP_FRAME_HEIGHT, hei->ival[0]);
    int val = CV_FOURCC('M', 'P', 'E', 'G');
    vCapturer.set(CV_CAP_PROP_FOURCC, val);
  }
  else
    vCapturer.open(inp->sval[0]);

  if (!vCapturer.isOpened())
  {
    std::cerr<<"Could not open video"<<std::endl;
    return -1;
  }

  //read first image
  vCapturer>>inImg;
  //read camera parameters if passed
  camParams.readFromXMLFile(ints->sval[0]);
  camParams.resize( inImg.size());

  glutInit(&argc, argv);
  glutInitWindowPosition( 0, 0);
  glutInitWindowSize(inImg.size().width,inImg.size().height);
  glutInitDisplayMode( GLUT_RGB | GLUT_DEPTH | GLUT_DOUBLE );
  glutCreateWindow( "AruCo" );
  glutDisplayFunc( vDrawScene );
  glutIdleFunc( vIdle );
  glutReshapeFunc( vResize );
  glutMouseFunc(vMouse);
  glClearColor( 0.0, 0.0, 0.0, 1.0 );
  glClearDepth( 1.0 );
  glSize=inImg.size();
  vResize(glSize.width,glSize.height);
  glutMainLoop();
}
Example #5
int main(int argc, char** argv)
{
    try
    {
        CmdLineParser cml(argc, argv);
        if (argc < 2 || cml["-h"])
        {
            cerr << "Invalid number of arguments" << endl;
            cerr << "Usage: (in.avi|live[:camera_index(e.g 0 or 1)]) [-c camera_params.yml] [-s  marker_size_in_meters] [-d "
                    "dictionary:ARUCO by default] [-h]"
                 << endl;
            cerr << "\tDictionaries: ";
            for (auto dict : aruco::Dictionary::getDicTypes())
                cerr << dict << " ";
            cerr << endl;
            cerr << "\t Instead of these, you can directly indicate the path to a file with your own generated "
                    "dictionary"
                 << endl;
            return false;
        }

        ///////////  PARSE ARGUMENTS
        string TheInputVideo = argv[1];
        // read camera parameters if passed
        if (cml["-c"])
            TheCameraParameters.readFromXMLFile(cml("-c"));
        float TheMarkerSize = std::stof(cml("-s", "-1"));
        // aruco::Dictionary::DICT_TYPES  TheDictionary= Dictionary::getTypeFromString( cml("-d","ARUCO") );

        ///////////  OPEN VIDEO
        // read from camera or from  file
        if (TheInputVideo.find("live") != string::npos)
        {
            int vIdx = 0;
            // check if the :idx is here
            char cad[100];
            if (TheInputVideo.find(":") != string::npos)
            {
                std::replace(TheInputVideo.begin(), TheInputVideo.end(), ':', ' ');
                sscanf(TheInputVideo.c_str(), "%s %d", cad, &vIdx);
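                // e.g. "live:1" becomes "live 1", so sscanf leaves "live" in cad and
                // the camera index (1) in vIdx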
            }
            cout << "Opening camera index " << vIdx << endl;
            TheVideoCapturer.open(vIdx);
            waitTime = 10;
        }
        else
            TheVideoCapturer.open(TheInputVideo);
        // check video is open
        if (!TheVideoCapturer.isOpened())
            throw std::runtime_error("Could not open video");

        ///// CONFIGURE DATA
        // read first image to get the dimensions
        TheVideoCapturer >> TheInputImage;
        if (TheCameraParameters.isValid())
            TheCameraParameters.resize(TheInputImage.size());
        dictionaryString=cml("-d", "ARUCO");
        MDetector.setDictionary(dictionaryString,float(iCorrectionRate)/10. );  // sets the dictionary to be employed (ARUCO,APRILTAGS,ARTOOLKIT,etc)
        MDetector.setThresholdParams(7, 7);
        MDetector.setThresholdParamRange(2, 0);

        // gui requirements : the trackbars to change this parameters
        iThresParam1 = static_cast<int>(MDetector.getParams()._thresParam1);
        iThresParam2 = static_cast<int>(MDetector.getParams()._thresParam2);
        cv::namedWindow("in");
        cv::createTrackbar("ThresParam1", "in", &iThresParam1, 25, cvTackBarEvents);
        cv::createTrackbar("ThresParam2", "in", &iThresParam2, 13, cvTackBarEvents);
        cv::createTrackbar("correction_rate", "in", &iCorrectionRate, 10, cvTackBarEvents);
        cv::createTrackbar("EnclosedMarkers", "in", &iEnclosedMarkers, 1, cvTackBarEvents);
        cv::createTrackbar("ShowAllCandidates", "in", &iShowAllCandidates, 1, cvTackBarEvents);

        // go!
        char key = 0;
        int index = 0,indexSave=0;
        // capture until press ESC or until the end of the video
        do
        {
            TheVideoCapturer.retrieve(TheInputImage);
            // copy image
            double tick = (double)getTickCount();  // for checking the speed
            // Detection of markers in the image passed
            TheMarkers = MDetector.detect(TheInputImage, TheCameraParameters, TheMarkerSize);
            // check the speed by calculating the mean speed of all iterations
            AvrgTime.first += ((double)getTickCount() - tick) / getTickFrequency();
            AvrgTime.second++;
            cout << "\rTime detection=" << 1000 * AvrgTime.first / AvrgTime.second
                 << " milliseconds nmarkers=" << TheMarkers.size() << std::endl;

            // print marker info and draw the markers in image
            TheInputImage.copyTo(TheInputImageCopy);

            if (iShowAllCandidates){
                auto candidates=MDetector.getCandidates();
                for(auto cand:candidates)
                    Marker(cand,-1).draw(TheInputImageCopy, Scalar(255, 0, 255));
            }

            for (unsigned int i = 0; i < TheMarkers.size(); i++)
            {
                cout << TheMarkers[i] << endl;
                TheMarkers[i].draw(TheInputImageCopy, Scalar(0, 0, 255));
            }

            // draw a 3d cube in each marker if there is 3d info
            if (TheCameraParameters.isValid() && TheMarkerSize > 0)
                for (unsigned int i = 0; i < TheMarkers.size(); i++)
                {
                    CvDrawingUtils::draw3dCube(TheInputImageCopy, TheMarkers[i], TheCameraParameters);
                    CvDrawingUtils::draw3dAxis(TheInputImageCopy, TheMarkers[i], TheCameraParameters);
                }

            // DONE! Easy, right?
            // show input with augmented information and  the thresholded image
            cv::imshow("in", resize(TheInputImageCopy, 1280));
            cv::imshow("thres", resize(MDetector.getThresholdedImage(), 1280));

            key = cv::waitKey(waitTime);  // wait for key to be pressed
            if (key == 's')
                waitTime = waitTime == 0 ? 10 : 0;
            if (key == 'w'){//writes current input image
                string number=std::to_string(indexSave++);
                while(number.size()!=3) number="0"+number; // left-pad the frame index to three digits
                string imname="arucoimage"+number+".png";
                cv::imwrite(imname,TheInputImage);
                cout<<"saved "<<imname<<endl;
            }
            index++;  // number of images captured

        } while (key != 27 && (TheVideoCapturer.grab()));
    }
    catch (std::exception& ex)
    {
        cout << "Exception :" << ex.what() << endl;
    }
}
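The example above displays its output through a helper named resize(image, width), which is not an OpenCV function and is not listed here. A minimal sketch of what such a helper could look like, downscaling to the requested width while preserving the aspect ratio (an assumption about the missing code):

cv::Mat resize(const cv::Mat& in, int width)
{
    // Assumes the OpenCV headers already included by the example (core + imgproc).
    if (in.cols <= width)
        return in;                                   // already narrow enough
    float scale = float(width) / float(in.cols);     // uniform scale factor
    cv::Mat out;
    cv::resize(in, out, cv::Size(width, static_cast<int>(in.rows * scale)));
    return out;
}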
Example #6
int main(int argc, char** argv)
{
    try
    {
        CmdLineParser cml(argc, argv);
        if (argc < 3 || cml["-h"])
        {
            cerr << "Invalid number of arguments" << endl;
            cerr << "Usage: (in.avi|live) marksetconfig.yml  [optional_arguments] \n\t[-c camera_intrinsics.yml] "
                    "\n\t[-s marker_size] \n\t[-pcd out_pcd_file_with_camera_poses] \n\t[-poses out_file_with_poses] "
                    "\n\t[-corner <corner_refinement_method> (0: LINES(default),1 SUBPIX) ][-h]"
                 << endl;
            return false;
        }
        TheMarkerMapConfig.readFromFile(argv[2]);

        TheMarkerMapConfigFile = argv[2];
        TheMarkerSize = stof(cml("-s", "1"));
        // read from camera or from  file
        if (string(argv[1]) == "live")
        {
            TheVideoCapturer.open(0);
        }
        else
            TheVideoCapturer.open(argv[1]);
        // check video is open
        if (!TheVideoCapturer.isOpened())
            throw std::runtime_error("Could not open video");

        // read first image to get the dimensions
        TheVideoCapturer >> TheInputImage;

        // read camera parameters if passed
        if (cml["-c"])
        {
            TheCameraParameters.readFromXMLFile(cml("-c"));
            TheCameraParameters.resize(TheInputImage.size());
        }
        // prepare the detector
        string dict =
            TheMarkerMapConfig
                .getDictionary();  // see if the dictionary is already indicated in the configuration file. It should!
        if (dict.empty())
            dict = "ARUCO";
        TheMarkerDetector.setDictionary(
            dict);  /// DO NOT FORGET THAT!!! Otherwise, the ARUCO dictionary will be used by default!
        if (stoi(cml("-corner", "0")) == 0)
            TheMarkerDetector.setCornerRefinementMethod(MarkerDetector::LINES);
        else
        {
            MarkerDetector::Params params = TheMarkerDetector.getParams();
            params._cornerMethod = MarkerDetector::SUBPIX;
            // search corner subpix in a 5x5 window area
            params._subpix_wsize = static_cast<int>((15.f / 2000.f) * float(TheInputImage.cols));
            TheMarkerDetector.setParams(params);
        }

        // prepare the pose tracker if possible
        // if the camera parameters are available and the marker set can be expressed in meters, then go

        if (TheMarkerMapConfig.isExpressedInPixels() && TheMarkerSize > 0)
            TheMarkerMapConfig = TheMarkerMapConfig.convertToMeters(TheMarkerSize);
        cout << "TheCameraParameters.isValid()=" << TheCameraParameters.isValid() << " "
             << TheMarkerMapConfig.isExpressedInMeters() << endl;
        if (TheCameraParameters.isValid() && TheMarkerMapConfig.isExpressedInMeters())
            TheMSPoseTracker.setParams(TheCameraParameters, TheMarkerMapConfig);

        // Create gui

        cv::namedWindow("thres", 1);
        cv::namedWindow("in", 1);

        TheMarkerDetector.getThresholdParams(ThresParam1, ThresParam2);
        iThresParam1 = static_cast<int>(ThresParam1);
        iThresParam2 = static_cast<int>(ThresParam2);
        cv::createTrackbar("ThresParam1", "in", &iThresParam1, 13, cvTackBarEvents);
        cv::createTrackbar("ThresParam2", "in", &iThresParam2, 13, cvTackBarEvents);
        char key = 0;
        int index = 0;
        // capture until press ESC or until the end of the video
        cout << "Press 's' to start/stop video" << endl;
        do
        {
            TheVideoCapturer.retrieve(TheInputImage);
            TheInputImage.copyTo(TheInputImageCopy);
            index++;  // number of images captured
            // Detection of the board
            vector<aruco::Marker> detected_markers = TheMarkerDetector.detect(TheInputImage);
            // print the detected markers that belong to the marker set
            for (auto idx : TheMarkerMapConfig.getIndices(detected_markers))
                detected_markers[idx].draw(TheInputImageCopy, Scalar(0, 0, 255), 2);
            // detect 3d info if possible
            if (TheMSPoseTracker.isValid())
            {
                if (TheMSPoseTracker.estimatePose(detected_markers))
                {
                    aruco::CvDrawingUtils::draw3dAxis(TheInputImageCopy, TheCameraParameters,
                                                      TheMSPoseTracker.getRvec(), TheMSPoseTracker.getTvec(),
                                                      TheMarkerMapConfig[0].getMarkerSize() * 2);
                    frame_pose_map.insert(make_pair(index, TheMSPoseTracker.getRTMatrix()));
                    cout << "pose rt=" << TheMSPoseTracker.getRvec() << " " << TheMSPoseTracker.getTvec() << endl;
                }
            }

            // show input with augmented information and  the thresholded image
            cv::imshow("in", TheInputImageCopy);
            cv::imshow("thres", TheMarkerDetector.getThresholdedImage());

            key = cv::waitKey(waitTime);  // wait for key to be pressed
            processKey(key);

        } while (key != 27 && TheVideoCapturer.grab());

        // save a beautiful pcd file (pcl library) showing the results (you can use pcl_viewer to see it)
        if (cml["-pcd"])
        {
            savePCDFile(cml("-pcd"), TheMarkerMapConfig, frame_pose_map);
        }

        // save the poses to a file in tum rgbd data format
        if (cml["-poses"])
        {
            savePosesToFile(cml("-poses"), frame_pose_map);
        }
    }
    catch (std::exception& ex)
    {
        cout << "Exception :" << ex.what() << endl;
    }
}
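The example above relies on savePCDFile and savePosesToFile, which are not reproduced on this page. Purely as an illustration, here is a sketch of what a TUM-RGBD-style pose writer could look like, assuming frame_pose_map stores the 4x4 camera-pose matrices returned by getRTMatrix in the code above (assumed here to be CV_32F); the function name and details below are assumptions, not the library's API:

// Illustrative sketch only -- not the library's savePosesToFile.
#include <algorithm>
#include <cmath>
#include <fstream>
#include <map>
#include <opencv2/core/core.hpp>

void savePosesToFileSketch(const std::string& path, const std::map<int, cv::Mat>& frame_pose_map)
{
    std::ofstream file(path);
    for (const auto& fp : frame_pose_map)
    {
        const cv::Mat& RT = fp.second;  // assumed 4x4 pose matrix, CV_32F
        if (RT.empty())
            continue;
        float tx = RT.at<float>(0, 3), ty = RT.at<float>(1, 3), tz = RT.at<float>(2, 3);
        // rotation matrix -> quaternion: magnitudes from the diagonal, signs from off-diagonal terms
        float qw = std::sqrt(std::max(0.f, 1 + RT.at<float>(0, 0) + RT.at<float>(1, 1) + RT.at<float>(2, 2))) / 2;
        float qx = std::sqrt(std::max(0.f, 1 + RT.at<float>(0, 0) - RT.at<float>(1, 1) - RT.at<float>(2, 2))) / 2;
        float qy = std::sqrt(std::max(0.f, 1 - RT.at<float>(0, 0) + RT.at<float>(1, 1) - RT.at<float>(2, 2))) / 2;
        float qz = std::sqrt(std::max(0.f, 1 - RT.at<float>(0, 0) - RT.at<float>(1, 1) + RT.at<float>(2, 2))) / 2;
        qx = std::copysign(qx, RT.at<float>(2, 1) - RT.at<float>(1, 2));
        qy = std::copysign(qy, RT.at<float>(0, 2) - RT.at<float>(2, 0));
        qz = std::copysign(qz, RT.at<float>(1, 0) - RT.at<float>(0, 1));
        // TUM RGB-D trajectory line: timestamp tx ty tz qx qy qz qw (frame index used as timestamp)
        file << fp.first << " " << tx << " " << ty << " " << tz << " "
             << qx << " " << qy << " " << qz << " " << qw << "\n";
    }
}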
Example #7
void getJointPositions(Mat imgOrg, Arm *arm_left, Arm *arm_right, Chest *chest)
{
	// reset found flag from all joints and both arms
	arm_left->resetJ1Found();
	arm_left->resetJ2Found();
	arm_left->resetJ3Found();
	arm_left->resetArmFound();

	arm_right->resetJ1Found();
	arm_right->resetJ2Found();
	arm_right->resetJ3Found();
	arm_right->resetArmFound();

	// Create camera parameters object
	CameraParameters TheCameraParameters;

	// set camera parameters
	Mat dist(1,5,CV_32FC1);
	dist.at<float>(0,0) = -0.0648763971625288;
	dist.at<float>(0,1) = 0.0612520196884308;
	dist.at<float>(0,2) = 0.0038281538281731;
	dist.at<float>(0,3) = -0.00551104078371959;
	dist.at<float>(0,4) = 0.000000;

	Mat cameraP(3,3,CV_32FC1);
	cameraP.at<float>(0,0) = 558.570339530768;
	cameraP.at<float>(0,1) = 0.000000;
	cameraP.at<float>(0,2) = 308.885375457296;
	cameraP.at<float>(1,0) = 0.000000;
	cameraP.at<float>(1,1) = 556.122943034837;
	cameraP.at<float>(1,2) = 247.600724811385;
	cameraP.at<float>(2,0) = 0.000000;
	cameraP.at<float>(2,1) = 0.000000;
	cameraP.at<float>(2,2) = 1.000000;

	TheCameraParameters.setParams(cameraP, dist, CAMERA_RESOLUTION);
	TheCameraParameters.resize(CAMERA_RESOLUTION);
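	// (cameraP follows the standard pinhole layout
	//      [ fx  0  cx ]
	//      [  0  fy cy ]
	//      [  0  0   1 ]
	//  so fx ~ 558.6, fy ~ 556.1, the principal point is ~ (308.9, 247.6), and dist
	//  holds the matching k1, k2, p1, p2, k3 distortion coefficients.)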

	// create vectors for joints
  Vector3d j1Left, j2Left, j3Left;
	Vector3d j1Right, j2Right, j3Right;

	// create objects for marker
	MarkerDetector MDetector;
  vector<Marker> Markers;

	// set marker settings
	MDetector.setWarpSize(100);
	MDetector.enableLockedCornersMethod(true);
	MDetector.setMinMaxSize(0.01, 0.5);

	// convert image to gray
  Mat imgGray = imgOrg.clone();
  cvtColor(imgGray, imgGray, CV_BGR2GRAY);

	// detect all markers in the picture
  MDetector.detect(imgGray, Markers, TheCameraParameters);

	// go through all found markers
  for(int i = 0; i < Markers.size(); i++)
  {
			Markers[i].calculateExtrinsics(0.09, TheCameraParameters);
			Markers[i].draw(imgOrg, Scalar(0,0,255), 2);

			// get abs position in picture for every joint
			switch(Markers[i].id)
			{
			case SHOULDER_LEFT:
					// switch x and y axis
					j1Left(0) = Markers[i].Tvec.at<float>(1, 0);
					j1Left(1) = Markers[i].Tvec.at<float>(0, 0);
					j1Left(2) = Markers[i].Tvec.at<float>(2, 0);

					arm_left->setJ1Found();

					if(COUT_JOINT_ABS_POS == ON)
					{
							cout << "abs pos SHOULDER_LEFT: " << j1Left(0) << "\t" << j1Left(1) << "\t" << j1Left(2) << endl;
					}
					break;

			case ELBOW_LEFT:
					// switch x and y axis
					j2Left(0) = Markers[i].Tvec.at<float>(1, 0);
					j2Left(1) = Markers[i].Tvec.at<float>(0, 0);
					j2Left(2) = Markers[i].Tvec.at<float>(2, 0);

					arm_left->setJ2Found();

					if(COUT_JOINT_ABS_POS == ON)
					{
							cout << "abs pos ELBOW_LEFT: " << j2Left(0) << "\t" << j2Left(1) << "\t" << j2Left(2) << endl;
					}
					break;

			case WRIST_LEFT:
					// switch x and y axis
					j3Left(0) = Markers[i].Tvec.at<float>(1, 0);
					j3Left(1) = Markers[i].Tvec.at<float>(0, 0);
					j3Left(2) = Markers[i].Tvec.at<float>(2, 0);

					arm_left->setJ3Found();

					if(COUT_JOINT_ABS_POS == ON)
					{
							cout << "abs pos WRIST_LEFT: " << j3Left(0) << "\t" << j3Left(1) << "\t" << j3Left(2) << endl;
					}
					break;

				case SHOULDER_RIGHT:
						// switch x and y axis
						j1Right(0) = Markers[i].Tvec.at<float>(1, 0);
						j1Right(1) = Markers[i].Tvec.at<float>(0, 0);
						j1Right(2) = Markers[i].Tvec.at<float>(2, 0);

						arm_right->setJ1Found();

						if(COUT_JOINT_ABS_POS == ON)
						{
								cout << "abs pos SHOULDER_RIGHT: " << j1Right(0) << "\t" << j1Right(1) << "\t" << j1Right(2) << endl;
						}
						break;

				case ELBOW_RIGHT:
						// switch x and y axis
						j2Right(0) = Markers[i].Tvec.at<float>(1, 0);
						j2Right(1) = Markers[i].Tvec.at<float>(0, 0);
						j2Right(2) = Markers[i].Tvec.at<float>(2, 0);

						arm_right->setJ2Found();

						if(COUT_JOINT_ABS_POS == ON)
						{
								cout << "abs pos ELBOW_RIGHT: " << j2Right(0) << "\t" << j2Right(1) << "\t" << j2Right(2) << endl;
						}
						break;

				case WRIST_RIGHT:
						// switch x and y axis
						j3Right(0) = Markers[i].Tvec.at<float>(1, 0);
						j3Right(1) = Markers[i].Tvec.at<float>(0, 0);
						j3Right(2) = Markers[i].Tvec.at<float>(2, 0);

						arm_right->setJ3Found();

						if(COUT_JOINT_ABS_POS == ON)
						{
								cout << "abs pos WRIST_RIGHT: " << j3Right(0) << "\t" << j3Right(1) << "\t" << j3Right(2) << endl;
						}
						break;
				}
		}

		// Display camera image with detected markers
		if(SHOW_ARUCO_FOUND_IMG == ON)
	  		cv::imshow("arruco", imgOrg);


		// set initial position for upper body
		if((chest->getInit() == false) && arm_left->getJ1Found() && arm_right->getJ1Found())
		{
				// set bool to true
				chest->setInit();

				// set initial position between shoulders -> both shoulders are needed
				if(arm_left->getJ1Found() && arm_right->getJ1Found())
					chest->setInitPos((-j1Left(2) -j1Right(2))/2);

				if(COUT_CHEST_INIT_POS == ON)
						cout << "chest init pos: " << chest->getInitPos() << endl;
		}


		// check if upper body moved
		if(chest->getInit() && (arm_left->getJ1Found() || arm_right->getJ1Found()))
		{
				// reset bool
				chest->resetTorsoMoved();

				// calculate current chest position
				double chestCurPos;
				if(arm_left->getJ1Found() && arm_right->getJ1Found())
						chestCurPos = (-j1Left(2) -j1Right(2))/2;
				else if(arm_left->getJ1Found())
						chestCurPos = -j1Left(2);
				else if(arm_right->getJ1Found())
						chestCurPos = -j1Right(2);

				if(COUT_CHEST_CUR_POS == ON)
						cout << "current pos: "	<< chestCurPos << endl;


				// calculate distance
				double dist = chest->getInitPos() - chestCurPos;

				if(COUT_CHEST_DIST == ON)
					cout << "chest dist: " << dist << endl;

				// check if distance is greater than threshold
				if(abs(dist) > CHEST_DIST_THRESH)
				{
						// set bool: thresh moved
						chest->setTorsoMoved();

						// calculate angle for torso
						chest->setAngle(asin(dist/TORSO_LENGTH));

						if(COUT_CHEST_ANGLE == ON)
							cout << "angle" << chest->getAngle() << endl;
				}
		}


		// calculate relative position for the left arm
		if(arm_left->getJ1Found() && arm_left->getJ2Found() && arm_left->getJ3Found())
		{
				// calculate distance
				Vector3d d1 = j2Left - j1Left;
				Vector3d d2 = j3Left - j1Left;

				// check min dist
				for(int i = 0; i < 3; i++)
				{
						if(abs(d1(i)) < MIN_DIST)
								d1(i) = 0.0;

						if(abs(d2(i)) < MIN_DIST)
								d2(i) = 0.0;
				}

//				cout << "d1 " << d1(0) << "\t" << d1(1) << "\t" << d1(2) << endl;
//				cout << "d2 " << d2(0) << "\t" << d2(1) << "\t" << d2(2) << endl;

				// set relative coordinates
				j1Left(0) = 0;
				j1Left(1) = 0;
				j1Left(2) = 0;

				j2Left(0) = -d1(2);
				j2Left(1) = d1(1);
				j2Left(2) = -d1(0);

				j3Left(0) = -d2(2);
				j3Left(1) = d2(1);
				j3Left(2) = -d2(0);

				// write into arm
				arm_left->setJ1Coord(j1Left);
				arm_left->setJ2Coord(j2Left);
				arm_left->setJ3Coord(j3Left);

				// set bool that all markers for the left arm have been found
				arm_left->setArmFound();

				if(COUT_JOINT_REL_POS == ON)
				{
						cout << "rel pos SHOULDER_LEFT: " 	<< j1Left(0) << "\t\t" 	<< j1Left(1) << "\t\t" 	<< j1Left(2) << endl;
						cout << "rel pos ELBOW_LEFT:    " 	<< j2Left(0) << "\t" 		<< j2Left(1) << "\t" 		<< j2Left(2) << endl;
						cout << "rel pos WRIST_LEFT:    " 	<< j3Left(0) << "\t" 		<< j3Left(1) << "\t" 		<< j3Left(2) << endl;

						cout << endl << endl;
				}
		}


		// calculate relative position for the right arm
		if(arm_right->getJ1Found() && arm_right->getJ2Found() && arm_right->getJ3Found())
		{
				// calculate distance
				Vector3d d1 = j2Right - j1Right;
				Vector3d d2 = j3Right - j1Right;

				// check min dist
				for(int i = 0; i < 3; i++)
				{
						if(abs(d1(i)) < MIN_DIST)
								d1(i) = 0.0;

						if(abs(d2(i)) < MIN_DIST)
								d2(i) = 0.0;
				}

//				cout << "d1 " << d1(0) << "\t" << d1(1) << "\t" << d1(2) << endl;
//				cout << "d2 " << d2(0) << "\t" << d2(1) << "\t" << d2(2) << endl;

				// set relative coordinates
				j1Right(0) = 0;
				j1Right(1) = 0;
				j1Right(2) = 0;

				j2Right(0) = -d1(2);
				j2Right(1) = d1(1);
				j2Right(2) = -d1(0);

				j3Right(0) = -d2(2);
				j3Right(1) = d2(1);
				j3Right(2) = -d2(0);

				// write into arm
				arm_right->setJ1Coord(j1Right);
				arm_right->setJ2Coord(j2Right);
				arm_right->setJ3Coord(j3Right);

				// set bool that all markers for the right arm have been found
				arm_right->setArmFound();

				if(COUT_JOINT_REL_POS == ON)
				{
						cout << "rel pos SHOULDER_RIGHT: " 	<< j1Right(0) << "\t\t" 	<< j1Right(1) << "\t\t" 	<< j1Right(2) << endl;
						cout << "rel pos ELBOW_RIGHT:    " 	<< j2Right(0) << "\t" 		<< j2Right(1) << "\t" 		<< j2Right(2) << endl;
						cout << "rel pos WRIST_RIGHT:    " 	<< j3Right(0) << "\t" 		<< j3Right(1) << "\t" 		<< j3Right(2) << endl;

						cout << endl << endl;
				}
		}
}
Example #8
int main(int argc,char **argv)
{
	
    
    try
    {
        if (readArguments (argc,argv)==false) {
            return 0;
        }
        //parse arguments
        ;
        //read from camera or from  file
        if (TheInputVideo=="live") {
            TheVideoCapturer.open(0);
            waitTime=10;
        }
        else  TheVideoCapturer.open(TheInputVideo);
        //check video is open
        if (!TheVideoCapturer.isOpened()) {
            cerr<<"Could not open video"<<endl;
            return -1;

        }
        

        //read first image to get the dimensions
        TheVideoCapturer>>TheInputImage;

        //read camera parameters if passed
        if (TheIntrinsicFile!="") {
            TheCameraParameters.readFromXMLFile(TheIntrinsicFile);
            TheCameraParameters.resize(TheInputImage.size());
        }
        //Configure other parameters
        if (ThePyrDownLevel>0)
            MDetector.pyrDown(ThePyrDownLevel);

        //begin copy-paste from http://stackoverflow.com/questions/11550021/converting-a-mat-file-from-matlab-into-cvmat-matrix-in-opencv
		Mat oneVect;
		Mat useVecLat;
		Mat someVects;
		Mat zeroYzero;

		string demoFile  = "demo.yml";

		FileStorage fsDemo( demoFile, FileStorage::READ);
		fsDemo["oneVect"] >> oneVect;
		fsDemo["oneVect"] >> useVecLat;
		fsDemo["oneVect"] >> zeroYzero;
		fsDemo["someVects"] >> someVects;

		cout << "Print the contents of oneVect:" << endl;
		cout << oneVect << endl;
		fsDemo.release(); //close the file

		// Declare what you need
		// FileStorage fileOutt("reading_positions.yml", FileStorage::WRITE);

		//end copy-paste from http://stackoverflow.com/questions/11550021/converting-a-mat-file-from-matlab-into-cvmat-matrix-in-opencv

		cout << "an element oneVect:" << endl;
		cout << oneVect.at<float>(0,1) << endl;
		
		// to access the 42 in this YAML:
		
		//oneVect: !!opencv-matrix
		//   rows: 1
		//   cols: 3
		//   dt: f
		//   data: [ 4, 3, 42, 55]
		
		//  do oneVect.at<float>(0,2)


		//end data input



        //Create gui

        cv::namedWindow("thres",1);
        cv::namedWindow("in",1);
        MDetector.getThresholdParams( ThresParam1,ThresParam2);
        MDetector.setCornerRefinementMethod(MarkerDetector::LINES);
        iThresParam1=ThresParam1;
        iThresParam2=ThresParam2;
        cv::createTrackbar("ThresParam1", "in",&iThresParam1, 13, cvTackBarEvents);
        cv::createTrackbar("ThresParam2", "in",&iThresParam2, 13, cvTackBarEvents);

        char key=0;
        int index=0;
        //capture until press ESC or until the end of the video
        while ( key!=27 && TheVideoCapturer.grab())
        {
            TheVideoCapturer.retrieve( TheInputImage);
            //copy image

            index++; //number of images captured
            double tick = (double)getTickCount();//for checking the speed
            //Detection of markers in the image passed
			cout << "q" ;
            MDetector.detect(TheInputImage,TheMarkers,TheCameraParameters,TheMarkerSize);
            //note that this function outputs the marker info, for some reason.
            
            
            //check the speed by calculating the mean speed of all iterations
            AvrgTime.first+=((double)getTickCount()-tick)/getTickFrequency();
            AvrgTime.second++;
            
            //cout<<"Time detection="<<1000*AvrgTime.first/AvrgTime.second<<" milliseconds"<<endl;
            
            
            //print marker info and draw the markers in image
            TheInputImage.copyTo(TheInputImageCopy);
            for (unsigned int i=0;i<TheMarkers.size();i++) {
				cout<<TheMarkers[i].Tvec.at<float>(0,0)<<","<<
					TheMarkers[i].Tvec.at<float>(0,1)<<","<<
					TheMarkers[i].Tvec.at<float>(0,2)<<",";
				
//                cout<<TheMarkers[i]<<endl;
                if (TheMarkers[i].id == 605 && 1 == 0) { // THIS WILL NEVER HAPPEN!!! 1 is not zero.   
					Mat R33;
					cv::Rodrigues(TheMarkers[i].Rvec,R33);
					cout << R33 << endl;
					cout << TheMarkers[i].id << endl;
					
					for (unsigned int maytr=0;maytr<TheMarkers.size();maytr++) {  //take 0 , 1 , 0, inverse transform first, then transform.
						if (TheMarkers[maytr].id == 500) {
							
							
							zeroYzero.at<float>(0,0) = 0;
							zeroYzero.at<float>(0,1) = 2;
							zeroYzero.at<float>(0,2) = 0;
							zeroYzero.at<float>(0,3) = 0;
							zeroYzero.at<float>(0,4) = 0;
							zeroYzero.at<float>(0,5) = 0;
							
							cout << zeroYzero << endl;
							Mat R33for500;
							cout << TheMarkers[maytr].id<< "food"<< endl;
							cv::Rodrigues(TheMarkers[maytr].Rvec,R33for500);
							cout << R33for500 << endl;
							//cout << TheMarkers[i].id << endl;
							
							//R33for500 * R33.t() * zeroYzero; //transpose method
							
							//Mat afterDouble; 
							//cout << TheMarkers[maytr].id<< "water"<< endl;
							//afterDouble = R33for500 * (R33.inv() * zeroYzero); //inversion method
							//R33for500 * (R33.inv() * zeroYzero); //inversion method
							//cout << "shelll"<< endl;
							//cout << afterDouble << endl;
							//useVecLat.at<float>(0,0) = 0;
							//useVecLat.at<float>(0,1) = 0;
							//useVecLat.at<float>(0,2) = 0;
							//useVecLat.at<float>(0,3) = afterDouble.at<float>(0,0);
							//useVecLat.at<float>(0,4) = afterDouble.at<float>(0,1);
							//useVecLat.at<float>(0,5) = afterDouble.at<float>(0,2);
							
							//drawVecAtPos(TheInputImageCopy,TheMarkers[maytr],TheCameraParameters,afterDouble); //oneVect);
						}
					}
				}
                
                TheMarkers[i].draw(TheInputImageCopy,Scalar(0,0,255),1);
                
                //time date stuff
				std::time_t result = std::time(NULL); //nullptr);
				std::cout // << std::asctime(std::localtime(&result))
						  << result; // <<  " seconds since the Epoch\n";
						  
				// fileOutt << "time" << result ; //<< endl;

				cout<<endl; // <<endl<<endl;
            }
            
            //print other rectangles that contains no valid markers
       /**     for (unsigned int i=0;i<MDetector.getCandidates().size();i++) {
                aruco::Marker m( MDetector.getCandidates()[i],999);
                m.draw(TheInputImageCopy,cv::Scalar(255,0,0));
            }*/



            //draw a 3d cube in each marker if there is 3d info
            if (  TheCameraParameters.isValid())
                for (unsigned int i=0;i<TheMarkers.size();i++) {
                    
                    CvDrawingUtils::draw3dCube(TheInputImageCopy,TheMarkers[i],TheCameraParameters);
                    //Never use this; just reference // CvDrawingUtils::draw3dAxis(TheInputImageCopy,TheMarkers[i],TheCameraParameters);
                    
                    draw3dAxisj(TheInputImageCopy,TheMarkers[i],TheCameraParameters);
                    drawVecAtPos(TheInputImageCopy,TheMarkers[i],TheCameraParameters,oneVect);
                    drawVecsAtPosTesting(TheInputImageCopy,TheMarkers[i],TheCameraParameters,someVects);
                    						
                }
            //DONE! Easy, right?
            
            
            //show input with augmented information and  the thresholded image
            cv::imshow("in",TheInputImageCopy);
            cv::imshow("thres",MDetector.getThresholdedImage());

            key=cv::waitKey(waitTime);//wait for key to be pressed
        }
        // fileOutt.release();

    } catch (std::exception &ex)
    {
        cout<<"Exception :"<<ex.what()<<endl;
    }

}
Example #9
int main(int argc,char **argv)
{
    try
    {
        if (  readArguments (argc,argv)==false) return 0;
//parse arguments
        TheBoardConfig.readFromFile(TheBoardConfigFile);
        //read from camera or from  file
        if (TheInputVideo=="live") {
            TheVideoCapturer.open(0);
            waitTime=10;
        }
        else TheVideoCapturer.open(TheInputVideo);
        //check video is open
        if (!TheVideoCapturer.isOpened()) {
            cerr<<"Could not open video"<<endl;
            return -1;

        }

        //read first image to get the dimensions
        TheVideoCapturer>>TheInputImage;

        //Open outputvideo
        if ( TheOutVideoFilePath!="")
            VWriter.open(TheOutVideoFilePath,CV_FOURCC('M','J','P','G'),15,TheInputImage.size());

        //read camera parameters if passed
        if (TheIntrinsicFile!="") {
            TheCameraParameters.readFromXMLFile(TheIntrinsicFile);
            TheCameraParameters.resize(TheInputImage.size());
        }

        //Create gui

        cv::namedWindow("thres",1);
        cv::namedWindow("in",1);
        TheBoardDetector.setParams(TheBoardConfig,TheCameraParameters,TheMarkerSize);
        TheBoardDetector.getMarkerDetector().getThresholdParams( ThresParam1,ThresParam2);
        TheBoardDetector.getMarkerDetector().enableErosion(true);//for chessboards
        iThresParam1=ThresParam1;
        iThresParam2=ThresParam2;
        cv::createTrackbar("ThresParam1", "in",&iThresParam1, 13, cvTackBarEvents);
        cv::createTrackbar("ThresParam2", "in",&iThresParam2, 13, cvTackBarEvents);
        char key=0;
        int index=0;
        //capture until press ESC or until the end of the video
        while ( key!=27 && TheVideoCapturer.grab())
        {
            TheVideoCapturer.retrieve( TheInputImage);
            TheInputImage.copyTo(TheInputImageCopy);
            index++; //number of images captured
            double tick = (double)getTickCount();//for checking the speed
            //Detection of the board
            float probDetect=TheBoardDetector.detect(TheInputImage);
            //check the speed by calculating the mean speed of all iterations
            AvrgTime.first+=((double)getTickCount()-tick)/getTickFrequency();
            AvrgTime.second++;
            cout<<"Time detection="<<1000*AvrgTime.first/AvrgTime.second<<" milliseconds"<<endl;
            //print marker borders
            for (unsigned int i=0;i<TheBoardDetector.getDetectedMarkers().size();i++)
                TheBoardDetector.getDetectedMarkers()[i].draw(TheInputImageCopy,Scalar(0,0,255),1);

            //print board
             if (TheCameraParameters.isValid()) {
                if ( probDetect>0.2)   {
                    CvDrawingUtils::draw3dAxis( TheInputImageCopy,TheBoardDetector.getDetectedBoard(),TheCameraParameters);
                    //draw3dBoardCube( TheInputImageCopy,TheBoardDetected,TheIntriscCameraMatrix,TheDistorsionCameraParams);
                }
            }
            //DONE! Easy, right?

            //show input with augmented information and  the thresholded image
            cv::imshow("in",TheInputImageCopy);
            cv::imshow("thres",TheBoardDetector.getMarkerDetector().getThresholdedImage());
            //write to video if required
            if (  TheOutVideoFilePath!="") {
                //create a beautiful composed image showing the thresholded image
                //first create a small version of the thresholded image
                cv::Mat smallThres;
                cv::resize( TheBoardDetector.getMarkerDetector().getThresholdedImage(),smallThres,cvSize(TheInputImageCopy.cols/3,TheInputImageCopy.rows/3));
                cv::Mat small3C;
                cv::cvtColor(smallThres,small3C,CV_GRAY2BGR);
                cv::Mat roi=TheInputImageCopy(cv::Rect(0,0,TheInputImageCopy.cols/3,TheInputImageCopy.rows/3));
                small3C.copyTo(roi);
                VWriter<<TheInputImageCopy;
// 			 cv::imshow("TheInputImageCopy",TheInputImageCopy);

            }

            key=cv::waitKey(waitTime);//wait for key to be pressed
            processKey(key);
        }


    } catch (std::exception &ex)
    {
        cout<<"Exception :"<<ex.what()<<endl;
    }

}
Example #10
int main(int argc, char **argv) {
    try {
        if (readArguments(argc, argv) == false) {
            return 0;
        }
        // parse arguments

        // read from camera or from  file
        if (TheInputVideo.find("live") != string::npos) {
            int vIdx = 0;
            // check if the :idx is here
            char cad[100];
            if (TheInputVideo.find(":") != string::npos) {
                std::replace(TheInputVideo.begin(), TheInputVideo.end(), ':', ' ');
                sscanf(TheInputVideo.c_str(), "%s %d", cad, &vIdx);
            }
            cout << "Opening camera index " << vIdx << endl;
            TheVideoCapturer.open(vIdx);
            waitTime = 10;
        } else
            TheVideoCapturer.open(TheInputVideo);
        // check video is open
        if (!TheVideoCapturer.isOpened()) {
            cerr << "Could not open video" << endl;
            return -1;
        }
        bool isVideoFile = false;
        if (TheInputVideo.find(".avi") != std::string::npos || TheInputVideo.find("live") != string::npos)
            isVideoFile = true;
        // read first image to get the dimensions
        TheVideoCapturer >> TheInputImage;

        // read camera parameters if passed
        if (TheIntrinsicFile != "") {
            TheCameraParameters.readFromXMLFile(TheIntrinsicFile);
            TheCameraParameters.resize(TheInputImage.size());
        }
        // Configure other parameters
        if (ThePyrDownLevel > 0)
            MDetector.pyrDown(ThePyrDownLevel);


        // Create gui

        cv::namedWindow("thres", 1);
        cv::namedWindow("in", 1);

        MDetector.setThresholdParams(7, 7);
        MDetector.setThresholdParamRange(2, 0);
        // 	MDetector.enableLockedCornersMethod(true);
        //         MDetector.setCornerRefinementMethod ( MarkerDetector::SUBPIX );
        MDetector.getThresholdParams(ThresParam1, ThresParam2);
        iThresParam1 = ThresParam1;
        iThresParam2 = ThresParam2;
        //cv::createTrackbar("ThresParam1", "in", &iThresParam1, 25, cvTackBarEvents);
        //cv::createTrackbar("ThresParam2", "in", &iThresParam2, 13, cvTackBarEvents);

        char key = 0;
        int index = 0;
        // capture until press ESC or until the end of the video
        TheVideoCapturer.retrieve(TheInputImage);

        cv::Size sz = TheInputImage.size();
        MDetector.createCudaBuffers(sz.width, sz.height);

        do {

            // copy image

            index++; // number of images captured
            double tick = (double)getTickCount(); // for checking the speed
            // Detection of markers in the image passed
            MDetector.detect(TheInputImage, TheMarkers, TheCameraParameters, TheMarkerSize);
            // check the speed by calculating the mean speed of all iterations
            AvrgTime.first += ((double)getTickCount() - tick) / getTickFrequency();
            AvrgTime.second++;
            cout << "\rTime detection=" << 1000 * AvrgTime.first / AvrgTime.second << " milliseconds nmarkers=" << TheMarkers.size() << std::flush;

            // print marker info and draw the markers in image
            TheInputImage.copyTo(TheInputImageCopy);

            for (unsigned int i = 0; i < TheMarkers.size(); i++) {
                cout << endl << TheMarkers[i];
                TheMarkers[i].draw(TheInputImageCopy, Scalar(0, 0, 255), 1);
            }
            if (TheMarkers.size() != 0)
                cout << endl;
            // print other rectangles that contains no valid markers
            /**     for (unsigned int i=0;i<MDetector.getCandidates().size();i++) {
                     aruco::Marker m( MDetector.getCandidates()[i],999);
                     m.draw(TheInputImageCopy,cv::Scalar(255,0,0));
                 }*/



            // draw a 3d cube in each marker if there is 3d info
            if (TheCameraParameters.isValid())
                for (unsigned int i = 0; i < TheMarkers.size(); i++) {
                    CvDrawingUtils::draw3dCube(TheInputImageCopy, TheMarkers[i], TheCameraParameters);
                    CvDrawingUtils::draw3dAxis(TheInputImageCopy, TheMarkers[i], TheCameraParameters);
                }
            // DONE! Easy, right?
            // show input with augmented information and  the thresholded image
            cv::imshow("in", TheInputImageCopy);
            cv::imshow("thres", MDetector.getThresholdedImage());
            //cv::imshow("thres_gpu", MDetector.getThresholdedImageGPU());

            key = cv::waitKey(waitTime); // wait for key to be pressed
            if (isVideoFile)
                TheVideoCapturer.retrieve(TheInputImage);

        } while (key != 27 && (TheVideoCapturer.grab() || !isVideoFile));

    } catch (std::exception &ex) {
        cout << "Exception :" << ex.what() << endl;
    }
}
Example #11
int main(int argc,char **argv)
{
    try
    {
        if (readArguments (argc,argv)==false) {
            return 0;
        }
        //parse arguments
        ;
        //read from camera or from  file
        if (TheInputVideo=="live") {
            TheVideoCapturer.open(0);
            waitTime=10;
        }
        else  TheVideoCapturer.open(TheInputVideo);
        //check video is open
        if (!TheVideoCapturer.isOpened()) {
            cerr<<"Could not open video"<<endl;
            return -1;

        }

        //read first image to get the dimensions
        TheVideoCapturer>>TheInputImage;

        //read camera parameters if passed
        if (TheIntrinsicFile!="") {
            TheCameraParameters.readFromXMLFile(TheIntrinsicFile);
            TheCameraParameters.resize(TheInputImage.size());
        }
        //Configure other parameters
        if (ThePyrDownLevel>0)
            MDetector.pyrDown(ThePyrDownLevel);


        //Create gui

        MDetector.getThresholdParams( ThresParam1,ThresParam2);
        MDetector.setCornerRefinementMethod(MarkerDetector::LINES);

	/*
        cv::namedWindow("thres",1);
        cv::namedWindow("in",1);
        iThresParam1=ThresParam1;
        iThresParam2=ThresParam2;
        cv::createTrackbar("ThresParam1", "in",&iThresParam1, 13, cvTackBarEvents);
        cv::createTrackbar("ThresParam2", "in",&iThresParam2, 13, cvTackBarEvents);
	*/
	
        char key=0;
        int index=0;
        //capture until press ESC or until the end of the video
        while ( key!=27 && TheVideoCapturer.grab() ) // && index <= 50)
        {
            TheVideoCapturer.retrieve( TheInputImage);
            //copy image

            index++; //number of images captured

            double tick = (double)getTickCount();//for checking the speed
            //Detection of markers in the image passed
            MDetector.detect(TheInputImage,TheMarkers,TheCameraParameters,TheMarkerSize);
            //check the speed by calculating the mean speed of all iterations
            AvrgTime.first+=((double)getTickCount()-tick)/getTickFrequency();
            AvrgTime.second++;
            //cout<<"Time detection="<<1000*AvrgTime.first/AvrgTime.second<<" milliseconds"<<endl;
	    
            //print marker info and draw the markers in image
            TheInputImage.copyTo(TheInputImageCopy);
            for (unsigned int i=0;i<TheMarkers.size();i++) {
                if (AllMarkers.count( TheMarkers[i].id ) == 0)
                    AllMarkers[TheMarkers[i].id] = map<int,Marker>();
                AllMarkers[TheMarkers[i].id][index] = TheMarkers[i];

                cout<<index<<endl;
                cout<<TheMarkers[i]<<endl;
                TheMarkers[i].draw(TheInputImageCopy,Scalar(0,0,255),1);
            }
            //print other rectangles that contains no valid markers
       /**     for (unsigned int i=0;i<MDetector.getCandidates().size();i++) {
                aruco::Marker m( MDetector.getCandidates()[i],999);
                m.draw(TheInputImageCopy,cv::Scalar(255,0,0));
            }*/



            //draw a 3d cube in each marker if there is 3d info
            if (  TheCameraParameters.isValid())
                for (unsigned int i=0;i<TheMarkers.size();i++) {
                    CvDrawingUtils::draw3dCube(TheInputImageCopy,TheMarkers[i],TheCameraParameters);
                    CvDrawingUtils::draw3dAxis(TheInputImageCopy,TheMarkers[i],TheCameraParameters);
                }
            //DONE! Easy, right?
            cout<<endl<<endl<<endl;
            //show input with augmented information and  the thresholded image
            //cv::imshow("in",TheInputImageCopy);
            //cv::imshow("thres",MDetector.getThresholdedImage());

            //key=cv::waitKey(waitTime);//wait for key to be pressed
        }

	lastFrame = index;

    } catch (std::exception &ex)
    {
        cout<<"Exception :"<<ex.what()<<endl;
    }

    cout << "All done."<< endl;

    map<int, Markers>::const_iterator i;
    for( i = AllMarkers.begin(); i != AllMarkers.end(); ++i ) {
      int markerId = (*i).first;
      map<int, Marker> markers = (*i).second;

      int frameCount = markers.size();

      cout << "frameCount = " << frameCount << endl;

      std::vector<double> x(frameCount);
      std::vector<double> m0x(frameCount);
      std::vector<double> m0y(frameCount);
      std::vector<double> m1x(frameCount);
      std::vector<double> m1y(frameCount);
      std::vector<double> m2x(frameCount);
      std::vector<double> m2y(frameCount);
      std::vector<double> m3x(frameCount);
      std::vector<double> m3y(frameCount);
      std::vector<double> tx(frameCount);
      std::vector<double> ty(frameCount);
      std::vector<double> tz(frameCount);
      std::vector<double> rx(frameCount);
      std::vector<double> ry(frameCount);
      std::vector<double> rz(frameCount);

      map<int, Marker>::const_iterator j;
      int index = 0;
      for( j = markers.begin(); j != markers.end(); ++j, index++ ) {
	int frameIndex = (*j).first;
	Marker marker = (*j).second;

	x[index] = frameIndex;
	m0x[index] = marker[0].x;
	m0y[index] = marker[0].y;
	m1x[index] = marker[1].x;
	m1y[index] = marker[1].y;
	m2x[index] = marker[2].x;
	m2y[index] = marker[2].y;
	m3x[index] = marker[3].x;
	m3y[index] = marker[3].y;
	tx[index] = marker.Tvec.ptr<float>(0)[0];
	ty[index] = marker.Tvec.ptr<float>(0)[1];
	tz[index] = marker.Tvec.ptr<float>(0)[2];
	rx[index] = marker.Rvec.ptr<float>(0)[0];
	ry[index] = marker.Rvec.ptr<float>(0)[1];
	rz[index] = marker.Rvec.ptr<float>(0)[2];
	
	cout << frameIndex << endl;
      }

#define SPLINE(VAR) gsl_spline *spline_ ## VAR = gsl_spline_alloc (gsl_interp_cspline, frameCount); gsl_spline_init (spline_ ## VAR, &x[0], &VAR[0], frameCount)
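      // The SPLINE macro above expands, for each series VAR, to
      //   gsl_spline *spline_VAR = gsl_spline_alloc(gsl_interp_cspline, frameCount);
      //   gsl_spline_init(spline_VAR, &x[0], &VAR[0], frameCount);
      // i.e. one cubic spline per corner/pose coordinate, sampled at the frames in
      // which this marker was actually detected.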

      SPLINE(m0x);
      SPLINE(m0y);
      SPLINE(m1x);
      SPLINE(m1y);
      SPLINE(m2x);
      SPLINE(m2y);
      SPLINE(m3x);
      SPLINE(m3y);
      SPLINE(tx);
      SPLINE(ty);
      SPLINE(tz);
      SPLINE(rx);
      SPLINE(ry);
      SPLINE(rz);

      for( index = 0; index < lastFrame; index++ ) {

	double m0x = gsl_spline_eval (spline_m0x, index, NULL);
	double m0y = gsl_spline_eval (spline_m0y, index, NULL);
	double m1x = gsl_spline_eval (spline_m1x, index, NULL);
	double m1y = gsl_spline_eval (spline_m1y, index, NULL);
	double m2x = gsl_spline_eval (spline_m2x, index, NULL);
	double m2y = gsl_spline_eval (spline_m2y, index, NULL);
	double m3x = gsl_spline_eval (spline_m3x, index, NULL);
	double m3y = gsl_spline_eval (spline_m3y, index, NULL);
	double tx = gsl_spline_eval (spline_tx, index, NULL);
	double ty = gsl_spline_eval (spline_ty, index, NULL);
	double tz = gsl_spline_eval (spline_tz, index, NULL);
	double rx = gsl_spline_eval (spline_rx, index, NULL);
	double ry = gsl_spline_eval (spline_ry, index, NULL);
	double rz = gsl_spline_eval (spline_rz, index, NULL);
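	// (Note: gsl_spline_eval is only defined inside the fitted range, so evaluating
	//  at frames outside the interval where this marker was observed may trigger a
	//  GSL domain error; in practice the loop bounds may need clamping to x.front()
	//  and x.back().)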

	cv::Point2f m0 = cv::Point2f(m0x,m0y);
	cv::Point2f m1 = cv::Point2f(m1x,m1y);
	cv::Point2f m2 = cv::Point2f(m2x,m2y);
	cv::Point2f m3 = cv::Point2f(m3x,m3y);

	std::vector<cv::Point2f> corners(4);
	corners[0] = m0;
	corners[1] = m1;
	corners[2] = m2;
	corners[3] = m3;
	
	Marker interpolated = Marker(corners, markerId);

	interpolated.Rvec.create(3,1,CV_32FC1);
        interpolated.Tvec.create(3,1,CV_32FC1);
	interpolated.Tvec.at<float>(0,0) = tx;
	interpolated.Tvec.at<float>(1,0) = ty;
	interpolated.Tvec.at<float>(2,0) = tz;
	interpolated.Rvec.at<float>(0,0) = rx;
	interpolated.Rvec.at<float>(1,0) = ry;
	interpolated.Rvec.at<float>(2,0) = rz;

	cout << index << endl;
	cout << interpolated << endl;
      }
      

      
      gsl_spline_free (spline_m0x);
      gsl_spline_free (spline_m0y);
      gsl_spline_free (spline_m1x);
      gsl_spline_free (spline_m1y);
      gsl_spline_free (spline_m2x);
      gsl_spline_free (spline_m2y);
      gsl_spline_free (spline_m3x);
      gsl_spline_free (spline_m3y);
      gsl_spline_free (spline_tx);
      gsl_spline_free (spline_ty);
      gsl_spline_free (spline_tz);
      gsl_spline_free (spline_rx);
      gsl_spline_free (spline_ry);
      gsl_spline_free (spline_rz);

      
      //map<int, Marker>::const_iterator j;
      //cout << "id = " << markerId << endl;
    }
    
    //cout << TheFrames << endl;
}
Example #12
int main(int argc,char **argv)
{
    try
    {
        if (readArguments (argc,argv)==false) {
            return 0;
        }
        //parse arguments
        ;
        //read from camera or from  file
        if (TheInputVideo=="live") {
            TheVideoCapturer.open(0);
            waitTime=10;
        }
        else  TheVideoCapturer.open(TheInputVideo);
        //check video is open
        if (!TheVideoCapturer.isOpened()) {
            cerr<<"Could not open video"<<endl;
            return -1;

        }

        //read first image to get the dimensions
        TheVideoCapturer>>TheInputImage;

        //read camera parameters if passed
        if (TheIntrinsicFile!="") {
            TheCameraParameters.readFromXMLFile(TheIntrinsicFile);
            TheCameraParameters.resize(TheInputImage.size());
        }
        //Configure other parameters
        if (ThePyrDownLevel>0)
            MDetector.pyrDown(ThePyrDownLevel);


        //Create gui

        cv::namedWindow("thres",1);
        cv::namedWindow("in",1);
        MDetector.getThresholdParams( ThresParam1,ThresParam2);
        MDetector.setCornerRefinementMethod(MarkerDetector::LINES);
        iThresParam1=ThresParam1;
        iThresParam2=ThresParam2;
        cv::createTrackbar("ThresParam1", "in",&iThresParam1, 13, cvTackBarEvents);
        cv::createTrackbar("ThresParam2", "in",&iThresParam2, 13, cvTackBarEvents);

        char key=0;
        int index=0;
        //capture until press ESC or until the end of the video
        while ( key!=27 && TheVideoCapturer.grab())
        {
            TheVideoCapturer.retrieve( TheInputImage);
            //copy image

            index++; //number of images captured
            double tick = (double)getTickCount();//for checking the speed
            //Detection of markers in the image passed
            MDetector.detect(TheInputImage,TheMarkers,TheCameraParameters,TheMarkerSize);
            //check the speed by calculating the mean speed of all iterations
            AvrgTime.first+=((double)getTickCount()-tick)/getTickFrequency();
            AvrgTime.second++;
            cout<<"Time detection="<<1000*AvrgTime.first/AvrgTime.second<<" milliseconds"<<endl;

            //print marker info and draw the markers in image
            TheInputImage.copyTo(TheInputImageCopy);
            for (unsigned int i=0;i<TheMarkers.size();i++) {
                cout<<TheMarkers[i]<<endl;
                TheMarkers[i].draw(TheInputImageCopy,Scalar(0,0,255),1);
            }
            //print other rectangles that contains no valid markers
       /**     for (unsigned int i=0;i<MDetector.getCandidates().size();i++) {
                aruco::Marker m( MDetector.getCandidates()[i],999);
                m.draw(TheInputImageCopy,cv::Scalar(255,0,0));
            }*/



            //draw a 3d cube in each marker if there is 3d info
            if (  TheCameraParameters.isValid())
                for (unsigned int i=0;i<TheMarkers.size();i++) {
                    CvDrawingUtils::draw3dCube(TheInputImageCopy,TheMarkers[i],TheCameraParameters);
                    CvDrawingUtils::draw3dAxis(TheInputImageCopy,TheMarkers[i],TheCameraParameters);
                }
            //DONE! Easy, right?
            cout<<endl<<endl<<endl;
            //show input with augmented information and  the thresholded image
            cv::imshow("in",TheInputImageCopy);
            cv::imshow("thres",MDetector.getThresholdedImage());

            key=cv::waitKey(waitTime);//wait for key to be pressed
        }

    } catch (std::exception &ex)
    {
        cout<<"Exception :"<<ex.what()<<endl;
    }

}