bool update(void) {
	bool result = false;                 // stays false if useVideo has an unexpected value
	if(useVideo == 0) {                  // OpenNI device: one grab, then retrieve both streams
		result = cap.grab();
		cap.retrieve(rgbImg, CV_CAP_OPENNI_BGR_IMAGE);
		cap.retrieve(depthMap, CV_CAP_OPENNI_DEPTH_MAP);
	}
	else if(useVideo == 1) {             // plain video: no depth stream available
		result = cap.grab();
		cap.retrieve(rgbImg);
		depthMap = Mat();
	}
	else if(useVideo == 2) {             // two videos: RGB plus depth packed into 8-bit channels
		result = (cap.grab() && cap1.grab());
		if(result) {
			cap.retrieve(rgbImg);
			cap1.retrieve(depthImg);
			depthMap.create(depthImg.size(), CV_16UC1);  // must be allocated before at<ushort>() writes
			for(int i=0; i<depthImg.rows; i++) {
				for(int j=0; j<depthImg.cols; j++) {
					// reassemble 16-bit depth: channel 0 is the high byte, channel 1 the low byte
					depthMap.at<ushort>(i,j) = ushort(int(depthImg.at<Vec3b>(i,j)[0])*256 + int(depthImg.at<Vec3b>(i,j)[1]));
				}
			}
		}
	}
	return result;
}
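The per-pixel loop above rebuilds a 16-bit depth value from two 8-bit channels. A minimal sketch of the same unpacking done with whole-matrix operations, assuming the same layout (high byte in channel 0, low byte in channel 1); the helper name is hypothetical:

#include <opencv2/opencv.hpp>
#include <vector>

// Rebuild a CV_16UC1 depth map from a CV_8UC3 frame whose first two
// channels carry the high and low bytes of each depth sample.
cv::Mat unpackDepth(const cv::Mat& packed)
{
    std::vector<cv::Mat> ch;
    cv::split(packed, ch);          // ch[0] = high byte, ch[1] = low byte
    cv::Mat hi, lo;
    ch[0].convertTo(hi, CV_16U);
    ch[1].convertTo(lo, CV_16U);
    return hi * 256 + lo;           // same arithmetic as the loop above
}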
Example #2
int grab_frame(VideoCapture& capture, Mat& img, char* filename) {

    // one-time camera/image setup
    if (!cv_init_) {
        if (filename != NULL) {
            capture.open(filename);        // read from a video file
        } else {
            float scale = 0.52f;           // downscale factor (0.606 was also tried)
            int w = int(640 * scale);
            int h = int(480 * scale);
            capture.open(0);               // try to open the default camera
            capture.set(CV_CAP_PROP_FRAME_WIDTH, w);
            capture.set(CV_CAP_PROP_FRAME_HEIGHT, h);
        }
        if (!capture.isOpened()) { cerr << "failed to open video device" << endl; return 0; }
        capture.grab();
        capture.retrieve(img);
        if (img.empty()) { cerr << "failed to load first image" << endl; return 0; }
        printf(" img = %d rows x %d cols \n", img.rows, img.cols);
        cv_init_ = 1;
    }

    // get frames
    capture.grab();
    capture.retrieve(img);
    imshow("cam", img);

    return (waitKey(10) >= 0) ? 0 : 1;     // any key stops the caller's loop
}
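For single-stream capture like grab_frame(), grab() followed by retrieve() is exactly what VideoCapture::read() (and operator>>) does internally; the split form only pays off when several cameras must be synchronized or one grab feeds several retrieve() channels. A minimal sketch of the one-call equivalent:

#include <opencv2/opencv.hpp>

int main()
{
    cv::VideoCapture cap(0);             // default camera
    if (!cap.isOpened()) return 1;
    cv::Mat img;
    while (cap.read(img))                // read() = grab() + retrieve()
    {
        cv::imshow("cam", img);
        if (cv::waitKey(10) >= 0) break; // any key exits
    }
    return 0;
}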
Example #3
int main(int argc, char* argv[])
{
  VideoCapture cap;                     // MJPEG stream from a networked camera
  cap.open("http://192.168.1.139:1234/?action=stream");
  if (!cap.isOpened())                  // if not successful, exit program
  {
    cout << "Cannot open the video cam" << endl;
    return -1;
  }

  // UDP socket for sending results (socket variables are globals declared elsewhere)
  clientSocket = socket(PF_INET, SOCK_DGRAM, 0);
  serverAddr.sin_family = AF_INET;
  serverAddr.sin_port = htons(33333);
  serverAddr.sin_addr.s_addr = inet_addr("192.168.1.139");
  memset(serverAddr.sin_zero, '\0', sizeof serverAddr.sin_zero);
  addr_size = sizeof serverAddr;

  while (cap.grab())                    // stop when the stream ends
  {
    cap.retrieve(frame);
    adjusted_color_img = brightness_and_contrast(frame);
    color_detection(adjusted_color_img);
    imshow("view", frame);
    waitKey(20);
    usleep(2000);
  }
}
Example #4
int main(int argc, char *argv[]){

  ros::init(argc,argv,"aruco_test");
  ros::NodeHandle nh;

  namedWindow("detection_result");
  startWindowThread();
  namedWindow("board");
  startWindowThread();

  /*  Mat distCoeffs = (Mat_<float>(1,5) << 0.182276,-0.533582,0.000520,-0.001682,0.000000);
  Mat camMatrix  = (Mat_<float>(3,3) << 
		    743.023418,0.000000,329.117496,
		    0.000000,745.126083,235.748102,
		    0.000000,0.000000,1.000000);*/

  
  VideoCapture input;
  input.open(0);                  // default camera
  if (!input.isOpened()) {
    cerr << "Could not open camera" << endl;
    return -1;
  }
  Mat boardImg;
  createBoard(boardImg);

  imshow("board",boardImg);


  while(input.grab() && ros::ok()){
    Mat image,result;
    input.retrieve(image);
    process(image,result);
    imshow("detection_result",result);
  } 
  return 0;
}
Example #5
int getDepthImage(VideoCapture &capture, Mat &depth_image){  // capture by reference: a VideoCapture should not be copied

	// depth image
	if( !capture.grab() )
	{
		cout << "Cannot grab images." << endl;
		return -1;
	}

	if( capture.retrieve( depth_image, CAP_OPENNI_DEPTH_MAP ) )
	{
		// scale 16-bit depth (mm) down to 8 bits for display; 0.05 maps ~5.1 m to 255
		const float scaleFactor = 0.05f;
		depth_image.convertTo( depth_image, CV_8UC1, scaleFactor );
		//imshow( "depth map", depth_image );
	}

	return 0;
}
Example #6
    JNIEXPORT jboolean JNICALL Java_org_opencv_highgui_VideoCapture_n_1retrieve__JJ
    (JNIEnv* env, jclass, jlong self, jlong image_nativeObj)
    {
        try {
#ifdef DEBUG
            LOGD("highgui::VideoCapture_n_1retrieve__JJ()");
#endif // DEBUG
            VideoCapture* me = (VideoCapture*) self; //TODO: check for NULL
            Mat& image = *((Mat*)image_nativeObj);
            bool _retval_ = me->retrieve( image );

            return _retval_;
        } catch(const cv::Exception& e) {  // catch by const reference, not by value
#ifdef DEBUG
            LOGD("highgui::VideoCapture_n_1retrieve__JJ() caught cv::Exception: %s", e.what());
#endif // DEBUG
            jclass je = env->FindClass("org/opencv/core/CvException");
            if(!je) je = env->FindClass("java/lang/Exception");
            env->ThrowNew(je, e.what());
            return 0;
        } catch (...) {
#ifdef DEBUG
            LOGD("highgui::VideoCapture_n_1retrieve__JJ() catched unknown exception (...)");
#endif // DEBUG
            jclass je = env->FindClass("java/lang/Exception");
            env->ThrowNew(je, "Unknown exception in JNI code {highgui::VideoCapture_n_1retrieve__JJ()}");
            return 0;
        }
    }
Example #7
void vIdle() {
    if (TheCaptureFlag) {
        // capture image
        TheVideoCapturer.grab();
        TheVideoCapturer.retrieve(TheInputImage);
        TheUndInputImage.create(TheInputImage.size(), CV_8UC3);
        // by default, OpenCV works in BGR, so we must convert to RGB because OpenGL on Windows prefers RGB
        cv::cvtColor(TheInputImage, TheInputImage, CV_BGR2RGB);
        // remove distortion from the image
        cv::undistort(TheInputImage, TheUndInputImage, TheCameraParams.CameraMatrix,
                      TheCameraParams.Distorsion);
        // detect markers
        MDetector.detect(TheUndInputImage, TheMarkers);
        // detection of the board
        TheBoardDetected.second = TheBoardDetector.detect(
            TheMarkers, TheBoardConfig, TheBoardDetected.first, TheCameraParams, TheMarkerSize);
        // check the speed by calculating the mean speed of all iterations
        // resize the image to the size of the GL window
        cv::resize(TheUndInputImage, TheResizedImage, TheGlWindowSize);
        // create mask: a synthetic mask consisting of a simple rectangle, just to show an example of
        // OpenGL with a mask
        TheMask = createSyntheticMask(TheResizedImage); // same size as the resized image,
                                                        // i.e. the size of the OpenGL window
    }
    glutPostRedisplay();
}
Example #8
void video_thread_CL(void* pParams)
{
	// select the OpenCL or CPU implementation; both derive from FaceDetector
	FaceDetector *faceDetector;
	if (threadUseCL){
		faceDetector = (FaceDetectorCL*)pParams;
	}
	else{
		faceDetector = (FaceDetectorCpu*)pParams;
	}
	
	std::string name = faceDetector->name();

	//HAAR_EYE_TREE_EYEGLASSES_DATA
	//HAAR_EYE_DATA
	//HAAR_FRONT_FACE_DEFAULT_DATA
	//LBP_FRONTAL_FACE
	//LBP_PROFILE_FACE
	faceDetector->load(HAAR_FRONT_FACE_DEFAULT_DATA);

	VideoCapture videoCapture;
	cv::Mat frame, frameCopy, image;

	videoCapture.open(faceDetector->videoFile().c_str());
	if (!videoCapture.isOpened()) { cout << "No video detected" << endl; return; }

	if (imshowFlag) { cv::namedWindow(name.c_str(), 1); }

	if (videoCapture.isOpened())
	{
		cout << "In capture ..." << name.c_str() << endl;
		while (videoCapture.grab())
		{
			if (!videoCapture.retrieve(frame, 0)) { break; }

			faceDetector->setSrcImg(frame, 1);
			faceDetector->doWork();
			if (imshowFlag){ cv::imshow(name.c_str(), faceDetector->resultMat()); }
			
			std::vector<cv::Rect> &faces_result = faceDetector->getResultFaces();
			std::cout << "face --" << name.c_str() << std::endl;
			for (size_t i = 0; i < faces_result.size(); ++i){
				std::cout << faces_result.at(i).x << ", " << faces_result.at(i).y << std::endl;
			}
			
			if (waitKey(10) >= 0){
				videoCapture.release();
				break;
			}

			Sleep(1);
		}
	}
	
	if (imshowFlag) { cvDestroyWindow(name.c_str()); }
	finishTaskFlag++;
	_endthread();
	return;

}
Example #9
void threadGrab0(){
        cap0.grab();
        cap0.retrieve(img0);
        Mat Tmp = img0.t();               // transpose + vertical flip = rotate 90 deg counter-clockwise
        flip(Tmp,img0,0);
        leftImgs.push_back(img0.clone()); // clone so each stored frame owns its own pixels
        //outputVideocap0.write(img0);
}
Example #10
void threadGrab1(){
        cap1.grab();
        cap1.retrieve(img1);
        Mat Tmp = img1.t();                // transpose + horizontal flip = rotate 90 deg clockwise
        flip(Tmp,img1,1);
        rightImgs.push_back(img1.clone()); // clone so each stored frame owns its own pixels
        //outputVideocap1.write(img1);
}
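These two thread bodies show why grab() and retrieve() are separate calls: grab() is cheap, so issuing it on both cameras before either (slower) retrieve() keeps the stereo pair close in time. A minimal single-threaded sketch of that pattern, assuming cap0 and cap1 are already-opened captures:

#include <opencv2/opencv.hpp>

// Grab both sensors back to back, then decode; this minimizes the time
// skew between the left and right exposures.
bool grabStereoPair(cv::VideoCapture& cap0, cv::VideoCapture& cap1,
                    cv::Mat& left, cv::Mat& right)
{
    if (!cap0.grab() || !cap1.grab())   // fast: just latch the frames
        return false;
    return cap0.retrieve(left) &&       // slow: decode afterwards
           cap1.retrieve(right);
}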
Example #11
ImagePacket getFrames(VideoCapture &capture) {  // capture by reference: a VideoCapture should not be copied
	Mat depthMap;
	Mat color;
	Mat uvMap;

	capture.grab();                             // one grab, then retrieve each Intel PerC stream

	capture.retrieve( depthMap, CV_CAP_INTELPERC_DEPTH_MAP );
	transpose(depthMap, depthMap);

	capture.retrieve( color, CV_CAP_INTELPERC_IMAGE );
	transpose(color, color);

	capture.retrieve( uvMap, CV_CAP_INTELPERC_UVDEPTH_MAP );
	//transpose(uvMap, uvMap);

	return ImagePacket(color, depthMap, uvMap);
}
Example #12
/**
 * @param cam - camera that will be used
 * @param src - destination matrix for the retrieved frame
 */
void checkFeed(VideoCapture &cam, Mat &src){ // both by reference: retrieve() must write into the caller's Mat
    if(!cam.isOpened()){
        cout << "Camera not detected." << endl;
        return;
    }
    if (cam.grab()){
        if (!cam.retrieve(src)){
            cout << "Could not retrieve." << endl;
        }
    }
}
Example #13
bool getNextFrame(VideoCapture& video, Mat& frame, int& framenum, int jump = 1)
{
	// firstGot should regulate itself so it'll reset when a video runs out of frames
	
	bool moreFrames = true;
	if(firstGot) // not first frame
	{
		bool val1 = true;
		for(int i = 0; i < jump; i++)
			if(!video.grab())
				val1 = false;
		bool val2 = video.retrieve(frame);
		framenum += jump;
		if(!val1 || !val2)
		{
			firstGot = false; // this means video ended
			moreFrames = false;
		}
	}
	else // first frame
	{
		bool val = video.grab();
		firstGot = true;
		if(!val)
		{
			printf("first frame val failed on grab\n");
			firstGot = false;
			moreFrames = false;
		}
		val = video.retrieve(frame);
		if(!val)
		{
			printf("first frame failed on retreive\n");
			firstGot = false;
			moreFrames = false;
		}
		framenum++;
	}
	return moreFrames;
}
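A possible alternative to the grab() loop for skipping frames is seeking with CAP_PROP_POS_FRAMES, sketched below. Seeking is only frame-accurate on some codec/backend combinations, which is why the looped grab() above is the safer default:

#include <opencv2/opencv.hpp>

// Jump to an absolute frame index and decode it; may land on the nearest
// keyframe depending on the container and backend.
bool jumpToFrame(cv::VideoCapture& video, int frameIndex, cv::Mat& frame)
{
    video.set(cv::CAP_PROP_POS_FRAMES, frameIndex);
    return video.read(frame);
}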
Example #14
      bool retrieve(Mat *image)
      {
         bool bSuccess = camera_.retrieve(frame_); // decode the last grabbed frame
         if (!bSuccess) // if not successful, report and bail out
         {
            printf("Cannot retrieve a frame from video stream in camera %d \n", camNum_);
            return false;
         }

         *image = frame_;
         printf("Retrieved a frame from video stream in camera %d \n", camNum_);
         return true;
      }
Example #15
int main(){
    pthread_t thread;
    // imageTakingThread is assumed to open vidCap and keep grabbing into the global 'mat'
    int threadSuccess = pthread_create(&thread, NULL, imageTakingThread, NULL);
    if (threadSuccess != 0) return 1;

    // NOTE: no synchronization here; 'mat' may still be empty when it is saved
    cout << "got one photo\n";
    imwrite("1st.jpg",mat);
    int a;
    cin >> a;                       // pause until the user types something

    Mat matToProcess;
    vidCap.retrieve(matToProcess);  // decode the most recently grabbed frame
    imwrite("2nd.jpg",matToProcess);
    cout << "done" << endl;
}
Example #16
bool getVideoDescriptor(const string &_videoLocation, vector<DescriptorPtr> &_outputDescriptor, const DescType &_type, const int &_skipFrames, const int param)
{
	bool statusOk = true;

	if (_skipFrames < 0)
	{
		cout << "ERROR: invalid number of frames to skip\n";
		statusOk = false;
	}
	else
	{
		VideoCapture capture;
		capture.open(_videoLocation);
		if (!capture.isOpened())
		{
			cout << "ERROR: Can't open target video " << _videoLocation << endl;
			statusOk = false;
		}
		else
		{
			_outputDescriptor.clear();
			double totalFrames = capture.get(CV_CAP_PROP_FRAME_COUNT);

			cout << "Total frames in query: " << totalFrames << "\n";

			Mat frame, grayFrame;
			int k = 0;
			for (int j = 0; j < totalFrames; j++)
			{
				if (!capture.grab() || !capture.retrieve(frame))
				{
					cout << "ERROR: Can't get new frame. Finishing loop.\n";
					statusOk = false;
					break;
				}

				if (k == _skipFrames)
				{
					cvtColor(frame, grayFrame, COLOR_BGR2GRAY);
					_outputDescriptor.push_back(DescriptorPtr(new Descriptor(grayFrame, j, _type, param)));

					k = -1;
				}
				k++;
			}
		}
	}

	return statusOk;
}
Example #17
void doOpenNI(
    VideoCapture& capture, Mat& depthOut,
    Mat& rgbImage)
{
    Mat depthMap, depthValid;
    capture.grab();
    capture.retrieve( depthMap, CV_CAP_OPENNI_DEPTH_MAP );
    capture.retrieve( rgbImage, CV_CAP_OPENNI_BGR_IMAGE );
    capture.retrieve( depthValid, CV_CAP_OPENNI_VALID_DEPTH_MASK);

    // get circles
    //vector<Vec3f> circles;
    //HoughCircles(depthMap,circles, CV_HOUGH_GRADIENT, 2,
    //	 depthMap.rows/4, 200, 100);
    //for(int iter = 0; iter < circles.size(); iter++)
    //{
    //  Point center(cvRound(circles[iter][0]),cvRound(circles[iter][1]));
    //  int radius = cvRound(circles[iter][2]);
    //  circle(rgbImage, center, radius, Scalar(0,0,255), 3, 8, 0);
    //}

    // normalize the depth image
    double depth_min, depth_max;
    //minMaxLoc(depthMap, &depth_min, &depth_max);
    //cout << "min = " << depth_min << " max = " << depth_max << endl;
    depthMap.convertTo(depthMap,cv::DataType<float>::type);
    depthMap.setTo(numeric_limits<float>::quiet_NaN(),1-depthValid);
    //cout << depthMap << endl;

    // get edges
    //Mat depthEdges = autoCanny(255*depthMap);
    //imshow("Captured: Depth edges",depthEdges);

    depthOut = depthMap/10; // convert mm to cm
    depthOut = fillDepthHoles(depthOut);
}
Example #18
void CameraStreamer::captureFrame(int index)
{
    VideoCapture *capture = camera_capture[index];
    while (true)
    {
        Mat frame;
        // grab frame from camera capture
        capture->grab();
        capture->retrieve(frame, CV_CAP_OPENNI_BGR_IMAGE);
        // put frame into the queue; the queued Mat shares the refcounted pixel buffer
        frame_queue[index]->push(frame);
        //imshow(to_string(index), frame);
        // drop this thread's reference; the queue still holds one
        frame.release();
    }
}
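The frame_queue used above is some thread-safe queue type from the surrounding project. A minimal sketch of such a producer/consumer hand-off with a plain mutex-guarded std::queue (names hypothetical); cloning on push decouples the stored frame from the capture buffer:

#include <opencv2/opencv.hpp>
#include <mutex>
#include <queue>

std::mutex g_mtx;
std::queue<cv::Mat> g_frames;

void pushFrame(const cv::Mat& frame)
{
    std::lock_guard<std::mutex> lock(g_mtx);
    g_frames.push(frame.clone());   // deep copy: the queue owns its pixels
}

bool popFrame(cv::Mat& frame)
{
    std::lock_guard<std::mutex> lock(g_mtx);
    if (g_frames.empty()) return false;
    frame = g_frames.front();
    g_frames.pop();
    return true;
}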
Example #19
extern "C" JNIEXPORT void JNICALL Java_narl_itrc_vision_CapVidcap_implFetch(
	JNIEnv* env,
	jobject thiz,
	jobject objCamera
){
	PREPARE_CONTEXT;
	PREPARE_CAMERA;

	VideoCapture* vid = (VideoCapture*)(env->GetLongField(thiz,f_cntx));
	if(vid->grab()==false){
		return;
	}
	Mat img;
	vid->retrieve(img);

	FINISH_CAMERA(img);
}
Example #20
void vIdle()
{
    if (TheCaptureFlag) {
        //capture image
        TheVideoCapturer.grab();
        TheVideoCapturer.retrieve( TheInputImage);
        TheUndInputImage.create(TheInputImage.size(),CV_8UC3);
        //transform color: the default BGR is converted to RGB because OpenGL on Windows does not read BGR images properly
        cv::cvtColor(TheInputImage,TheInputImage,CV_BGR2RGB);
        //remove distortion from the image
        cv::undistort(TheInputImage,TheUndInputImage, TheCameraParams.CameraMatrix, TheCameraParams.Distorsion);
        //detect markers
        PPDetector.detect(TheUndInputImage,TheMarkers, TheCameraParams.CameraMatrix,Mat(),TheMarkerSize,false);
        //resize the image to the size of the GL window
        cv::resize(TheUndInputImage,TheResizedImage,TheGlWindowSize);
    }
    glutPostRedisplay();
}
Example #21
int captureImages (int camera) {
  char key = 0;

  int numSnapshot = 1;
  std::string snapshotFilename = "1";

  cout << "Press 's' to take snapshots" << std::endl;
  cout << "Press 'Esc' to finish" << std::endl;

  TheVideoCapturer.open(camera);	// 0 -> laptop camera --- 1 -> external webcam

  /**
   * NONE OF THE CAMERAS TESTED ALLOWS CHANGING ANY PARAMETER
   */
  /*TheVideoCapturer.set(CV_CAP_PROP_FPS, 25);
  std::cout <<  TheVideoCapturer.get(CV_CAP_PROP_EXPOSURE) << std::endl;
  bool b = TheVideoCapturer.set(CV_CAP_PROP_EXPOSURE, 10);
  std::cout <<  TheVideoCapturer.get(CV_CAP_PROP_EXPOSURE) << " bool: " << b << " " << std::endl;*/


  if (!TheVideoCapturer.isOpened()) {
    cerr << "Could not open video" << std::endl;
    return -1;
  }

  while (key != 27 && TheVideoCapturer.grab()) {   // 27 = Esc
    TheVideoCapturer.retrieve(bgrMap);
    cvtColor(bgrMap, bgrMap, CV_BGR2GRAY);         // snapshots are saved as grayscale

    imshow("BGR image", bgrMap);

    if (key == 's') {
      imwrite("Data/Captures/" + snapshotFilename + ".jpg", bgrMap);
      numSnapshot++;
      snapshotFilename = static_cast<std::ostringstream*>(&(std::ostringstream() << numSnapshot))->str();
      //cout << "Capture saved.";
    }

    key = waitKey(20);
  }
  cvDestroyWindow("BGR image"); // destroy window

  return numSnapshot;
}
Example #22
/*!
 *  
 */
static void vIdle()
{
  if (capture)
  {
    //capture image
    vCapturer.grab();
    vCapturer.retrieve( inImg);
    undImg.create(inImg.size(),CV_8UC3);
    //transform color that by default is BGR to RGB because Windows systems do not allow reading
    //BGR images with OpenGL properly
//    cv::cvtColor(inImg,inImg,CV_BGR2RGB);
    //remove distortion from the image
    cv::undistort(inImg,undImg, camParams.getCamMatrix(), camParams.getDistor());
    //detect markers
    mDetector.detect(undImg,markers, camParams.getCamMatrix(),Mat(),msiz->dval[0]);
    //resize the image to the size of the GL window
    cv::resize(undImg,resImg,glSize);
  }
  glutPostRedisplay();
}
Example #23
int getBGRImage(VideoCapture &capture, Mat &bgr_image){  // capture by reference: a VideoCapture should not be copied

	// BGR image
	if( !capture.grab() )
	{
		cout << "Cannot grab BGR images." << endl;
		return -1;
	}

	if( capture.retrieve( bgr_image, CAP_OPENNI_BGR_IMAGE ) )
	{
		//imshow( "bgr map", bgr_image );
	}

	return 0;
}
Example #24
bool
readNextFrame(VideoCapture &reader, Mat &mat)
{
    if (reader.isOpened())
    {
        if (!reader.grab())
        {
            std::cerr << "Frame was not grabbed successfully for device\n";
            return false;
        }
        if (!reader.retrieve(mat))
        {
            std::cerr << "Frame was not decoded successfully for device\n";
            return false;
        }
    } else
    {
        return false;
    }
    return true;
}
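A minimal usage sketch for readNextFrame() above, assuming a camera at index 0; the loop ends on the first grab or decode failure:

#include <opencv2/opencv.hpp>

int main()
{
    cv::VideoCapture reader(0);
    cv::Mat mat;
    while (readNextFrame(reader, mat))  // false once grabbing or decoding fails
    {
        cv::imshow("frame", mat);
        if (cv::waitKey(1) >= 0) break;
    }
    return 0;
}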
Example #25
void vIdle()
{
    if (TheCaptureFlag) {
        //capture image
        TheVideoCapturer.grab();
        TheVideoCapturer.retrieve( TheInputImage);
        TheUndInputImage.create(TheInputImage.size(),CV_8UC3);
        //by default, OpenCV works in BGR, so we must convert to RGB because OpenGL on Windows prefers RGB
        cv::cvtColor(TheInputImage,TheInputImage,CV_BGR2RGB);
        //remove distortion from the image
        cv::undistort(TheInputImage,TheUndInputImage, TheCameraParams.CameraMatrix,TheCameraParams.Distorsion);
        //detect markers
        MDetector.detect(TheUndInputImage,TheMarkers,TheCameraParams.CameraMatrix,Mat(),TheMarkerSize);
        //detection of the board
        TheBoardDetected.second=TheBoardDetector.detect( TheMarkers, TheBoardConfig,TheBoardDetected.first, TheCameraParams,TheMarkerSize);
        //check the speed by calculating the mean speed of all iterations
        //resize the image to the size of the GL window
        cv::resize(TheUndInputImage,TheResizedImage,TheGlWindowSize);
    }
    glutPostRedisplay();
}
Example #26
void CameraEngine::run()
{
    qDebug() << "camera thread started";
    VideoCapture capture;
    capture.open(m_camera);
    if(!capture.isOpened()){
        qDebug() << "ERROR: could not open camera capture" << m_camera;
    }
    qDebug() << "   Frame Size: " << capture.get(CV_CAP_PROP_FRAME_WIDTH) << capture.get(CV_CAP_PROP_FRAME_HEIGHT);
    m_frameSize = Size(capture.get(CV_CAP_PROP_FRAME_WIDTH), capture.get(CV_CAP_PROP_FRAME_HEIGHT));

    Mat image;
    while(1){
        // check if we should stop the thread...
        m_stopMutex.lock();
        if(m_stop){
            m_stop=false;
            m_stopMutex.unlock();
            break;
        }
        m_stopMutex.unlock();

        // Capture frame (if available)
        if (!capture.grab())
            continue;

        if(!capture.retrieve(image)){
            qDebug() << "ERROR: could not read image from camera.";
        }
        emit updateImage(image);
    }
    qDebug() << "camera thread stopped";
    capture.release();
    if(capture.isOpened()){
        qDebug() << "could not release capture";
    }
}
Example #27
    /*override*/ void* operator()(void*)
	{
		if (isVideo)
		{
			Mat* _retMat=new Mat();
			if (!capture->grab())
			{
				delete _retMat;
				return NULL;
			} else
			{
				capture->retrieve(*_retMat);
				frame=_retMat;
				if (isShow && frame)
				{
					imshow("Source", *frame);
				}
				return _retMat;
			}
		} else
		{
			if (frame)
			{
				Mat* _retMat=new Mat(*frame);
				//_retMat=frame;
				if (isShow)
				{
					imshow("Source", *frame);
				}
				frame=NULL;
				return _retMat;
			} else
			{
				return NULL;
			}
		}
	}
Example #28
int main(int argc, char** argv)
{
    try
    {
        CmdLineParser cml(argc, argv);
        if (argc < 2 || cml["-h"])
        {
            cerr << "Invalid number of arguments" << endl;
            cerr << "Usage: (in.avi|live[:camera_index(e.g. 0 or 1)]) [-c camera_params.yml] [-s  marker_size_in_meters] [-d "
                    "dictionary:ARUCO by default] [-h]"
                 << endl;
            cerr << "\tDictionaries: ";
            for (auto dict : aruco::Dictionary::getDicTypes())
                cerr << dict << " ";
            cerr << endl;
            cerr << "\t Instead of these, you can directly indicate the path to a file with your own generated "
                    "dictionary"
                 << endl;
            return 1;  // non-zero exit code: returning 'false' from main would signal success
        }

        ///////////  PARSE ARGUMENTS
        string TheInputVideo = argv[1];
        // read camera parameters if passed
        if (cml["-c"])
            TheCameraParameters.readFromXMLFile(cml("-c"));
        float TheMarkerSize = std::stof(cml("-s", "-1"));
        // aruco::Dictionary::DICT_TYPES  TheDictionary= Dictionary::getTypeFromString( cml("-d","ARUCO") );

        ///////////  OPEN VIDEO
        // read from camera or from  file
        if (TheInputVideo.find("live") != string::npos)
        {
            int vIdx = 0;
            // check if the :idx is here
            char cad[100];
            if (TheInputVideo.find(":") != string::npos)
            {
                std::replace(TheInputVideo.begin(), TheInputVideo.end(), ':', ' ');
                sscanf(TheInputVideo.c_str(), "%s %d", cad, &vIdx);
            }
            cout << "Opening camera index " << vIdx << endl;
            TheVideoCapturer.open(vIdx);
            waitTime = 10;
        }
        else
            TheVideoCapturer.open(TheInputVideo);
        // check video is open
        if (!TheVideoCapturer.isOpened())
            throw std::runtime_error("Could not open video");

        ///// CONFIGURE DATA
        // read first image to get the dimensions
        TheVideoCapturer >> TheInputImage;
        if (TheCameraParameters.isValid())
            TheCameraParameters.resize(TheInputImage.size());
        dictionaryString=cml("-d", "ARUCO");
        MDetector.setDictionary(dictionaryString,float(iCorrectionRate)/10. );  // sets the dictionary to be employed (ARUCO,APRILTAGS,ARTOOLKIT,etc)
        MDetector.setThresholdParams(7, 7);
        MDetector.setThresholdParamRange(2, 0);

        // gui requirements : the trackbars to change these parameters
        iThresParam1 = static_cast<int>(MDetector.getParams()._thresParam1);
        iThresParam2 = static_cast<int>(MDetector.getParams()._thresParam2);
        cv::namedWindow("in");
        cv::createTrackbar("ThresParam1", "in", &iThresParam1, 25, cvTackBarEvents);
        cv::createTrackbar("ThresParam2", "in", &iThresParam2, 13, cvTackBarEvents);
        cv::createTrackbar("correction_rate", "in", &iCorrectionRate, 10, cvTackBarEvents);
        cv::createTrackbar("EnclosedMarkers", "in", &iEnclosedMarkers, 1, cvTackBarEvents);
        cv::createTrackbar("ShowAllCandidates", "in", &iShowAllCandidates, 1, cvTackBarEvents);

        // go!
        char key = 0;
        int index = 0,indexSave=0;
        // capture until press ESC or until the end of the video
        do
        {
            TheVideoCapturer.retrieve(TheInputImage);
            // copy image
            double tick = (double)getTickCount();  // for checking the speed
            // Detection of markers in the image passed
            TheMarkers = MDetector.detect(TheInputImage, TheCameraParameters, TheMarkerSize);
            // check the speed by calculating the mean speed of all iterations
            AvrgTime.first += ((double)getTickCount() - tick) / getTickFrequency();
            AvrgTime.second++;
            cout << "\rTime detection=" << 1000 * AvrgTime.first / AvrgTime.second
                 << " milliseconds nmarkers=" << TheMarkers.size() << std::endl;

            // print marker info and draw the markers in image
            TheInputImage.copyTo(TheInputImageCopy);

            if (iShowAllCandidates){
                auto candidates=MDetector.getCandidates();
                for(auto cand:candidates)
                    Marker(cand,-1).draw(TheInputImageCopy, Scalar(255, 0, 255));
            }

            for (unsigned int i = 0; i < TheMarkers.size(); i++)
            {
                cout << TheMarkers[i] << endl;
                TheMarkers[i].draw(TheInputImageCopy, Scalar(0, 0, 255));
            }

            // draw a 3d cube in each marker if there is 3d info
            if (TheCameraParameters.isValid() && TheMarkerSize > 0)
                for (unsigned int i = 0; i < TheMarkers.size(); i++)
                {
                    CvDrawingUtils::draw3dCube(TheInputImageCopy, TheMarkers[i], TheCameraParameters);
                    CvDrawingUtils::draw3dAxis(TheInputImageCopy, TheMarkers[i], TheCameraParameters);
                }

            // DONE! Easy, right?
            // show input with augmented information and  the thresholded image
            cv::imshow("in", resize(TheInputImageCopy, 1280));
            cv::imshow("thres", resize(MDetector.getThresholdedImage(), 1280));

            key = cv::waitKey(waitTime);  // wait for key to be pressed
            if (key == 's')
                waitTime = waitTime == 0 ? 10 : 0;
            if (key == 'w'){//writes current input image
                string number=std::to_string(indexSave++);
                while(number.size()!=3)number+="0";
                string imname="arucoimage"+number+".png";
                cv::imwrite(imname,TheInputImage);
                cout<<"saved "<<imname<<endl;
            }
            index++;  // number of images captured

        } while (key != 27 && (TheVideoCapturer.grab()));
    }
    catch (std::exception& ex)

    {
        cout << "Exception :" << ex.what() << endl;
    }
}
Example #29
void detectMarker(bool checkWrite)
{
	auto dictionaryId = 10;
	float markerLength = 0.04f;          // marker side length in meters

	auto detectorParams = aruco::DetectorParameters::create();

	detectorParams->doCornerRefinement = true; // do corner refinement in markers

	auto camId = 0;

	auto dictionary =
		aruco::getPredefinedDictionary(aruco::PREDEFINED_DICTIONARY_NAME(dictionaryId));

	Mat camMatrix, distCoeffs;
	auto readOk = readCameraParameters("camera.yaml", camMatrix, distCoeffs);
	if (!readOk) {
		cerr << "Invalid camera file" << endl;
		return;                          // pose estimation is meaningless without calibration
	}

	VideoCapture inputVideo;
	inputVideo.open(camId);
	auto waitTime = 10;

	double totalTime = 0;
	auto totalIterations = 0;
	
	double theta;
	void (*wf)(double mean, double stdDev);
	wf = &writeToFile;
	vector<double> angles;

	while (inputVideo.grab()) {
		Mat image, imageCopy;
		inputVideo.retrieve(image);

		auto tick = double(getTickCount());

		vector< int > ids;
		vector< vector< Point2f > > corners, rejected;
		vector< Vec3d > rvecs, tvecs;

		// detect markers and estimate pose
		aruco::detectMarkers(image, dictionary, corners, ids, detectorParams, rejected);
		if (ids.size() > 0)
			aruco::estimatePoseSingleMarkers(corners, markerLength, camMatrix, distCoeffs, rvecs,
			tvecs);

		// draw results
		image.copyTo(imageCopy);
		if (ids.size() > 0) {
			aruco::drawDetectedMarkers(imageCopy, corners, ids);

			for (unsigned int i = 0; i < ids.size(); i++) {
				aruco::drawAxis(imageCopy, camMatrix, distCoeffs, rvecs[i], tvecs[i],
					markerLength * 0.5f);
				cout << "y: " << rvecs[i][2] * 57.2958 << " x :" << rvecs[i][0] * 57.2958 << " z: " << rvecs[i][1] * 57.2958 << endl;
				angles.push_back(rvecs[i][2]);
			}

			if (angles.size() == 50 && checkWrite == true)
			{
				doStatistics(angles, wf);
				break;
			}
		}

		imshow("out", imageCopy);
		auto key = char(waitKey(waitTime));
		if (key == 27) break;
	}
}
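The printout above reads the components of rvec as angles in degrees (57.2958 = 180/pi), which is only an approximation: rvec is an axis-angle (Rodrigues) vector, not a set of Euler angles. A minimal sketch of the exact conversion to a rotation matrix:

#include <opencv2/opencv.hpp>

// Convert an axis-angle rotation vector (as returned by
// estimatePoseSingleMarkers) into a proper 3x3 rotation matrix.
cv::Matx33d rotationFromRvec(const cv::Vec3d& rvec)
{
    cv::Matx33d R;
    cv::Rodrigues(rvec, R);
    return R;
}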
Example #30
int main(int argc, const char * argv[]) {

    //----------------------
    // Open an OpenNI device
    //----------------------

    //TODO: You'll want to open an RGB camera stream here too (the one with which you wish to register the depth)

    cout << "Device opening ..." << endl;

    VideoCapture capture;
    capture.open( CAP_OPENNI );

    if( !capture.isOpened() )
    {
        cout << "Can not open a capture object." << endl;
        return -1;
    }

    // We don't want registration on, since we're going to do it ourselves.
    // Some devices with RGB cameras can perform registration on device
    bool modeRes = capture.set( CAP_PROP_OPENNI_REGISTRATION, 0 );

    if (!modeRes) {
        cout << "Can't disable registration. That's crazy!\n" << endl;
        return -1;
    }

    // Display the current configuration
    cout << "\nDepth generator output mode:" << endl <<
        "FRAME_WIDTH      " << capture.get( CAP_PROP_FRAME_WIDTH ) << endl <<
        "FRAME_HEIGHT     " << capture.get( CAP_PROP_FRAME_HEIGHT ) << endl <<
        "FRAME_MAX_DEPTH  " << capture.get( CAP_PROP_OPENNI_FRAME_MAX_DEPTH ) << " mm" << endl <<
        "FPS              " << capture.get( CAP_PROP_FPS ) << endl <<
        "REGISTRATION     " << capture.get( CAP_PROP_OPENNI_REGISTRATION ) << endl;


    //---------------------------------------
    // Specify camera properties and geometry
    //--------------------------------------

    //TODO: Fill in the values for your setup.

    // Depth camera intrinsics
    Matx33f unregisteredCameraMatrix = Matx33f::eye();
    unregisteredCameraMatrix(0,0) = 570.0f;
    unregisteredCameraMatrix(1,1) = 570.0f;
    unregisteredCameraMatrix(0,2) = 320.0f-0.5f;
    unregisteredCameraMatrix(1,2) = 240.0f-0.5f;

    // NOTE: The depth distortion coefficients are currently not used by the Registration class.
    Vec<float, 5> unregisteredDistCoeffs(0,0,0,0,0);


    // RGB camera intrinsics
    Matx33f registeredCameraMatrix = Matx33f::eye();
    registeredCameraMatrix(0,0) = 570.0f;
    registeredCameraMatrix(1,1) = 570.0f;
    registeredCameraMatrix(0,2) = 320.0f-0.5f;
    registeredCameraMatrix(1,2) = 240.0f-0.5f;

    Vec<float, 5> registeredDistCoeffs(0,0,0,0,0);

    Size2i registeredImagePlaneSize = Size2i(640, 480);

    // The rigid body transformation between cameras.
    // Used as: uv_rgb = K_rgb * [R | t] * z * inv(K_ir) * uv_ir
    Matx44f registrationRbt = Matx44f::eye();
    registrationRbt(0,3) = .04;


    //------------------------------
    // Create our registration class
    //------------------------------
    oc::Registration registration(unregisteredCameraMatrix,
                                  unregisteredDistCoeffs,
                                  registeredCameraMatrix,
                                  registeredDistCoeffs,
                                  registrationRbt);

    for (;;) {

        Mat_<uint16_t> depthMap;

        if( !capture.grab() )
        {
            cout << "Can't grab depth." << endl;
            return -1;
        }
        else
        {
            if( capture.retrieve( depthMap, CAP_OPENNI_DEPTH_MAP ) )
            {

                // Actually perform the registration
                Mat_<uint16_t> registeredDepth;
                bool performDilation = false;
                registration.registerDepthToColor(depthMap,
                                                  registeredImagePlaneSize,
                                                  registeredDepth,
                                                  performDilation);


                //Display the unregistered and registered depth
                const float scaleFactor = 0.05f;
                {
                    Mat_<uint8_t> show;
                    depthMap.convertTo( show, CV_8UC1, scaleFactor );
                    imshow( "depth map", show );
                }
                {
                    Mat_<uint8_t> show;
                    registeredDepth.convertTo( show, CV_8UC1, scaleFactor );
                    imshow( "registered map", show );
                }

            }

        }

        if( waitKey( 1 ) >= 0 )
            break;
    }



    return 0;
}