bool update(void) {
		bool result = false;
		if(useVideo == 0) {
			// OpenNI device: one grab, then separate retrieves for color and depth
			result = cap.grab();
			cap.retrieve(rgbImg, CV_CAP_OPENNI_BGR_IMAGE);
			cap.retrieve(depthMap, CV_CAP_OPENNI_DEPTH_MAP);
		}
		else if(useVideo == 1) {
			// color-only video: no depth available
			result = cap.grab();
			cap.retrieve(rgbImg);
			depthMap = Mat();
		}
		else if(useVideo == 2) {
			// two video files: color in cap, depth packed into 8-bit channel pairs in cap1
			result = (cap.grab() && cap1.grab());
			if(result) {
				cap.retrieve(rgbImg);
				cap1.retrieve(depthImg);
				depthMap.create(depthImg.size(), CV_16UC1); // allocate before writing per-pixel
				for(int i=0; i<depthImg.rows; i++) {
					for(int j=0; j<depthImg.cols; j++) {
						// reassemble the 16-bit depth value from the first two 8-bit channels
						depthMap.at<ushort>(i,j) = ushort(int(depthImg.at<Vec3b>(i,j)[0])*256 + int(depthImg.at<Vec3b>(i,j)[1]));
					}
				}
			}
		}
		return result;
	}
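// A minimal usage sketch for update() above; the members cap, useVideo, rgbImg
// and depthMap are taken from the example, the loop itself is an assumption:
//
//   while (update()) {
//       imshow("rgb", rgbImg);
//       if (!depthMap.empty()) imshow("depth", depthMap);
//       if (waitKey(30) >= 0) break;
//   }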
int grab_frame(VideoCapture& capture, Mat& img, char* filename) {

  // camera/image setup
  if (!cv_init_) {
    if (filename != NULL) {
      capture.open(filename);
    } else {
      float scale = 0.52;//0.606;
      int w = 640 * scale;
      int h = 480 * scale;
      capture.open(0); //try to open
      capture.set(CV_CAP_PROP_FRAME_WIDTH, w);  capture.set(CV_CAP_PROP_FRAME_HEIGHT, h);
    }
    if (!capture.isOpened()) { cerr << "failed to open video device" << endl; return 0; }
    capture.grab();
    capture.retrieve(img);
    if (img.empty()) { cerr << "failed to load first image" << endl; return 0; }
    printf(" img = %d x %d \n", img.rows, img.cols);
    cv_init_ = 1;
  }

  // get frames
  capture.grab();
  capture.retrieve(img);

  if (waitKey(10) >= 0) { return 0; }
  else { return 1; }
}
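// A minimal usage sketch for grab_frame(); the demo wrapper, loop and window
// name are assumptions (grab_frame returns 0 once a key is pressed):
int demo_grab(char* filename) {
  VideoCapture capture;
  Mat img;
  while (grab_frame(capture, img, filename)) {
    imshow("frame", img); // grab_frame's waitKey(10) services the window events
  }
  return 0;
}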
int main(int argc, char *argv[]){

  ros::init(argc,argv,"aruco_test");
  ros::NodeHandle nh;

  namedWindow("detection_result");
  startWindowThread();
  namedWindow("board");
  startWindowThread();

  /*  Mat distCoeffs = (Mat_<float>(1,5) << 0.182276,-0.533582,0.000520,-0.001682,0.000000);
  Mat camMatrix  = (Mat_<float>(3,3) << 
		    743.023418,0.000000,329.117496,
		    0.000000,745.126083,235.748102,
		    0.000000,0.000000,1.000000);*/

  
  VideoCapture input;
  input.open(0);
  if (!input.isOpened()) { cerr << "failed to open camera" << endl; return -1; }
  Mat boardImg;
  createBoard(boardImg);

  imshow("board",boardImg);


  while(input.grab() && ros::ok()){
    Mat image,result;
    input.retrieve(image);
    process(image,result);
    imshow("detection_result",result);
  } 
  return 0;
}
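// process() and createBoard() are project helpers not shown here. A minimal
// sketch of what process() might do, assuming the opencv_contrib aruco module
// (the function name and dictionary choice are assumptions):
void process_sketch(const Mat &image, Mat &result) {
    static Ptr<aruco::Dictionary> dictionary =
        aruco::getPredefinedDictionary(aruco::DICT_6X6_250); // assumed dictionary
    vector<int> ids;
    vector<vector<Point2f>> corners;
    aruco::detectMarkers(image, dictionary, corners, ids); // find markers in the frame
    image.copyTo(result);
    if (!ids.empty())
        aruco::drawDetectedMarkers(result, corners, ids);   // overlay the detections
}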
Example #4
int main( /*int argc, char* argv[]*/ ){
    VideoCapture capture;
    capture.open(CV_CAP_OPENNI);
    capture.set( CV_CAP_PROP_OPENNI_REGISTRATION , 0);

    if( !capture.isOpened() ){
        cout << "Can not open a capture object." << endl;
        return -1;
    }

    namedWindow( "depth", 1 );
    setMouseCallback( "depth", onMouse, 0 );
    for(;;){
        if( !capture.grab() ){
            cout << "Can not grab images." << endl;
            return -1;
        }else{
            Mat depthMap, show;
            capture.retrieve(world, CV_CAP_OPENNI_POINT_CLOUD_MAP);
            if( capture.retrieve( depthMap, CV_CAP_OPENNI_DEPTH_MAP ) ){
                depthMap.convertTo( show, CV_8UC1, 0.05f ); // scale 16-bit mm depth into 8-bit for display
                line(show, startPt, endPt, Scalar(255));
                putText(show, format("distance: %f m", dist), Point(5,15), FONT_HERSHEY_PLAIN, 1, Scalar(255));
                imshow("depth", show);
            }
        }
        if( waitKey( 30 ) >= 0 )    break;
    }
    return 0;
}
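// A minimal sketch of the onMouse callback registered above: it measures the
// metric distance between two clicked pixels using the retrieved point cloud.
// The globals world, startPt, endPt and dist come from the example; the
// callback body itself is an assumption:
void onMouse(int event, int x, int y, int, void*) {
    if (event == EVENT_LBUTTONDOWN) {
        startPt = Point(x, y);
    } else if (event == EVENT_LBUTTONUP) {
        endPt = Point(x, y);
        // CV_CAP_OPENNI_POINT_CLOUD_MAP yields XYZ in meters per pixel (CV_32FC3)
        Vec3f a = world.at<Vec3f>(startPt);
        Vec3f b = world.at<Vec3f>(endPt);
        dist = norm(a - b); // Euclidean distance in meters
    }
}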
int main(int argc, char* argv[])
{
  VideoCapture cap; // network camera stream
  cap.open("http://192.168.1.139:1234/?action=stream");
  if (!cap.isOpened())  // if not success, exit program
  {
    cout << "Cannot open the video cam" << endl;
    return -1;
  }

  clientSocket = socket(PF_INET, SOCK_DGRAM, 0);
  serverAddr.sin_family = AF_INET;
  serverAddr.sin_port = htons(33333);
  serverAddr.sin_addr.s_addr = inet_addr("192.168.1.139");
  memset(serverAddr.sin_zero, '\0', sizeof serverAddr.sin_zero);
  addr_size = sizeof serverAddr;

  while (1)
  {
    cap.grab();
    cap.retrieve(frame);
    adjusted_color_img = brightness_and_contrast(frame);
    color_detection(adjusted_color_img);
    imshow("view", frame);
    waitKey(20);
    usleep(2000);
  }
}
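// The UDP socket above is set up but never used inside the loop; a minimal
// sketch of sending a detection result back to the server (the function name
// and message format are assumptions):
void sendDetection(int x, int y) {
    char buf[64];
    int len = snprintf(buf, sizeof(buf), "%d,%d", x, y);
    sendto(clientSocket, buf, len, 0,
           (struct sockaddr*)&serverAddr, addr_size); // fire-and-forget datagram
}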
int getDepthImage(VideoCapture &capture, Mat &depth_image){

	//depth image
	if( !capture.grab() )
	{
		cout << "Can not grab images." << endl;
		return -1;
	}
	else if( capture.retrieve( depth_image, CAP_OPENNI_DEPTH_MAP ) )
	{
		const float scaleFactor = 0.05f;
		depth_image.convertTo( depth_image, CV_8UC1, scaleFactor ); // 16-bit mm depth -> 8-bit
		//imshow( "depth map", depth_image );
	}

	return 0;
}
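// A minimal usage sketch for getDepthImage(), assuming an OpenNI-capable build
// of OpenCV (the demo wrapper and window name are assumptions):
int demoDepth() {
    VideoCapture capture(CAP_OPENNI); // Kinect/Xtion style device
    if (!capture.isOpened()) return -1;
    Mat depth;
    while (getDepthImage(capture, depth) == 0 && !depth.empty()) {
        imshow("depth map", depth);
        if (waitKey(30) >= 0) break;
    }
    return 0;
}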
void vIdle() {
    if (TheCaptureFlag) {
        // capture image
        TheVideoCapturer.grab();
        TheVideoCapturer.retrieve(TheInputImage);
        TheUndInputImage.create(TheInputImage.size(), CV_8UC3);
        // OpenCV works in BGR by default, so convert to RGB because OpenGL on Windows prefers RGB
        cv::cvtColor(TheInputImage, TheInputImage, CV_BGR2RGB);
        // remove distortion from the image
        cv::undistort(TheInputImage, TheUndInputImage, TheCameraParams.CameraMatrix,
                      TheCameraParams.Distorsion);
        // detect markers
        MDetector.detect(TheUndInputImage, TheMarkers);
        // Detection of the board
        TheBoardDetected.second = TheBoardDetector.detect(
            TheMarkers, TheBoardConfig, TheBoardDetected.first, TheCameraParams, TheMarkerSize);
        // check the speed by calculating the mean speed of all iterations
        // resize the image to the size of the GL window
        cv::resize(TheUndInputImage, TheResizedImage, TheGlWindowSize);
        // create mask. It is a synthetic mask consisting of a simple rectangle, just to show an example
        // of OpenGL with a mask
        TheMask = createSyntheticMask(TheResizedImage); // lets create with the same size of the resized
                                                        // image, i.e. the size of the opengl window
    }
    glutPostRedisplay();
}
Example #8
    JNIEXPORT jboolean JNICALL Java_org_opencv_highgui_VideoCapture_n_1grab
    (JNIEnv* env, jclass, jlong self)
    {
        try {
#ifdef DEBUG
            LOGD("highgui::VideoCapture_n_1grab()");
#endif // DEBUG
            VideoCapture* me = (VideoCapture*) self; //TODO: check for NULL
            bool _retval_ = me->grab(  );

            return _retval_;
        } catch(const cv::Exception& e) {
#ifdef DEBUG
            LOGD("highgui::VideoCapture_n_1grab() caught cv::Exception: %s", e.what());
#endif // DEBUG
            jclass je = env->FindClass("org/opencv/core/CvException");
            if(!je) je = env->FindClass("java/lang/Exception");
            env->ThrowNew(je, e.what());
            return 0;
        } catch (...) {
#ifdef DEBUG
            LOGD("highgui::VideoCapture_n_1grab() catched unknown exception (...)");
#endif // DEBUG
            jclass je = env->FindClass("java/lang/Exception");
            env->ThrowNew(je, "Unknown exception in JNI code {highgui::VideoCapture_n_1grab()}");
            return 0;
        }
    }
void video_thread_CL(void* pParams)
{
	FaceDetector *faceDetector;
	if (threadUseCL){
		faceDetector = (FaceDetectorCL*)pParams;
	}
	else{
		faceDetector = (FaceDetectorCpu*)pParams;
	}
	
	std::string name = faceDetector->name();

	//HAAR_EYE_TREE_EYEGLASSES_DATA
	//HAAR_EYE_DATA
	//HAAR_FRONT_FACE_DEFAULT_DATA
	//LBP_FRONTAL_FACE
	//LBP_PROFILE_FACE
	faceDetector->load(HAAR_FRONT_FACE_DEFAULT_DATA);

	VideoCapture videoCapture;
	cv::Mat frame, frameCopy, image;

	videoCapture.open(faceDetector->videoFile().c_str());
	if (!videoCapture.isOpened()) { cout << "No video detected" << endl; return; }

	if (imshowFlag) { cv::namedWindow(name.c_str(), 1); }

	if (videoCapture.isOpened())
	{
		cout << "In capture ..." << name.c_str() << endl;
		while (videoCapture.grab())
		{
			if (!videoCapture.retrieve(frame, 0)) { break; }

			faceDetector->setSrcImg(frame, 1);
			faceDetector->doWork();
			if (imshowFlag){ cv::imshow(name.c_str(), faceDetector->resultMat()); }
			
			std::vector<cv::Rect> &faces_result = faceDetector->getResultFaces();
			std::cout << "face --" << name.c_str() << std::endl;
			for (size_t i = 0; i < faces_result.size(); ++i){
				std::cout << faces_result.at(i).x << ", " << faces_result.at(i).y << std::endl;
			}
			
			if (waitKey(10) >= 0){
				videoCapture.release();
				break;
			}

			Sleep(1);
		}
	}
	
	if (imshowFlag) { cv::destroyWindow(name.c_str()); }
	finishTaskFlag++;
	_endthread();
	return;

}
Example #10
void* imageTakingThread(void* parameters) {
    vidCap.set(CV_CAP_PROP_FRAME_WIDTH, 160);
    vidCap.set(CV_CAP_PROP_FRAME_HEIGHT, 120);
    // grab continuously so the driver buffer always holds the newest frame;
    // a consumer thread calls retrieve() when it actually needs an image
    while(1)
    {
        vidCap.grab();
    }
    return NULL; // unreachable, but keeps the thread signature honest
}
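// A minimal consumer sketch for the grab-only thread above: retrieve() decodes
// the most recently grabbed frame without pulling a new one. Note VideoCapture
// is not thread-safe, so real code would guard grab()/retrieve() with a mutex.
Mat latestFrame() {
    Mat img;
    vidCap.retrieve(img);
    return img;
}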
Example #11
void threadGrab1(){
        cap1.grab();
        cap1.retrieve(img1);
        Mat Tmp = img1.t(); // transpose + horizontal flip = rotate 90 degrees clockwise
        flip(Tmp,img1,1);
        rightImgs.push_back(img1);
        //outputVideocap1.write(img1);
}
Example #12
void threadGrab0(){
        cap0.grab();
        cap0.retrieve(img0);
        Mat Tmp = img0.t(); // transpose + vertical flip = rotate 90 degrees counter-clockwise
        flip(Tmp,img0,0);
        leftImgs.push_back(img0);
        //outputVideocap0.write(img0);
}
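// A minimal sketch of driving both grab threads so the stereo pair is captured
// close together in time (std::thread is an assumption; the original may use a
// different threading API; requires <thread>):
void grabStereoPair() {
    std::thread right(threadGrab1);
    std::thread left(threadGrab0);
    right.join(); // both images are now in rightImgs / leftImgs
    left.join();
}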
Example #13
/**
 * parameter cam - camera that will be used
 * parameter src - matrix the grabbed frame is retrieved into
 */
void checkFeed(VideoCapture &cam, Mat &src){
    if(!cam.isOpened()){
        cout << "Camera not detected.";
    }
    if (cam.grab()){
        if (!cam.retrieve(src)){
            cout << "Could not retrieve.";
        }
    }
}
Example #14
 bool grab()
 {
    bool bSuccess = camera_.grab(); // grab a new frame from the video stream
    if (!bSuccess) // if not successful, report it and give up
    {
        printf("Cannot grab a frame from video stream in camera %d \n", camNum_);
        return false;
    }
    printf("Grabbed a frame from video stream in camera %d \n", camNum_);
    return true;
 }
bool getNextFrame(VideoCapture& video, Mat& frame, int& framenum, int jump = 1)
{
	// firstGot should regulate itself so it'll reset when a video runs out of frames
	
	bool moreFrames = true;
	if(firstGot) // not first frame
	{
		bool val1 = true;
		for(int i = 0; i < jump; i++)
			if(!video.grab())
				val1 = false;
		bool val2 = video.retrieve(frame);
		framenum += jump;
		if(!val1 || !val2)
		{
			firstGot = false; // this means video ended
			moreFrames = false;
		}
	}
	else // first frame
	{
		bool val = video.grab();
		firstGot = true;
		if(!val)
		{
			printf("first frame val failed on grab\n");
			firstGot = false;
			moreFrames = false;
		}
		val = video.retrieve(frame);
		if(!val)
		{
			printf("first frame failed on retreive\n");
			firstGot = false;
			moreFrames = false;
		}
		framenum++;
	}
	return moreFrames;
}
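// A minimal usage sketch: step through a video two frames at a time
// ("video.avi", the wrapper and the window name are assumptions):
void walkVideo() {
	VideoCapture video("video.avi");
	Mat frame;
	int framenum = 0;
	while (getNextFrame(video, frame, framenum, 2)) {
		imshow("frame", frame);
		if (waitKey(1) >= 0) break;
	}
}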
Example #16
bool getVideoDescriptor(const string &_videoLocation, vector<DescriptorPtr> &_outputDescriptor, const DescType &_type, const int &_skipFrames, const int param)
{
	bool statusOk = true;

	if (_skipFrames < 0)
	{
		cout << "ERROR: number of frames to skip\n";
		statusOk = false;
	}
	else
	{
		VideoCapture capture;
		capture.open(_videoLocation);
		if (!capture.isOpened())
		{
			cout << "ERROR: Can't open target video " << _videoLocation << endl;
			statusOk = false;
		}
		else
		{
			_outputDescriptor.clear();
			double totalFrames = capture.get(CV_CAP_PROP_FRAME_COUNT);

			cout << "Total frames in query: " << totalFrames << "\n";

			Mat frame, grayFrame;
			int k = 0;
			for (int j = 0; j < totalFrames; j++)
			{
				if (!capture.grab() || !capture.retrieve(frame))
				{
					cout << "ERROR: Can't get new frame. Finishing loop.\n";
					statusOk = false;
					break;
				}

				if (k == _skipFrames)
				{
					cvtColor(frame, grayFrame, COLOR_BGR2GRAY);
					_outputDescriptor.push_back(DescriptorPtr(new Descriptor(grayFrame, j, _type, param)));

					k = -1;
				}
				k++;
			}
		}
	}

	return statusOk;
}
void CameraStreamer::captureFrame(int index)
{
    VideoCapture *capture = camera_capture[index];
    while (true)
    {
        Mat frame;
        //Grab frame from camera capture
        capture->grab();
        capture->retrieve(frame, CV_CAP_OPENNI_BGR_IMAGE);
        // put the frame into this camera's queue
        frame_queue[index]->push(frame);
        // release our local reference; the queue keeps its own copy alive
        frame.release();
    }
}
Example #18
extern "C" JNIEXPORT void JNICALL Java_narl_itrc_vision_CapVidcap_implFetch(
	JNIEnv* env,
	jobject thiz,
	jobject objCamera
){
	PREPARE_CONTEXT;
	PREPARE_CAMERA;

	VideoCapture* vid = (VideoCapture*)(env->GetLongField(thiz,f_cntx));
	if(!vid->grab()){
		return;
	}
	Mat img;
	vid->retrieve(img);

	FINISH_CAMERA(img);
}
void vIdle()
{
    if (TheCaptureFlag) {
        //capture image
        TheVideoCapturer.grab();
        TheVideoCapturer.retrieve( TheInputImage);
        TheUndInputImage.create(TheInputImage.size(),CV_8UC3);
        //convert the default BGR channel order to RGB because OpenGL on Windows does not read BGR images properly
        cv::cvtColor(TheInputImage,TheInputImage,CV_BGR2RGB);
        //remove distortion from the image
        cv::undistort(TheInputImage,TheUndInputImage, TheCameraParams.CameraMatrix, TheCameraParams.Distorsion);
        //detect markers
        PPDetector.detect(TheUndInputImage,TheMarkers, TheCameraParams.CameraMatrix,Mat(),TheMarkerSize,false);
        //resize the image to the size of the GL window
        cv::resize(TheUndInputImage,TheResizedImage,TheGlWindowSize);
    }
    glutPostRedisplay();
}
int captureImages (int camera) {
  char key = 0;

  int numSnapshot = 1;
  std::string snapshotFilename = "1";

  cout << "Press 's' to take snapshots" << std::endl;
  cout << "Press 'Esc' to finish" << std::endl;

  TheVideoCapturer.open(camera);	//0 -> built-in laptop camera --- 1 -> external webcam

  /**
   * WITH THE CAMERAS TESTED, NONE OF THESE PARAMETERS COULD BE CHANGED
   */
  /*TheVideoCapturer.set(CV_CAP_PROP_FPS, 25);
  std::cout <<  TheVideoCapturer.get(CV_CAP_PROP_EXPOSURE) << std::endl;
  bool b = TheVideoCapturer.set(CV_CAP_PROP_EXPOSURE, 10);
  std::cout <<  TheVideoCapturer.get(CV_CAP_PROP_EXPOSURE) << " bool: " << b << " " << std::endl;*/


  if (!TheVideoCapturer.isOpened()) {
    cerr << "Could not open video" << std::endl;
    return -1;
  }

  while (key!=27 && TheVideoCapturer.grab()) {
    TheVideoCapturer.retrieve(bgrMap);
    cvtColor(bgrMap, bgrMap, CV_BGR2GRAY);

    imshow("BGR image", bgrMap);

    if (key == 's') {
      imwrite("Data/Captures/" + snapshotFilename + ".jpg", bgrMap);
      numSnapshot++;
      snapshotFilename = static_cast<std::ostringstream*>(&(std::ostringstream() << numSnapshot))->str();
      //cout << "Capture saved.";
        }

    key = waitKey(20);
  }
  cvDestroyWindow("BGR image"); //Destroy Window

  return numSnapshot;
}
Example #21
ImagePacket getFrames(VideoCapture &capture) {
	Mat depthMap;
	Mat color;
	Mat uvMap;

	// one grab, then retrieve each Intel PerC stream from the same frame
	capture.grab();

	capture.retrieve( depthMap, CV_CAP_INTELPERC_DEPTH_MAP );
	transpose(depthMap, depthMap);
		
	capture.retrieve(color, CV_CAP_INTELPERC_IMAGE );
	transpose(color, color);		

	capture.retrieve(uvMap, CV_CAP_INTELPERC_UVDEPTH_MAP);
	//transpose(uvMap, uvMap);

	return ImagePacket(color, depthMap, uvMap);
}
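// A minimal usage sketch for getFrames(), assuming an Intel Perceptual
// Computing capable build of OpenCV (the wrapper and the ImagePacket member
// names are assumptions):
void demoPerC() {
	VideoCapture capture(CV_CAP_INTELPERC);
	if (!capture.isOpened()) return;
	ImagePacket packet = getFrames(capture); // color, depth and UV map from one grab
	imshow("color", packet.color);           // member name is an assumption
	waitKey(0);
}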
Example #22
static void vIdle()
{
  if (capture)
  {
    //capture image
    vCapturer.grab();
    vCapturer.retrieve( inImg);
    undImg.create(inImg.size(),CV_8UC3);
    //convert the default BGR channel order to RGB because OpenGL on Windows
    //does not read BGR images properly
//    cv::cvtColor(inImg,inImg,CV_BGR2RGB);
    //remove distortion from the image
    cv::undistort(inImg,undImg, camParams.getCamMatrix(), camParams.getDistor());
    //detect markers
    mDetector.detect(undImg,markers, camParams.getCamMatrix(),Mat(),msiz->dval[0]);
    //resize the image to the size of the GL window
    cv::resize(undImg,resImg,glSize);
  }
  glutPostRedisplay();
}
Example #23
bool
readNextFrame(VideoCapture &reader, Mat &mat)
{
    if (reader.isOpened())
    {
        if (!reader.grab())
        {
            std::cerr << "Frame was not grabbed successfully for device:\n";
            return false;
        }
        if (!reader.retrieve(mat))
        {
            std::cerr << "Frame was not decoded successfully for device:\n";
            return false;
        }
    } else
    {
        return false;
    }
    return true;
}
int getBGRImage(VideoCapture &capture, Mat &bgr_image){

	//bgr image
	if( !capture.grab() )
	{
		cout << "Can not grab bgr images." << endl;
		return -1;
	}
	else if( capture.retrieve( bgr_image, CAP_OPENNI_BGR_IMAGE ) )
	{
		//imshow( "bgr map", bgr_image );
	}

	return 0;
}
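// Note that getDepthImage() and getBGRImage() each call grab(), so calling them
// back to back returns depth and color from *different* frames. A minimal
// sketch of fetching a synchronized pair with one grab and two retrieves
// (the function name is an assumption):
int getDepthAndBGR(VideoCapture &capture, Mat &depth, Mat &bgr) {
	if (!capture.grab()) return -1;
	capture.retrieve(depth, CAP_OPENNI_DEPTH_MAP); // depth from this grab
	capture.retrieve(bgr, CAP_OPENNI_BGR_IMAGE);   // color from the same grab
	return 0;
}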
void vIdle()
{
    if (TheCaptureFlag) {
        //capture image
        TheVideoCapturer.grab();
        TheVideoCapturer.retrieve( TheInputImage);
        TheUndInputImage.create(TheInputImage.size(),CV_8UC3);
        //OpenCV works in BGR by default, so convert to RGB because OpenGL on Windows prefers RGB
        cv::cvtColor(TheInputImage,TheInputImage,CV_BGR2RGB);
        //remove distortion from the image
        cv::undistort(TheInputImage,TheUndInputImage, TheCameraParams.CameraMatrix,TheCameraParams.Distorsion);
        //detect markers
        MDetector.detect(TheUndInputImage,TheMarkers,TheCameraParams.CameraMatrix,Mat(),TheMarkerSize);
        //Detection of the board
        TheBoardDetected.second=TheBoardDetector.detect( TheMarkers, TheBoardConfig,TheBoardDetected.first, TheCameraParams,TheMarkerSize);
        //check the speed by calculating the mean speed of all iterations
        //resize the image to the size of the GL window
        cv::resize(TheUndInputImage,TheResizedImage,TheGlWindowSize);
    }
    glutPostRedisplay();
}
Example #26
void CameraEngine::run()
{
    qDebug() << "camera thread started";
    VideoCapture capture;
    capture.open(m_camera);
    if(!capture.isOpened()){
        qDebug() << "ERROR: could not open camera capture" << m_camera;
    }
    qDebug() << "   Frame Size: " << capture.get(CV_CAP_PROP_FRAME_WIDTH) << capture.get(CV_CAP_PROP_FRAME_HEIGHT);
    m_frameSize = Size(capture.get(CV_CAP_PROP_FRAME_WIDTH), capture.get(CV_CAP_PROP_FRAME_HEIGHT));

    Mat image;
    while(1){
        // check if we should stop the thread...
        m_stopMutex.lock();
        if(m_stop){
            m_stop=false;
            m_stopMutex.unlock();
            break;
        }
        m_stopMutex.unlock();

        // Capture frame (if available)
        if (!capture.grab())
            continue;

        if(!capture.retrieve(image)){
            qDebug() << "ERROR: could not read image image from camera.";
        }
        emit updateImage(image);
    }
    qDebug() << "camera thread stopped";
    capture.release();
    if(capture.isOpened()){
        qDebug() << "could not release capture";
    }
}
    /*override*/ void* operator()(void*)
	{
		if (isVideo)
		{
			Mat* _retMat=new Mat();
			if (!capture->grab())
			{
				delete _retMat;
				return NULL;
			} else
			{
				capture->retrieve(*_retMat);
				frame=_retMat;
				if (isShow && frame)
				{
					imshow("Source", *frame);
				}
				return _retMat;
			}
		} else
		{
			if (frame)
			{
				Mat* _retMat=new Mat(*frame);
				//_retMat=frame;
				if (isShow)
				{
					imshow("Source", *frame);
				}
				frame=NULL;
				return _retMat;
			} else
			{
				return NULL;
			}
		}
	}
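// This operator()(void*) returning NULL at end-of-stream looks like a classic
// tbb::pipeline input filter. A minimal sketch of running it, assuming the
// enclosing class derives from tbb::filter (the filter names are assumptions):
//
//   tbb::pipeline p;
//   p.add_filter(sourceFilter);   // this capture filter
//   p.add_filter(processFilter);  // downstream processing stage
//   p.run(4);                     // up to 4 frames in flight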
void doOpenNI(
    VideoCapture&capture,Mat&depthOut,
    Mat&rgbImage)
{
    Mat depthMap, depthValid;

    capture.grab();
    capture.retrieve( depthMap, CV_CAP_OPENNI_DEPTH_MAP );
    capture.retrieve( rgbImage, CV_CAP_OPENNI_BGR_IMAGE );
    capture.retrieve( depthValid, CV_CAP_OPENNI_VALID_DEPTH_MASK);

    // get circles
    //vector<Vec3f> circles;
    //HoughCircles(depthMap,circles, CV_HOUGH_GRADIENT, 2,
    //	 depthMap.rows/4, 200, 100);
    //for(int iter = 0; iter < circles.size(); iter++)
    //{
    //  Point center(cvRound(circles[iter][0]),cvRound(circles[iter][1]));
    //  int radius = cvRound(circles[iter][2]);
    //  circle(rgbImage, center, radius, Scalar(0,0,255), 3, 8, 0);
    //}

    // normalize the depth image
    double depth_min, depth_max;
    //cout << "min = " << depth_min << " max = " << depth_max << endl;
    depthMap.convertTo(depthMap,cv::DataType<float>::type);
    depthMap.setTo(numeric_limits<float>::quiet_NaN(),1-depthValid);
    //cout << depthMap << endl;

    // get edges
    //Mat depthEdges = autoCanny(255*depthMap);
    //imshow("Captured: Depth edges",depthEdges);

    depthOut = depthMap/10; // convert mm to cm
    depthOut = fillDepthHoles(depthOut);
}
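// A minimal usage sketch for doOpenNI() (the wrapper and window name are
// assumptions):
void demoOpenNI() {
    VideoCapture capture(CV_CAP_OPENNI);
    if (!capture.isOpened()) return;
    Mat depth, rgb;
    for (;;) {
        doOpenNI(capture, depth, rgb); // depth comes back in cm with holes filled
        imshow("Captured: RGB", rgb);
        if (waitKey(30) >= 0) break;
    }
}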
void detectMarker(bool checkWrite)
{
	auto dictionaryId = 10;
	float markerLength = 0.04;

	auto detectorParams = aruco::DetectorParameters::create();

	detectorParams->doCornerRefinement = true; // do corner refinement in markers

	auto camId = 0;

	auto dictionary =
		aruco::getPredefinedDictionary(aruco::PREDEFINED_DICTIONARY_NAME(dictionaryId));

	Mat camMatrix, distCoeffs;
	auto readOk = readCameraParameters("camera.yaml", camMatrix, distCoeffs);
	if (!readOk) {
		cerr << "Invalid camera file" << endl;
		return; // pose estimation is meaningless without calibration
	}

	VideoCapture inputVideo;
	inputVideo.open(camId);
	auto waitTime = 10;

	double totalTime = 0;
	auto totalIterations = 0;
	
	double theta;
	void (*wf)(double mean, double stdDev);
	wf = &writeToFile;
	vector<double> angles;

	while (inputVideo.grab()) {
		Mat image, imageCopy;
		inputVideo.retrieve(image);

		auto tick = double(getTickCount());

		vector< int > ids;
		vector< vector< Point2f > > corners, rejected;
		vector< Vec3d > rvecs, tvecs;

		// detect markers and estimate pose
		aruco::detectMarkers(image, dictionary, corners, ids, detectorParams, rejected);
		if (ids.size() > 0)
			aruco::estimatePoseSingleMarkers(corners, markerLength, camMatrix, distCoeffs, rvecs,
			tvecs);

		// draw results
		image.copyTo(imageCopy);
		if (ids.size() > 0) {
			aruco::drawDetectedMarkers(imageCopy, corners, ids);

			for (unsigned int i = 0; i < ids.size(); i++) {
				aruco::drawAxis(imageCopy, camMatrix, distCoeffs, rvecs[i], tvecs[i],
					markerLength * 0.5f);
				cout << "y: " << rvecs[i][2] * 57.2958 << " x :" << rvecs[i][0] * 57.2958 << " z: " << rvecs[i][1] * 57.2958 << endl;
				angles.push_back(rvecs[i][2]);
			}

			if (angles.size() == 50 && checkWrite == true)
			{
				doStatistics(angles, wf);
				break;
			}
		}

		imshow("out", imageCopy);
		auto key = char(waitKey(waitTime));
		if (key == 27) break;
	}
}
Example #30
int main(){
	//Dimensions of Capture window
	int scale = 1;
	int width = 640/scale;
	int height = 480/scale;
	int lineSize;
	unsigned int start_time,stop_time;
	//Open capture device
	int device = 0; //assume we want first device

	bool gui = true;
	bool record = false;

	//create video capture device, set capture area
	VideoCapture capture = VideoCapture(device); // constructor already opens the device
	capture.set(CAP_PROP_FRAME_WIDTH,width);
	capture.set(CAP_PROP_FRAME_HEIGHT,height);


	//create recording object
	VideoWriter *recorder = NULL;
	//recorder = new VideoWriter("test.avi", CV_FOURCC('D','I','V','X'), 30, Size(width,height));
	if (record && (recorder == NULL || !recorder->isOpened())){ // never dereference the null pointer
		return 0;
	}


	//Construct GUI object
	DebugGUI myGUI = DebugGUI(gui);

	//create image processing objects
	LineFinder imgproc = LineFinder(myGUI.getHSV(),scale);
	//imgproc.configWebcam("line");
	if(capture.isOpened()){  //check if we succeeded
		Mat raw;
		//main loop
		while(true){
			start_time = GetTimeMs64();

			//Pull a frame from the camera to the raw image
			// capture the current frame

			if (!capture.grab()){
				break;
			}
			capture.retrieve(raw); // operator>> here would grab again and skip the frame just grabbed

			if (gui){
				imgproc.setFrame(raw.clone());
			}else{
				imgproc.setFrame(raw);
			}
			imgproc.setHSV(&myGUI);
			/*//imgproc.getGray();
			imgproc.thresholdHSV();
			imgproc.fillHoles();
			//imgproc.findObjects();
			//imgproc.printBiggestObject(raw)
			imgproc.findLines();
			double size =  imgproc.calculateBestGradient();
			LineObject* drawLine = imgproc.calculateErrorLine(height,width);
			if (drawLine != 0){
				lineSize = drawLine->size();
			}else{
				lineSize = 0;
			}
			if (gui){
				imgproc.drawErrorLine(raw,height,width);
				imgproc.printLines(raw);
			}
			//print (1/(time2-time1))

#ifdef time
			stop_time = GetTimeMs64();
			cout << "FPS: "  <<  1000/(stop_time - start_time) << endl;
#else
			cout << "Gradient: " << size << " " << "Offset: " << lineSize  << endl;
#endif
			*/
			if (gui){
				imshow("Raw",raw);
			}
			if (record){
				recorder->write(raw);
			}
			if(waitKey(30) >= 0){
				return 0;
			}
		}
	}