Example No. 1
void* camera(void* arg) {
	//pFormatCtx=(AVFormatContext *)arg;
	int key; // cv::waitKey returns an int; a plain char may be unsigned and never compare equal to -1
	drawing=false;
	Ball.roll = Ball.pitch = Ball.gaz = Ball.yaw = 0;
	pthread_mutex_init(&mutexVideo, NULL);
	liste.suivant=NULL;
#if output_video == ov_remote_ffmpeg
	pthread_t ii;
	pthread_create(&ii, NULL, getimg, NULL);
#else	
	VideoCapture cap(0); // capture video from the webcam
#endif



#if output_video != ov_remote_ffmpeg

	if (!cap.isOpened()) {
		cout << "Impossible de lire le flux de la camera" << endl;
		return NULL;
	}
	Mat frame;
	cap >> frame;
	fSize.width = frame.cols;
	fSize.height = frame.rows;
#endif

	// Initialize the windows
	namedWindow(winDetected, 1);
	namedWindow(winOutputVideo, 1);

	// Create a black image the size of our tmp image
	Mat imgLines = Mat::zeros(fSize.height, fSize.width, CV_8UC3);

	while (true) {

#if output_video != ov_remote_ffmpeg
		bool bSuccess = cap.read(imgOriginal); // grab a new frame
		if (!bSuccess) {
			cout << "Impossible de lire le flux video" << endl;
			break;
		}
#else
		pthread_mutex_lock(&mutexVideo);
		memcpy(img->imageData, pFrameBGR->data[0], pCodecCtx->width * ((pCodecCtx->height == 368) ? 360 : pCodecCtx->height) * sizeof(uint8_t) * 3);
		pthread_mutex_unlock(&mutexVideo);
		imgOriginal = cv::cvarrToMat(img, true);
#endif
		pthread_t mtId,ocId;
		// Launch the two tracking threads
		pthread_create(&mtId, NULL, &matchTemplate, NULL);
		pthread_create(&ocId, NULL, &opencv, NULL);
		
		pthread_join(mtId,NULL);
		pthread_join(ocId,NULL);

		// Combine and interpret the results of the two trackers
		Ball.setRealPos();

		// Draw the reference overlay
		imgLines.setTo(Scalar(255, 255, 255));
		drawCross(imgLines, fSize.width / 2, fSize.height / 2, Scalar(0, 0, 255));
		drawCross(imgLines, posX, posY, Scalar(0, 255, 0));

		imgOriginal = imgOriginal & imgLines; // blend the overlay into the output frame

		// Display the windows
		imshow(winDetected, imgDetection);			// image with the mask applied
		//imshow(winRepere, imgLines);				// reference overlay
		imshow(winOutputVideo, imgOriginal);		// original image
		string Action = "Mouvement a effectuer : ";
		ObjCoord tmp = Ball.getRealPos();
		cout << "x " << tmp.Xcoord << " y " << tmp.Ycoord << " z " << tmp.Zcoord << endl;
		/*
		if(tmp.Zcoord == -1){
			Action += "Recule, "; Ball.pitch = 0.05f;
		}
		else if(tmp.Zcoord == 1){
			Action += "Avance, "; Ball.pitch = -0.05f;
		}
		else
		{
			Ball.pitch = 0;
		}
		*/
		if (tmp.Xcoord <= 35.0 && tmp.Xcoord != 0) {
			Ball.yaw = -0.2f;
			Action += "Gauche ("+ to_string(Ball.yaw)+"%), ";
		} else if (tmp.Xcoord >= 65.0) {
			Ball.yaw = 0.2f;
			Action += "Droite ("+ to_string(Ball.yaw)+"%), ";
		}
		else
		{
			Ball.yaw = 0;	
		}
		if (tmp.Ycoord >= 65.0) {
			Action += "Descendre";  Ball.gaz = -0.2f;
		} else if (tmp.Ycoord <= 35.0 && tmp.Ycoord != 0) {
			Action += "Monter";    Ball.gaz = 0.2f;
		}
		else
		{
			Ball.gaz = 0;
		}
		/*if(Ball.pitch != 0) {
			Ball.roll = Ball.yaw / 2;
			Ball.yaw = 0;
		}*/
		if(tmp.Xcoord == 0 && tmp.Ycoord == 0 && tmp.Zcoord == 0)
		{
			Ball.roll = Ball.pitch = Ball.gaz = Ball.yaw = 0;
			
		}
		if(Ball.pitch == 0)
			AtCmd::sendMovement(0, Ball.roll, Ball.pitch, Ball.gaz, Ball.yaw);
		else
			AtCmd::sendMovement(3, Ball.roll, Ball.pitch, Ball.gaz, Ball.yaw);
		//cout << Action << endl;
		key=waitKey(10);
		if(key == 10)
		{
			enVol=true;
			key=-1;
		}
		else if (key != -1) // any other key quits
		{
			break;
		}
	}
	stopTracking=true;
	destroyAllWindows();
	return NULL;
}
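The drawCross helper used above is not part of the excerpt. A minimal sketch of what it plausibly does, assuming it just draws a small cross centered at (x, y) with cv::line; the 10 px arm length and 2 px thickness are guesses:

// Hypothetical reconstruction of the drawCross helper used above
static void drawCross(Mat& img, int x, int y, const Scalar& color)
{
	const int d = 10; // half-length of each arm (assumed)
	line(img, Point(x - d, y), Point(x + d, y), color, 2);
	line(img, Point(x, y - d), Point(x, y + d), color, 2);
}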
Example No. 2
int main( int argc, char** argv )
{
	VideoCapture cap(0); //capture the video from web cam

	if ( !cap.isOpened() )  // if not success, exit program
	{
		cout << "Cannot open the web cam" << endl;
		return -1;
	}

	namedWindow("Control", CV_WINDOW_AUTOSIZE); //create a window called "Control"

	int iLowH = 0;
	int iHighH = 179;

	int iLowS = 0;
	int iHighS = 255;

	int iLowV = 0;
	int iHighV = 255;

	//Create trackbars in "Control" window
	cvCreateTrackbar("LowH", "Control", &iLowH, 179); //Hue (0 - 179)
	cvCreateTrackbar("HighH", "Control", &iHighH, 179);

	cvCreateTrackbar("LowS", "Control", &iLowS, 255); //Saturation (0 - 255)
	cvCreateTrackbar("HighS", "Control", &iHighS, 255);

	cvCreateTrackbar("LowV", "Control", &iLowV, 255); //Value (0 - 255)
	cvCreateTrackbar("HighV", "Control", &iHighV, 255);

	while (true)
	{
		Mat imgOriginal;

		bool bSuccess = cap.read(imgOriginal); // read a new frame from video

		if (!bSuccess) //if not success, break loop
		{
			cout << "Cannot read a frame from video stream" << endl;
			break;
		}

		Mat imgHSV;

		cvtColor(imgOriginal, imgHSV, COLOR_BGR2HSV); //Convert the captured frame from BGR to HSV

		Mat imgThresholded;

		inRange(imgHSV, Scalar(iLowH, iLowS, iLowV), Scalar(iHighH, iHighS, iHighV), imgThresholded); //Threshold the image

		//morphological opening (remove small objects from the foreground)
		erode(imgThresholded, imgThresholded, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)) );
		dilate( imgThresholded, imgThresholded, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)) );

		//morphological closing (fill small holes in the foreground)
		dilate( imgThresholded, imgThresholded, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)) );
		erode(imgThresholded, imgThresholded, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)) );

		imshow("Thresholded Image", imgThresholded); //show the thresholded image
		imshow("Original", imgOriginal); //show the original image

		if (waitKey(30) == 27) //wait for 'esc' key press for 30ms. If 'esc' key is pressed, break loop
		{
			cout << "esc key is pressed by user" << endl;
			break;
		}
	}

	return 0;
}
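This example uses the legacy C-API names (cvCreateTrackbar, CV_WINDOW_AUTOSIZE) from OpenCV 2.x. On newer OpenCV builds the equivalent C++ calls are cv::namedWindow and cv::createTrackbar; a minimal sketch:

// Modern replacements for the legacy trackbar/window calls above
namedWindow("Control", WINDOW_AUTOSIZE);
createTrackbar("LowH", "Control", &iLowH, 179);  // Hue (0 - 179)
createTrackbar("HighH", "Control", &iHighH, 179);
// ...and likewise for the S and V sliders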
Example No. 3
int main(){
    
   
   
    cv::VideoCapture cap(0);
    ThumbTracker tracker;
    ThumbMap map;
    
    std::vector<cv::Point2f> screen;
    screen.push_back(cv::Point2f(0, 0));
    screen.push_back(cv::Point2f(0, 30));
    screen.push_back(cv::Point2f(40, 30));
    screen.push_back(cv::Point2f(40, 0));
    
    cv::Mat templateImg; // the first captured image becomes the template image
    cap >> templateImg;
    Thumbnail templateThumb(templateImg);
    tracker.setTemplateThumb(templateThumb);
    
    // add it to map
    map.addKeyFrame(templateThumb);
    int currentKeyFrameIndex = 0;
    
    cv::Mat pose;
    

    while (true)
    {
        
        
        
        cv::Mat img;
        cap >> img; //get image from camera
        
        
        float finalScore;
        
        struct timeval tv;
        gettimeofday(&tv,NULL);
        unsigned long s0 = 1000000 * tv.tv_sec + tv.tv_usec;
        
        //make thumbnail for current image
        Thumbnail compThumb(img);
        tracker.setCompareThumb(compThumb);
        
        
        //tracking : find the transform between current image and template image
        cv::Mat currentKeyFrameImg = tracker.IteratePose(pose,finalScore);
        
        cv::Scalar color = cv::Scalar(0, 0, 255); //red
        
        
        if(finalScore > 2.0e6){ // tracking failed (the difference between the current and template images is too large)

            // see if some other keyframe matches better
            int best = map.findBestKeyFrame(compThumb, finalScore);
            
            
            if(best != -1 && finalScore < 2.0e6){ // found one; use it as the tracking template
                tracker.setTemplateThumb(*map.getKeyFrame(best));
                currentKeyFrameIndex = best;
                pose = cv::Mat();
            }else{ // nothing found
                pose = cv::Mat();
            }
        }else{  //tracking is OK, draw some information

            //draw pose
            std::vector<cv::Point2f> trans;
            cv::transform(screen, trans, pose);
            for (size_t i = 0; i < screen.size(); ++i) {
                cv::Point2f& r1 = trans[i % 4];
                cv::Point2f& r2 = trans[(i + 1) % 4];
                cv::line(img, r1*16 , r2*16 , color, 3, CV_AA);
            }
            
            //draw thumbnail image
            cv::cvtColor(currentKeyFrameImg, currentKeyFrameImg, cv::COLOR_GRAY2BGR);
            cv::resize(currentKeyFrameImg, currentKeyFrameImg, cv::Size(120,90 ));            
            cv::Rect roi = cv::Rect(10, 10, currentKeyFrameImg.cols, currentKeyFrameImg.rows);
            cv::addWeighted( img(roi), 0.0, currentKeyFrameImg, 1.0, 0.0, img(roi));
            
            //draw current template index
            std::stringstream stream;
            stream<<"KF: "<<currentKeyFrameIndex;
            cv::putText(img, stream.str(),cv::Point(50,50), CV_FONT_HERSHEY_SIMPLEX, 1, cv::Scalar(255,255,255),2);
        }
        
        gettimeofday(&tv,NULL);
        unsigned long s1 = 1000000 * tv.tv_sec + tv.tv_usec;
        std::cout << "one round cost time: " << s1 - s0 << " micro" << std::endl;
        std::cout << "score: " << finalScore << " micro" << std::endl;
        
        imshow ("Image", img);

        int k = cv::waitKey(1);
        if (k == 13){
            //Press Enter to add one keyframe
            map.addKeyFrame(compThumb);
            tracker.setTemplateThumb(compThumb);
            currentKeyFrameIndex = map.getSize()-1;
        }else if(k == 32){
            break; // Press Space to exit
        }
        
    }

}
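The gettimeofday timing above is POSIX-specific. A portable sketch of the same per-round measurement using std::chrono (not part of the original code):

#include <chrono>

auto t0 = std::chrono::steady_clock::now();
// ... per-frame work to be timed ...
auto t1 = std::chrono::steady_clock::now();
auto us = std::chrono::duration_cast<std::chrono::microseconds>(t1 - t0).count();
std::cout << "one round cost time: " << us << " micro" << std::endl;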
Example No. 4
int main(int argc, char* argv[]) {
  std::cout << "GO" << std::endl;
  cv::VideoCapture cap(VIDEO); // open the video source selected by the VIDEO macro
  if(!cap.isOpened())  // check if we succeeded
    return -1;
  
  cv::Mat frame;
  cv::Mat hsv;
  cap >> frame;
  
  Image3D image(frame.cols, frame.rows, NULL);

  Rectangle dummyRect;
  //Marker marker1(false, yellowMin, yellowMax, redMin, redMax, binary);
  Marker marker1(false, yellowRect, redRect, dummyRect);
  PositionMarker pm1;
  //Marker marker2(false, redMin, redMax, yellowMin, yellowMax, binary);
  Marker marker2(false, redRect, yellowRect, dummyRect);
  PositionMarker pm2;
  //Marker marker3(false, greenMin, greenMax, blueMin, blueMax, binary);
  Marker marker3(true, greenRect, blueRect, dummyRect);
  PositionMarker pm3;
  //Marker marker4(false, blueMin, blueMax, greenMin, greenMax, binary);
  Marker marker4(true, blueRect, greenRect, dummyRect);
  PositionMarker pm4;


  std::cout << "start loop" << std::endl;
  int count = 0;
  while(1) {
    cap >> frame; // get a new frame from camera
    cv::cvtColor(frame, hsv, CV_BGR2Luv); // note: this converts to Luv, despite the variable name
    image.setData(hsv.data);
    image.id = count;

    if (count % 20 == 0)
        std::cout << "NEXT POS " << count << std::endl;
    bool result;

    result = marker1.getNextPos(image, pm1);
    if (!result) {
        std::cout << "POS " << count << std::endl;
        std::cout << "NOP (1)" << std::endl;
    }

    result = marker2.getNextPos(image, pm2);
    if (!result) {
        std::cout << "POS " << count << std::endl;
        std::cout << "NOP (2)" << std::endl;
    }

//    result = marker3.getNextPos(image, pm3);
//    if (!result) {
//        std::cout << "POS " << count << std::endl;
//        std::cout << "NOP (3)" << std::endl;
//    }

//    result = marker4.getNextPos(image, pm4);
//    if (!result) {
//        std::cout << "POS " << count << std::endl;
//        std::cout << "NOP (4)" << std::endl;
//    }

//     Image &mask = marker1.masks[0];
//     for (unsigned int im=0; im<mask.height; ++im) {
//       for (unsigned int jm=0; jm<mask.width; ++jm) {
//        if (mask.getValue(im, jm) == 1)
//          cv::line(frame, cv::Point(jm, im), cv::Point(jm, im), cv::Scalar(255, 0, 0));
//       }
//     }

//    Image &mask1 = marker3.masks[0];
//    for (unsigned int im=0; im<mask1.height; ++im) {
//      for (unsigned int jm=0; jm<mask1.width; ++jm) {
//        if (mask1.getValue(im, jm) == 1)
//          cv::line(frame, cv::Point(jm, im), cv::Point(jm, im), cv::Scalar(255, 0, 255));
//      }
//    }

    cv::rectangle(frame, cv::Point(pm1.x - pm1.size/2, pm1.minI), cv::Point(pm1.x + pm1.size/2, pm1.maxI), cv::Scalar(0, 0, 0));
    cv::rectangle(frame, cv::Point(pm1.x - pm1.size/2 + 1, pm1.minI + 1), cv::Point(pm1.x + pm1.size/2 - 1, pm1.maxI - 1), cv::Scalar(0, 0, 0));
    cv::rectangle(frame, cv::Point(pm2.x - pm2.size/2, pm2.minI), cv::Point(pm2.x + pm2.size/2, pm2.maxI), cv::Scalar(0, 0, 0));
    cv::rectangle(frame, cv::Point(pm2.x - pm2.size/2 + 1, pm2.minI + 1), cv::Point(pm2.x + pm2.size/2 - 1, pm2.maxI - 1), cv::Scalar(0, 0, 0));
    cv::rectangle(frame, cv::Point(pm3.x - pm3.size/2, pm3.minI), cv::Point(pm3.x + pm3.size/2, pm3.maxI), cv::Scalar(0, 0, 255));
    cv::rectangle(frame, cv::Point(pm3.x - pm3.size/2 + 1, pm3.minI + 1), cv::Point(pm3.x + pm3.size/2 - 1, pm3.maxI - 1), cv::Scalar(0, 0, 0));
    cv::rectangle(frame, cv::Point(pm4.x - pm4.size/2, pm4.minI), cv::Point(pm4.x + pm4.size/2, pm4.maxI), cv::Scalar(0, 0, 255));
    cv::rectangle(frame, cv::Point(pm4.x - pm4.size/2 + 1, pm4.minI + 1), cv::Point(pm4.x + pm4.size/2 - 1, pm4.maxI - 1), cv::Scalar(0, 0, 0));
//    cv::line(frame, cv::Point(pm1.x - pm1.size/2, 183), cv::Point(pm1.x + pm1.size/2, 211), cv::Scalar(0, 0, 255));
//    cv::line(frame, cv::Point(565, 220), cv::Point(575, 230), cv::Scalar(0, 0, 255));
    cv::imshow("img", frame);
    cv::waitKey(10);
    if (count > 472 && count < 480)
       cv::waitKey(1000);
    // if (count > 2250)
    //   cv::waitKey(2);
    
    count++;
  }
}
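The doubled cv::rectangle calls above emulate a 2 px border by nesting 1 px rectangles; cv::rectangle already takes a thickness argument, so each pair collapses to one call, e.g. for the first marker:

// One call with thickness = 2 replaces each pair of nested rectangles
cv::rectangle(frame, cv::Point(pm1.x - pm1.size/2, pm1.minI),
              cv::Point(pm1.x + pm1.size/2, pm1.maxI), cv::Scalar(0, 0, 0), 2);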
Example No. 5
int main(int argc, char* argv[])
{
	CNN net;

	double time_cost;


	//-------- CNN Initializing --------
	//----------------------------------

	//Read parameters file
	net.readPara(parameter_file);


	//-------- Load Dataset ------------
	//----------------------------------

#ifdef _HANY_NET_WITH_LABEL_NAMES
	ifstream read_label(label_file);
	for(int c = 0; c < net.class_count; c++) {
		string new_label_name;
		read_label >> new_label_name;
		label_list.push_back(make_pair(c, new_label_name));
	}
#endif

#ifdef _HANY_NET_LOAD_MNIST
#ifdef _HANY_NET_PRINT_MSG
	cout << "Loading MNIST dataset..." << endl;
	time_cost = (double)getTickCount();
#endif

	loadMNIST("train-images.idx3-ubyte", "train-labels.idx1-ubyte", net.train_set);
	loadMNIST("t10k-images.idx3-ubyte", "t10k-labels.idx1-ubyte", net.test_set);

#ifdef _HANY_NET_PRINT_MSG
	time_cost = ((double)getTickCount() - time_cost) / getTickFrequency();
	cout << "Load samples done." << endl << "Time cost: " << time_cost << "s." << endl << endl;
#endif
#endif

#ifdef _HANY_NET_TRAIN_FROM_SCRATCH

#ifdef _HANY_NET_LOAD_SAMPLE_FROM_PIC
#ifdef _HANY_NET_PRINT_MSG
	cout << "Loading samples..." << endl;
	time_cost = (double)getTickCount();
#endif

	for(int c = 0; c < net.class_count; c++) {

		for(int i = 0; i < sample_num; i++) {
			string file_name = sample_file_pre + to_string(c) + "_" + to_string(i) + ".jpg";
			Mat img_read = imread(file_name, CV_LOAD_IMAGE_GRAYSCALE);
			if(img_read.data == NULL) {
				break;
			}
			Mat img_nor;
			resize(img_read, img_nor, Size(net.sample_width, net.sample_height));

			net.train_set.push_back(make_pair(img_nor, (uchar)(c)));
		}
	}

#ifdef _HANY_NET_PRINT_MSG
	time_cost = ((double)getTickCount() - time_cost) / getTickFrequency();
	cout << "Load samples done." << endl << "Time cost: " << time_cost << "s." << endl << endl;
#endif
#endif


#ifdef _HANY_NET_CAPTURE_FACE_FROM_CAMERA
#ifdef _HANY_NET_PRINT_MSG
	cout << "Capturing samples..." << endl;
	time_cost = (double)getTickCount();
#endif

	VideoCapture cap_in(0);
	if(!cap_in.isOpened()) {
		cout << "Cannot access camera. Press ANY key to exit." << endl;
		cin.get();
		exit(-1);
	}

	CascadeClassifier cascade_in;
	cascade_in.load(haar_file);

	Mat frame;
	int frame_count = 0;
	int capture_count = 0;
	int class_idx = 0;
	int class_count = 0;
	bool sample_suff = false;
	bool cap_sample = true;

	while(cap_in.read(frame)) {
		capture_count++;

		vector<Rect> faces;
		Mat frame_gray, img_gray;
		cvtColor(frame, frame_gray, CV_BGR2GRAY);
		equalizeHist(frame_gray, img_gray);
		cascade_in.detectMultiScale(img_gray, faces, 1.1, 2, 0, Size(120, 120));

		int face_area = 0;
		int face_idx = 0;

		if(faces.size() > 0) {
			for(int f = 0; f < faces.size(); f++) {
				if(faces[f].area() > face_area) {
					face_area = faces[f].area();
					face_idx = f;
				}
			}

			rectangle(frame, faces[face_idx], Scalar(255, 0, 0), 3);

			if(frame_count % 5 == 0 && cap_sample && !sample_suff) {
				Mat face, face_nor;
				img_gray(faces[face_idx]).copyTo(face);

				resize(face, face_nor, Size(net.sample_width, net.sample_height));

				net.train_set.push_back(make_pair(face_nor, (uchar)class_idx));
				class_count++;
			}
		}

		putText(frame, "Class: " + to_string(class_idx), Point(50, 100), FONT_HERSHEY_SIMPLEX, 1.0, Scalar(0, 255, 255), 2);
		putText(frame, "Sample: " + to_string(class_count), Point(50, 150), FONT_HERSHEY_SIMPLEX, 1.0, Scalar(0, 255, 255), 2);

		if(sample_suff) {
			putText(frame, "Enough samples. Press SPACE.", Point(50, 50), FONT_HERSHEY_SIMPLEX, 1.0, Scalar(0, 255, 255), 2);
		}else {
			putText(frame, "Capturing...", Point(50, 50), FONT_HERSHEY_SIMPLEX, 1.0, Scalar(0, 255, 255), 2);
		}
		if(!cap_sample) {
			putText(frame, "Wait for another person. Press SPACE.", Point(50, 200), FONT_HERSHEY_SIMPLEX, 1.0, Scalar(0, 255, 255), 2);
		}

		imshow(camera_window_name, frame);

		if(class_count >= sample_num) {
			sample_suff = true;
		}

		frame_count++;
		int key = waitKey(20);
		if(key == 27){
			cap_in.release();
			break;
		} else if(key == ' ') {
			if(cap_sample && sample_suff) {
				cap_sample = false;
				continue;
			}
			if(!cap_sample && sample_suff) {
				cap_sample = true;
				sample_suff = false;
				class_idx++;
				class_count = 0;
				continue;
			}
		}
	}

#ifdef _HANY_NET_PRINT_MSG
	time_cost = ((double)getTickCount() - time_cost) / getTickFrequency();
	cout << "Load samples done." << endl << "Time cost: " << time_cost << "s." << endl << endl;
#endif
#endif

#endif


	//-------- CNN Initializing --------
	//----------------------------------

#ifdef _HANY_NET_PRINT_MSG
	cout << "Initializing neural networks..." << endl;
	time_cost = (double)getTickCount();
#endif

	//Initialize CNN with knowledge of samples
	net.initCNN();

#ifdef _HANY_NET_PRINT_MSG
	time_cost = ((double)getTickCount() - time_cost) / getTickFrequency();
	cout << "Total number of samples: " << (int)(net.train_set.size() + net.test_set.size()) << endl;
	cout << "Initializing neural networks done." << endl << "Time cost: " << time_cost << "s." << endl << endl;
#endif


	//Load pre-trained CNN parameters from file and continue to train
//	net.uploadCNN(pretrained_cnn_file);

	//-------- CNN Training ----------
	//--------------------------------

#ifdef _HANY_NET_TRAIN_FROM_SCRATCH
#ifdef _HANY_NET_PRINT_MSG
	cout << "Start training CNN..." << endl;
	time_cost = (double)getTickCount();
#endif

	//Train CNN with train sample set
	net.trainCNN();

#ifdef _HANY_NET_PRINT_MSG
	time_cost = ((double)getTickCount() - time_cost) / getTickFrequency();
	cout << "CNN training done." << endl << "Time cost: " << time_cost << "s." << endl << endl;
#endif

	for(int i = 0; i < net.time_ff.size(); i++) {
		cout << "FeedForward stage " << i << ":  " << net.time_ff[i] << "s" << endl;
	}
	for(int i = 0; i < net.time_bp.size(); i++) {
		cout << "BackPropagation stage " << i << ":  " << net.time_bp[i] << "s" << endl;
	}

	//Draw stage loss graph
	Mat stage_loss_graph = Mat::zeros(600, 1100, CV_8UC3);
	Point2d pt1, pt2;
	pt1 = Point2d(50.0, 50.0);
	for(int stage = 0; stage < net.stage_loss.size(); stage++) {
		pt2 = Point2d(50.0 + 1200.0 / net.stage_loss.size() * stage, 550.0 - 500.0 * net.stage_loss[stage] / net.stage_loss[0]);
		line(stage_loss_graph, pt1, pt2, Scalar(255, 255, 255));
		pt1 = pt2;
	}
	imshow("Stage Loss Graph", stage_loss_graph);
	imwrite("stage_loss_graph.jpg", stage_loss_graph);
	waitKey(10);

#endif


	//-------- Save Trained Network -----
	//-----------------------------------

#ifdef _HANY_NET_TRAIN_FROM_SCRATCH
#ifdef _HANY_NET_PRINT_MSG
	cout << "Dumping trained CNN parameters to file " << pretrained_cnn_file << "..." << endl;
#endif

	//Dump trained CNN parameters to file
	net.downloadCNN(trained_cnn_file);

#ifdef _HANY_NET_PRINT_MSG
	cout << "Dumping trained CNN parameters to file done." << endl << endl;
#endif
#endif


	//-------- Load Pre-trained Network -----
	//---------------------------------------

#ifndef _HANY_NET_TRAIN_FROM_SCRATCH
#ifdef _HANY_NET_PRINT_MSG
	cout << "Loading pre-trained CNN parameters from file " << pretrained_cnn_file << "..." << endl;
#endif

	//Load pre-trained CNN parameters from file
	net.uploadCNN(pretrained_cnn_file);

#ifdef _HANY_NET_PRINT_MSG
	cout << "Loading pre-trained CNN parameters from file done." << endl << endl;
#endif
#endif


	//-------- Predict New Samples-------
	//--------------------------------------

#ifdef _HANY_NET_PREDICT_MNIST
#ifdef _HANY_NET_PRINT_MSG
	cout << "Predicting MNIST test dataset..." << endl;
	time_cost = (double)getTickCount();
#endif

	//Calculate correctness ratio with test samples
	int total_correct_count = 0;
	for(int sample_idx = 0; sample_idx < net.test_set.size(); sample_idx++) {
		vector<Mat> input_sample;
		input_sample.push_back(net.test_set[sample_idx].first);
		vector<Mat> predict_result = net.predictCNN(input_sample);
		if((int)predict_result[0].ptr<uchar>(0)[0] == net.test_set[sample_idx].second) {
			total_correct_count++;
		}
	}
	double total_correct_ratio = (double)total_correct_count / net.test_set.size();

#ifdef _HANY_NET_PRINT_MSG
	time_cost = ((double)getTickCount() - time_cost) / getTickFrequency();
	cout << "MNIST testing done." << endl << "Time cost: " << time_cost << "s." << endl;
	cout << "Total correctness ratio: " << total_correct_ratio << endl << endl;
#endif
#endif

#ifdef _HANY_NET_PREDICT_IMAGE_SERIES
#ifdef _HANY_NET_PRINT_MSG
	cout << "Predicting from image series..." << endl;
#endif

//	VideoWriter wri(output_video_file, CV_FOURCC('M', 'J', 'P', 'G'), 25.0, Size(640, 480));

	for(int c = 0; c < net.class_count; c++) {

		for(int i = 0; i < sample_num; i++) {
			string file_name = sample_file_pre + to_string(c) + "_" + to_string(i) + ".jpg";
			Mat img_read = imread(file_name, CV_LOAD_IMAGE_GRAYSCALE);
			if(img_read.data == NULL) {
				break;
			}
			Mat img_nor, img_show;
			resize(img_read, img_show, Size(400, 400));
			resize(img_read, img_nor, Size(net.sample_width, net.sample_height));

			vector<Mat> input_sample;
			input_sample.push_back(img_nor);

			vector<Mat> predict_result = net.predictCNN(input_sample);

			int pred_rst = (int)predict_result[0].ptr<uchar>(0)[0];
			if(pred_rst < net.class_count) // valid label_list indices are 0..class_count-1
				putText(img_show, label_list[pred_rst].second, Point(10, 40), FONT_HERSHEY_SIMPLEX, 1.0, Scalar(0, 255, 255), 2);

			putText(img_show, to_string(c)+"-"+to_string(i), Point(img_show.cols-80, 40), FONT_HERSHEY_SIMPLEX, 1.0, Scalar(0, 255, 255), 2);

			int frame_count = 25;
			while(--frame_count) { // no-op spin unless the VideoWriter lines are re-enabled
//				wri.write(img_show);
			}
			imshow(camera_window_name, img_show);

			int key_get = waitKey(20);
			switch(key_get) {
			case 27:
//				wri.release();
				return 0;
			default:
				break;
			}
		}
	}

#endif


#ifdef _HANY_NET_PREDICT_VEDIO_SERIES
#ifdef _HANY_NET_PRINT_MSG
	cout << "Predicting from video series..." << endl;
#endif

	VideoWriter wri(output_video_file, CV_FOURCC('M', 'J', 'P', 'G'), 25.0, Size(640, 480));
	namedWindow(camera_window_name);

	CascadeClassifier cascade_out;
	cascade_out.load(haar_file);

	for(int c = 1; c <= net.class_count; c++) {
		string file_name = "path_to_face_videos\\" + to_string(c) + ".wmv";
		VideoCapture cap(file_name);
		if(!cap.isOpened())
			continue;

		Mat img_read;
		while(cap.read(img_read)) {
			Mat img_gray, nor_gray, img_show;
			img_read.copyTo(img_show);
			cvtColor(img_read, img_gray, CV_BGR2GRAY);

			vector<Rect> faces;
			equalizeHist(img_gray, img_gray);
			cascade_out.detectMultiScale(img_gray, faces, 1.1, 2, 0, Size(120, 120));

			for(int f = 0; f < faces.size(); f++) {
				rectangle(img_show, faces[f], Scalar(0, 255, 255), 3);

				resize(img_gray(faces[f]), nor_gray, Size(net.sample_width, net.sample_height));
				vector<Mat> input_sample;
				input_sample.push_back(nor_gray);

				vector<Mat> predict_result = net.predictCNN(input_sample);
				
				int pred_rst = (int)predict_result[0].ptr<uchar>(0)[0];
				if(pred_rst <= net.class_count)
					putText(img_show, to_string(pred_rst), Point(faces[f].x+faces[f].width, faces[f].y+faces[f].height), FONT_HERSHEY_SIMPLEX, 1.0, Scalar(0, 255, 255), 2);
			}

			int frame_count = 2;
			while(--frame_count) {
				wri.write(img_show);
			}
			imshow(camera_window_name, img_show);

			int key_get = waitKey(20);
			switch(key_get) {
			case 27:
				wri.release();
				return 0;
			default:
				break;
			}
		}
	}
	wri.release();
#endif

#ifdef _HANY_NET_PREDICT_CAMERA
#ifdef _HANY_NET_PRINT_MSG
	cout << "Predicting from camera..." << endl;
#endif

	VideoCapture cap_out(0);
	if(!cap_out.isOpened()) {
		cout << "Cannot access camera." << endl;
		cin.get();
		exit(-1);
	}

	CascadeClassifier cascade_out;
	cascade_out.load(haar_file);

//	VideoWriter wri(output_video_file, CV_FOURCC('M', 'J', 'P', 'G'), 25.0, Size(640, 480));

	Mat src_frame;

	namedWindow(camera_window_name);

	Mat img_read;
	while(cap_out.read(img_read)) {
		Mat img_gray, nor_gray, img_show;
		img_read.copyTo(img_show);
		cvtColor(img_read, img_gray, CV_BGR2GRAY);

		vector<Rect> faces;
		equalizeHist(img_gray, img_gray);
		cascade_out.detectMultiScale(img_gray, faces, 1.1, 2, 0, Size(120, 120));

		for(int f = 0; f < faces.size(); f++) {
			rectangle(img_show, faces[f], Scalar(0, 255, 255), 3);

			resize(img_gray(faces[f]), nor_gray, Size(net.sample_width, net.sample_height));
			vector<Mat> input_sample;
			input_sample.push_back(nor_gray);

			vector<Mat> predict_result = net.predictCNN(input_sample);

			int pred_rst = (int)predict_result[0].ptr<uchar>(0)[0];
			if(pred_rst < net.class_count) // valid label_list indices are 0..class_count-1
				putText(img_show, label_list[pred_rst].second, Point(faces[f].x+faces[f].width, faces[f].y+faces[f].height), FONT_HERSHEY_SIMPLEX, 1.0, Scalar(0, 255, 255), 2);

		}

		int frame_count = 2;
		while(--frame_count) {
//			wri.write(img_show);
		}
		imshow(camera_window_name, img_show);

		int key_get = waitKey(20);
		if(key_get == 27) {
//			wri.release();
			cap_out.release();
			return 0;
		}
	}
#endif

	cout << "Press any key to quit..." << endl;
//	waitKey(0);
	cin.get();

	return 0;
}
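The getTickCount()/getTickFrequency() pairs recur around every stage above; a small RAII helper, as a sketch (not part of the original code), factors out the pattern:

// Prints "<label> time cost: <s>s." when the scope ends
struct ScopedTimer {
	const char* label;
	double start;
	explicit ScopedTimer(const char* l) : label(l), start((double)getTickCount()) {}
	~ScopedTimer() {
		double s = ((double)getTickCount() - start) / getTickFrequency();
		cout << label << " time cost: " << s << "s." << endl;
	}
};

// usage: { ScopedTimer t("CNN training"); net.trainCNN(); }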
Example No. 6
int main(int argc, char * argv[])
{
	setlocale(LC_ALL, "Russian");
	OpticalFlowMatrices of_matr;
	CameraParams cp;
	char command;
	int number_of_cams;
	puts("Проверка камер");
	presets_aiming();

	printf("Нужна ли калибровка? (y//n): ");
	scanf("%c", &command);
	bool calibration_needed = command == 'y';

	printf("Введите число камер (1//2): ");
	scanf("%d", &number_of_cams);

	if (number_of_cams == 1)
	{
		if (calibration_needed)
		{
			// Run the calibration and write the results to a file
			int board_width, board_height;
			puts("Введите размеры шахматной доски (число углов)");
			printf("Ширина: "); scanf("%d", &board_width);
			printf("Высота: "); scanf("%d", &board_height);

			Size board_sz(board_width, board_height);
			cp = camera_calibration(board_sz);

			write_camera_calibration_to_file(cp);
		}
		else
		{ // Read the calibration from a file
			read_camera_calibration_from_file(cp);
		}

		// Optical flow algorithm
		// Take photos from the camera
		VideoCapture cap(0);

		Mat img0, img1;
		single_camera_aiming(cap);
		cap.read(img0);
		printf("Передвиньте камеру и нажмите любую клавишу");
		single_camera_aiming(cap);
		cap.read(img1);

		// Compute the optical flow and build the point cloud
		Mat point_cloud = single_sfm(img0, img1, cp.CM, cp.D);

		// Save the results to a file
		save_3d_to_file("[1]point_cloud.obj", point_cloud, "w");
	}

	else if (number_of_cams == 2)
	{
		if (calibration_needed)
		{
			// Run the calibration and write the results to a file
			int board_width, board_height;
			puts("Введите размеры шахматной доски (число углов)");
			printf("Ширина: "); scanf("%d", &board_width);
			printf("Высота: "); scanf("%d", &board_height);

			Size board_sz(board_width, board_height);
			of_matr = stereo_calibration(board_sz);

			write_stereo_calibration_to_file(of_matr);
		}
		else
		{ // Read the calibration from a file
			read_stereo_calibration_from_file(of_matr);
		}

		// Optical flow algorithm
		// Take photos from the cameras
		VideoCapture cap0(0);
		VideoCapture cap1(1);

		Mat img0, img1;
		aiming(cap0, cap1);
		cap0.read(img0);
		cap1.read(img1);


		// Compute the optical flow and build the point cloud
		Mat point_cloud = stereo_sfm(img0, img1, of_matr);

		// Save the results to a file
		save_3d_to_file("[2]point_cloud.obj", point_cloud, "w");
	}


	return 0;
}
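save_3d_to_file is defined elsewhere; given the ".obj" extension it presumably emits Wavefront vertex lines. A hypothetical sketch, assuming point_cloud stores one 3-D point per row as CV_32FC3 (an assumption, not the project's actual layout):

#include <cstdio>

// Hypothetical writer: one "v x y z" line per point (minimal OBJ vertex syntax)
void save_3d_to_file(const char* path, const Mat& cloud, const char* mode)
{
	FILE* f = fopen(path, mode);
	if (!f) return;
	for (int i = 0; i < cloud.rows; ++i) {
		Vec3f p = cloud.at<Vec3f>(i, 0);
		fprintf(f, "v %f %f %f\n", p[0], p[1], p[2]);
	}
	fclose(f);
}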
Example No. 7
void Styler_Syntax::AddCaptures(matcher& m, stxmatch& sm, unsigned int offset, const SearchInfo& si, int rc, int* ovector) {
	wxASSERT(m.HasCaptures());
	wxASSERT(sm.subMatch.get() == NULL);
	wxASSERT(offset + sm.start >= si.lineStart && offset + sm.start < si.lineEnd);

	// Handle captures nested inside each other
	if (rc > 0) {
		vector<unsigned int> offsets;
		vector<interval> ivs;
		vector<stxmatch*> mts;

		// All intervals are in absolute numbers
		const interval iv(offset + sm.start, offset + sm.end);
		wxASSERT(iv.start == si.lineStart + ovector[0] && iv.end == si.lineStart + ovector[1]);

		ivs.push_back(iv);
		mts.push_back(&sm);

		for (unsigned int i = 1; (int)i < rc; ++i) {
			if (ovector[2*i] == -1) continue;

			const wxString& name = m.GetCaptureName(i);
			if (name.empty()) continue;

			const interval capiv(si.lineStart + ovector[2*i], si.lineStart + ovector[2*i+1]);

			// Get the right parent match
			while(ivs.size() > 1 && capiv.end > ivs.back().end) {
				ivs.pop_back();
				mts.pop_back();
			}
			stxmatch& parent = *mts.back();

			// We have to adjust the interval against the parent offset (which is absolute)
			const int cap_start = capiv.start - ivs.back().start;
			const int cap_end = capiv.end - ivs.back().start;

			// Captures outside match (like in a non-capturing part)
			// are not currently supported
			const int parentLen = ivs.back().end - ivs.back().start;
			if (cap_start < 0 || cap_end > parentLen) {
				continue;
			}

			// Create submatch list if not there
			if (!parent.subMatch.get()) {
				parent.subMatch = auto_ptr<submatch>(new submatch);
				parent.subMatch->subMatcher = NULL; // matches with captures are distinguished from spans by not having a subMatcher
			}

			// Create the new match
			auto_ptr<stxmatch> cap(new stxmatch(name, &m, cap_start, cap_end, NULL, NULL, &parent));

			wxASSERT(capiv.end <= offset + sm.end);

			ivs.push_back(capiv);
			mts.push_back(cap.get());

			cap->st = GetStyle(*cap); // style the match
			parent.subMatch->matches.push_back(cap);
		}
	}
}
Example No. 8
bool CCapturador::CapturePatternsUndisorted(Mat& CameraMatrix,Mat& DistMatrix,int time)
{
	m_vCaptures.clear();
	VideoCapture cap(0); // open the default camera
	if (!cap.isOpened())  // check if we succeeded
		return false;     // the function returns bool; -1 would silently convert to true
	bool bMakeCapture = false;
	int nPatterns = 0;
	namedWindow("Camera", 1);
	namedWindow("Patrones");
/*
	HWND win_handle = FindWindow(0, L"Patrones");
	if (!win_handle)
	{
		printf("Failed FindWindow\n");
	}

	// Resize
	unsigned int flags = (SWP_SHOWWINDOW | SWP_NOSIZE | SWP_NOMOVE | SWP_NOZORDER);
	flags &= ~SWP_NOSIZE;
	unsigned int x = 0;
	unsigned int y = 0;
	unsigned int w = m_Options->m_nWidth;
	unsigned int h = m_Options->m_nHeight;
	SetWindowPos(win_handle, HWND_NOTOPMOST, x, y, w, h, flags);

	// Borderless
	SetWindowLong(win_handle, GWL_STYLE, GetWindowLong(win_handle, GWL_EXSTYLE) | WS_EX_TOPMOST);
	ShowWindow(win_handle, SW_SHOW);
	cvMoveWindow("Patrones", 0, 0);
    */
    int64 A = getTickCount(); // getTickCount() returns int64; long may truncate
    int64 B = getTickCount();
	bool start = false;
	for (int i = 0;;)
	{
		imshow("Patrones", m_vPatterns[i]);
		Mat frame;
		cap >> frame;
		if (frame.empty())
			return false;
		Mat view, rview, map1, map2;
		initUndistortRectifyMap(CameraMatrix, DistMatrix, Mat(),
			getOptimalNewCameraMatrix(CameraMatrix, DistMatrix, frame.size(), 1, frame.size(), 0),
			frame.size(), CV_16SC2, map1, map2);
		remap(frame, rview, map1, map2, INTER_LINEAR);
		imshow("Camera", rview);
        B = getTickCount();
		int64 C = B - A;
		if ((C > time && start) || waitKey(30) >= 0)
		{
			start = true;
			cout << "time = " << C << endl;
            A = getTickCount();
			i++;
			Mat capture = frame.clone();
			Mat gray;
			cv::cvtColor(capture, gray, CV_BGR2GRAY);
			m_vCaptures.push_back(gray);
			if (++nPatterns >= m_nPatterns)
				break;
		}
	}
	cout << "Patrones capturados." << endl;
	cvDestroyWindow("Patrones");
	return true;
}
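initUndistortRectifyMap above rebuilds identical maps on every frame, which dominates the per-frame cost. Since the camera matrix and frame size are fixed, the maps can be computed once before the loop; a sketch of the reordering:

// Build the undistortion maps once, outside the capture loop
Mat probe, map1, map2;
cap >> probe; // one frame just to learn the size
initUndistortRectifyMap(CameraMatrix, DistMatrix, Mat(),
	getOptimalNewCameraMatrix(CameraMatrix, DistMatrix, probe.size(), 1, probe.size(), 0),
	probe.size(), CV_16SC2, map1, map2);
// ...then inside the loop only:  remap(frame, rview, map1, map2, INTER_LINEAR);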
Example No. 9
int main(int argc, char** argv)
{
	try
	{
		// Detection parameters :
		// -> Region of interest detection
		// -> Tag validator
		typedef aram::TagDetector<aram::CannyFittingDetector,aram::LocalThreshTagMatcher> myDetector;
		
		// Tag detector instantiation
		myDetector *detector = new myDetector();
		
		// Intrinsics parameters
		aram::Intrinsic intr("C:\\camera_data.xml");
		
		aram::MultiTag mt;
		float m_size = 28.0;
		float m_delta = 14.0;

		aram::TagInfo t0(0,aram::Point2D(0.0,m_delta+m_size),m_size);
		aram::TagInfo t1(1,aram::Point2D(0.0,0.0),m_size);
		aram::TagInfo t2(2,aram::Point2D(m_delta+m_size,0.0),m_size);
		aram::TagInfo t3(3,aram::Point2D(m_delta+m_size,m_delta+m_size),m_size);

		mt.addTagInfo(t0);
		mt.addTagInfo(t1);
		mt.addTagInfo(t2);
		mt.addTagInfo(t3);

		
		// Video input (see openCV doc)
		cv::VideoCapture cap(0); // use default video (usually your webcam)
		if(!cap.isOpened()) throw std::exception();
		
		cv::Mat frame;

		// Main loop
		while(true)
       	{
			// next frame from video input 
			cap >> frame;
						
			// Tag detection
			detector->detect(frame);

			// Tag list iterator
			aram::iteratorTag it;
			
			for(it=detector->begin();it!=detector->end();++it)
			{
				aram::vecPoint2D corners = (*it)->corners();

				for(unsigned int i=0;i<corners.size();++i)
				{
					cv::line(frame,corners[i%4],corners[(i+1)%4],cv::Scalar(100,150,150),2);
				}
			}

			// If any tag was detected
			if(detector->begin()!=detector->end())
			{
				// Get extrinsics parameters
				aram::Extrinsic e = mt.compute(detector->begin(),detector->end(),intr);
				drawPyramide(frame,m_size*2+m_delta,aram::Point3D(m_size+m_delta/2,m_size+m_delta/2,0),cv::Scalar(0,255,0),e);
			}
	
			// render
			cv::imshow("render", frame);
			// GUI refresh (see openCV doc)
			if(cv::waitKey(10)>=0) break;
		}
	}
	catch(std::exception &)
	{
	}

	return 0;
}
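drawPyramide is not shown in this excerpt. A hypothetical reconstruction, assuming aram::Extrinsic::project maps 3-D world points to 2-D image points (as used in Example No. 13) and that the function draws a four-sided pyramid of base width size centered at center; the aram types here are inferred from the calls above:

// Hypothetical: project and draw a pyramid with the extrinsic parameters e
void drawPyramide(cv::Mat& frame, float size, aram::Point3D center,
                  cv::Scalar color, aram::Extrinsic& e)
{
	float h = size / 2.0f;
	aram::Point2D base[4] = {
		e.project(aram::Point3D(center.x - h, center.y - h, 0.0)),
		e.project(aram::Point3D(center.x + h, center.y - h, 0.0)),
		e.project(aram::Point3D(center.x + h, center.y + h, 0.0)),
		e.project(aram::Point3D(center.x - h, center.y + h, 0.0))
	};
	aram::Point2D apex = e.project(aram::Point3D(center.x, center.y, -h));
	for (int i = 0; i < 4; ++i) {
		cv::line(frame, base[i], base[(i + 1) % 4], color, 2);
		cv::line(frame, base[i], apex, color, 2);
	}
}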
Example No. 10
int main(int argc, char** argv) {
        if(argc == 1) {
            std::cout << "Need input file to process." << std::endl;
            return 1;
        }
        std::string infile = argv[1];
        bool boulderproc = false;
		bool videoproc = false;

        for(int i=2;i<argc;i++) {           
			std::string opt = argv[i];
	        if(opt == "--boulder") {
	            boulderproc = true;
	        } else if(opt == "--video") {
				videoproc = true;		
			}
		}

		cv::namedWindow("input");
		cv::namedWindow("output");

		if(!videoproc) {
			cv::Mat src;			

			if(infile == "video") {
				cv::VideoCapture cap(0);
				if(!cap.isOpened())
					return -1;
				
				if( !cap.read(src) ) {
					std::cerr << "Error reading image from camera";	
					return -1;
				}
			} else {
				src = cv::imread(infile, 1);
			}
			

			scoredContour out;
			if(!boulderproc) {
			    out = goal_pipeline(goal_preprocess_pipeline(src));
			} else {
			    out = boulder_pipeline(boulder_preprocess_pipeline(src));
			}

			cv::Mat output = cv::Mat::zeros(src.size(), CV_8UC3);

			std::vector< std::vector<cv::Point> > drawVec;
			drawVec.push_back(out.second);

			cv::Scalar col(255,255,255);
			cv::drawContours(output, drawVec, 0, col);

			cv::imwrite("pipeline_output.png", output);

			cv::imshow("input", src);
			cv::imshow("output", output);

			while(true) {
				if(cv::waitKey(30) >= 0) break;
			}
		} else {
			cv::VideoCapture cap(1); // open cam 1
			if(!cap.isOpened())  // check if we succeeded
				return -1;

			cv::namedWindow("stage1");
			cv::namedWindow("stage2");
			cv::namedWindow("stage3");
			cv::namedWindow("stage4");
			cv::namedWindow("stage5");

			cvCreateTrackbar("Hue Min", "input", &(hueThres[0]), 179, NULL);
			cvCreateTrackbar("Hue Max", "input", &(hueThres[1]), 179, NULL);

			cvCreateTrackbar("Val Min", "input", &(valThres[0]), 255, NULL);
			cvCreateTrackbar("Val Max", "input", &(valThres[1]), 255, NULL);

			std::vector<cv::Point> last_good;

			while(true) {
				cv::Mat src;
				if( !cap.read(src) ) {
					std::cerr << "Error reading image from camera";	
					return -1;
				}

				cv::imshow("input", src);

				double t = (double)cv::getTickCount();

				scoredContour out;
				if(!boulderproc) {
					out = goal_pipeline(goal_preprocess_pipeline(src, true, true), true);
				} else {
					out = boulder_pipeline(boulder_preprocess_pipeline(src, true, true), true);
				}

				double fps = 1 / (((double)cv::getTickCount() - t) / cv::getTickFrequency());

				std::cout << "FPS: " << fps << std::endl;
				cv::Mat output = cv::Mat::zeros(src.size(), CV_8UC3);

				if( out.second.size() > 0 ) {
					std::vector< std::vector<cv::Point> > drawVec;
					drawVec.push_back(out.second);

					cv::Rect bounds = cv::boundingRect(out.second);
					last_good = out.second;

					double d = getDistance(bounds.size(), src.size());
					std::pair<double, double> angles = getRelativeAngleOffCenter(out, src.size(), d);

					cv::Scalar col(255,255,255);
					cv::drawContours(output, drawVec, 0, col);
					cv::putText(output, std::to_string(fps), cv::Point(50, 50), cv::FONT_HERSHEY_PLAIN, 1.0, cv::Scalar(0, 0, 255));
					cv::putText(output, std::to_string(d) + " inches", cv::Point(50, 75), cv::FONT_HERSHEY_PLAIN, 1.0, cv::Scalar(0, 255, 0));
					cv::putText(output, std::to_string(angles.first * (180 / pi)) + " degrees horizontal / " + std::to_string(angles.second * (180 / pi)) + " degrees vertical", cv::Point(50, 100), cv::FONT_HERSHEY_PLAIN, 1.0, cv::Scalar(255, 0, 0));
/*
					cv::putText(output, std::string("Horizontal ") + std::to_string(getFOVAngleHoriz(bounds.size(), src.size(), 36.0)) + " radians", cv::Point(50, 75), cv::FONT_HERSHEY_PLAIN, 1.0, cv::Scalar(0, 255, 0));
					cv::putText(output, std::string("Vertical ") + std::to_string(getFOVAngleVert(bounds.size(), src.size(), 36.0)) + " radians", cv::Point(50, 100), cv::FONT_HERSHEY_PLAIN, 1.0, cv::Scalar(255, 0, 0));
*/
				
					cv::imshow("output", output);
				}

				if(cv::waitKey(30) >= 0) { 
/*
					cv::Rect bounds = cv::boundingRect(last_good);
					std::cout << "Horizontal: " << std::to_string(getFOVAngleHoriz(bounds.size(), src.size(), 36.0)) + " radians" << std::endl;
					std::cout << "Vertical: " << std::to_string(getFOVAngleVert(boundsize(), src.size(), 36.0)) + " radians" << std::endl;
*/
					break;
				}
			}
		}
}
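getDistance and getRelativeAngleOffCenter are defined elsewhere. For orientation only, a hypothetical pinhole-model distance estimate; the target width and focal length below are illustrative placeholders, not the project's values:

// Hypothetical: distance from apparent size under a pinhole camera model
double getDistanceSketch(const cv::Size& target, const cv::Size& frame)
{
	const double realWidthInches = 20.0; // assumed physical target width
	const double focalPx = 600.0;        // assumed focal length in pixels
	(void)frame;                         // frame size unused in this sketch
	return realWidthInches * focalPx / target.width;
}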
Example No. 11
void VDThread::run()
{
	cv::VideoCapture cap(videopath.toStdString());
	int frameCount = cap.get(CV_CAP_PROP_FRAME_COUNT);
	for(int i=0; i<5; i++)
	{
		sleep(1);
		emit indexOfFrame(frameCount/5 * i); 
	}
	
	emit indexOfFrame(frameCount); 

	/*
	cv::VideoCapture cap(videopath.toStdString());
	int frameCount = cap.get(CV_CAP_PROP_FRAME_COUNT);

	if(_access(outputFolder.toStdString().c_str(), 0) == -1)
	{
		_mkdir(outputFolder.toStdString().c_str());
	}
	
	cv::Mat preFrame;
	int num = 1;
	int count = 1;
	int index = 0;
	while(index<frameCount)
	{
		cv::Mat frame;
		cap.read(frame);
        //cap >> frame;
		index++;
		if(index % 10 == 0)
		{
			qDebug()<<index;
			emit indexOfFrame(index);
		}

		if(frame.empty()) { 
			//emit indexOfFrame(index); 
			continue; 
		}
		
		if(!preFrame.empty())
		{
			cv::Mat c1[3];
			cv::Mat c2[3];

			split(frame,c1);
			split(preFrame,c2);

			double B = corr2(c1[0],c2[0]);
			double G = corr2(c1[1],c2[1]);
			double R = corr2(c1[2],c2[2]);
			if(B < thre || G < thre || R < thre)
			{
				char temp[20];
				sprintf(temp, "img-%05d.png", count);
				QString imgPath = outputFolder + "/" + QString(temp);

				imwrite(imgPath.toStdString(), preFrame);
				frame.copyTo(preFrame);

				count++;
				num = 1;
			}
			else
			{
				num++;
			}
		}
		else
		{
			frame.copyTo(preFrame);
		}
	}

	emit indexOfFrame(index); 
	char temp[20];
	sprintf(temp, "img-%05d.png", count);
	QString imgPath = outputFolder + "/" + QString(temp);

	imwrite(imgPath.toStdString(), preFrame);
	*/
}
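corr2 (used by the commented-out block) is not shown; the name suggests the MATLAB-style 2-D correlation coefficient. A sketch under that assumption:

#include <cmath>

// r = sum((A-meanA)(B-meanB)) / sqrt(sum((A-meanA)^2) * sum((B-meanB)^2))
double corr2(const cv::Mat& a, const cv::Mat& b)
{
	cv::Mat af, bf;
	a.convertTo(af, CV_64F);
	b.convertTo(bf, CV_64F);
	af -= cv::mean(af)[0];
	bf -= cv::mean(bf)[0];
	double den = std::sqrt(af.dot(af) * bf.dot(bf));
	return den > 0.0 ? af.dot(bf) / den : 0.0;
}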
Example No. 12
int main(int argc, char** argv)
{
	help();

	//Start Video Feed
	cv::VideoCapture cap(1);

	if (!cap.isOpened())
	{
		std::cout << "Can not open video capture device." << std::endl;
		exit(-1);
	}

	//Video feed settings
	cap.set(CV_CAP_PROP_FRAME_WIDTH, FRAME_WIDTH); //set frame width
	cap.set(CV_CAP_PROP_FRAME_HEIGHT, FRAME_HEIGHT); //set frame height

	double dWidth = cap.get(CV_CAP_PROP_FRAME_WIDTH);   //get frame width
	double dHeight = cap.get(CV_CAP_PROP_FRAME_HEIGHT); //get frame height
	std::cout << "Frame size: " << dWidth << "x" << dHeight << std::endl; //print frame size

	//Tracking Window
	cv::namedWindow("Tracking Window", cv::WINDOW_AUTOSIZE);

	//Video Capture Variables
	cv::Mat frame;
	
	
	const double threshold_val = 225;
	const double max_BINARY_value = 255;
	const int threshold_type = 3; // 3 == cv::THRESH_TOZERO

	while (1)
	{
		//start timer to measure performance
		clock_t begin_time = clock();

		//Capture Video Frame
		if (!cap.read(frame))
		{
			std::cout << "Cannot read a frame from video stream." << std::endl;
			exit(-2);
		}


		//cv::vector<cv::Mat> CV_8U channel;
		cv::Mat R, G, B;
		cv::Mat channel[3];
		split(frame, channel);
		channel[0].convertTo(B, CV_8U);
		channel[1].convertTo(G, CV_8U);
		channel[2].convertTo(R, CV_8U);
		// allocate single-channel buffers (the original leaked IplImages from cvCreateImage)
		cv::Mat ch1(B.size(), CV_8UC1);
		cv::Mat ch2(G.size(), CV_8UC1);
		cv::Mat ch3(R.size(), CV_8UC1);
		cv::Mat temp(R.size(), CV_8UC1);
		cv::Mat res(R.size(), CV_8UC1);

		//cv::Mat* hh1 = &B;
		//cv::Mat* hh2 = &G;
		//cv::Mat* hh3 = &R;

		threshold(B, ch1, threshold_val, max_BINARY_value, threshold_type);
		threshold(G, ch2, threshold_val, max_BINARY_value, threshold_type);
		threshold(R, ch3, threshold_val, max_BINARY_value, threshold_type);
		
		cv::bitwise_and(ch1, ch2, temp);
		cv::bitwise_and(ch3, temp, res);
		imshow("Resulting Image", res);


		//cvThreshold(&channel[0], &R, threshold_val, max_BINARY_value, threshold_type);
		//cvThreshold(&channel[1], &G, threshold_val, max_BINARY_value, threshold_type);
		//cvThreshold(&channel[2], &B, threshold_val, max_BINARY_value, threshold_type);
		//cv::imshow("red channel", R);

		//IplImage* img = new IplImage(frame);
		//cvSplit(img, &R, &G, &B, NULL);
		//cv::Mat R(cv::Size(dWidth, dHeight), CV_MAKETYPE(CV_8U, 1));
		//cv::Mat G(cv::Size(dWidth, dHeight), CV_MAKETYPE(CV_8U, 1));
		//cv::Mat B(cv::Size(dWidth, dHeight), CV_MAKETYPE(CV_8U, 1));
		//cv::Mat CH1(cv::Size(dWidth, dHeight), CV_MAKETYPE(CV_8U, 1));
		//cv::Mat CH2(cv::Size(dWidth, dHeight), CV_MAKETYPE(CV_8U, 1));
		//cv::Mat CH3(cv::Size(dWidth, dHeight), CV_MAKETYPE(CV_8U, 1));

		if (cv::waitKey(30) == 27) //wait for 'esc' key press for 30ms.
		{
			std::cout << std::endl << "Press enter to exit program." << std::endl;
			break;
		}

		imshow("Tracking Window", frame);

		//print execution time
		clock_t end_time = clock();
		std::cout << "Frequency = " << double(get_CPU_time_usage(end_time, begin_time))
			<< " ms (" << 1 / (double(get_CPU_time_usage(end_time, begin_time)) / 1000) << " Hz)\n\n";
	}

	cvWaitKey(0);
	return 0;
}
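The split/threshold/bitwise_and chain above keeps pixels that are bright in all three channels. cv::inRange on the BGR frame expresses the same test in one call (note the original uses THRESH_TOZERO, so its result keeps graylevels rather than a clean 0/255 mask); a sketch:

// Binary mask of pixels with B, G and R all above 225
cv::Mat res;
cv::inRange(frame, cv::Scalar(226, 226, 226), cv::Scalar(255, 255, 255), res);
cv::imshow("Resulting Image", res);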
Example No. 13
int main(int argc, char** argv)
{
	try
	{
		float size = 142.0;
		float delta = 654.0;

		aram::Grid g;
		aram::TagInfo t1(21,aram::Point2D(0.0,0.0),size);
		aram::TagInfo t2(22,aram::Point2D(0.0,delta),size);
		aram::TagInfo t3(19,aram::Point2D(delta,delta),size);
		aram::TagInfo t4(20,aram::Point2D(delta,0.0),size);
		
		g.addTagInfo(t1);
		g.addTagInfo(t2);
		g.addTagInfo(t3);
		g.addTagInfo(t4);
		aram::Chessboard *coord = new aram::Chessboard(g);


		// Detection parameters :
		// -> Region of interest detection
		// -> Tag validator USE HAMMINGTAG FOR MULTI TRACKING !
		typedef aram::TagDetector<aram::EdgeDetector,aram::HammingTag> myDetector;
		
		// Tag detector instantiation
		myDetector *detector = new myDetector();
		
		// Intrinsics parameters
		aram::Intrinsics intr("C:\\camera_data.xml");
		
		// Video input (see openCV doc)
		cv::VideoCapture cap(0); // use default video (usually your webcam)
		if(!cap.isOpened()) throw std::exception();
		
		cv::Mat frame;

		// Main loop
		while(true)
       	{
			// next frame from video input 
			cap >> frame;
						
			// Tag detection
			detector->detect(frame);
						

			// Tag list iterator
			aram::iteratorTag it;
			
			for(it=detector->begin();it!=detector->end();++it)
			{
				aram::vecPoint2D corners = (*it)->corners();

				for(unsigned int i=0;i<corners.size();++i)
				{
					cv::line(frame,corners[i%4],corners[(i+1)%4],cv::Scalar(100,150,150),2);
				}
			}

			// If any tag was detected
			if(detector->begin()!=detector->end())
			{
				// Get extrinsics parameters
				aram::Extrinsics e = coord->compute(detector->begin(),detector->end(),intr);

				// Project 3D world coordinate -> 2D image coordinate
				aram::Point2D o = e.project(aram::Point3D(0.0,0.0,0.0));
				aram::Point2D x = e.project(aram::Point3D(delta,0.0,0.0));
				aram::Point2D y = e.project(aram::Point3D(0.0,delta,0.0));
				aram::Point2D z = e.project(aram::Point3D(0.0,0.0,delta/2.0));

				// draw axis
				cv::line(frame,o,x,cv::Scalar(200,0,0),2);
				cv::line(frame,o,y,cv::Scalar(0,200,0),2);
				cv::line(frame,o,z,cv::Scalar(0,0,200),2);
			}
	
			// render
			cv::imshow("render", frame);
			// GUI refresh (see openCV doc)
			if(cv::waitKey(10)>=0) break;
		}
	}
	catch(std::exception &)
	{
	}

	return 0;
}
Example No. 14
void greenDetect(HWND hwnd) {
    int fontFace = cv::FONT_HERSHEY_SCRIPT_SIMPLEX;
    double fontScale = 1;
    int thickness = 1;  
    
    cv::Point textOrg(500, 25);
    
    cv::Mat frame, hsv, thresholded;
    
    int numWhite = 0;
    
    int iLowH = 75;
    int iHighH = 104;
    /*
    int iLowH = 32;
    int iHighH = 51;
    */        
    int iLowS = 29; 
    int iHighS = 142;
    
    int iLowV = 38;
    int iHighV = 255;
    
    cv::namedWindow("Control", cv::WINDOW_AUTOSIZE); //create a window called "Control"
    
    //Create trackbars in "Control" window
    cv::createTrackbar("LowH", "Control", &iLowH, 179); //Hue (0 - 179)
    cv::createTrackbar("HighH", "Control", &iHighH, 179);
    
    cv::createTrackbar("LowS", "Control", &iLowS, 255); //Saturation (0 - 255)
    cv::createTrackbar("HighS", "Control", &iHighS, 255);
    
    cv::createTrackbar("LowV", "Control", &iLowV, 255); //Value (0 - 255)
    cv::createTrackbar("HighV", "Control", &iHighV, 255);
    
    cv::VideoCapture cap(0); // open the default camera
    
    if(cap.isOpened()) {
    
      cv::namedWindow("thresholded", 1);
      cv::waitKey(1000); // Wait for camera so frame won't be empty
    
      for (;;)
      {
        
          try
          {
              boost::this_thread::interruption_point();
          }
          catch(boost::thread_interrupted&)
          {
              break;
          }

          cap >> frame; // get a new frame from capture
          cv::cvtColor(frame, hsv, cv::COLOR_BGR2HSV);
          
          cv::inRange(hsv, cv::Scalar(iLowH, iLowS, iLowV), cv::Scalar(iHighH, iHighS, iHighV), thresholded); //Threshold the image
          
          //morphological opening (remove small objects from the foreground)
          cv::erode(thresholded, thresholded, cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(5, 5)) );
          cv::dilate(thresholded, thresholded, cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(5, 5)) ); 
          
          //morphological closing (fill small holes in the foreground)
          cv::dilate(thresholded, thresholded, cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(5, 5)) );
          cv::erode(thresholded, thresholded, cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(5, 5)) );
          
          numWhite = cv::countNonZero(thresholded);
            
          cv::putText(thresholded,
            boost::lexical_cast<std::string>(numWhite),
            textOrg, fontFace, fontScale, cv::Scalar::all(255), thickness, 8);
          
          if (numWhite > 10000 ) {
            if (activeKey == NULL && selectedKey != NULL) {
              activeKey = selectedKey;
              sendBoundKey(activeKey->virtualKey, TRUE); 
              InvalidateRect(hwnd, 0, TRUE);
            }
          } else {
            if (activeKey != NULL) {
              sendBoundKey(activeKey->virtualKey, FALSE);
              activeKey = NULL; 
              InvalidateRect(hwnd, 0, TRUE);

            }
          }
            
            
          cv::imshow("thresholded", thresholded);
          
          cv::waitKey(30);
          
        }
    }
} // close greenDetect (the closing brace was missing in the original listing)
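The numWhite > 10000 trigger above is tied to the capture resolution. Comparing against a fraction of the frame makes it portable; a sketch (the 3% cut-off is an assumed equivalent, roughly 10000 pixels at 640x480):

// Resolution-independent trigger: fraction of thresholded pixels
double frac = cv::countNonZero(thresholded) / (double)thresholded.total();
if (frac > 0.03) { /* same role as the numWhite > 10000 test */ }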
Example No. 15
int main(int argc, char **argv) {
    cv::Mat frame;
    cv::Mat orig;
    cv::Mat fore;

    time_t camAdaptationStartTime = time(NULL);
    bool camAdapted = false;

    std::vector <std::vector<cv::Point>> contours;

    EnableCameraAutoAdjust(GetVideoNum(argc, argv));
    cv::VideoCapture cap(GetVideoNum(argc, argv));
    cv::Ptr<cv::BackgroundSubtractor> bgsub;

    cvNamedWindow("Contador de Votos", CV_WINDOW_NORMAL);
    cvSetWindowProperty("Contador de Votos", CV_WND_PROP_AUTOSIZE, CV_WINDOW_AUTOSIZE);
    ScreenSize ss = GetScreenSize();
    cvResizeWindow("Contador de Votos", ss.width, ss.height);

    cap >> frame;

    const int squareSize = 150;
    const int squareMargin = 10;

    cv::Point leftOrigin(squareMargin, squareMargin);
    cv::Point rightOrigin(frame.size().width - squareSize - squareMargin, squareMargin);

    InteractiveObject left(cv::Rect(leftOrigin.x, leftOrigin.y, squareSize, squareSize));
    InteractiveObject right(cv::Rect(rightOrigin.x, rightOrigin.y, squareSize, squareSize));

    cv::Mat yesc = cv::imread("SWC.png", CV_LOAD_IMAGE_COLOR);
    cv::resize(yesc, yesc, cv::Size(squareSize, squareSize));

    cv::Mat yesbw = cv::imread("vader.jpg", CV_LOAD_IMAGE_COLOR);
    cv::resize(yesbw, yesbw, cv::Size(squareSize, squareSize));

    cv::Mat noc = cv::imread("STC.png", CV_LOAD_IMAGE_COLOR);
    cv::resize(noc, noc, cv::Size(squareSize, squareSize));

    cv::Mat nobw = cv::imread("spock.png", CV_LOAD_IMAGE_COLOR);
    cv::resize(nobw, nobw, cv::Size(squareSize, squareSize));

    cv::Mat vote = cv::imread("vote.png", CV_LOAD_IMAGE_COLOR);
    cv::resize(vote,vote, cv::Size(squareSize, squareSize));

   // VideoCapture video("Game.mp4");

    while(true) {
        cap >> frame;
        cv::flip(frame, frame, 1);
        frame.copyTo(orig);

	/*Mat game;
	video >> game;
	cv::resize(game,game, cv::Size(squareSize, squareSize));
	game.copyTo(orig(cv::Rect((rightOrigin.x + leftOrigin.x)/2, (rightOrigin.y + leftOrigin.y)/2 + 2*game.size().height , game.size().width, game.size().height)));*/

        if (camAdapted) {
            cv::blur(frame, frame, cv::Size(3, 3));
            cv::blur(frame, frame, cv::Size(5, 5));
            cv::erode( frame, frame, cv::Mat());
            cv::erode( frame, frame, cv::Mat());

            cv::Mat cropped;
            cv::Rect roi(0, 0, frame.size().width, 150);
            frame(roi).copyTo(cropped);

            cv::Mat dst;
            bgsub->apply(cropped, dst, 0.0001);

            cv::threshold(dst, dst, 230, 255, CV_THRESH_BINARY);
            cv::findContours(dst , contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);


            std::vector<std::vector<cv::Point> >hull( contours.size() );
            for( int i = 0; i < contours.size(); i++ )
                {  cv::convexHull( cv::Mat(contours[i]), hull[i], false ); }

            int counter = 0;

            bool leftActive = false;
            bool rightActive = false;
            for( int i = 0; i< contours.size(); i++ )
            {
               cv::Scalar color( 255, 0, 255);
               if (cv::contourArea(hull[i]) > 600) {
                    cv::drawContours( orig, hull, i, color, 1, 8, std::vector<cv::Vec4i>(), 0, cv::Point() );
                    counter++;
               }
            }

            left.ProcessHulls(hull);
            right.ProcessHulls(hull);

		 std::string text = "VOTE";
                cv::Size txtSz = cv::getTextSize(text, cv::FONT_HERSHEY_DUPLEX, 4, 4, NULL);
                //cv::putText(orig, text, cv::Point(orig.size().width / 2 - txtSz.width /2, orig.size().height - 2* txtSz.height), cv::FONT_HERSHEY_PLAIN, 7, cv::Scalar(0, 255, 255), 4);


		vote.copyTo(orig(cv::Rect((rightOrigin.x + leftOrigin.x)/2, (rightOrigin.y + leftOrigin.y)/2, vote.size().width, vote.size().height)));


            if (!(left.IsActive() && right.IsActive())) {
                char buff[11];
                sprintf(buff, "%02d", left.GetCount());
                cv::putText(orig, buff, cv::Point(40, orig.size().height - 40), cv::FONT_HERSHEY_PLAIN, 4, cv::Scalar(0, 255, 0), 4);


                sprintf(buff, "%02d", right.GetCount());
                cv::Size txtSz = cv::getTextSize(buff, cv::FONT_HERSHEY_PLAIN, 4, 4, NULL);
                cv::putText(orig, buff, cv::Point(orig.size().width - txtSz.width - 40, orig.size().height - 40), cv::FONT_HERSHEY_PLAIN, 4, cv::Scalar(0, 255, 0), 4);



                left.Draw(orig);
                right.Draw(orig);

                if (right.IsCounted()) {
                    yesc.copyTo(orig(cv::Rect(rightOrigin.x, rightOrigin.y, yesc.size().width, yesc.size().height)));
                }
                else {
                    yesbw.copyTo(orig(cv::Rect(rightOrigin.x, rightOrigin.y, yesbw.size().width, yesbw.size().height)));
                }

                if (left.IsCounted()) {
                    noc.copyTo(orig(cv::Rect(leftOrigin.x, leftOrigin.y, noc.size().width, noc.size().height)));
                }
                else {
                    nobw.copyTo(orig(cv::Rect(leftOrigin.x, leftOrigin.y, nobw.size().width, nobw.size().height)));

                }

                int totalVotes = right.GetCount() + left.GetCount();
                std::string out = "FotosVote/votacao" + std::to_string(totalVotes) + ".png";
                imwrite(out, orig); // note: saves a snapshot on every frame while the counters are shown

            }
            else {
                left.Deactivate();
                right.Deactivate();

               /* std::string text = "Fraude!";
                cv::Size txtSz = cv::getTextSize(text, cv::FONT_HERSHEY_PLAIN, 4, 4, NULL);
                cv::putText(orig, text, cv::Point(orig.size().width / 2 - txtSz.width / 2, orig.size().height /2), cv::FONT_HERSHEY_PLAIN, 4, cv::Scalar(0, 0, 255), 4);*/
            }
        }
        else {
            if ((time(NULL) - camAdaptationStartTime) > ADAPTATION_TIME_SEC) {
                    camAdapted = true;
                    bgsub = cv::createBackgroundSubtractorMOG2();
                    DisableCameraAutoAdjust(GetVideoNum(argc, argv));
            }
            else {
                std::string text = "Configurando...";
                cv::Size txtSz = cv::getTextSize(text, cv::FONT_HERSHEY_PLAIN, 4, 4, NULL);
                cv::putText(orig, text, cv::Point(orig.size().width / 2 - txtSz.width / 2, orig.size().height /2 - 2* txtSz.height), cv::FONT_HERSHEY_PLAIN, 4, cv::Scalar(0, 0, 255), 4);

                char buff[16]; // buff[3] overflows once the countdown needs more than two digits
                sprintf(buff, "%d", (int)(ADAPTATION_TIME_SEC - (time(NULL) - camAdaptationStartTime)));
                txtSz = cv::getTextSize(buff, cv::FONT_HERSHEY_PLAIN, 4, 4, NULL);
                cv::putText(orig, buff, cv::Point(orig.size().width / 2 - txtSz.width / 2, orig.size().height /2 ), cv::FONT_HERSHEY_PLAIN, 4, cv::Scalar(0, 0, 255), 4);
            }
        }

        imshow("Contador de Votos", orig);

        int key = cv::waitKey(30);
        if ((key & 0xFF) == 27) {
                exit(0);
        }
        else if ((key & 0xFF) == ' '){
                camAdapted = false;
                EnableCameraAutoAdjust(GetVideoNum(argc, argv));
                camAdaptationStartTime = time(NULL);
        }
        else if ((key & 0xFF) == 'c') {
                imwrite("out.png", orig);
                system("python sendmail.py");
        }



    }

    return 0;
}
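The getTextSize/putText centering pattern repeats several times above; a small helper, as a sketch (not in the original), factors it out:

// Draw text horizontally centered at height y, in the FONT_HERSHEY_PLAIN
// size-4 style this example uses throughout
static void putTextCentered(cv::Mat& img, const std::string& text, int y,
                            const cv::Scalar& color)
{
	cv::Size sz = cv::getTextSize(text, cv::FONT_HERSHEY_PLAIN, 4, 4, NULL);
	cv::putText(img, text, cv::Point(img.size().width / 2 - sz.width / 2, y),
	            cv::FONT_HERSHEY_PLAIN, 4, color, 4);
}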
Example No. 16
/** @function main */
int main( int argc, char** argv )
{
	cv::VideoCapture cap(0);
	cv::Mat image;

	CameraMeasurements cameraMeasurements;
	CameraDetector cameraDetector = CameraDetector( );


	cv::namedWindow( "Matches" );
	struct timeval tv;
	struct timezone tz = {};
	tz.tz_minuteswest = 0;
	tz.tz_dsttime = 0;
	gettimeofday( &tv, &tz );


	int key = 0;
	while( ( key = cv::waitKey(1) ) != 27 ) {
		cap.grab();
/*		cap.grab();
		cap.grab();
		cap.grab();
		cap.grab();*/
		cap.retrieve( image );
		cvtColor(image, image, CV_BGR2GRAY);

		cameraDetector.detectFeatures( image, cameraMeasurements );
		cameraDetector.addFeatures( cameraMeasurements );


		if( key == '\n' ) {
			for ( std::list<CameraMeas_t>::iterator meas_j = cameraMeasurements.meas.begin(); meas_j != cameraMeasurements.meas.end(); ) {
				meas_j = cameraMeasurements.removeFeature( meas_j );
			}
		}

		// Iterate over meas and draw all non lost elements:
		for ( std::list<CameraMeas_t>::iterator meas_j = cameraMeasurements.meas.begin(); meas_j != cameraMeasurements.meas.end(); ++meas_j ) {
			if ( !meas_j->isLost && (meas_j->z.rows()>3) ) {
				Eigen::MatrixX2d& z = meas_j->z;
				cv::Point pt = Point( z( z.rows()-1, 0 ), z( z.rows()-1, 1 ) );
				cv::circle( image, pt, 4, Scalar( 255, 0, 0 ) );
				for( int i = 0; i < z.rows() - 1; i++ ) {
					cv::Point pt1 = Point( z( i, 0 ), z( i, 1 ) );
					cv::Point pt2 = Point( z( i+1, 0 ), z( i+1, 1 ) );
					cv::line( image, pt1, pt2, Scalar( 255, 0, 0 ) );
				}

			}
		}

		cv::imshow("Matches", image );
		

		struct timeval tnow;
		struct timeval tdiff;
		gettimeofday( &tnow, &tz );
		timersub( &tnow, &tv, &tdiff );
		tv = tnow;
		printf( "Dt = %d.%06d\n", tdiff.tv_sec, tdiff.tv_usec );

	}


	return 0;
}
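Note: the gettimeofday/timersub pair above works, but the same per-frame timing can be written portably with std::chrono (assumes C++11); a minimal sketch:

#include <chrono>
#include <cstdio>

int main() {
    auto t_prev = std::chrono::steady_clock::now();
    for (int frame = 0; frame < 100; ++frame) {
        // ... grab and process one frame here ...
        auto t_now = std::chrono::steady_clock::now();
        double dt = std::chrono::duration<double>(t_now - t_prev).count();
        t_prev = t_now;
        printf("Dt = %.6f s\n", dt);   // seconds between iterations
    }
    return 0;
}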
Ejemplo n.º 17
0
void GLView::drawAgent(const Agent& agent)
{
    float n;
    float r= conf::BOTRADIUS;
    float rp= conf::BOTRADIUS+2;
    //handle selected agent
    if (agent.selectflag>0) {

        //draw selection
        glBegin(GL_POLYGON);
        glColor3f(1,1,0);
        drawCircle(agent.pos.x, agent.pos.y, conf::BOTRADIUS+5);
        glEnd();

        glPushMatrix();
        glTranslatef(agent.pos.x-80,agent.pos.y+20,0);
        //draw inputs, outputs
        float col;
        float yy=15;
        float xx=15;
        float ss=16;
        glBegin(GL_QUADS);
        for (int j=0;j<INPUTSIZE;j++) {
            col= agent.in[j];
            glColor3f(col,col,col);
            glVertex3f(0+ss*j, 0, 0.0f);
            glVertex3f(xx+ss*j, 0, 0.0f);
            glVertex3f(xx+ss*j, yy, 0.0f);
            glVertex3f(0+ss*j, yy, 0.0f);
        }
        yy+=5;
        for (int j=0;j<OUTPUTSIZE;j++) {
            col= agent.out[j];
            glColor3f(col,col,col);
            glVertex3f(0+ss*j, yy, 0.0f);
            glVertex3f(xx+ss*j, yy, 0.0f);
            glVertex3f(xx+ss*j, yy+ss, 0.0f);
            glVertex3f(0+ss*j, yy+ss, 0.0f);
        }
        yy+=ss*2;

        //draw brain. Eventually move this to brain class?
        float offx=0;
        ss=8;
        for (int j=0;j<BRAINSIZE;j++) {
            col= agent.brain.boxes[j].out;
            glColor3f(col,col,col);
            glVertex3f(offx+0+ss*j, yy, 0.0f);
            glVertex3f(offx+xx+ss*j, yy, 0.0f);
            glVertex3f(offx+xx+ss*j, yy+ss, 0.0f);
            glVertex3f(offx+ss*j, yy+ss, 0.0f);
            if ((j+1)%40==0) {
                yy+=ss;
                offx-=ss*40;
            }
        }

        glEnd();
        glPopMatrix();
    }

    //draw giving/receiving
    if(agent.dfood!=0){
        glBegin(GL_POLYGON);
        float mag=cap(abs(agent.dfood)/conf::FOODTRANSFER/3);
        if(agent.dfood>0) glColor3f(0,mag,0); //green when gaining food
        else glColor3f(mag,0,0); //red when losing food
        for (int k=0;k<17;k++){
            n = k*(M_PI/8);
            glVertex3f(agent.pos.x+rp*sin(n),agent.pos.y+rp*cos(n),0);
            n = (k+1)*(M_PI/8);
            glVertex3f(agent.pos.x+rp*sin(n),agent.pos.y+rp*cos(n),0);
        }
        glEnd();
    }

    //draw indicator of this agent... used for various events
     if (agent.indicator>0) {
         glBegin(GL_POLYGON);
         glColor3f(agent.ir,agent.ig,agent.ib);
         drawCircle(agent.pos.x, agent.pos.y, conf::BOTRADIUS+((int)agent.indicator));
         glEnd();
     }

    //viewcone of this agent
    glBegin(GL_LINES);
    //and view cones
    glColor3f(0.5,0.5,0.5);
    for (int j=-2;j<3;j++) {
        if (j==0)continue;
        glVertex3f(agent.pos.x,agent.pos.y,0);
        glVertex3f(agent.pos.x+(conf::BOTRADIUS*4)*cos(agent.angle+j*M_PI/8),agent.pos.y+(conf::BOTRADIUS*4)*sin(agent.angle+j*M_PI/8),0);
    }
    //and eye to the back
    glVertex3f(agent.pos.x,agent.pos.y,0);
    glVertex3f(agent.pos.x+(conf::BOTRADIUS*1.5)*cos(agent.angle+M_PI+3*M_PI/16),agent.pos.y+(conf::BOTRADIUS*1.5)*sin(agent.angle+M_PI+3*M_PI/16),0);
    glVertex3f(agent.pos.x,agent.pos.y,0);
    glVertex3f(agent.pos.x+(conf::BOTRADIUS*1.5)*cos(agent.angle+M_PI-3*M_PI/16),agent.pos.y+(conf::BOTRADIUS*1.5)*sin(agent.angle+M_PI-3*M_PI/16),0);
    glEnd();

    glBegin(GL_POLYGON); //body
    glColor3f(agent.red,agent.gre,agent.blu);
    drawCircle(agent.pos.x, agent.pos.y, conf::BOTRADIUS);
    glEnd();

    glBegin(GL_LINES);
    //outline
    if (agent.boost) glColor3f(0.8,0,0); //draw boost as red outline
    else glColor3f(0,0,0);

    for (int k=0;k<17;k++)
    {
        n = k*(M_PI/8);
        glVertex3f(agent.pos.x+r*sin(n),agent.pos.y+r*cos(n),0);
        n = (k+1)*(M_PI/8);
        glVertex3f(agent.pos.x+r*sin(n),agent.pos.y+r*cos(n),0);
    }
    //and spike
    glColor3f(0.5,0,0);
    glVertex3f(agent.pos.x,agent.pos.y,0);
    glVertex3f(agent.pos.x+(3*r*agent.spikeLength)*cos(agent.angle),agent.pos.y+(3*r*agent.spikeLength)*sin(agent.angle),0);
    glEnd();

    //and health
    int xo=18;
    int yo=-15;
    glBegin(GL_QUADS);
    //black background
    glColor3f(0,0,0);
    glVertex3f(agent.pos.x+xo,agent.pos.y+yo,0);
    glVertex3f(agent.pos.x+xo+5,agent.pos.y+yo,0);
    glVertex3f(agent.pos.x+xo+5,agent.pos.y+yo+40,0);
    glVertex3f(agent.pos.x+xo,agent.pos.y+yo+40,0);

    //health
    glColor3f(0,0.8,0);
    glVertex3f(agent.pos.x+xo,agent.pos.y+yo+20*(2-agent.health),0);
    glVertex3f(agent.pos.x+xo+5,agent.pos.y+yo+20*(2-agent.health),0);
    glVertex3f(agent.pos.x+xo+5,agent.pos.y+yo+40,0);
    glVertex3f(agent.pos.x+xo,agent.pos.y+yo+40,0);

    //if this is a hybrid, we want to put a marker down
    if (agent.hybrid) {
        glColor3f(0,0,0.8);
        glVertex3f(agent.pos.x+xo+6,agent.pos.y+yo,0);
        glVertex3f(agent.pos.x+xo+12,agent.pos.y+yo,0);
        glVertex3f(agent.pos.x+xo+12,agent.pos.y+yo+10,0);
        glVertex3f(agent.pos.x+xo+6,agent.pos.y+yo+10,0);
    }

    glColor3f(1-agent.herbivore,agent.herbivore,0);
    glVertex3f(agent.pos.x+xo+6,agent.pos.y+yo+12,0);
    glVertex3f(agent.pos.x+xo+12,agent.pos.y+yo+12,0);
    glVertex3f(agent.pos.x+xo+12,agent.pos.y+yo+22,0);
    glVertex3f(agent.pos.x+xo+6,agent.pos.y+yo+22,0);

    //how much sound is this bot making?
    glColor3f(agent.soundmul,agent.soundmul,agent.soundmul);
    glVertex3f(agent.pos.x+xo+6,agent.pos.y+yo+24,0);
    glVertex3f(agent.pos.x+xo+12,agent.pos.y+yo+24,0);
    glVertex3f(agent.pos.x+xo+12,agent.pos.y+yo+34,0);
    glVertex3f(agent.pos.x+xo+6,agent.pos.y+yo+34,0);

    //draw giving/receiving
    if (agent.dfood!=0) {

        float mag=cap(abs(agent.dfood)/conf::FOODTRANSFER/3);
        if (agent.dfood>0) glColor3f(0,mag,0); //green when gaining food
        else glColor3f(mag,0,0); //red when losing food
        glVertex3f(agent.pos.x+xo+6,agent.pos.y+yo+36,0);
        glVertex3f(agent.pos.x+xo+12,agent.pos.y+yo+36,0);
        glVertex3f(agent.pos.x+xo+12,agent.pos.y+yo+46,0);
        glVertex3f(agent.pos.x+xo+6,agent.pos.y+yo+46,0);
    }


    glEnd();

    //print stats if zoomed in enough
	if(scalemult > .7)
	{
		//generation count
		sprintf(buf2, "%i", agent.gencount);
		RenderString(agent.pos.x-conf::BOTRADIUS*1.5, agent.pos.y+conf::BOTRADIUS*1.8, GLUT_BITMAP_TIMES_ROMAN_24, buf2, 0.0f, 0.0f, 0.0f);
		//age
		sprintf(buf2, "%i", agent.age);
		RenderString(agent.pos.x-conf::BOTRADIUS*1.5, agent.pos.y+conf::BOTRADIUS*1.8+12, GLUT_BITMAP_TIMES_ROMAN_24, buf2, 0.0f, 0.0f, 0.0f);

		//health
		sprintf(buf2, "%.2f", agent.health);
		RenderString(agent.pos.x-conf::BOTRADIUS*1.5, agent.pos.y+conf::BOTRADIUS*1.8+24, GLUT_BITMAP_TIMES_ROMAN_24, buf2, 0.0f, 0.0f, 0.0f);

		//repcounter
		sprintf(buf2, "%.2f", agent.repcounter);
		RenderString(agent.pos.x-conf::BOTRADIUS*1.5, agent.pos.y+conf::BOTRADIUS*1.8+36, GLUT_BITMAP_TIMES_ROMAN_24, buf2, 0.0f, 0.0f, 0.0f);
	}
}
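Note: drawCircle is called between glBegin(GL_POLYGON) and glEnd(), so it may only emit vertices. A plausible stand-in, offered as an assumption since the real helper is not shown:

#include <cmath>
#include <GL/gl.h>

// Emit the vertices of a circle; the caller supplies glBegin()/glEnd().
void drawCircle(float x, float y, float r) {
    for (int k = 0; k < 32; k++) {
        float a = 2.0f * (float)M_PI * k / 32.0f;
        glVertex3f(x + r * sinf(a), y + r * cosf(a), 0.0f);
    }
}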
int beaconpics_main(struct beacon_loc orientation)
{
    int thresh=140;
    namedWindow("Original 1", WINDOW_NORMAL);
    namedWindow("Original 2", WINDOW_NORMAL);
    namedWindow("Original 3", WINDOW_NORMAL);
    namedWindow("Diff", WINDOW_NORMAL);

    //hsvParams hsv = {76,0,224,97,37,255};
    hsvParams hsv = {20,0,0,97,37,255};

    //Set up blob detection parameters
    SimpleBlobDetector::Params params;
// params.blobColor //can we use this???
// params.minDistBetweenBlobs = 50.0f;
    params.filterByInertia = true;
    params.filterByConvexity = false;
    params.filterByColor = false;
    params.filterByCircularity = false;
    params.filterByArea = true;

    params.minThreshold = 150;
    params.maxThreshold = 255;
    params.thresholdStep = 1;

    params.minArea = 0;
    params.minConvexity = 0.3;
    params.minInertiaRatio = 0.10;

    params.maxArea = 2000;
    params.maxConvexity = 10;


    vector<KeyPoint> keypoints;

    VideoCapture cap(0); //capture the video from web cam

    if ( !cap.isOpened() )  // if not success, exit program
    {
        cout << "Cannot open the web cam" << endl;
        return -1;
    }

    while(true) {

        Mat imgOriginal1 = getPic(cap);
        Mat imgOriginal2 = getPic(cap);
        Mat imgOriginal3 = getPic(cap);

        Mat imgHSV1,imgHSV2, imgHSV3;

        if(imgOriginal1.empty() || imgOriginal2.empty() || imgOriginal3.empty())
        {
            cout << "can not open " << endl;
            return -1;
        }

        Mat diff;
        absdiff(imgOriginal1,imgOriginal2,diff);
        cvtColor(diff, diff, COLOR_BGR2GRAY); //Convert the difference image to grayscale

        threshold(diff, diff, thresh, 255, cv::THRESH_BINARY);
        dilate(diff, diff, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)) );

        //opencv 3.0 version
        //detect beacon blobs between pictures 1&2
        Ptr<SimpleBlobDetector> blobDetect = SimpleBlobDetector::create(params);
        blobDetect->detect( diff, keypoints );
        cout<<keypoints.size()<<endl;
        //detect blobs between images 2&3
        if(keypoints.size() ==0) {
            absdiff(imgOriginal2,imgOriginal3,diff);
            cvtColor(diff, diff, COLOR_BGR2GRAY); //Convert the difference image to grayscale

            threshold(diff, diff, thresh, 255, cv::THRESH_BINARY);
            dilate(diff, diff, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)) );

            blobDetect = SimpleBlobDetector::create(params);
            blobDetect->detect( diff, keypoints );
        }
        cout<<keypoints.size()<<endl;

        Mat out;
        drawKeypoints(diff, keypoints, out, CV_RGB(0,0,0), DrawMatchesFlags::DEFAULT);
        /*//finding if things are green or red
        cvtColor(out, out, COLOR_BGR2HSV);
        inRange(out, Scalar(hsv.hL, hsv.sL, hsv.vL),
               Scalar(hsv.hH, hsv.sH, hsv.vH), out);
        blobDetect.detect( out, keypoints );
        drawKeypoints(out, keypoints, out, CV_RGB(0,0,0), DrawMatchesFlags::DEFAULT);

        for(int i=0;i<diff.rows;i++){
           for(int j=0;j<diff.cols;j++){
                  if(out.at<Vec3b>(i,j)[0]==0 && out.at<Vec3b>(i,j)[1]==0 && out.at<Vec3b>(i,j)[2]==0){
                      imgOriginalON.at<Vec3b>(i,j)=(0,0,0);
                  }
             }
          }
          inRange(imgOriginalON, Scalar(hsv.hL, hsv.sL, hsv.vL),
               Scalar(hsv.hH, hsv.sH, hsv.vH), out);
          blobDetect.detect( out, keypoints );
          drawKeypoints(out, keypoints, out, CV_RGB(0,0,0), DrawMatchesFlags::DEFAULT);
          */

        //Circle blobs
        for(int i = 0; i < keypoints.size(); i++)
        {
            if(keypoints[i].size>0)
                circle(out, keypoints[i].pt, 1.5*keypoints[i].size, CV_RGB(0,255,0), 1, 8);
        }
        string text;
        if(keypoints.size() == 4)
        {
            text = "Object Found";
            cout<<endl<<endl<<"Object Found"<<endl;
            Point cent;
            cent=findkeyPoint(keypoints);
//     cout<<"dist: "<<printDistanceFromLights(keypoints)<<endl;
            circle(out, cent, 5, CV_RGB(0,100,0), -1, 8);
            robot_angle(diff, cent.y, cent.x, 1);
        }
        else
        {
            text = "Error";
            cout<<endl<<endl<<"No Object Found"<<endl;
            //	while(keypoints.size() > 2)
            //	   thresh+=5;
        }
        imshow("Original 1", imgOriginal1); //show the original image
        imshow("Original 2", imgOriginal2); //show the original image
        imshow("Original 3", imgOriginal3); //show the original image
        imshow("Diff", out);
        waitKey(-1);
    }
    return 0;
}
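Note: getPic and findkeyPoint are helpers this example assumes but does not show. Plausible sketches, not the page's actual implementations:

#include <opencv2/opencv.hpp>
using namespace cv;
using namespace std;

// Drain a few buffered frames so the three captures are separated in time,
// then return a fresh frame.
Mat getPic(VideoCapture& cap) {
    Mat img;
    for (int i = 0; i < 4; i++) cap.grab();
    cap >> img;
    return img;
}

// Estimate the beacon center as the mean of the blob centers.
Point findkeyPoint(const vector<KeyPoint>& keypoints) {
    Point2f sum(0, 0);
    for (size_t i = 0; i < keypoints.size(); i++) sum += keypoints[i].pt;
    if (!keypoints.empty()) sum *= 1.0f / (float)keypoints.size();
    return Point((int)sum.x, (int)sum.y);
}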
int main(){
  uint8_t start_r, old_IPL;
  uint8_t hz50_scaler, hz5_scaler, hz1_scaler, sec;
  uint32_t tick = 0;

  hz50_scaler = hz5_scaler = hz1_scaler = sec = 0;

  touch_init();

  __delay_ms(200);
  lcd_initialize();             // Initialize the LCD 

  motor_init();

  lcd_clear();
  lcd_locate(0,0);
  lcd_printf("-- Ball position: --");

  timers_initialize();          // Initialize timers

  while (1) {
    start_r = 0;
    while(!start_r) {      
      // disable all maskable interrupts
      SET_AND_SAVE_CPU_IPL(old_IPL, 7);
      start_r = start;

      // enable all maskable interrupts
      RESTORE_CPU_IPL(old_IPL);
    }

    // Periodic real-time task code starts here
    double pidX, pidY;
    uint16_t duty_us_x, duty_us_y;

    // 50Hz control task
    if(hz50_scaler == 0) {
      calcQEI(Xpos_set, Xpos, Ypos_set, Ypos);
//      Xpos_set = CENTER_X;
//      Xpos_set = CENTER_Y;
      Xpos_set = CENTER_X + RADIUS * cos(tick * SPEED);
      Ypos_set = CENTER_Y + RADIUS * sin(tick * SPEED);
      tick++;


      pidX = pidX_controller(Xpos);
      pidY = pidY_controller(Ypos);

      // TODO: Convert PID to motor duty cycle (900-2100 us)

      // setMotorDuty is a wrapper function that calls your motor_set_duty
      // implementation in flexmotor.c. The 2nd parameter expects a value
      // between 900-2100 us
//      duty_us_x = cap((pidX*1000.0), 2100, 900);
//      duty_us_y = cap((pidY*1000.0), 2100, 900);

      duty_us_x = cap((pidX + 1500), 2100, 900);
      duty_us_y = cap((pidY + 1500), 2100, 900);
      motor_set_duty(1, duty_us_x);
      motor_set_duty(2, duty_us_y+100);
//      setMotorDuty(MOTOR_X_CHAN, duty_us_x);
//      setMotorDuty(MOTOR_Y_CHAN, duty_us_y);
    }

    // 5Hz display task
    if(hz5_scaler == 0) {
//      lcd_locate(0,1);
//      lcd_printf("Xp=%.1f,Yp=%.1f", Xpos, Ypos);
//      lcd_locate(0,2);
//      lcd_printf("X*=%.1f, Y*=%.1f", Xpos_set, Ypos_set);
//      lcd_locate(0,3);
//      lcd_printf("pX=%.1f,pY=%.1f", pidX, pidY);
//      lcd_locate(0,4);
//      lcd_printf("dx=%u, dY=%u", duty_us_x, duty_us_y);
//
      if(deadline_miss >= 1) {
        lcd_locate(0,6);
        lcd_printf("%4d d_misses!!!", deadline_miss);
      }
    }

    // 1Hz seconds display task
    if(hz1_scaler == 0) {
      lcd_locate(0,7);
      lcd_printf("QEI: %5u", getQEI());
      sec++;
    }
    
    hz50_scaler = (hz50_scaler + 1) % 2;
    hz5_scaler = (hz5_scaler + 1) % 20;
    hz1_scaler = (hz1_scaler + 1) % 100;

    start = 0;
  }

  return 0;
}
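Note: the three-argument cap() used for the servo duty cycles is not shown; a minimal sketch, assuming it clamps a value into the [min, max] pulse range:

#include <stdint.h>

// Clamp a control value into the allowed servo pulse range (900-2100 us).
uint16_t cap(double value, uint16_t max, uint16_t min) {
    if (value > max) return max;
    if (value < min) return min;
    return (uint16_t)value;
}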
Ejemplo n.º 20
0
void camera_feed()
{
	VideoCapture cap(0);
	if (cap.isOpened())
	{
		int distance[3], MUL = 1, dif = 0;
		char key;
		bool first_run = false, is_size_checked = false, moved = false, shoot = false;
		unsigned long max_contours_amount = 0;
		Point drawing_point, cursor, additional_point;
		vector<vector<Point>> contours, main_points;
		vector<Point> pen1, pen2, pens;
		vector<Vec4i> hierarchy;
		Mat frame, real_pic, drawing_frame, maze;
		Scalar low_boundry(45, 107, 52), high_boundry(86, 227, 160), color(100, 100, 100);
		//namedWindow("drawing_frame", 1);
		//namedWindow("frame", 1);
		cap >> frame;
		cursor = Point(20, 20);
		maze = imread("maze1.jpg");
		maze = maze / WHITE;
		maze = maze * WHITE;
		bitwise_not(maze, maze);
		
		

		RECT rect = { 0 }; // gaming stuff!
		HWND window = FindWindow("Chicken Invaders 5", "Chicken Invaders 5");
		Sleep(2000);
		if (window)
		{
			GetClientRect(window, &rect);
			SetForegroundWindow(window);
			SetActiveWindow(window);
			SetFocus(window);
		}

		while (true)
		{
			shoot = false;
			cap >> frame;
			real_pic = frame.clone();
			while (main_points.size() != 0)
			{
				main_points.pop_back();
			}
			if (!first_run)
			{
				drawing_frame = real_pic.clone();
				resize(drawing_frame, drawing_frame, Size(GetSystemMetrics(SM_CXSCREEN), GetSystemMetrics(SM_CYSCREEN) - 50));
				resize(maze, maze, Size(GetSystemMetrics(SM_CXSCREEN), GetSystemMetrics(SM_CYSCREEN) - 50));
				first_run = true;
			}
			flip(real_pic, real_pic, 1);

			cvtColor(frame, frame, COLOR_BGR2HSV);
			
			inRange(frame, low_boundry, high_boundry, frame);
			flip(frame, frame, 1);

			contours.clear();
			resize(frame, frame, Size(GetSystemMetrics(SM_CXSCREEN), GetSystemMetrics(SM_CYSCREEN)));
			findContours(frame, contours, hierarchy, CV_RETR_LIST, CV_CHAIN_APPROX_NONE);
			is_size_checked = false;
			if (contours.size() != 0)
			{
				for (vector<vector<Point>>::iterator it = contours.begin(); it != contours.end(); it++)
				{
					if (it->size() > max_contours_amount * 0.7)
					{
						main_points.push_back(*it);
						max_contours_amount = it->size();
						is_size_checked = true;
					}
				}
			}
			if (is_size_checked)
			{
				moved = false;
				drawing_point = stabilized_point(main_points[0]);
				if (main_points.size() == 2)
				{
					if (stabilized_point(main_points[0]).x < stabilized_point(main_points[1]).x)
					{
						drawing_point = stabilized_point(main_points[1]);
						
					}
					shoot = true;
				}
				drawing_point.x += (drawing_point.x - drawing_frame.size().width / 2) / 10;
				drawing_point.y += (drawing_point.y - drawing_frame.size().height / 2) / 10;
				while (drawing_point.x > maze.size().width)
				{
					drawing_point.x--;
				}
				while (drawing_point.x < 0)
				{
					drawing_point.x++;

				}
				while (drawing_point.y > maze.size().height)
				{
					drawing_point.y--;
				}
				while (drawing_point.y < 0)
				{
					drawing_point.y++;
				}

				distance[0] = drawing_point.x - cursor.x;
				distance[1] = drawing_point.y - cursor.y;
				while (distance[0] != 0 && distance[1] != 0)
				{
					if (maze.at<Vec3b>(Point(cursor.x + distance[0] / 15, cursor.y))[0] != WHITE)
					{
						cursor.x += distance[0] / 15;
						distance[0] /= 15;
						moved = true;
					}
					if (maze.at<Vec3b>(Point(cursor.x, cursor.y + distance[1] / 15))[0] != WHITE)
					{
						cursor.y += distance[1] / 15;
						distance[1] /= 15;
						moved = true;
					}				
					if (!moved)
					{
						putText(drawing_frame, "Struck a wall!", Point(0, 40), FONT_HERSHEY_COMPLEX_SMALL, 1, Scalar(WHITE, WHITE, BLACK, 1), 1, CV_AA);
						distance[0] = 0;
						distance[1] = 0;
					}
					
				}
				SetCursorPos(drawing_point.x, drawing_point.y); // gaming stuff!
				circle(drawing_frame, cursor, 13, Scalar(WHITE, BLACK, WHITE), 2);
				circle(drawing_frame, drawing_point, 13, Scalar(WHITE, BLACK, WHITE), -1);
				//circle(drawing_frame, stabilized_point(pen1), 13, Scalar(WHITE, WHITE, BLACK), -1);
			}
			else
			{
				putText(drawing_frame, "Lost drawing object!", Point(0, 20), FONT_HERSHEY_COMPLEX_SMALL, 1, Scalar(WHITE, WHITE, BLACK, 1), 1, CV_AA);
				circle(drawing_frame, cursor, 13, Scalar(WHITE, WHITE, BLACK), 3);
			}
			if (shoot)
			{
				LeftClick(drawing_point.x, drawing_point.y);
			}
			key = waitKey(10);

			drawing_frame = maze + drawing_frame;
			bitwise_not(drawing_frame, drawing_frame);
			//imshow("drawing_frame", drawing_frame);
			//imshow("frame", frame);

			frame = BLACK;
			drawing_frame = BLACK;
			real_pic = BLACK;

		}
	}
}
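Note: stabilized_point is assumed to reduce a jittery contour to one steady point; a minimal sketch using the contour centroid from image moments:

#include <opencv2/opencv.hpp>
using namespace cv;
using namespace std;

// Centroid of a contour via spatial moments; returns (0,0) for a
// degenerate contour.
Point stabilized_point(const vector<Point>& contour) {
    Moments m = moments(contour);
    if (m.m00 == 0) return Point(0, 0);
    return Point((int)(m.m10 / m.m00), (int)(m.m01 / m.m00));
}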
Ejemplo n.º 21
0
void doomyTest(){
	// VideoCapture object
	cv::VideoCapture cap(0);
	cv::Mat augmentedImg = cv::imread("images/dragon.jpg");

	//~ cv::Mat sourceImg = cv::imread("images/template.jpg");
	cv::Mat markerImg = cv::imread("images/hiro.jpg");
	//~ cv::Mat sourceImg = cv::imread("images/abelHand.jpg");
	//~ cv::Mat sourceImg = cv::imread("images/whiteBox.jpg");
	//~ if (sourceImg.empty())
		//~ std::cout << "problem reading source image" << std::endl;
	//~ else
		//~ return;
	//~ //unsigned int frame_count = 0;
	cv::Mat goodTest = markerImg;
	cv::Mat unityM;
	unityM =  cv::Mat::zeros(3,3,CV_64F);
	unityM.at<double>(0,0) = 1.0;
	unityM.at<double>(1,1) = 1.0;
	unityM.at<double>(2,2) = 1.0;
	unsigned clearImageAfterNFrames = 90;
	//unsigned lastFramePersistance;
	unsigned frameNumber = 30;
	int WAITINGTIME = 30;
	double timeForFrame = 1.0/frameNumber;
	cv::Mat remainingMatrix = unityM;
	double cumulativeTime = 0.0;
	while(char(cv::waitKey(WAITINGTIME)) != 'q' && cap.isOpened()){
		double t0 = (double)cv::getTickCount();
		cv::Mat test;
		cap >> test;
		if(test.empty()){
			continue;
		}else{
			unityM.at<double>(0,2)= test.rows;
			unityM.at<double>(1,2)= test.cols;
			HomographyFinder a(augmentedImg, markerImg,test);
			// if maintaining homography matrix
			a.estimatePerspective();
			cv::Mat homographyM = a.getHomographyMatrix();
			if(a.isGoodHomographyMatrix()){
				remainingMatrix = homographyM;
			}
			if (cumulativeTime >= timeForFrame*clearImageAfterNFrames){
				cumulativeTime = 0.0;
				std::cout << timeForFrame*clearImageAfterNFrames << std::endl;
				remainingMatrix = unityM;
			}else{
				a.setHomographyMatrix(remainingMatrix);
			}
			// if new homography matrix change bestHomography and find new homography

			// else mantain the same homograpy but find a new one

			// show frame according to the frame rate and homography
			a.showMatches();
			a.showEstimatedPerspective();
			a.showAugmented();
			//~ goodTest = test;
		}
		t0 = ((double)cv::getTickCount() - t0) / cv::getTickFrequency();
		//~ t0 = (double)cv::getTickCount() - t0;
		cumulativeTime += t0;
		std::cout << "Frame rate = " << t0 << std::endl;
		std::cout << "time = " << t0 << std::endl;
	}
}
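Note: HomographyFinder::isGoodHomographyMatrix is not shown. A common heuristic, sketched here as an assumption, rejects degenerate homographies by bounding the determinant of the top-left 2x2 block:

#include <opencv2/opencv.hpp>
#include <cmath>

// Reject homographies that collapse or blow up the image plane.
bool isGoodHomography(const cv::Mat& H) {
    if (H.empty() || H.rows != 3 || H.cols != 3) return false;
    double det = H.at<double>(0,0) * H.at<double>(1,1)
               - H.at<double>(0,1) * H.at<double>(1,0);
    return std::fabs(det) > 0.1 && std::fabs(det) < 10.0;
}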
int main()
{
	int i=0;
	String cn="haarcascade_frontalface_alt.xml";
	CascadeClassifier cd;
	vector<Rect> faces;
	bool cb=cd.load(cn);
	Point cent;
	bool lock=true;
	int nodetect_cnt=0;
	int clickdetectcnt=3;
	bool quit=false;
	
	try
{   
    VideoCapture cap(-1); 
	cap.set(CV_CAP_PROP_FRAME_WIDTH,320);
	cap.set(CV_CAP_PROP_FRAME_HEIGHT,240);

    if(!cap.isOpened()) {
		std::cout<<"not found";
	}
	else{
		voce::init("./lib", false, true, "./grammar", "digits");
		std::cout<<"yeah";
	}

	std::cout<<cap.get(CV_CAP_PROP_FRAME_HEIGHT)<<"_________________"<<cap.get(CV_CAP_PROP_FRAME_WIDTH)<<std::endl;

	 for(;;)
    {   
		//std::cout<<"yahan aaya ";
		
	    Mat frame;
		rectval r1=sens();
		cap >> frame; 
		
		ellipse( frame, frame_center(), Size(5,5), 0, 0, 360, Scalar( 255, 0, 255 ), 2, 8, 0 );
		rectangle(frame,Point(r1.x,r1.y),Point(r1.x+r1.width,r1.y+r1.height),Scalar( 255, 0, 255 ));
		cd.detectMultiScale(frame,faces);
	
		for(int i=0 ; i<faces.size();i++){
		
		Point center( faces[i].x + faces[i].width*0.5, faces[i].y + faces[i].height*0.5 );
		cent=center;
		if(lock==true)
			convert(center);
	    ellipse( frame, center, Size( faces[i].width*0.5, faces[i].height*0.5), 0, 0, 360, Scalar( 255, 0, 255 ), 2, 8, 0 );
		ellipse( frame, center, Size( 5, 5), 0, 0, 360, Scalar( 50, 205, 50 ), 2, 8, 0 );
	 	}
		imshow("edges", frame);
		int c=waitKey(20);
		if (voce::getRecognizerQueueSize() > 0)
		{
			std::string s = voce::popRecognizedString();

			if (std::string::npos != s.rfind("stop"))
			{
				quit = true;
				break;
			}
			std::string aopen="click",areset="center",apause="pause",aresume="resume",adblclick="double";
			if(aopen.compare(s)==0){
			    sendEvent(1);
			}
			if(areset.compare(s)==0)
				setCenter(cent.x,cent.y);
			if(apause.compare(s)==0)
				lock=false;
			if(aresume.compare(s)==0)
				lock=true;
			if(adblclick.compare(s)==0)
				sendEvent(2);
				
			std::cout << "You said: " << s << std::endl;
			//voce::synthesize(s);
		}
		//if(c==99)
			
		if(c==27)
		break;
		if(c==108)
			lock=false;
		if(c==107)
			lock=true;
		//cout<<c;
       
   }
	 voce::destroy();
  
}
catch( cv::Exception& e )
{
    std::cout<< "---------"<<e.what();
}

	//int a;
	//std::cin>>a;
}
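Note: convert() and setCenter() are assumed to map the detected face center to cursor motion. A hypothetical sketch for a Windows build (the scaling, the mirrored x axis and the global are all assumptions):

#include <windows.h>
#include <opencv2/opencv.hpp>

static cv::Point g_center(160, 120);   // updated by setCenter()

void setCenter(int x, int y) { g_center = cv::Point(x, y); }

// Move the cursor proportionally to the face offset from the calibrated
// center; x is mirrored because the camera faces the user.
void convert(cv::Point faceCenter) {
    int dx = faceCenter.x - g_center.x;
    int dy = faceCenter.y - g_center.y;
    POINT p;
    GetCursorPos(&p);
    SetCursorPos(p.x - dx / 4, p.y + dy / 4);
}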
Ejemplo n.º 23
0
int main( int argc, char **argv )
{
	
// %Tag(INIT)%
	ros::init(argc, argv, "visiontracker");
// %EndTag(INIT)%

// %Tag(NODEHANDLE)%
	ros::NodeHandle n;
// %EndTag(NODEHANDLE)%

// %Tag(PUBLISHER)%
	ros::Publisher chatter_pub = n.advertise<ros_umirtx_vision::VisPMsg>("visionposition", 1000);
// %EndTag(PUBLISHER)%

	
		
	int c = 0 ;
    VideoCapture cap(c); // open the default camera
    Mat frame, frameCopy, image;
    Mat rgbimg[3], tempimg, prodimg;
	Mat imgThresholded, imgHSV;
	Mat imgResult;
	
	int minmaxhsv[3][2] = {{100,140},{85,254},{128,264}};
	int status = 1;
	int iLastXY[2] = {-1,-1};
	double dArea = 0;
	int frameHeight = 480, frameWidth = 640;
	double xpos = 0.5;
	double ypos = 0.5;
    
    ros_umirtx_vision::VisPMsg msg;
    
    if(!cap.isOpened()) {
		cout << "Capture from CAM " <<  c << " didn't work" << endl;
		return -1;
	}
	
	createControlWindow("Control", minmaxhsv, &status);
	
	try{
    if(cap.isOpened())
    {
		cap >> frame;
		
		if( frame.empty() )
			exit(0);
		
		//frame.copyTo( frameCopy );
		flip( frame, frameCopy, -1 );
		
		Mat imgLines = Mat::zeros( frameCopy.size(), CV_8UC3 );
		Mat imgResult= Mat::zeros( frameCopy.size(), CV_8UC3 );
		frameHeight = frame.rows;
		frameWidth = frame.cols;
		
        cout << "In capture ..." << endl;
        while((status>0) and (ros::ok()))
        {
			try{
				cap >> frame;
				if( frame.empty() )
					break;
				//frame.copyTo( frameCopy );
				flip( frame, frameCopy, -1 );
				//std::cout << "H:" << frameCopy.rows << " W:" << frameCopy.cols << std::endl;
				
				//imshow("Original",frame);
			}
			catch(int e)
			{
				cout << "Something went wrong while getting camera frame" << endl;
			}
            
            try{
				selectRedObj(frameCopy, imgHSV, imgThresholded, minmaxhsv);
				getCenterOfObj(imgThresholded, imgLines, iLastXY, &dArea);
				
				msg.x = 100*((double)iLastXY[0])/frameWidth;
				msg.y = 100*(double)iLastXY[1]/frameHeight;
				msg.area = 100*dArea/frameWidth/frameHeight;
				chatter_pub.publish(msg);
				
				cvtColor(imgThresholded,imgThresholded, CV_GRAY2RGB);
				addWeighted( frameCopy, 1, imgThresholded, 0.4, 0.0, imgResult);
				circle(imgResult,Point(iLastXY[0],iLastXY[1]),5,Scalar( 0, 0, 255 ),-1);
				imgResult = imgResult + imgLines;
				
				imshow("Result",imgResult);
				//if(save>0)
				//	imwrite("/home/xavier/Pictures/saves/redobjdet-05.jpg",imgResult);
			}
			catch(int e){
				cout << "Something went wrong while processing image" << endl;
			}
			
			//save = 0;
			int key = waitKey( 10 );
			if( key > 0)
			{
				key &= 255;
				cout << "Button pressed: " << key << endl;
				
				if( key == ' ' )
				{
					waitKey( 10 );
					key = 0;
					while( key != ' ')
					{
						ros::spinOnce();
						key = waitKey( 20 );
						if(key>=0)
							key &= 255;
					}
						
				}
				else if(key == 'c')
				{
					//ros::spinOnce();
					break;
				}
				//else if(key == 's')
				//	save = 1;
			}
            
            ros::spinOnce();
        }

        //waitKey(0);



    }
	}
	catch(int e){
		cout << "Error occured!" << endl;
	}
	
    
    destroyAllWindows();

    return 0;
}
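Note: selectRedObj and getCenterOfObj are defined elsewhere in this package; a sketch of what getCenterOfObj likely does (the area threshold is an assumption):

#include <opencv2/opencv.hpp>
using namespace cv;

// Locate the blob center from image moments and extend the tracking trail.
void getCenterOfObj(const Mat& imgThresholded, Mat& imgLines,
                    int iLastXY[2], double* dArea) {
    Moments m = moments(imgThresholded);
    *dArea = m.m00;
    if (m.m00 > 10000) {   // ignore blobs below an area threshold (assumed)
        int x = (int)(m.m10 / m.m00);
        int y = (int)(m.m01 / m.m00);
        if (iLastXY[0] >= 0 && iLastXY[1] >= 0)
            line(imgLines, Point(x, y), Point(iLastXY[0], iLastXY[1]),
                 Scalar(0, 0, 255), 2);
        iLastXY[0] = x;
        iLastXY[1] = y;
    }
}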
Ejemplo n.º 24
0
void createScenePrimitives()
{
	// sphere
	{
		int id = numRigidBodies++;
		PfxSphere sphere(1.0f);
		PfxShape shape;
		shape.reset();
		shape.setSphere(sphere);
		collidables[id].reset();
		collidables[id].addShape(shape);
		collidables[id].finish();
		bodies[id].reset();
		bodies[id].setMass(1.0f);
		bodies[id].setInertia(pfxCalcInertiaSphere(1.0f,1.0f));
		states[id].reset();
		states[id].setPosition(PfxVector3(-5.0f,5.0f,0.0f));
		states[id].setMotionType(kPfxMotionTypeActive);
		states[id].setRigidBodyId(id);
	}

	// box
	{
		int id = numRigidBodies++;
		PfxBox box(1.0f,1.0f,1.0f);
		PfxShape shape;
		shape.reset();
		shape.setBox(box);
		collidables[id].reset();
		collidables[id].addShape(shape);
		collidables[id].finish();
		bodies[id].reset();
		bodies[id].setMass(1.0f);
		bodies[id].setInertia(pfxCalcInertiaBox(PfxVector3(1.0f),1.0f));
		states[id].reset();
		states[id].setPosition(PfxVector3(0.0f,5.0f,5.0f));
		states[id].setMotionType(kPfxMotionTypeActive);
		states[id].setRigidBodyId(id);
	}

	// capsule
	{
		int id = numRigidBodies++;
		PfxCapsule capsule(1.5f,0.5f);
		PfxShape shape;
		shape.reset();
		shape.setCapsule(capsule);
		collidables[id].reset();
		collidables[id].addShape(shape);
		collidables[id].finish();
		bodies[id].reset();
		bodies[id].setMass(2.0f);
		bodies[id].setInertia(pfxCalcInertiaCylinderX(2.0f,0.5f,2.0f));
		states[id].reset();
		states[id].setPosition(PfxVector3(5.0f,5.0f,0.0f));
		states[id].setMotionType(kPfxMotionTypeActive);
		states[id].setRigidBodyId(id);
	}

	// cylinder
	{
		int id = numRigidBodies++;
		PfxCylinder cylinder(0.5f,1.5f);
		PfxShape shape;
		shape.reset();
		shape.setCylinder(cylinder);
		collidables[id].reset();
		collidables[id].addShape(shape);
		collidables[id].finish();
		bodies[id].reset();
		bodies[id].setMass(3.0f);
		bodies[id].setInertia(pfxCalcInertiaCylinderX(0.5f,1.5f,3.0f));
		states[id].reset();
		states[id].setPosition(PfxVector3(0.0f,10.0f,0.0f));
		states[id].setMotionType(kPfxMotionTypeActive);
		states[id].setRigidBodyId(id);
	}

	// convex mesh
	{
		PfxCreateConvexMeshParam param;

		param.verts = BarrelVtx;
		param.numVerts = BarrelVtxCount;
		param.vertexStrideBytes = sizeof(float)*6;

		param.triangles = BarrelIdx;
		param.numTriangles = BarrelIdxCount/3;
		param.triangleStrideBytes = sizeof(unsigned short)*3;

		PfxInt32 ret = pfxCreateConvexMesh(gConvex,param);
		if(ret != SCE_PFX_OK) {
			SCE_PFX_PRINTF("Can't create gConvex mesh.\n");
		}

		int id = numRigidBodies++;
		PfxShape shape;
		shape.reset();
		shape.setConvexMesh(&gConvex);
		collidables[id].reset();
		collidables[id].addShape(shape);
		collidables[id].finish();
		bodies[id].reset();
		bodies[id].setMass(3.0f);
		bodies[id].setInertia(pfxCalcInertiaSphere(1.0f,1.0f));
		states[id].reset();
		states[id].setPosition(PfxVector3(0.0f,15.0f,0.0f));
		states[id].setMotionType(kPfxMotionTypeActive);
		states[id].setRigidBodyId(id);
	}

	// combined primitives
	{
		int id = numRigidBodies++;

		//E Both the shapes and the indices buffer have to be kept alive when creating a combined shape.
		static PfxShape shapes[3];
		PfxUInt16 shapeIds[3]={0,1,2};
		collidables[id].reset(shapes,shapeIds,3);
		{
			PfxBox box(0.5f,0.5f,1.5f);
			PfxShape shape;
			shape.reset();
			shape.setBox(box);
			shape.setOffsetPosition(PfxVector3(-2.0f,0.0f,0.0f));
			collidables[id].addShape(shape);
		}
		{
			PfxBox box(0.5f,1.5f,0.5f);
			PfxShape shape;
			shape.reset();
			shape.setBox(box);
			shape.setOffsetPosition(PfxVector3(2.0f,0.0f,0.0f));
			collidables[id].addShape(shape);
		}
		{
			PfxCapsule cap(1.5f,0.5f);
			PfxShape shape;
			shape.reset();
			shape.setCapsule(cap);
			collidables[id].addShape(shape);
		}
		collidables[id].finish();
		bodies[id].reset();
		bodies[id].setMass(3.0f);
		bodies[id].setInertia(pfxCalcInertiaBox(PfxVector3(2.5f,1.0f,1.0f),3.0f));
		states[id].reset();
		states[id].setPosition(PfxVector3(0.0f,5.0f,0.0f));
		states[id].setMotionType(kPfxMotionTypeActive);
		states[id].setRigidBodyId(id);
	}
}
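Note: each of the six blocks above repeats the same registration boilerplate. A small helper, sketched against the same globals this example assumes (not part of the original), reduces each primitive to a shape, a mass, an inertia and a position:

// Sketch of a factored-out registration helper.
static int addRigidBody(const PfxShape& shape, float mass,
                        const PfxMatrix3& inertia, const PfxVector3& pos) {
    int id = numRigidBodies++;
    collidables[id].reset();
    collidables[id].addShape(shape);
    collidables[id].finish();
    bodies[id].reset();
    bodies[id].setMass(mass);
    bodies[id].setInertia(inertia);
    states[id].reset();
    states[id].setPosition(pos);
    states[id].setMotionType(kPfxMotionTypeActive);
    states[id].setRigidBodyId(id);
    return id;
}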
Ejemplo n.º 25
0
int Counter::startCount(std::string file, char frontRear/* = 'F'*/)
{
    cv::VideoCapture cap(file.c_str());
    if (!cap.isOpened()) {
        std::cout << "Could not open file" << std::endl;
        return 1;
    }
    fps = 1000/cap.get(CV_CAP_PROP_FPS);    // frame period in milliseconds
    //int frate = 1000/fps;
    int frate = 20;
    int dumy = 13700;  // @debug  13700  15840   18246   18890   21900

    // Location recognition
    DigitRecognizer dr(1,10,5,7, "./origImages");
    dr.learnFromImages();
    dr.setClassifier();

    // set parameters
    if ('F'==frontRear) {
        setFrontDoor();
    } else {
        setRearDoor();
    }

    std::vector<cv::Point2f> tripWire;                  // points on the tripwire
    std::list<std::vector<cv::Point2f> > trajectories;  // a list of trajectories being tracked
    std::vector<std::list<int> > on_models;             // each model is a list of start times
    std::vector<std::list<int> > off_models;
    float mean_x=0.0f, mean_y=0.0f, var_x=0.0f, var_y=0.0f, length=0.0f;    // trajectory stats

    cv::Mat capframe, frame, image, gray, prevGray, location;
    cv::Mat doorHistBG, door, doorHist;
    cv::Size winSize(31,31);            // window size for optical flow computation
    cv::TermCriteria termcrit(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 20, 0.03);
    int onPassengers = 0;
    int offPassengers = 0;
    int missPassengers = 0;
    int histSize = 2;                   // size of background histogram
    float range[] = { 0, 256 };         // range of pixel values for histogram calculation
    const float* histRange = { range }; //
    std::string prevGPS, currGPS, speed;

    generateTrackingPoints(tripWire);

    while (true) {
        int fno = cap.get(CV_CAP_PROP_POS_FRAMES);
        if (fno>=dumy) {
            std::cout << "";
        }

        cap >> capframe;
        if (capframe.empty()) break;

        frame = capframe(cv::Rect(0,0,580,450));
        //cv::warpPerspective(frame, frame, M, frame.size() );
        frame.copyTo(image);
        cv::cvtColor(image, gray, CV_BGR2GRAY);
        //gammaCorrection(gray);  // note: it becomes worse with Gamma (anti-) correction

        if (prevGray.empty()) {
            gray.copyTo(prevGray);
        }

        // check gps location
        location = capframe(cv::Rect(810, 90, 90, 30));
        currGPS = dr.analyseLocationImage(location, false);
        /*int gpsDistance = 0;
        if (!prevGPS.empty()) {
            std::inner_product(prevGPS.begin(), prevGPS.end(), currGPS.begin(), gpsDistance,
                               std::plus<int>(), std::not_equal_to<char>());
        }
        // add points to trajectories /// NEED TO KNOW THAT GPS DOESN'T CHANGE FOR SEVERAL FRAMES
        if(trajectories.size()<tripWire.size()-10 && gpsDistance<3) { //160 0.8
            addPoints(trajectories, tripWire, fno);
        }*/

        // check if door is closed
        door = gray(rectDoor);
        if (fno<5) {
            cv::Mat tmpDoorHistBG;
            //cv::calcHist(&door, 1, 0, cv::Mat(), tmpDoorHistBG, 1, &histSize, &histRange, true, false);
            tmpDoorHistBG = Utility::HogComp(door);
            cv::normalize(tmpDoorHistBG, tmpDoorHistBG, 1, 0, cv::NORM_L2, -1, cv::Mat());
            if (doorHistBG.empty()) {
                doorHistBG = tmpDoorHistBG;
            } else {
                cv::addWeighted(doorHistBG, 0.7, tmpDoorHistBG, 0.3, 0, doorHistBG, -1);
            }
        }
        //cv::calcHist(&door, 1, 0, cv::Mat(), doorHist, 1, &histSize, &histRange, true, false);
        doorHist = Utility::HogComp(door);
        cv::normalize(doorHist, doorHist, 1, 0, cv::NORM_L2, -1, cv::Mat());
        //float similarityDoor = doorHistBG.dot(doorHist);
        float similarityDoor = cv::compareHist(doorHistBG, doorHist, CV_COMP_CORREL);
        bool bDoorOpen = similarityDoor<0.9;

        // add points to trajectories
        if(trajectories.size()<tripWire.size()-10 && bDoorOpen) { //160 0.8
            addPoints(trajectories, tripWire, fno);
        }

        std::vector<uchar> status;
        std::vector<float> err;
        std::vector<cv::Point2f> nextPoints;
        std::vector<cv::Point2f> prevPoints = lastPoints(trajectories);
        if (prevPoints.empty()==false) {
            cv::calcOpticalFlowPyrLK(prevGray, gray, prevPoints, nextPoints, status, err, winSize, 3, termcrit, 0, 0.001);
        }

        int i=0;
        std::list<std::vector<cv::Point2f> >::iterator iTrack = trajectories.begin();
        for (; iTrack!=trajectories.end(); i++) {
            int szTrack = iTrack->size();
            isValidTrack(*iTrack, mean_x, mean_y, var_x, var_y, length);

            if ((szTrack>3) && (var_x<1.0f) && (var_y<1.0f)) { // stationary points
                iTrack = trajectories.erase(iTrack);
            } else if ((!status[i] || err[i]>13.0) && (szTrack>10)) { // lost of tracking
                iTrack->at(0).y = 1.0;
                iTrack++;
            } else if (szTrack>80) { // too long, remove  120
                iTrack = trajectories.erase(iTrack);
            } else if (szTrack>30) { // long trajectory, try to check 80
                iTrack->at(0).y = 2.0;
                iTrack->push_back(nextPoints[i]);
                iTrack++;
            } else {
                iTrack->push_back(nextPoints[i]);
                iTrack++;
            }
        }

        // update models according to the direction of trajectories
        std::vector<int> startTimes;
        getStartTimes(trajectories, startTimes, fno);
        std::vector<int>::iterator iTime = startTimes.begin();
        for (; iTime!=startTimes.end(); iTime++) {
            int overall_direction = getMajorityDirection(trajectories, *iTime);
            for (i=0, iTrack=trajectories.begin(); iTrack!=trajectories.end(); i++) {
                drawtrajectory(*iTrack, image);
                if (((int)(iTrack->at(0).x) == *iTime) && (iTrack->at(0).y>0.0f)) { // only use trajectories long enough
                    bool validTrack = isValidTrack(*iTrack, mean_x, mean_y, var_x, var_y, length);
                    int onoff = onOroff(*iTrack);
                    if (validTrack && (onoff==overall_direction)) {
                        switch(onoff) {
                        case 0: {offPassengers = updateModel(off_models, *iTrack, onoff);
                            /*std::vector<cv::Point2f>::iterator iit = iTrack->begin();
                            while (iit!=iTrack->end()) {
                                std::cout << iit->x << " " << iit->y << " ";
                                ++iit;
                            }
                            std::cout << std::endl;*/
                            iTrack = trajectories.erase(iTrack);
                            continue;}
                        case 1: {onPassengers = updateModel(on_models, *iTrack, onoff);
                            iTrack = trajectories.erase(iTrack);
                            continue;}
                        case 2: {missPassengers++;
                            iTrack = trajectories.erase(iTrack);
                            continue;}
                        default: std::cout << "Error: Wrong branch!" << std::endl;
                        }
                    }
                    if ((int)(iTrack->at(0).y) == 1) { // lost tracking
                        iTrack = trajectories.erase(iTrack);
                    }
                }

                iTrack++;
            }
        }

        //cv::rectangle(image, rectDoor, cv::Scalar(0,255,0));
        showResultImage(image, onPassengers, offPassengers, currGPS, speed);

        if ((char)cv::waitKey(frate/speedratio)==27) break;
        cv::swap(prevGray, gray);
        std::swap(currGPS, prevGPS);
    }

    return 0;
}
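Note: the door-open test above compares a HOG descriptor (Utility::HogComp) of the door region against a background model. The same idea with plain grayscale histograms, as a sketch (bin count and threshold are assumptions):

#include <opencv2/opencv.hpp>

// The door is considered open when the region no longer correlates with
// the background model built during the first frames.
bool isDoorOpen(const cv::Mat& doorGray, const cv::Mat& doorHistBG) {
    int histSize = 32;
    float range[] = { 0, 256 };
    const float* histRange = { range };
    cv::Mat hist;
    cv::calcHist(&doorGray, 1, 0, cv::Mat(), hist, 1, &histSize, &histRange,
                 true, false);
    cv::normalize(hist, hist, 1, 0, cv::NORM_L2);
    return cv::compareHist(doorHistBG, hist, CV_COMP_CORREL) < 0.9;
}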
Ejemplo n.º 26
0
// Draw the waveform curve.
void DataGrids::DrawCurve(void)
{

#define SHOW_LOWLEVEL 0

#define X(_x) UserX((_x)+xMarginLeft)
#define Y(_y) UserY((_y)+yMarginDown)

	WideString strList[] =
	{
		L"Vl(低电平电压)",
		L"Vh(高电平电压)",
		L"Tr(上升沿)",
		L"Th(高电平)",
		L"Tf(下降沿)",
		L"Tl(低电平)",
	};

	Gdiplus::SolidBrush solidBrush(Gdiplus::Color(255, 0, 0, 255));
	Gdiplus::Pen pX(Gdiplus::Color(255, 0, 0, 0),1);
	Gdiplus::AdjustableArrowCap cap(8,6,true);
	Gdiplus::Font font(L"Times New Roman",8);
	Gdiplus::SolidBrush s( Gdiplus::Color(255, 0, 0, 0));
	Gdiplus::PointF *pComm = new Gdiplus::PointF[4];

	// Ramp with rising and falling edges.
	Gdiplus::Pen p(Gdiplus::Color(255, 0, 0, 255),2);
	// Precompute the points first.
	Gdiplus::PointF *points = new Gdiplus::PointF[10];

	// The low level sits at 20% of the usable height.
	// The high level sits at 80% of the usable height.
	// The duty cycle defaults to 50%.
	// The rising and falling edges each take 5% of the width.
	// The waveform starts at the low level.
	// The drawing area starts at (xMarkHeight + 20, yMarkHeight + 20).
	// The drawing area ends at (width - 20, height - 20).

	double fLvPos = 0.1 + 0.1 * m_fLRatio / 100, fHvPos = 0.4 + 0.4 * m_fHRatio/100, fHvLen = (m_fDuty*0.8/100.0), fLvLen = ((100-m_fDuty)*0.8/100), fRiseLen = 0.05,fFallLen= 0.05;
	double xMarginLeft = xMarkHeight + 30;
	double yMarginDown = yMarkHeight + 20;
	double xMarginRight  = 20;
	double yMarginTop  = 20;
	double RealWidth = m_iWidth - xMarginLeft - xMarginRight;
	double RealHeight = m_iHeight - yMarginDown - yMarginTop;
	int idx = 0;
	points[idx++] = Gdiplus::PointF((int)X(0),(int)Y(fLvPos * RealHeight));
	points[idx++] = Gdiplus::PointF((int)X(fHvLen * RealWidth),(int)Y(fLvPos * RealHeight));
	points[idx++] = Gdiplus::PointF((int)X((fHvLen + fRiseLen)* RealWidth),(int)Y(fHvPos * RealHeight));
	points[idx++] = Gdiplus::PointF((int)X((fHvLen + fRiseLen + fLvLen)* RealWidth),(int)Y(fHvPos * RealHeight));
	points[idx++] = Gdiplus::PointF((int)X((fHvLen + fRiseLen + fLvLen + fFallLen)* RealWidth),(int)Y(fLvPos * RealHeight));
	points[idx++] = Gdiplus::PointF((int)X(RealWidth),(int)Y(fLvPos * RealHeight));

	m_stGrp->DrawLines(&p,points,idx);

	// Use a dashed line style.
	pX.SetDashStyle(Gdiplus::DashStyleDash);
	pX.SetAlignment(Gdiplus::PenAlignmentCenter);
	pX.SetDashOffset(20.0);

	// P0 start. P1 start of rising edge. P2 end of rising edge. P3 start of falling edge. P4 end of falling edge. P5 end.
#if SHOW_LOWLEVEL
	pComm[0].X = (int)points[0].X;
	pComm[0].Y = (int)points[0].Y;
	pComm[1].X = pComm[0].X;
	pComm[1].Y = UserY(m_iHeight - yMarginTop);
	m_stGrp->DrawLines(&pX,pComm,2);
#endif

	pComm[0].X = (int)points[1].X;
	pComm[0].Y = (int)points[1].Y;
	pComm[1].X = pComm[0].X;
	pComm[1].Y = UserY(m_iHeight - yMarginTop);
	m_stGrp->DrawLines(&pX,pComm,2);

	pComm[0].X = (int)points[2].X;
	pComm[0].Y = (int)points[2].Y;
	pComm[1].X = pComm[0].X;
	pComm[1].Y = UserY(m_iHeight - yMarginTop - 35);
	m_stGrp->DrawLines(&pX,pComm,2);

	pComm[0].X = (int)points[3].X;
	pComm[0].Y = (int)points[3].Y;
	pComm[1].X = pComm[0].X;
	pComm[1].Y = UserY(m_iHeight - yMarginTop - 35);
	m_stGrp->DrawLines(&pX,pComm,2);

	pComm[0].X = (int)points[4].X;
	pComm[0].Y = (int)points[4].Y;
	pComm[1].X = pComm[0].X;
	pComm[1].Y = UserY(m_iHeight - yMarginTop);
	m_stGrp->DrawLines(&pX,pComm,2);

	// Prepare the arrow caps for the pen.
	cap.SetFillState(false);
	pX.SetCustomEndCap(&cap);
	pX.SetCustomStartCap(&cap);


	// Vl -- double arrow from the midpoint of P0~P1 down to the axis line.
	pComm[0].X = (int)(points[0].X + points[1].X) / 2;
	pComm[0].Y = (int)points[0].Y;
	pComm[1].X = pComm[0].X;
	pComm[1].Y = UserY(xAxisOffset);

	m_stGrp->DrawLines(&pX,pComm,2);
	m_stGrp->DrawString(strList[0].c_bstr(),strList[0].Length(),&font,Gdiplus::PointF(pComm[0].X + 10 ,(pComm[0].Y + pComm[1].Y)/2),&s);

	// Vh -- double arrow from the midpoint of P2~P3 down to the axis line.
	// High-level line.
	pComm[0].X = (int)(points[2].X + points[3].X) / 2;
	pComm[0].Y = (int)points[2].Y;
	pComm[1].X = pComm[0].X;
	pComm[1].Y = UserY(xAxisOffset);
	m_stGrp->DrawLines(&pX,pComm,2);
	m_stGrp->DrawString(strList[1].c_bstr(),strList[1].Length(),&font,Gdiplus::PointF(pComm[0].X + 10 ,(pComm[0].Y + pComm[1].Y)/2),&s);

	//
#if SHOW_LOWLEVEL
	pComm[0].X =  points[0].X;
	pComm[0].Y =  UserY(m_iHeight - yMarginTop - 30);
	pComm[1].X =  points[1].X;
	pComm[1].Y =  UserY(m_iHeight - yMarginTop - 30);
	m_stGrp->DrawLines(&pX,pComm,2);
	m_stGrp->DrawString(strList[5].c_bstr(),strList[5].Length(),&font,Gdiplus::PointF((pComm[0].X + pComm[1].X)/2 -30 , pComm[0].Y + 10 ),&s);
#endif

	pComm[0].X =  points[1].X;
	pComm[0].Y =  UserY(m_iHeight - yMarginTop - 40);
	pComm[1].X =  points[2].X;
	pComm[1].Y =  UserY(m_iHeight - yMarginTop - 40);
	m_stGrp->DrawLines(&pX,pComm,2);
	m_stGrp->DrawString(strList[2].c_bstr(),strList[2].Length(),&font,Gdiplus::PointF(pComm[0].X + 5 , pComm[0].Y - 30 ),&s);

	pComm[2].X =  points[3].X;
	pComm[2].Y =  UserY(m_iHeight - yMarginTop - 40);
	pComm[3].X =  points[4].X;
	pComm[3].Y =  UserY(m_iHeight - yMarginTop - 40);
	m_stGrp->DrawLines(&pX,pComm+2,2);
	m_stGrp->DrawString(strList[4].c_bstr(),strList[4].Length(),&font,Gdiplus::PointF(pComm[2].X - 30 , pComm[2].Y - 20 ),&s);

	pComm[1].Y =  pComm[1].Y + 10;
	pComm[2].Y =  pComm[1].Y;
	m_stGrp->DrawLines(&pX,pComm+1,2);
	m_stGrp->DrawString(strList[3].c_bstr(),strList[3].Length(),&font,Gdiplus::PointF((pComm[1].X + pComm[2].X)/2 - 30, pComm[2].Y + 15 ),&s);

	delete[] points;
	delete[] pComm;
#undef X
#undef Y
}
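Note: a quick check of the segment arithmetic above: the high level, the two edges and the low level cover 90% of the drawable width, and the final point pads the remaining 10% at the low level.

#include <cstdio>

int main() {
    double duty = 50.0;                         // m_fDuty
    double hv   = duty * 0.8 / 100.0;           // high-level fraction, 0.40
    double lv   = (100.0 - duty) * 0.8 / 100.0; // low-level fraction, 0.40
    double rise = 0.05, fall = 0.05;            // edge fractions
    printf("covered = %.2f, padding = %.2f\n",
           hv + rise + lv + fall, 1.0 - (hv + rise + lv + fall));
    return 0;
}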
Ejemplo n.º 27
0
int main( int argc, char** argv )
{
  ros::init(argc, argv, "face_detect");
  ros::NodeHandle n;
  
  opencvCommands = n.advertise<robotBrain::opencvMessage>("opencv_commands",1000);
  image_transport::ImageTransport it(n);
  pub = it.advertise("camera/image", 1);
  
  cv::VideoCapture cap(1);
  
  if(!cap.isOpened()){
		ROS_FATAL("opencv:  COULD NOT OPEN CAMERA" );
		sendError();
	}
  
  //Load the cascades
	
  if( !face_cascade.load( face_cascade_name ) ){ 
    ROS_FATAL("--(!)Error loading FACE CASCADE(!)--" ); 
    sendError(); 
  }
  if( !eyes_cascade.load( eyes_cascade_name ) ){ 
    ROS_FATAL("--(!)Error loading EYE CASCADE(!)--"); 
    sendError(); 
  }
  
    cap.set(CV_CAP_PROP_FRAME_WIDTH, 320);
    cap.set(CV_CAP_PROP_FRAME_HEIGHT, 240);
  char key;
    while ( n.ok() ){
      cap.read(frame);
      if( !frame.empty() ) colorDetect();//detectAndDisplay(); 
      else {
	ROS_FATAL("opencv: FRAME FAIL " );
	sendError();	
      }
	//showing image
	cv::namedWindow( "Patrolling Android View", CV_WINDOW_AUTOSIZE );
  	cv::startWindowThread();
	cv::imshow( "Patrolling Android View", imgHSV);
      if (color && !face ) faceDetect();			//if we have a color and we haven't previously detected a face, look for a face
      
      if(face) personTracking();				//if we have a face we follow the color 
      else {							//if not 
	message.camera = 's';
	message.errorOpenCV = '0';
	opencvCommands.publish( message );
      }
      key = cv::waitKey(100);
      switch (key)
        {
            //hue
            case 'q':
                if (hue_low == hue_high)
                    ROS_INFO("hue_low must be less than hue_high");
                else 
                hue_low = hue_low + 1;
            break;
            case 'a':
                if (hue_low == 0) 
                    ROS_INFO("Hue is minimum");
                else
                    hue_low = hue_low - 1;
            break;
            case 'w':
                if (hue_high == 255)
                    ROS_INFO("Hue is maximum");
                else
                    hue_high = hue_high + 1;
            break;
            case 's':
                if (hue_high == hue_low)
                    ROS_INFO("hue_high must be greater than hue_low");
                else
                    hue_high = hue_high - 1;
            break;
           
            //saturation 
            case 'e':
                if (sat_low == sat_high)
                    ROS_INFO("sat_low must be less than sat_high");
                else 
                sat_low = sat_low + 1;
            break;
            case 'd':
                if (sat_low == 0) 
                    ROS_INFO("sat is minimum");
                else
                    sat_low = sat_low - 1;
            break;
            case 'r':
                if (sat_high == 255)
                    ROS_INFO("sat is maximum");
                else
                    sat_high = sat_high + 1;
            break;
            case 'f':
                if (sat_high == sat_low)
                    ROS_INFO("sat_high must be greater than sat_low");
                else
                    sat_high = sat_high - 1;
            break;
            
            //value 
            case 't':
                if (val_low == val_high)
                    ROS_INFO("val_low must be less than val_high");
                else 
                val_low = val_low + 1;
            break;
            case 'g':
                if (val_low == 0) 
                    ROS_INFO("val is minimum");
                else
                    val_low = val_low - 1;
            break;
            case 'y':
                if (val_high == 255)
                    ROS_INFO("val is maximum");
                else
                    val_high = val_high + 1;
            break;
            case 'h':
                if (val_high == val_low)
                    ROS_INFO("val_high must be greater than val_low");
                else
                    val_high = val_high - 1;
            break;
        }
      //ROS_INFO("Frames");
	ROS_INFO("Hue: %d-%d\tSat: %d-%d\tVal: %d-%d\n", hue_low, hue_high, sat_low, sat_high, val_low, val_high);
   }
  ROS_FATAL("COULD NOT CAPTURE FRAME");
  return -1;
}
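Note: the long switch above tunes the six HSV bounds one key at a time. OpenCV trackbars bound to the same globals achieve the same tuning with less code; a sketch, assuming hue_low and the other bounds are file-scope ints:

#include <opencv2/opencv.hpp>

void setupHsvTrackbars() {
    cv::namedWindow("HSV Tuning", CV_WINDOW_AUTOSIZE);
    cv::createTrackbar("Hue low",  "HSV Tuning", &hue_low,  255);
    cv::createTrackbar("Hue high", "HSV Tuning", &hue_high, 255);
    cv::createTrackbar("Sat low",  "HSV Tuning", &sat_low,  255);
    cv::createTrackbar("Sat high", "HSV Tuning", &sat_high, 255);
    cv::createTrackbar("Val low",  "HSV Tuning", &val_low,  255);
    cv::createTrackbar("Val high", "HSV Tuning", &val_high, 255);
}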
Ejemplo n.º 28
0
void startSlam(int v)
{
	FileStorage fs("config.yml", FileStorage::READ);
	Mat cam, dist, rpos, q, s;

	fs["cameraMatrix"] >> cam;
	fs["distMatrix"] >> dist;
	fs["sensorPos"] >> rpos;
	fs["q"] >> q;
	fs["s"] >> s;

	fs.release();

	ml = new MarkerLocator(cam, dist, rpos, 100.);

//	Mat q = (Mat_<double>(2,1) << .01, .1*M_PI/180);
//	Mat s = (Mat_<double>(2,1) << 10, .5*M_PI/180);

	slam = new EkfSlam(&scan, q, s);
	#ifdef GUI
	namedWindow("EKFSlam");
	disp = new MapDisplay("Map");
	waitKey();
	#endif

	Mat u = (Mat_<double>(2,1) << 0, 0);

	int fd = openPort();
	oiStart();
	setSafeMode();
	readDist();
	readAngle();

	VideoCapture cap(0);
	if(!cap.isOpened()) {
		printf("Failed capture\n");
		return;
	}

	VideoWriter record("/tmp/demo.avi", CV_FOURCC('D','I','V','X'), 30, Size(1340,600), true);
	if(!record.isOpened()) {
		printf("Failed writer\n");
		return;
	}

	clock_t t;
	time_t stime;
	while(1) {
		t = clock();
		setDrive(v,v);
		double mindist = 99999;

		while(msElap(t) < 100 && !hitWall()) {
			u.at<double>(0) = (double)readDist();
			u.at<double>(1) = M_PI*(double)readAngle()/180;
			mindist = slamStep(u, cap, record);
		}

		// backup before turn when running into something
		if(hitWall() || mindist < 700) {
			if(hitWall()) {
				// backup
				setDrive(-15,-15);
				t = clock();
				while(msElap(t) < 500) {
					u.at<double>(0) = (double)readDist();
					u.at<double>(1) = M_PI*(double)readAngle()/180;
					slamStep(u, cap, record);
				}
			}

			// turn
			if(time(&stime)%60 < 30) {
				setDrive(-5, 5);
			}
			else
				setDrive(5,-5);

			t = clock();
			while(msElap(t) < 250) {
				u.at<double>(0) = 0;
				u.at<double>(1) = M_PI*(double)readAngle()/180;
				slamStep(u, cap, record);
			}
		}
		if(waitKey(10) == 27)
			break;
	}

	setDrive(0,0);
	imwrite("lastFrame.png", g_outFrame);

	close(fd);

	delete ml;
	delete slam;
	#ifdef GUI
	delete disp;
	#endif
}
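Note: msElap() is not shown; a minimal sketch, assuming it measures milliseconds since a clock_t snapshot (be aware clock() counts CPU time, not wall time):

#include <ctime>

double msElap(clock_t since) {
    return 1000.0 * (double)(clock() - since) / CLOCKS_PER_SEC;
}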
int main(int argc, const char** argv)
{
#if (VISP_HAVE_OPENCV_VERSION >= 0x020100) && (defined(VISP_HAVE_ZBAR) || defined(VISP_HAVE_DMTX))
  int opt_device = 0;
  int opt_barcode = 0; // 0=QRCode, 1=DataMatrix

  for (int i=0; i<argc; i++) {
    if (std::string(argv[i]) == "--device")
      opt_device = atoi(argv[i+1]);
    else if (std::string(argv[i]) == "--code-type")
      opt_barcode = atoi(argv[i+1]);
    else if (std::string(argv[i]) == "--help") {
      std::cout << "Usage: " << argv[0]
                << " [--device <camera number>] [--code-type <0 for QRcode | 1 for DataMatrix>] [--help]"
                << std::endl;
      return 0;
    }
  }
  std::cout << "Use device: " << opt_device << std::endl;

  try {
    vpImage<unsigned char> I; // for gray images

    //! [Construct grabber]
#if defined(VISP_HAVE_V4L2)
    vpV4l2Grabber g;
    std::ostringstream device;
    device << "/dev/video" << opt_device;
    g.setDevice(device.str());
    g.setScale(1);
    g.acquire(I);
#elif defined(VISP_HAVE_OPENCV)
    cv::VideoCapture cap(opt_device); // open the default camera
    if(!cap.isOpened()) { // check if we succeeded
      std::cout << "Failed to open the camera" << std::endl;
      return -1;
    }
    cv::Mat frame;
    cap >> frame; // get a new frame from camera
    vpImageConvert::convert(frame, I);
#endif
    //! [Construct grabber]

#if defined(VISP_HAVE_X11)
    vpDisplayX d(I);
#elif defined(VISP_HAVE_GDI)
    vpDisplayGDI d(I);
#elif defined(VISP_HAVE_OPENCV)
    vpDisplayOpenCV d(I);
#endif
    vpDisplay::setTitle(I, "ViSP viewer");

    vpDetectorBase *detector = NULL;
#if (defined(VISP_HAVE_ZBAR) && defined(VISP_HAVE_DMTX))
    if (opt_barcode == 0)
      detector = new vpDetectorQRCode;
    else
      detector = new vpDetectorDataMatrixCode;
#elif defined(VISP_HAVE_ZBAR)
    detector = new vpDetectorQRCode;
    (void)opt_barcode;
#elif defined(VISP_HAVE_DMTX)
    detector = new vpDetectorDataMatrixCode;
    (void)opt_barcode;
#endif

    for(;;) {
      //! [Acquisition]
#if defined(VISP_HAVE_V4L2)
      g.acquire(I);
#else
      cap >> frame; // get a new frame from camera
      vpImageConvert::convert(frame, I);
#endif
      //! [Acquisition]
      vpDisplay::display(I);

      bool status = detector->detect(I);
      std::ostringstream legend;
      legend << detector->getNbObjects() << " bar code detected";
      vpDisplay::displayText(I, 10, 10, legend.str(), vpColor::red);

      if (status) {
        for(size_t i=0; i < detector->getNbObjects(); i++) {
          std::vector<vpImagePoint> p = detector->getPolygon(i);
          vpRect bbox = detector->getBBox(i);
          vpDisplay::displayRectangle(I, bbox, vpColor::green);
          vpDisplay::displayText(I, bbox.getTop()-20, bbox.getLeft(), "Message: \"" + detector->getMessage(i) + "\"", vpColor::red);
          for(size_t j=0; j < p.size(); j++) {
            vpDisplay::displayCross(I, p[j], 14, vpColor::red, 3);
            std::ostringstream number;
            number << j;
            vpDisplay::displayText(I, p[j]+vpImagePoint(10,0), number.str(), vpColor::blue);
          }
        }
      }

      vpDisplay::displayText(I, (int)I.getHeight()-25, 10, "Click to quit...", vpColor::red);
      vpDisplay::flush(I);
      if (vpDisplay::getClick(I, false)) // a click to exit
        break;
    }
    delete detector;
  }
  catch(vpException &e) {
    std::cout << "Catch an exception: " << e << std::endl;
  }
#else
  (void)argc;
  (void)argv;
#endif
}
Ejemplo n.º 30
0
// Initialization thread
void *drawingAndParam(void * arg)
{
	string winParametrage = "Thresholded";
	string winDetected = "Parametrages";
	char key;
	drawing = false;
	onDrawing = true;
	pthread_mutex_init(&mutexVideo, NULL);
#if output_video == ov_remote_ffmpeg
	int errorcode = avformat_open_input(&pFormatCtx, "tcp://192.168.1.1:5555", NULL, NULL);
	if (errorcode < 0) {
		cout << "ERREUR CAMERA DRONE!!!" << errorcode;
		return 0;
	}
	avformat_find_stream_info(pFormatCtx, NULL);
	av_dump_format(pFormatCtx, 0, "tcp://192.168.1.1:5555", 0);
	pCodecCtx = pFormatCtx->streams[0]->codec;
	AVCodec *pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
	if (pCodec == NULL) {
		cout << "ERREUR avcodec_find_decoder!!!";
		return 0;
	}
	if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
		cout << "ERREUR avcodec_open2!!!";
		return 0;
	}
	//pFrame = av_frame_alloc();
	//pFrameBGR = av_frame_alloc();
	pFrame = avcodec_alloc_frame();
	pFrameBGR = avcodec_alloc_frame();
	bufferBGR = (uint8_t*)av_mallocz(avpicture_get_size(PIX_FMT_BGR24, pCodecCtx->width, pCodecCtx->height) * sizeof(uint8_t));
	avpicture_fill((AVPicture*)pFrameBGR, bufferBGR, PIX_FMT_BGR24, pCodecCtx->width, pCodecCtx->height);
	pConvertCtx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, PIX_FMT_BGR24, SWS_SPLINE, NULL, NULL, NULL);
	img = cvCreateImage(cvSize(pCodecCtx->width, (pCodecCtx->height == 368) ? 360 : pCodecCtx->height), IPL_DEPTH_8U, 3);
	if (!img) {
		cout << "ERREUR PAS D'IMAGE!!!";
		return 0;
	}

	pthread_t ii;
	pthread_create(&ii, NULL, getimg, NULL);

#else	
	VideoCapture cap(0); // capture video from the webcam

#endif
	HH=179;LS=1;HS=255;LV=1;HV=255;LH=1;
	namedWindow(winDetected, CV_WINDOW_NORMAL);
	Mat frame;
	setMouseCallback(winDetected, MouseCallBack, NULL);
	while(true)
	{	
		if(onDrawing) // as long as the user has not started the selection
		{
			#if output_video != ov_remote_ffmpeg
				bool bSuccess = cap.read(frame); // new capture
			if (!bSuccess) {
				cout << "Impossible de lire le flux video" << endl;
				break;
			}
			#else
				pthread_mutex_lock(&mutexVideo);
				memcpy(img->imageData, pFrameBGR->data[0], pCodecCtx->width * ((pCodecCtx->height == 368) ? 360 : pCodecCtx->height) * sizeof(uint8_t) * 3);
				pthread_mutex_unlock(&mutexVideo);
				frame = cv::cvarrToMat(img, true);
			#endif
		imshow(winDetected, frame);
		}
		if(!onDrawing && !drawing) // show the user's selection live
		{
			Mat tmpFrame=frame.clone();
			rectangle(tmpFrame, rec, CV_RGB(51,156,204),1,8,0);
			imshow(winDetected, tmpFrame);
		}
		if(drawing) // the user has finished selecting
		{
			//cible Ball(1);
			namedWindow(winParametrage, CV_WINDOW_NORMAL);
			setMouseCallback(winDetected, NULL, NULL);	
			rectangle(frame, rec, CV_RGB(51,156,204),2,8,0);
			imshow(winDetected, frame);
			Mat selection = frame(rec);
			Ball.setPicture(selection);
			while(key != 'q')
			{
				// Trackbars for choosing the hue
				createTrackbar("LowH", winParametrage, &LH, 179); //Hue (0 - 179)
				createTrackbar("HighH", winParametrage, &HH, 179);
				// Trackbars for saturation, relative to white
				createTrackbar("LowS", winParametrage, &LS, 255); //Saturation (0 - 255)
				createTrackbar("HighS", winParametrage, &HS, 255);
				// Trackbars for brightness, relative to black
				createTrackbar("LowV", winParametrage, &LV, 255);//Value (0 - 255)
				createTrackbar("HighV", winParametrage, &HV, 255);
				Mat imgHSV;

				cvtColor(selection, imgHSV, COLOR_BGR2HSV); // convert BGR to HSV

				Mat imgDetection;

				inRange(imgHSV, Scalar(LH, LS, LV), Scalar(HH, HS, HV), imgDetection); // black out the parts outside the color range chosen by the user

				// Remove noise
				erode(imgDetection, imgDetection, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));
				dilate(imgDetection, imgDetection, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));

				dilate(imgDetection, imgDetection, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));
				erode(imgDetection, imgDetection, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));

				imshow(winParametrage, imgDetection);

				// Compute the "distance" to the target; used as a threshold.
				Moments position;
				position = moments(imgDetection);
				Ball.lastdZone = position.m00;

				key = waitKey(10);
			}
			
			// Extract the interest points from the user's selection
			Mat graySelect;
			int minHessian = 800;
			cvtColor(selection, graySelect, COLOR_BGR2GRAY);
			Ptr<SURF> detector = SURF::create(minHessian);
			vector<KeyPoint> KP;
			detector->detect(graySelect, KP);
			Mat KPimg;
			drawKeypoints(graySelect, KP, KPimg, Scalar::all(-1), DrawMatchesFlags::DEFAULT);
			Mat desc;
			Ptr<SURF> extractor = SURF::create();
			extractor->compute(graySelect, KP, desc);
			Ball.setimgGray(graySelect);
			Ball.setKP(KP);
			Ball.setDesc(desc);
			break;
		}
		key = waitKey(10);
	}
	// End of initialization: close all windows and move on to tracking
	destroyAllWindows();
#if output_video != ov_remote_ffmpeg
	cap.release();
#endif
	return NULL;
}
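Note: once the selection's SURF keypoints and descriptors are stored in Ball, the tracking stage can match them against each new frame. A minimal sketch with a FLANN matcher and Lowe's ratio test (the 0.7 ratio is an assumption):

#include <opencv2/opencv.hpp>
#include <vector>

// Keep only matches whose best distance is clearly better than the
// second-best candidate (Lowe's ratio test).
std::vector<cv::DMatch> matchSelection(const cv::Mat& descSelection,
                                       const cv::Mat& descFrame) {
    cv::FlannBasedMatcher matcher;
    std::vector<std::vector<cv::DMatch>> knn;
    matcher.knnMatch(descSelection, descFrame, knn, 2);
    std::vector<cv::DMatch> good;
    for (size_t i = 0; i < knn.size(); i++)
        if (knn[i].size() == 2 &&
            knn[i][0].distance < 0.7f * knn[i][1].distance)
            good.push_back(knn[i][0]);
    return good;
}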