Example #1
/** @function doHarris */
int Panorama::doHarris()
{

    Mat R; Mat Rtemp; // corner response function and a temporary copy
    Mat Ix, Iy, Ixy, Ix2, Iy2; // first-order derivatives and their (smoothed) products, used as second-moment terms
    
    double maxValtemp, minValtemp;
    double minVal; double maxVal; 
    int sigma = 3; // Gaussian sigma
    float k = 0.04; // the k (alpha) constant of the response function
    int aperture_size = 3, block_size = 3; double scale = 1.0; // Sobel first-order derivative parameters
    const char* window = "Harris."; // name of the Harris result window

    /////////////////////////////////////////////////////////////////////////////////////////////////////////
            
    cout << "Processing Harris Corner Detector..." << endl;
    /* Initialize the corner response function and the temp mat */
    R = Mat::zeros( srcGray.size(), CV_32FC1 );
    Rtemp = Mat::zeros( srcGray.size(), CV_32FC1 );
    
    /* Use Sobel function to calculate the first order derivative of both x and y */
    Sobel( srcGray, Ix, CV_32F, 1, 0, aperture_size, scale, 0, BORDER_DEFAULT );
    Sobel( srcGray, Iy, CV_32F, 0, 1, aperture_size, scale, 0, BORDER_DEFAULT );
    
    /* Calculate the Gaussian Derivative */
    GaussianBlur(Iy, Iy, Size(block_size, block_size), sigma, 0 ,BORDER_DEFAULT);
    GaussianBlur(Ix, Ix, Size(block_size, block_size), sigma, 0 ,BORDER_DEFAULT);
    
    /* Calculate the square of each intensity */
    pow(Ix,2,Ix2);
    pow(Iy,2,Iy2);
    
    /* Calculate the Gaussian Derivative */
    GaussianBlur(Iy2, Iy2, Size(block_size, block_size), sigma, 0 ,BORDER_DEFAULT);
    GaussianBlur(Ix2, Ix2, Size(block_size, block_size), sigma, 0 ,BORDER_DEFAULT);
    
    /* Calculate the Corner Response function */
    for( int j = 0; j < srcGray.rows; j++ )
    { for( int i = 0; i < srcGray.cols; i++ )
    {
        float lambda_1 = Ix2.at<float>(j, i); // note: uses the smoothed Ix^2/Iy^2 as eigenvalue estimates; Ixy is not used
        float lambda_2 = Iy2.at<float>(j, i);
        Rtemp.at<float>(j, i) = lambda_1 * lambda_2 - k * pow( ( lambda_1 + lambda_2 ), 2 );
    }
    }
    
    minMaxLoc( Rtemp, &minValtemp, &maxValtemp, 0, 0, Mat() );
    
    /* Normalize the corner response function so that the maximum value is 1 */
    for( int j = 0; j < srcGray.rows; j++ )
    { for( int i = 0; i < srcGray.cols; i++ )
    {  
        R.at<float>(j, i) = 1 / maxValtemp * Rtemp.at<float>(j, i);
    }
    }

    /* Find the global min/max of the response function (used to derive the display threshold) */
    minMaxLoc( R, &minVal, &maxVal, 0, 0, Mat() );
    
    /* Create Window  */
    namedWindow( window, CV_WINDOW_AUTOSIZE );
    int currentLevel = 5;
    int maxLevel = 100;
    
    
    double threshold = ( maxVal - minVal ) * currentLevel/maxLevel ;
    
    for( int j = 0; j < srcGray.rows; j++ )
    { 
        for( int i = 0; i < srcGray.cols; i++ )
        {   
            if( R.at<float>(j,i) >  threshold)
            {
                circle( srcCopy, Point(i,j), 4, Scalar(255,255,255), 1, 8, 0 ); 
            
            }
        
        }
    }
    imshow( window, srcCopy );
    /* No manual cleanup needed: the cv::Mat objects and local doubles
     * are released automatically when they go out of scope. */
    return(0);
}
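For comparison, OpenCV's built-in cv::cornerHarris computes the full response (including the Ixy term) in one call. A minimal sketch, assuming the same srcGray, srcCopy, block_size, aperture_size and k as above, and approximating the 5% display threshold used above:

// Sketch: built-in Harris detector replacing the manual response loop above.
Mat response;
cornerHarris(srcGray, response, block_size, aperture_size, k, BORDER_DEFAULT);
normalize(response, response, 0, 1, NORM_MINMAX, CV_32FC1, Mat()); // scale to [0, 1]
for (int j = 0; j < response.rows; j++)
    for (int i = 0; i < response.cols; i++)
        if (response.at<float>(j, i) > 0.05f) // roughly currentLevel/maxLevel from above
            circle(srcCopy, Point(i, j), 4, Scalar(255, 255, 255), 1, 8, 0);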
Example #2
// Initialization thread
void *drawingAndParam(void * arg)
{
	string winParametrage = "Settings";   // window with the trackbars and the thresholded preview
	string winDetected = "Detection";     // window with the live feed and the user's selection
	char key = 0;
	drawing = false;
	onDrawing = true;
	pthread_mutex_init(&mutexVideo, NULL);
#if output_video == ov_remote_ffmpeg
	int errorcode = avformat_open_input(&pFormatCtx, "tcp://192.168.1.1:5555", NULL, NULL);
	if (errorcode < 0) {
		cout << "ERREUR CAMERA DRONE!!!" << errorcode;
		return 0;
	}
	avformat_find_stream_info(pFormatCtx, NULL);
	av_dump_format(pFormatCtx, 0, "tcp://192.168.1.1:5555", 0);
	pCodecCtx = pFormatCtx->streams[0]->codec;
	AVCodec *pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
	if (pCodec == NULL) {
		cout << "ERREUR avcodec_find_decoder!!!";
		return 0;
	}
	if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
		cout << "ERREUR avcodec_open2!!!";
		return 0;
	}
	//pFrame = av_frame_alloc();
	//pFrameBGR = av_frame_alloc();
	pFrame = avcodec_alloc_frame();
	pFrameBGR = avcodec_alloc_frame();
	bufferBGR = (uint8_t*)av_mallocz(avpicture_get_size(PIX_FMT_BGR24, pCodecCtx->width, pCodecCtx->height) * sizeof(uint8_t));
	avpicture_fill((AVPicture*)pFrameBGR, bufferBGR, PIX_FMT_BGR24, pCodecCtx->width, pCodecCtx->height);
	pConvertCtx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, PIX_FMT_BGR24, SWS_SPLINE, NULL, NULL, NULL);
	img = cvCreateImage(cvSize(pCodecCtx->width, (pCodecCtx->height == 368) ? 360 : pCodecCtx->height), IPL_DEPTH_8U, 3);
	if (!img) {
		cout << "ERREUR PAS D'IMAGE!!!";
		return 0;
	}

	pthread_t ii;
	pthread_create(&ii, NULL, getimg, NULL);

#else	
	VideoCapture cap(0); // webcam video capture

#endif
	HH=179;LS=1;HS=255;LV=1;HV=255;LH=1;
	namedWindow(winDetected, CV_WINDOW_NORMAL);
	Mat frame;
	setMouseCallback(winDetected, MouseCallBack, NULL);
	while(true)
	{	
		if(onDrawing) // While the user has not started the selection
		{
			#if output_video != ov_remote_ffmpeg
				bool bSuccess = cap.read(frame); // grab a new frame
			if (!bSuccess) {
				cout << "Cannot read the video stream" << endl;
				break;
			}
			#else
				pthread_mutex_lock(&mutexVideo);
				memcpy(img->imageData, pFrameBGR->data[0], pCodecCtx->width * ((pCodecCtx->height == 368) ? 360 : pCodecCtx->height) * sizeof(uint8_t) * 3);
				pthread_mutex_unlock(&mutexVideo);
				frame = cv::cvarrToMat(img, true);
			#endif
		imshow(winDetected, frame);
		}
		if(!onDrawing && !drawing) // Display the user's selection live
		{
			Mat tmpFrame=frame.clone();
			rectangle(tmpFrame, rec, CV_RGB(51,156,204),1,8,0);
			imshow(winDetected, tmpFrame);
		}
		if(drawing) // The user has finished selecting
		{
			//cible Ball(1);
			namedWindow(winParametrage, CV_WINDOW_NORMAL);
			setMouseCallback(winDetected, NULL, NULL);	
			rectangle(frame, rec, CV_RGB(51,156,204),2,8,0);
			imshow(winDetected, frame);
			Mat selection = frame(rec);
			Ball.setPicture(selection);
			while(key != 'q')
			{
				// Trackbars for color selection
				createTrackbar("LowH", winParametrage, &LH, 179); //Hue (0 - 179)
				createTrackbar("HighH", winParametrage, &HH, 179);
				// Trackbars for saturation (relative to white)
				createTrackbar("LowS", winParametrage, &LS, 255); //Saturation (0 - 255)
				createTrackbar("HighS", winParametrage, &HS, 255);
				// Trackbars for brightness (relative to black)
				createTrackbar("LowV", winParametrage, &LV, 255);//Value (0 - 255)
				createTrackbar("HighV", winParametrage, &HV, 255);
				Mat imgHSV;

				cvtColor(selection, imgHSV, COLOR_BGR2HSV); // convert from BGR to HSV

				Mat imgDetection;

				inRange(imgHSV, Scalar(LH, LS, LV), Scalar(HH, HS, HV), imgDetection); // black out the pixels outside the user-chosen color range

				// Remove noise: morphological opening, then closing
				erode(imgDetection, imgDetection, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));
				dilate(imgDetection, imgDetection, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));

				dilate(imgDetection, imgDetection, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));
				erode(imgDetection, imgDetection, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));

				imshow(winParametrage, imgDetection);

				// Compute the "distance" to the target; used later as a threshold.
				Moments position;
				position = moments(imgDetection);
				Ball.lastdZone = position.m00;

				key = waitKey(10);
			}
			
			// Extract interest points from the user's selection
			Mat graySelect;
			int minHessian = 800;
			cvtColor(selection, graySelect, COLOR_BGR2GRAY);
			Ptr<SURF> detector = SURF::create(minHessian);
			vector<KeyPoint> KP;
			detector->detect(graySelect, KP);
			Mat KPimg;
			drawKeypoints(graySelect, KP, KPimg, Scalar::all(-1), DrawMatchesFlags::DEFAULT);
			Mat desc;
			Ptr<SURF> extractor = SURF::create();
			extractor->compute(graySelect, KP, desc);
			Ball.setimgGray(graySelect);
			Ball.setKP(KP);
			Ball.setDesc(desc);
			break;
		}
		key = waitKey(10);
	}
	// End of initialization: close all windows and move on to tracking
	destroyAllWindows();
#if output_video != ov_remote_ffmpeg
	cap.release();
#endif
	return NULL;
}
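The two erode/dilate pairs in the loop above implement a morphological opening followed by a closing. A minimal sketch of the same denoising step expressed with morphologyEx, assuming the same imgDetection and 5x5 elliptical kernel:

// Sketch: equivalent denoising with morphologyEx.
Mat kernel = getStructuringElement(MORPH_ELLIPSE, Size(5, 5));
morphologyEx(imgDetection, imgDetection, MORPH_OPEN, kernel);  // erode then dilate
morphologyEx(imgDetection, imgDetection, MORPH_CLOSE, kernel); // dilate then erode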
Example #3
Reader::Reader(const string &cascade_path): window_name("Display Window"){
  face_cascade.load(cascade_path);
  namedWindow(window_name, WINDOW_AUTOSIZE);
}
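CascadeClassifier::load returns false on failure, which the constructor above ignores, so a bad path only surfaces later as empty detections. A defensive sketch (the error handling here is illustrative, not the project's actual policy):

Reader::Reader(const string &cascade_path): window_name("Display Window"){
  if (!face_cascade.load(cascade_path))
    cerr << "Failed to load cascade: " << cascade_path << endl; // or throw
  namedWindow(window_name, WINDOW_AUTOSIZE);
}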
Example #4
void PAN::show(){
	namedWindow("original pan", WINDOW_NORMAL);
	imshow("original pan", *panimage.img);
	waitKey();
}
Example #5
featureFinder::featureFinder(StereoSource& source) : LonNewFrame(this), _stereo(), _disparityMap()
{
    source.onNewData += &LonNewFrame;
    namedWindow("disp", 1);
    setDefaultBMState(_stereo.state);
}
Example #6
int main (int argc, const char * argv[])
{
    clock_t start = clock();
    
    const char* filename = argc >= 2 ? argv[1] : "road3.png";
    
    // create image matrix
    // loading image in non-grayscale causes an error
    Mat src = imread(filename, CV_LOAD_IMAGE_GRAYSCALE);
    if (src.empty()) {
        //help();
        cout << "cannot open " << filename << endl;
        return -1;
    }
    
    // beginning time:
    clock_t canny_start = clock();
    
    // create destination matrix
    Mat dst, cdst;
    // use Canny for edge-detection
    // source, destination, threshold1, threshold2, aperturesize=3, L2gradient=false
    blur(src, src, Size(3,3)); // smooth the source first so Canny sees less noise
    Canny(src, dst, CANNY_T1, CANNY_T2, CANNY_APERTURE);
    cvtColor(dst, cdst, CV_GRAY2RGB);
    
    // time of canny
    clock_t canny_end = clock();
    double canny_time = (double)(canny_end-canny_start)/CLOCKS_PER_SEC;
    
    // ================ PROBABILISTIC HOUGH LINE TRANSFORM ==================
    //      creates line segments
    // dst: edge-detector output (should be grayscale) 
    // lines: vector to store lines found;
    // rho: resolution of parameter r in pixels (using 1)
    // theta: resolution of parameter theta in radians (using 1 degree)
    // threshold: the minimum number of intersections to "detect" a line
    // minLineLength: the minimum number of points that can form a line; lines with fewer points are disregarded.
    // maxLineGap: The maximum gap between two points to be considered in the same line.
    
    clock_t hough_start = clock();
    
    vector<Vec4i> lines;
    HoughLinesP(dst, lines, 1, CV_PI/180, HLINES_THRESH, HLINES_MINLINE, HLINES_MINGAP);
    
    // filter out horizontal lines
    remove_horizontal(&lines);
    remove_skylines(&lines, dst.rows);
    
    // time of HoughLinesP()
    clock_t hough_end = clock();
    double hough_time = (double)(hough_end-hough_start)/CLOCKS_PER_SEC;
    
    // --------------------------
    
    clock_t lines_start = clock();
    
    vector<Vec4i> lane_lines = combine_lines(lines);
    lane_lines = extend_lines(lane_lines, dst.cols, dst.rows);
    
    // time of lane_lines, extend_lines()
    clock_t lines_end = clock();
    double lines_time = (double)(lines_end-lines_start)/CLOCKS_PER_SEC;
    
    // -=-=-=-=-=-=-=-=-=-=-=-=- DEBUGGING -=-=-=-=-=-=-=-=-=-=-=-=-
    //cout << "size of lines: " << lines.size() << endl;
    //cout << "size of lane_lines: " << lane_lines.size() << endl;
    //cout << "width: " << dst.cols << "  height: " << dst.rows << endl;
    //line(cdst, Point(0,0), Point(100,100), Scalar(255,255,255), 2, CV_AA);
    // -=-=-=-=-=-=-=-=-=-=-=-=- DEBUGGING -=-=-=-=-=-=-=-=-=-=-=-=-

    clock_t draw_start = clock();
    
    // display result:
    
    for( size_t i = 0; i < lines.size(); i++ )
    {
        Vec4i l = lines[i];
        //line( cdst, Point(l[X1], l[Y1]), Point(l[X2], l[Y2]), Scalar(255,0,0), 1, CV_AA);
     
        // -=-=-=-=-=-=-=-=-=-=-=-=- DEBUGGING -=-=-=-=-=-=-=-=-=-=-=-=-
        //cout << i << " (" << l[X1] << "," << l[Y1] << ") \t(" << l[X2] << "," << l[Y2] << ")" << endl; 
        // -=-=-=-=-=-=-=-=-=-=-=-=- DEBUGGING -=-=-=-=-=-=-=-=-=-=-=-=-
    }
     
    cout << "------" << endl;
    // display "lane lines"
    for( size_t i = 0; i < lane_lines.size(); i++ )
    {
        Vec4i l = lane_lines[i];
        line( cdst, Point(l[X1], l[Y1]), Point(l[X2], l[Y2]), Scalar(0,255,255), 2, CV_AA);
        
        // -=-=-=-=-=-=-=-=-=-=-=-=- DEBUGGING -=-=-=-=-=-=-=-=-=-=-=-=-
        //cout << i << " (" << l[X1] << "," << l[Y1] << ") \t(" << l[X2] << "," << l[Y2] << ")" << endl; 
        // -=-=-=-=-=-=-=-=-=-=-=-=- DEBUGGING -=-=-=-=-=-=-=-=-=-=-=-=-
    }
    cout << endl;
    
    // depending on # of lines, draw either one or two lanes
    if (lane_lines.size() > 2)
        cdst = draw_2lanes(cdst, lane_lines);
    else
        cdst = draw_1lane(cdst, lane_lines);
    
    // time for drawing lines
    clock_t draw_end = clock();
    double draw_time = (double)(draw_end-draw_start)/CLOCKS_PER_SEC;
    
    
    // --------------------------
    
    clock_t image_start = clock();
    
    // create output image: .png file
    vector<int> compression_params;
    compression_params.push_back(CV_IMWRITE_PNG_COMPRESSION);
    compression_params.push_back(9);    // 0-9 PNG compression level (9 = smallest, slowest)
    imwrite("output.png", cdst, compression_params);
    
    // time for generating the image and total time
    clock_t end = clock();
    double image_time = (double)(end-image_start)/CLOCKS_PER_SEC;
    double total_time = (double)(end-start)/CLOCKS_PER_SEC;
    
    // --------------------------
    // display time results:
    cout << "canny time: " << canny_time << " s" << endl;
    cout << "hough time: " << hough_time << " s" << endl;
    cout << "lines time: " << lines_time << " s" << endl;
    cout << "draw time:  " << draw_time << " s" << endl;
    cout << "img time:   " << image_time << " s" << endl;
    cout << "TOTAL TIME: " << total_time << " s" << endl;
    
    // display output
    namedWindow("original image");
    imshow("original image", src);
    waitKey(1000);  // waits 1 second
    namedWindow("lines detected");
    imshow("lines detected", cdst);
    waitKey();
    
    return 0;
}
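remove_horizontal and remove_skylines are defined elsewhere in this project. A hypothetical sketch of what a slope-based filter like remove_horizontal might look like (illustrative only, not the project's actual implementation):

// Hypothetical sketch: drop segments whose absolute slope is below a cutoff.
void remove_horizontal(vector<Vec4i>* lines)
{
    for (size_t i = 0; i < lines->size(); )
    {
        Vec4i l = (*lines)[i];
        double dx = l[2] - l[0], dy = l[3] - l[1];
        if (fabs(dx) > 1e-6 && fabs(dy / dx) < 0.3) // nearly horizontal
            lines->erase(lines->begin() + i);
        else
            ++i;
    }
}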
Example #7
int main() {

	string line;
	ifstream tfile("color.cfg");
	if (tfile.is_open()) {
		int obj_count = 0;
    	while (tfile.good()) {
    		for (int i = 0; i < 6; i++) {
      			getline(tfile, line);
      			if (line == "") {
      				break;
      			}
      			test_thresh[obj_count*6 + i] = atoi(line.c_str());
      			cout << test_thresh[obj_count*6 + i] << " ";
      		}
      		cout << endl;
      		if (line == "") {
      			break;
      		}
      		obj_count++;
    	}
    	tfile.close();
  	} else {
  		cout << "Unable to open file";
  	}
  	
    init_opencv();
		
	ofstream myfile("color.cfg");
	if (!myfile.is_open()) {
		cout << "failed to open file";
		return -1;
	}
	namedWindow("raw",1);
	namedWindow("blobs",1);
    //namedWindow("scatter",1);
    
    long double tLast;
    long double tAvg = 0;
    int k = 1;
    
    for (int i = 0; i < num_obj; i++) {
    	string s = obj[i];
    	cout << "Calibrating " << s << endl;
    	for (int j = 0; j < 6; j++) {
    		cout << test_thresh[i*6 + j] << " ";
    	}
    	cout << endl;
    	
    	while (1) {
    	
    	    tLast = clock();
    	    
    		int out = step(&raw_display, &hsv_display, &scatter_display,
    		    &(test_thresh[i*6]), 1);
    		    
    		long double tCurr = clock();
		    long double tDiff = tCurr - tLast;
		    tAvg = 0.9*tAvg + 0.1*tDiff;
		    
    		imshow("raw", *raw_display);
    		imshow("blobs", *hsv_display);
            //imshow("scatter", *scatter_display);
        			
		    if (k%100 == 0) { // periodic debug output (body currently commented out)
		        //cout << out << endl;
		        //cout << CLOCKS_PER_SEC/tAvg << endl;
		    }
		    k++;
		    
        	char c = waitKey(10);
        	if (c == -1) {
        		continue;
        	}
        	if (c == ' ') {
        		for (int j = 0; j < 6; j++) {
		    		myfile << test_thresh[i*6 + j] << endl;
		    	}
        		break;
        	}
        	switch (c) {
        		case 'q':
        			test_thresh[i*6 + 0] += 1;
        			break;
        		case 'a':
        			test_thresh[i*6 + 0] -= 1;
        			break;
        		case 'w':
        			test_thresh[i*6 + 1] += 1;
        			break;
        		case 's':
        			test_thresh[i*6 + 1] -= 1;
        			break;
        		case 'e':
        			test_thresh[i*6 + 2] += 1;
        			break;
        		case 'd':
        			test_thresh[i*6 + 2] -= 1;
        			break;
        		case 'r':
        			test_thresh[i*6 + 3] += 1;
        			break;
        		case 'f':
        			test_thresh[i*6 + 3] -= 1;
        			break;
        		case 't':
        			test_thresh[i*6 + 4] += 1;
        			break;
        		case 'g':
        			test_thresh[i*6 + 4] -= 1;
        			break;
        		case 'y':
        			test_thresh[i*6 + 5] += 1;
        			break;
        		case 'h':
        			test_thresh[i*6 + 5] -= 1;
        			break;
        			
        		case 'Q':
        			test_thresh[i*6 + 0] += 10;
        			break;
        		case 'A':
        			test_thresh[i*6 + 0] -= 10;
        			break;
        		case 'W':
        			test_thresh[i*6 + 1] += 10;
        			break;
        		case 'S':
        			test_thresh[i*6 + 1] -= 10;
        			break;
        		case 'E':
        			test_thresh[i*6 + 2] += 10;
        			break;
        		case 'D':
        			test_thresh[i*6 + 2] -= 10;
        			break;
        		case 'R':
        			test_thresh[i*6 + 3] += 10;
        			break;
        		case 'F':
        			test_thresh[i*6 + 3] -= 10;
        			break;
        		case 'T':
        			test_thresh[i*6 + 4] += 10;
        			break;
        		case 'G':
        			test_thresh[i*6 + 4] -= 10;
        			break;
        		case 'Y':
        			test_thresh[i*6 + 5] += 10;
        			break;
        		case 'H':
        			test_thresh[i*6 + 5] -= 10;
        			break;
        	}
        	for (int j = 0; j < 6; j++) {
        		cout << test_thresh[i*6 + j] << " ";
        	}
        	cout << endl;
        	
    	}
    }
    
    myfile.close();
}
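Note that the +/-1 and +/-10 adjustments above can push a threshold outside the valid 0-255 range. A minimal sketch of a clamp pass (the helper name is hypothetical) that could run after the switch statement:

// Hypothetical helper: keep each threshold inside [0, 255] after adjustment.
static inline int clamp255(int v) { return v < 0 ? 0 : (v > 255 ? 255 : v); }

// ...after the switch:
// for (int j = 0; j < 6; j++)
//     test_thresh[i*6 + j] = clamp255(test_thresh[i*6 + j]);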
Example #8
//Perform a background subtraction of one camera
void depthBackgroundSub_Par(KinectSensor* cam, ofstream* outDebug)
{
	char camId[20];
	_itoa(cam->getIdCam(), camId, 10);
	char windName_Back[50];
	strcpy(windName_Back, "Background subtraction ");
	strcat(windName_Back, camId);
	Mat backImg(Size(XN_VGA_X_RES, XN_VGA_Y_RES), CV_8UC1);
	namedWindow(windName_Back);

	cam->startDevice();

	bool stop = false;
	bool firstTime = true;

	int total = XN_VGA_Y_RES*XN_VGA_X_RES;
	BackgroundDepthSubtraction* subtractor = NULL;
	//allocate enough memory in advance (% of the total points)
	XnPoint3D* points2D = new XnPoint3D[MAX_FORGROUND_POINTS];	
	int numPoints = 0;
	int contFrames = 0;

	while (!stop)
	{
		//wait for the next frame to be ready
		cam->waitAndUpdate();
		//recover the depth map
		const XnDepthPixel* dm = cam->getDepthMap();

		//ptime time_start_wait(microsec_clock::local_time());
		if (contFrames == 0)//store the background model
			subtractor = new BackgroundDepthSubtraction(dm);
		else 
			numPoints = subtractor->subtraction(points2D, dm); // returns the number of foreground points
		
		//ptime time_end_wait(microsec_clock::local_time());
		//time_duration duration_wait(time_end_wait - time_start_wait);
		//(*outDebug) << "Time report(bgs "<< camId << "): " << duration_wait.total_microseconds() << endl;
				
		Utils::initMat1u(backImg, 0);
		subtractor->createBackImage(points2D, backImg, numPoints);

		//display image
		imshow(windName_Back, backImg);
		char c = cvWaitKey(1);
		stop = (c == 27) || (contFrames == 250);
		
//		stop = (contFrames == 250);
		//for recorded videos
	//	if (cam->getDepthNode()->GetFrameID() == 1)
	//		if (firstTime ? firstTime = false : stop = true);

		contFrames++;
	}
	//ptime time_end(microsec_clock::local_time());
	//time_duration duration(time_end - time_start);
	//double totalSecs = duration.total_microseconds()/1000000;
	//double fps = contFrames/totalSecs;
	//cout << "Fps: " << fps << endl;

	cam->stopDevice();

	//free memory
	delete [] points2D;   // allocated with new[]
	delete subtractor;
}
Example #9
//Perform a background subtraction in two cameras in a sequencial way
void depthBackgroundSub_Seq(KinectSensor* cam1,  KinectSensor* cam2)
{
	const char* windName_1 = "BackgroundSub 1";
	const char* windName_2 = "BackgroundSub 2";
	Mat backImg1(Size(XN_VGA_X_RES, XN_VGA_Y_RES), CV_8UC1);
	Mat backImg2(Size(XN_VGA_X_RES, XN_VGA_Y_RES), CV_8UC1);
	namedWindow(windName_1);
	namedWindow(windName_2);

	cam1->startDevice();
	cam2->startDevice();

	bool stop = false;
	bool firstTime = true;

	int total = XN_VGA_Y_RES*XN_VGA_X_RES;
	BackgroundDepthSubtraction *subtractor1 = NULL, *subtractor2 = NULL;
	//allocate enough memory in advance (% of the total points)
	XnPoint3D* points2D_1 = new XnPoint3D[MAX_FORGROUND_POINTS];	
	XnPoint3D* points2D_2 = new XnPoint3D[MAX_FORGROUND_POINTS];	
	int numPoints_1 = 0;
	int numPoints_2 = 0;
	int contFrames = 0;

//	unsigned short depth[MAX_DEPTH];
//	unsigned short depth2[MAX_DEPTH];
	char *depth_data, *depth_data2;
	while (!stop)
	{
		//wait for the next frame to be ready
		cam1->waitAndUpdate();
		cam2->waitAndUpdate();
		//recover the depth map
		const XnDepthPixel* dm1 = cam1->getDepthMap();
		const XnDepthPixel* dm2 = cam2->getDepthMap();


		//ptime time_start_wait(microsec_clock::local_time());
		if (contFrames == 0)//store the background model
		{
			subtractor1 = new BackgroundDepthSubtraction(dm1);
			subtractor2 = new BackgroundDepthSubtraction(dm2);
		}
		else 
		{
			numPoints_1 = subtractor1->subtraction(points2D_1, dm1); // returns the number of foreground points
			numPoints_2 = subtractor2->subtraction(points2D_2, dm2);
		}
		
		//ptime time_end_wait(microsec_clock::local_time());
		//time_duration duration_wait(time_end_wait - time_start_wait);
		//(*outDebug) << "Time report(bgs 1_2): " << duration_wait.total_microseconds() << endl;
		
		Utils::initMat1u(backImg1, 0);
		Utils::initMat1u(backImg2, 0);
		subtractor1->createBackImage(points2D_1, backImg1, numPoints_1);
		subtractor2->createBackImage(points2D_2, backImg2, numPoints_2);

		imshow(windName_1, backImg1);
		imshow(windName_2, backImg2);
		////display image
		char c = cvWaitKey(1);
		stop = (c == 27) || (contFrames == 250);

		//stop = (contFrames == 250); // debugging override, disabled (see the single-camera version above)
		
		//for recorded videos
	//	if (cam2->getDepthNode()->GetFrameID() == 1)
	//		if (firstTime ? firstTime = false : stop = true);

		contFrames++;
	}
	//ptime time_end(microsec_clock::local_time());
	//time_duration duration(time_end - time_start);
	//double totalSecs = duration.total_microseconds()/1000000;
	//double fps = contFrames/totalSecs;
	//cout << "Fps: " << fps << endl;

	cam1->stopDevice();
	cam2->stopDevice();

	//free memory
	delete [] points2D_1;  // allocated with new[]
	delete [] points2D_2;
	delete subtractor1;
	delete subtractor2;
}
Example #10
int main(int argc, char* argv[])
{
	//if we would like to calibrate our filter values, set to true.
	bool calibrationMode = true;

	//Matrix to store each frame of the webcam feed
	Mat cameraFeed;
	Mat threshold;
	Mat HSV;

	if(calibrationMode){
		//create slider bars for HSV filtering
		createTrackbars();
	}
	//video capture object to acquire webcam feed
	VideoCapture capture;
	//open capture object at location zero (default location for webcam)
	capture.open(0);
	//set height and width of capture frame
	capture.set(CV_CAP_PROP_FRAME_WIDTH,FRAME_WIDTH);
	capture.set(CV_CAP_PROP_FRAME_HEIGHT,FRAME_HEIGHT);
	//start an infinite loop where webcam feed is copied to cameraFeed matrix
	//all of our operations will be performed within this loop
	waitKey(1000);
	while(1){
		//store image to matrix
		capture.read(cameraFeed);

		src = cameraFeed;

  		if( !src.data )
  		{ return -1; }

		//convert frame from BGR to HSV colorspace
		cvtColor(cameraFeed,HSV,COLOR_BGR2HSV);

		if(calibrationMode==true){
			//in calibration mode, we track objects based on the HSV slider values
			//so the user can find the appropriate color range
			cvtColor(cameraFeed,HSV,COLOR_BGR2HSV);
			inRange(HSV,Scalar(H_MIN,S_MIN,V_MIN),Scalar(H_MAX,S_MAX,V_MAX),threshold);
			morphOps(threshold);
			imshow(windowName2,threshold);

		//the following sets up Canny edge detection
			/// Create a matrix of the same type and size as src (for dst)
	  		dst.create( src.size(), src.type() );
	  		/// Convert the image to grayscale
	  		cvtColor( src, src_gray, CV_BGR2GRAY );
	  		/// Create a window
	  		namedWindow( window_name, CV_WINDOW_AUTOSIZE );
	  		/// Create a Trackbar for user to enter threshold
	  		createTrackbar( "Min Threshold:", window_name, &lowThreshold, max_lowThreshold);
	  		/// Track objects based on the thresholded image
			trackFilteredObject(threshold,HSV,cameraFeed);
		}
		else{
			//create some temp fruit objects so that
			//we can use their member functions/information
			Object blue("blue"), yellow("yellow"), red("red"), green("green");

			//first find blue objects
			cvtColor(cameraFeed,HSV,COLOR_BGR2HSV);
			inRange(HSV,blue.getHSVmin(),blue.getHSVmax(),threshold);
			morphOps(threshold);
			trackFilteredObject(blue,threshold,HSV,cameraFeed);
			//then yellows
			cvtColor(cameraFeed,HSV,COLOR_BGR2HSV);
			inRange(HSV,yellow.getHSVmin(),yellow.getHSVmax(),threshold);
			morphOps(threshold);
			trackFilteredObject(yellow,threshold,HSV,cameraFeed);
			//then reds
			cvtColor(cameraFeed,HSV,COLOR_BGR2HSV);
			inRange(HSV,red.getHSVmin(),red.getHSVmax(),threshold);
			morphOps(threshold);
			trackFilteredObject(red,threshold,HSV,cameraFeed);
			//then greens
			cvtColor(cameraFeed,HSV,COLOR_BGR2HSV);
			inRange(HSV,green.getHSVmin(),green.getHSVmax(),threshold);
			morphOps(threshold);
			trackFilteredObject(green,threshold,HSV,cameraFeed);

		}
		//show frames
		//imshow(windowName2,threshold);

		imshow(windowName,cameraFeed);
		//imshow(windowName1,HSV);

		//delay 30ms so that screen can refresh.
		//image will not appear without this waitKey() command
		waitKey(30);
	}
	return 0;
}
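morphOps is defined elsewhere in this project. A typical erode/dilate cleanup of the kind it performs on the binary threshold image (a hypothetical sketch, not the project's exact code):

// Hypothetical sketch of a morphOps-style cleanup.
void morphOps(Mat &thresh)
{
    // Erode with a small rectangle to remove speckle noise,
    // then dilate with a larger one to restore object mass.
    Mat erodeElement  = getStructuringElement(MORPH_RECT, Size(3, 3));
    Mat dilateElement = getStructuringElement(MORPH_RECT, Size(8, 8));
    erode(thresh, thresh, erodeElement);
    erode(thresh, thresh, erodeElement);
    dilate(thresh, thresh, dilateElement);
    dilate(thresh, thresh, dilateElement);
}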
Example #11
int multiViewFaceDetection(Mat &srcImg, CascadeClassifier &faceCascade, CascadeClassifier &faceCascade2, vector<FacePositionInfo>& storeFpi)
{
	// If the input image is not grayscale, then convert the BGR or BGRA color image to grayscale.
	int facetype = -1;
    Mat srcImgGray;
    if (srcImg.channels() == 3) {
        cvtColor(srcImg, srcImgGray, CV_BGR2GRAY);
    }
    else if (srcImg.channels() == 4) {
        cvtColor(srcImg, srcImgGray, CV_BGRA2GRAY);
    }
    else {
        // Access the input image directly, since it is already grayscale.
        srcImgGray = srcImg;
    }	
	//preprocess
    // Possibly shrink the image, to run much faster.
	int scaledWidth = SCALEDWIDTH;
    Mat inputImg;
    float scale = srcImgGray.cols / (float)scaledWidth;
    if (srcImgGray.cols > scaledWidth) {
        // Shrink the image while keeping the same aspect ratio.
        int scaledHeight = cvRound(srcImgGray.rows / scale);
        resize(srcImgGray, inputImg, Size(scaledWidth, scaledHeight));
    }
    else {
        // Access the input image directly, since it is already small.
        inputImg = srcImgGray;
    }
    // Standardize the brightness and contrast to improve dark images.
    Mat equalizedImg;
    equalizeHist(inputImg, equalizedImg);
	

	Rect faceRect;
	FacePositionInfo tmpfpi;
	// Find the largest face.
	int findface = 0;
#ifdef _DEBUG
	namedWindow("Debug");
#endif
	if(!findface){
		detectLargestObject_afterprocess(equalizedImg, scale, srcImgGray.cols, srcImgGray.rows, faceCascade, faceRect, scaledWidth);
		// Check if a face was detected.		
		if (faceRect.width > 0) {
			rect2FPI(faceRect,tmpfpi,0,srcImgGray.cols,srcImgGray.rows);
			storeFpi.push_back(tmpfpi);
			findface = 1;
			facetype = 1; // front face
		}
	}
	if(!findface){
		//turn to right
		detectLargestObject_afterprocess(equalizedImg, scale, srcImgGray.cols, srcImgGray.rows, faceCascade2, faceRect, scaledWidth);
		// Check if a face was detected.		
		if (faceRect.width > 0) {
#ifdef _DEBUG
			Rect faceRect2;
			faceRect2.x = cvRound(faceRect.x / scale);
            faceRect2.y = cvRound(faceRect.y / scale);
            faceRect2.width = cvRound(faceRect.width / scale);
            faceRect2.height = cvRound(faceRect.height / scale);
			rectangle(equalizedImg,faceRect2,CV_RGB(255,0,0),2,CV_AA);
			imshow("DEBUG",equalizedImg);
#endif
#ifdef _DEBUG
			printf("@@@@Find Profile Face -- RIGHT\n");
#endif
			rect2FPI(faceRect,tmpfpi,0,srcImgGray.cols,srcImgGray.rows);
			storeFpi.push_back(tmpfpi);
			findface = 1;
			facetype = 2; // right face
		}		
		//turn to left
		if(!findface){
			Mat equalizedImg_flip;
			flip(equalizedImg,equalizedImg_flip,1);
			detectLargestObject_afterprocess(equalizedImg_flip, scale, srcImgGray.cols, srcImgGray.rows, faceCascade2, faceRect, scaledWidth);
			// Check if a face was detected.		
			if (faceRect.width > 0) {
#ifdef _DEBUG
			Rect faceRect2;
			faceRect2.x = cvRound(faceRect.x / scale);
            faceRect2.y = cvRound(faceRect.y / scale);
            faceRect2.width = cvRound(faceRect.width / scale);
            faceRect2.height = cvRound(faceRect.height / scale);
			rectangle(equalizedImg_flip,faceRect2,CV_RGB(255,0,0),2,CV_AA);
			imshow("DEBUG",equalizedImg_flip);
#endif
				// Map the rect from the flipped image back to unflipped coordinates:
				// unscale, mirror x across the image width, then rescale.
				faceRect.x = cvRound(faceRect.x / scale);
				faceRect.width = cvRound(faceRect.width / scale);
				faceRect.x = equalizedImg_flip.cols - (faceRect.x + faceRect.width);
				faceRect.x = cvRound(faceRect.x * scale);
				faceRect.width = cvRound(faceRect.width * scale);
#ifdef _DEBUG
				printf("@@@@Find Profile Face -- LEFT\n");
#endif
				rect2FPI(faceRect,tmpfpi,0,srcImgGray.cols,srcImgGray.rows);
				storeFpi.push_back(tmpfpi);
				findface = 1;
				facetype = 3; // left face
			}
		}
	}
	/*
	if(!findface){
		IplImage iplInput = equalizedImg;
		Mat equalizedImgRotated;	
		//equalizedImg.copyTo(equalizedImgRotated);
		IplImage iplInputRotated = equalizedImgRotated;
		int anglelist[] = {45,-45};
		int count = 0;
		while(!findface){
			if(count == sizeof(anglelist)/sizeof(int))
				break;		
			//GS_rotate(&iplInput, &iplInputRotated, anglelist[count]);    	
			rotateImageMat(equalizedImg,equalizedImgRotated,anglelist[count]);
#ifdef _DEBUG
			imshow( "DEBUG", equalizedImgRotated); 
#endif
			detectLargestObject_afterprocess(equalizedImgRotated, scale, srcImgGray.cols, srcImgGray.rows, faceCascade, faceRect,scaledWidth);		
			// Check if a face was detected.
			if (faceRect.width > 0) {
#ifdef _DEBUG
				printf("****Find Rotated Face\n");
#endif
				rect2FPI(faceRect,tmpfpi,anglelist[count],srcImgGray.cols,srcImgGray.rows);
				storeFpi.push_back(tmpfpi);
				findface = 1;
				facetype = 4 + count; // 4 -> 45' 5 -> -45'
			}
			count++;
		}
	}
	*/
    return facetype;
}
Example #12
void pintaI(string im) {
    namedWindow("pinta Imagen", WINDOW_AUTOSIZE);
    imshow("pinta Imagen", leeimagen(im, -1));
    waitKey(0);
    destroyWindow("pinta Imagen");
}
Example #13
int main(void)
{
/* Create an object that decodes the input video stream. */
CvCapture *input_video = cvCaptureFromFile("C:\\Documents and Settings\\David Stavens\\Desktop\\223B-Demo\\optical_flow_input.avi");
if (input_video == NULL)
{
/* Either the video didn't exist OR it uses a codec OpenCV
* doesn't support.
*/
fprintf(stderr, "Error: Can't open video.\n");
return -1;
}
/* This is a hack. If we don't call this first then getting capture
* properties (below) won't work right. This is an OpenCV bug. We
* ignore the return value here. But it's actually a video frame.
*/
cvQueryFrame( input_video );
/* Read the video's frame size out of the AVI. */
CvSize frame_size;
frame_size.height =
(int) cvGetCaptureProperty( input_video, CV_CAP_PROP_FRAME_HEIGHT );
frame_size.width =
(int) cvGetCaptureProperty( input_video, CV_CAP_PROP_FRAME_WIDTH );
/* Determine the number of frames in the AVI. */
long number_of_frames;
/* Go to the end of the AVI (ie: the fraction is "1") */
cvSetCaptureProperty( input_video, CV_CAP_PROP_POS_AVI_RATIO, 1. );
/* Now that we're at the end, read the AVI position in frames */
number_of_frames = (int) cvGetCaptureProperty( input_video, CV_CAP_PROP_POS_FRAMES );
/* Return to the beginning */
cvSetCaptureProperty( input_video, CV_CAP_PROP_POS_FRAMES, 0. );
/* Create three windows called "Frame N", "Frame N+1", and "Optical Flow"
* for visualizing the output. Have those windows automatically change their
* size to match the output.
*/
cvNamedWindow("Optical Flow", CV_WINDOW_AUTOSIZE);
long current_frame = 0;
while(true)
{
static IplImage *frame = NULL, *frame1 = NULL, *frame1_1C = NULL, *frame2_1C =
NULL, *eig_image = NULL, *temp_image = NULL, *pyramid1 = NULL, *pyramid2 = NULL;
/* Go to the frame we want. Important if multiple frames are queried in
* the loop, which they of course are for optical flow. Note that the very
* first call to this is actually not needed. (Because the correct position
* is set outside the for() loop.)
*/
cvSetCaptureProperty( input_video, CV_CAP_PROP_POS_FRAMES, current_frame );
/* Get the next frame of the video.
* IMPORTANT! cvQueryFrame() always returns a pointer to the _same_
* memory location. So successive calls:
* frame1 = cvQueryFrame();
* frame2 = cvQueryFrame();
* frame3 = cvQueryFrame();
* will result in (frame1 == frame2 && frame2 == frame3) being true.
* The solution is to make a copy of the cvQueryFrame() output.
*/
frame = cvQueryFrame( input_video );
if (frame == NULL)
{
/* Why did we get a NULL frame? We shouldn't be at the end. */
fprintf(stderr, "Error: Hmm. The end came sooner than we thought.\n");
return -1;
}
/* Allocate another image if not already allocated.
* Image has ONE channel of color (ie: monochrome) with 8-bit "color" depth.
* This is the image format OpenCV algorithms actually operate on (mostly).
*/
allocateOnDemand( &frame1_1C, frame_size, IPL_DEPTH_8U, 1 );
/* Convert whatever the AVI image format is into OpenCV's preferred format.
* AND flip the image vertically. Flip is a shameless hack. OpenCV reads
* in AVIs upside-down by default. (No comment :-))
*/
cvConvertImage(frame, frame1_1C, CV_CVTIMG_FLIP);
/* We'll make a full color backup of this frame so that we can draw on it.
* (It's not the best idea to draw on the static memory space of cvQueryFrame().)
*/
allocateOnDemand( &frame1, frame_size, IPL_DEPTH_8U, 3 );
cvConvertImage(frame, frame1, CV_CVTIMG_FLIP);
/* Get the second frame of video. Same principles as the first. */
frame = cvQueryFrame( input_video );
if (frame == NULL)
{
fprintf(stderr, "Error: Hmm. The end came sooner than we thought.\n");
return -1;
}
allocateOnDemand( &frame2_1C, frame_size, IPL_DEPTH_8U, 1 );
cvConvertImage(frame, frame2_1C, CV_CVTIMG_FLIP);
/* Shi and Tomasi Feature Tracking! */
/* Preparation: Allocate the necessary storage. */
allocateOnDemand( &eig_image, frame_size, IPL_DEPTH_32F, 1 );
allocateOnDemand( &temp_image, frame_size, IPL_DEPTH_32F, 1 );
/* Preparation: This array will contain the features found in frame 1. */
CvPoint2D32f frame1_features[400];
/* Preparation: BEFORE the function call this variable is the array size
* (or the maximum number of features to find). AFTER the function call
* this variable is the number of features actually found.
*/
int number_of_features;
/* I'm hardcoding this at 400. But you should make this a #define so that you can
* change the number of features you use for an accuracy/speed tradeoff analysis.
*/
number_of_features = 400;
/* Actually run the Shi and Tomasi algorithm!!
* "frame1_1C" is the input image.
* "eig_image" and "temp_image" are just workspace for the algorithm.
* The first ".01" specifies the minimum quality of the features (based on the eigenvalues).
* The second ".01" specifies the minimum Euclidean distance between features.
* "NULL" means use the entire input image. You could point to a part of the image.
* WHEN THE ALGORITHM RETURNS:
* "frame1_features" will contain the feature points.
* "number_of_features" will be set to a value <= 400 indicating the number of feature points found.
*/
cvGoodFeaturesToTrack(frame1_1C, eig_image, temp_image, frame1_features, &number_of_features, .01, .01, NULL);
/* Pyramidal Lucas Kanade Optical Flow! */
/* This array will contain the locations of the points from frame 1 in frame 2. */
CvPoint2D32f frame2_features[400];
/* The i-th element of this array will be non-zero if and only if the i-th feature of
* frame 1 was found in frame 2.
*/
char optical_flow_found_feature[400];
/* The i-th element of this array is the error in the optical flow for the i-th feature
* of frame 1 as found in frame 2. If the i-th feature was not found (see the array above)
* I think the i-th entry in this array is undefined.
*/
float optical_flow_feature_error[400];
/* This is the window size to use to avoid the aperture problem (see slide "Optical Flow: Overview"). */
CvSize optical_flow_window = cvSize(3,3);
/* This termination criteria tells the algorithm to stop when it has either done
* 20 iterations or when epsilon is better than .3. You can play with these parameters
* for speed vs. accuracy but these values work pretty well in many situations.
*/
CvTermCriteria optical_flow_termination_criteria
= cvTermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, .3 );
/* This is some workspace for the algorithm.
* (The algorithm actually carves the image into pyramids of different resolutions.)
*/
allocateOnDemand( &pyramid1, frame_size, IPL_DEPTH_8U, 1 );
allocateOnDemand( &pyramid2, frame_size, IPL_DEPTH_8U, 1 );
/* Actually run Pyramidal Lucas Kanade Optical Flow!!
* "frame1_1C" is the first frame with the known features.
* "frame2_1C" is the second frame where we want to find the first frame's features.
* "pyramid1" and "pyramid2" are workspace for the algorithm.
* "frame1_features" are the features from the first frame.
* "frame2_features" is the (outputted) locations of those features in the second frame.
* "number_of_features" is the number of features in the frame1_features array.
* "optical_flow_window" is the size of the window to use to avoid the aperture problem.
* "5" is the maximum number of pyramids to use. 0 would be just one level.
* "optical_flow_found_feature" is as described above (non-zero iff feature found by the flow).
* "optical_flow_feature_error" is as described above (error in the flow for this feature).
* "optical_flow_termination_criteria" is as described above (how long the algorithm should look).
* "0" means disable enhancements. (For example, the second array isn't pre-initialized with guesses.)
*/
cvCalcOpticalFlowPyrLK(frame1_1C, frame2_1C, pyramid1, pyramid2, frame1_features,
frame2_features, number_of_features, optical_flow_window, 5,
optical_flow_found_feature, optical_flow_feature_error,
optical_flow_termination_criteria, 0 );
/* For fun (and debugging :)), let's draw the flow field. */
for(int i = 0; i < number_of_features; i++)
{
/* If Pyramidal Lucas Kanade didn't really find the feature, skip it. */
if ( optical_flow_found_feature[i] == 0 )
 continue;
int line_thickness;
 line_thickness = 1;
/* CV_RGB(red, green, blue) is the red, green, and blue components
* of the color you want, each out of 255.
*/
CvScalar line_color;
 line_color = CV_RGB(255,0,0);
/* Let's make the flow field look nice with arrows. */
/* The arrows will be a bit too short for a nice visualization because of the high framerate
* (ie: there's not much motion between the frames). So let's lengthen them by a factor of 3.
*/
CvPoint p,q;
p.x = (int) frame1_features[i].x;
p.y = (int) frame1_features[i].y;
q.x = (int) frame2_features[i].x;
q.y = (int) frame2_features[i].y;
double angle;
 angle = atan2( (double) p.y - q.y, (double) p.x - q.x );
double hypotenuse;
 hypotenuse = sqrt( square(p.y - q.y) + square(p.x - q.x) );
/* Here we lengthen the arrow by a factor of three. */
q.x = (int) (p.x - 3 * hypotenuse * cos(angle));
q.y = (int) (p.y - 3 * hypotenuse * sin(angle));
/* Now we draw the main line of the arrow. */
/* "frame1" is the frame to draw on.
* "p" is the point where the line begins.
* "q" is the point where the line stops.
* "CV_AA" means antialiased drawing.
* "0" means no fractional bits in the center cooridinate or radius.
*/
cvLine( frame1, p, q, line_color, line_thickness, CV_AA, 0 );
/* Now draw the tips of the arrow. I do some scaling so that the
* tips look proportional to the main line of the arrow.
*/
p.x = (int) (q.x + 9 * cos(angle + pi / 4));
p.y = (int) (q.y + 9 * sin(angle + pi / 4));
cvLine( frame1, p, q, line_color, line_thickness, CV_AA, 0 );
p.x = (int) (q.x + 9 * cos(angle - pi / 4));
p.y = (int) (q.y + 9 * sin(angle - pi / 4));
cvLine( frame1, p, q, line_color, line_thickness, CV_AA, 0 );
}
/* Now display the image we drew on. Recall that "Optical Flow" is the name of
* the window we created above.
*/
cvShowImage("Optical Flow", frame1);
/* And wait for the user to press a key (so the user has time to look at the image).
* If the argument is 0 then it waits forever, otherwise it waits that number of milliseconds.
* The return value is the key the user pressed.
*/
int key_pressed;
key_pressed = cvWaitKey(0);
}
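The same Shi-Tomasi + pyramidal Lucas-Kanade pipeline is far shorter in the C++ API. A minimal sketch, assuming prevGray and nextGray are two consecutive grayscale frames and colorFrame is a BGR copy to draw on (all three names are assumptions):

// Sketch: modern C++-API equivalent of the loop above.
vector<Point2f> prevPts, nextPts;
goodFeaturesToTrack(prevGray, prevPts, 400, 0.01, 0.01); // same 400-feature budget
vector<uchar> status;
vector<float> err;
TermCriteria crit(TermCriteria::COUNT | TermCriteria::EPS, 20, 0.3);
calcOpticalFlowPyrLK(prevGray, nextGray, prevPts, nextPts, status, err,
                     Size(21, 21), 5, crit);
for (size_t i = 0; i < prevPts.size(); i++)
    if (status[i]) // feature was tracked into the second frame
        line(colorFrame, prevPts[i], nextPts[i], Scalar(0, 0, 255), 1, CV_AA);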
Example #14
File: main.cpp  Project: nem0301/Tetris
int main(int argc, char* argv[]) {

	int width = SIZE * WIDTH;
	int height = SIZE * HEIGHT;
	int size = SIZE;

	Mat img(height, width, CV_8UC3, Scalar(0));
	namedWindow("Display Image", WINDOW_AUTOSIZE);

	initialPlate(plate);
	while (true) {

		LBlock b(Point2f(WIDTH / 2, 3), plate);

		Block& block = b;

		Point2f point(0, 0);

		time_t tick, tock;
		time(&tick);
		while (true) {
			//image initialize by black
			img = Mat::zeros(height, width, CV_8UC3);

			//draw whole plate
			for (int y = 0; y < HEIGHT; y++) {
				for (int x = 0; x < WIDTH; x++) {
					int temp = y * WIDTH + x;
					if (plate[temp] != 0) {
						Point2f tempPoint1(x * size, y * size);
						Point2f tempPoint2(x * size + size - 1,
								y * size + size - 1);
						Rect rect(tempPoint1, tempPoint2);
						rectangle(img, rect, Scalar(255, 255, 255), -1);
					}
				}
			}

			//get input
			int input = cvWaitKey(1);
			if (input == 65361) {		//left arrow (X11 key code)
				block.move(-1);
			} else if (input == 65362) {	//up arrow
				block.rotate();
			} else if (input == 65363) {	//right arrow
				block.move(1);
			} else if (input == 65364) {	//down arrow
				block.move(0);
			} else if (input == 32) {		//space
				block.moveFullDown();
				checkLines(plate);
				break;
			} else if (input == 27) {	//esc
				return 0;
			}

			time(&tock);
			if (tock - tick > 0) {
				int deadOrNot = block.move(0);
				tick = tock;
				if (deadOrNot == -1) {
					break;
				}
			}

			imshow("Display Image", img);

		}
	}

	destroyAllWindows();
	cvWaitKey();
	return 0;
}
Example #15
void BinarizationViewer::showBinarizedImgs() {
    
    Mat srcBGRImg, srcHSVImg, srcYCrCbImg;
    Mat bgrChannelImgs[3], hsvChannelImgs[3], ycrcbChannelImgs[3];

    vector<string> channelNames = {}; // currently unused
    int trackbarInitValue = 128; // shared by all trackbars; the real thresholds are updated in the callbacks
    namedWindow("Blue", CV_WINDOW_AUTOSIZE);
    namedWindow("Green", CV_WINDOW_AUTOSIZE);
    namedWindow("Red", CV_WINDOW_AUTOSIZE);
    namedWindow("Hue", CV_WINDOW_AUTOSIZE);
    namedWindow("Saturation", CV_WINDOW_AUTOSIZE);
    namedWindow("Value", CV_WINDOW_AUTOSIZE);
    namedWindow("Y", CV_WINDOW_AUTOSIZE);
    namedWindow("Cr", CV_WINDOW_AUTOSIZE);
    namedWindow("Cb", CV_WINDOW_AUTOSIZE);

    cvCreateTrackbar("B_Threshold", "Blue", &trackbarInitValue, 255, onBlueTrackbar);
    cvCreateTrackbar("G_Threshold", "Green", &trackbarInitValue, 255, onGreenTrackbar);
    cvCreateTrackbar("R_Threshold", "Red", &trackbarInitValue, 255, onRedTrackbar);
    cvCreateTrackbar("H_Threshold", "Hue", &trackbarInitValue, 255, onHueTrackbar);
    cvCreateTrackbar("S_Threshold", "Saturation", &trackbarInitValue, 255, onSaturationTrackbar);
    cvCreateTrackbar("V_Threshold", "Value", &trackbarInitValue, 255, onValueTrackbar);
    cvCreateTrackbar("Y_Threshold", "Y", &trackbarInitValue, 255, onYTrackbar);
    cvCreateTrackbar("Cr_Threshold", "Cr", &trackbarInitValue, 255, onCrTrackbar);
    cvCreateTrackbar("Cb_Threshold", "Cb", &trackbarInitValue, 255, onCbTrackbar);

    cvSetTrackbarPos("B_Threshold", "Blue", 128);
    cvSetTrackbarPos("G_Threshold", "Green", 128);
    cvSetTrackbarPos("R_Threshold", "Red", 128);
    cvSetTrackbarPos("H_Threshold", "Hue", 128);
    cvSetTrackbarPos("S_Threshold", "Saturation", 128);
    cvSetTrackbarPos("V_Threshold", "Value", 128);
    cvSetTrackbarPos("Y_Threshold", "Y", 128);
    cvSetTrackbarPos("Cr_Threshold", "Cr", 128);
    cvSetTrackbarPos("Cb_Threshold", "Cb", 128);

    _isShowing = true;
	while(_isShowing) {
        srcBGRImg = _cameraManager.getFrame();

        cvtColor(srcBGRImg, srcHSVImg, CV_BGR2HSV);
        cvtColor(srcBGRImg, srcYCrCbImg, CV_BGR2YCrCb);

        split(srcBGRImg, bgrChannelImgs);
        split(srcHSVImg, hsvChannelImgs);
        split(srcYCrCbImg, ycrcbChannelImgs);

        threshold(bgrChannelImgs[0], bgrChannelImgs[0], binarizationViewerBlueThreshold, 255, CV_THRESH_BINARY);
        threshold(bgrChannelImgs[1], bgrChannelImgs[1], binarizationViewerGgreenThreshold, 255, CV_THRESH_BINARY);
        threshold(bgrChannelImgs[2], bgrChannelImgs[2], binarizationViewerRedThreshold, 255, CV_THRESH_BINARY);
        threshold(hsvChannelImgs[0], hsvChannelImgs[0], binarizationViewerHueThreshold, 255, CV_THRESH_BINARY);
        threshold(hsvChannelImgs[1], hsvChannelImgs[1], binarizationViewerSaturationThreshold, 255, CV_THRESH_BINARY);
        threshold(hsvChannelImgs[2], hsvChannelImgs[2], binarizationViewerValueThreshold, 255, CV_THRESH_BINARY);
        threshold(ycrcbChannelImgs[0], ycrcbChannelImgs[0], binarizationViewerYThreshold, 255, CV_THRESH_BINARY);
        threshold(ycrcbChannelImgs[1], ycrcbChannelImgs[1], binarizationViewerCrThreshold, 255, CV_THRESH_BINARY);
        threshold(ycrcbChannelImgs[2], ycrcbChannelImgs[2], binarizationViewerCbThreshold, 255, CV_THRESH_BINARY);
        
        imshow("src", srcBGRImg);
        imshow("Blue", bgrChannelImgs[0]);
        imshow("Green", bgrChannelImgs[1]);
        imshow("Red", bgrChannelImgs[2]);
        imshow("Hue", hsvChannelImgs[0]);
        imshow("Saturation", hsvChannelImgs[1]);
        imshow("Value", hsvChannelImgs[2]);
        imshow("Y", ycrcbChannelImgs[0]);
        imshow("Cr", ycrcbChannelImgs[1]);
        imshow("Cb", ycrcbChannelImgs[2]);

		int key = waitKey(1);
        if(key == 27) break;
    }

    destroyAllWindows();
}
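The nine window/trackbar/threshold blocks above are identical except for names, so they could be driven from one table. A condensed sketch of the idea (names are illustrative; thresholds here are plain ints read directly by the trackbars instead of the callback-backed globals used above):

// Sketch: table-driven version of the nine channel pipelines.
struct ChannelView { const char* window; int threshold; };
ChannelView views[9] = {
    {"Blue", 128}, {"Green", 128}, {"Red", 128},
    {"Hue", 128}, {"Saturation", 128}, {"Value", 128},
    {"Y", 128}, {"Cr", 128}, {"Cb", 128}
};
for (int i = 0; i < 9; i++) {
    namedWindow(views[i].window, CV_WINDOW_AUTOSIZE);
    createTrackbar("Threshold", views[i].window, &views[i].threshold, 255);
}
// Then, per frame and per channel image channels[i]:
// threshold(channels[i], channels[i], views[i].threshold, 255, CV_THRESH_BINARY);
// imshow(views[i].window, channels[i]);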
Example #16
void skizImage::NamedWindow(string windowName)
{
	namedWindow(windowName);
}
Example #17
void *image_show( void *)        /* analyzes the image */
{
    Mat frameCopy;
    Mat frameAnalize;
    Mat result;
    mouseInfo.event=-1;
    while(1)
    {
        pthread_mutex_lock(&in_frame);
        frameCopy=frame;
        pthread_mutex_unlock(&in_frame);

        pthread_mutex_lock(&in_mouseInfo);
        if(mouseInfo.x > 100 && mouseInfo.y >100 && mouseInfo.event==EVENT_LBUTTONDOWN)
        {
            Cerro;
            printf("Change! \n");
            Rect myDim(mouseInfo.x-25,mouseInfo.y-25, 50, 50);
            frameAnalize = frameCopy(myDim).clone(); // clone so the template owns its pixels
        }
        else if(mouseInfo.event == -1)
        {
            Rect myDim(100,100, 50, 50);
            frameAnalize = frameCopy(myDim).clone(); // clone so the template owns its pixels
            mouseInfo.event=-2;
        }
        pthread_mutex_unlock(&in_mouseInfo);

        /// Create the result matrix
        int result_cols =  frameCopy.cols - frameAnalize.cols + 1;
        int result_rows = frameCopy.rows - frameAnalize.rows + 1;
        result.create( result_rows, result_cols, CV_32FC1 ); // create() takes (rows, cols)

        /// Do the Matching and Normalize
        int match_method=1; //1-5
        matchTemplate( frameCopy, frameAnalize, result, match_method );
        normalize( result, result, 0, 1, NORM_MINMAX, -1, Mat() );

        /// Localizing the best match with minMaxLoc
        double minVal; double maxVal; Point minLoc; Point maxLoc;
        Point matchLoc;
        minMaxLoc( result, &minVal, &maxVal, &minLoc, &maxLoc, Mat() );

        /// For SQDIFF and SQDIFF_NORMED, the best matches are lower values. For all the other methods, the higher the better
        if( match_method  == CV_TM_SQDIFF || match_method == CV_TM_SQDIFF_NORMED )
            { matchLoc = minLoc; }
        else
            { matchLoc = maxLoc; }
        
        /// Show me what you got
        rectangle( frameCopy, matchLoc, Point( matchLoc.x + frameAnalize.cols , matchLoc.y + frameAnalize.rows ), Scalar::all(0), 2, 8, 0 );
        rectangle( result, matchLoc, Point( matchLoc.x + frameAnalize.cols , matchLoc.y + frameAnalize.rows ), Scalar::all(0), 2, 8, 0 );

        /// make a dif with the original and the matched
        Rect myDim2(matchLoc.x,matchLoc.y,50 , 50);
        Mat frameAnalizado = frameCopy(myDim2).clone(); 
        Mat subt = frameAnalize - frameAnalizado;

        /// Make a simple text to debug
        char str[256];
        sprintf(str, "x:%d/y:%d", matchLoc.x, matchLoc.y);
        putText(frameCopy, str, cvPoint(30,30), FONT_HERSHEY_COMPLEX_SMALL, 0.8, cvScalar(200,200,250), 1, CV_AA);

        sprintf(str, "maxVal:%.8f/minVal:%.8f", maxVal, minVal);
        putText(frameCopy, str, cvPoint(30,60), FONT_HERSHEY_COMPLEX_SMALL, 0.6, cvScalar(200,200,250), 1, CV_AA);

        /// Show the images (create each window before its first imshow so the NORMAL flag applies)
        namedWindow("image_show", CV_WINDOW_NORMAL); imshow("image_show", frameCopy); waitKey(30);
        namedWindow("analize", CV_WINDOW_NORMAL); imshow("analize", frameAnalize); waitKey(30);
        namedWindow("result", CV_WINDOW_NORMAL); imshow("result", result); waitKey(30);
        namedWindow("analizado", CV_WINDOW_NORMAL); imshow("analizado", frameAnalizado); waitKey(30);
        namedWindow("sub", CV_WINDOW_NORMAL); imshow("sub", subt); waitKey(30);

        usleep(10);
        
    }
    Cerro; printf("Image_show Down !\n");
    return NULL;
}
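The bare literal in match_method=1 above selects CV_TM_SQDIFF_NORMED. For reference, the mapping behind the "1-5" comment (using the named constant would make the minLoc/maxLoc branch above self-explanatory):

// matchTemplate method codes:
// 0: CV_TM_SQDIFF    1: CV_TM_SQDIFF_NORMED
// 2: CV_TM_CCORR     3: CV_TM_CCORR_NORMED
// 4: CV_TM_CCOEFF    5: CV_TM_CCOEFF_NORMED
int match_method = CV_TM_SQDIFF_NORMED; // clearer than the bare 1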
Example #18
void onlineclust::data_proc::ImgShow(cv::Mat const& Image, const char* name)
{
  namedWindow( name, cv::WINDOW_NORMAL ); // Create a window for display.
  imshow( name, Image );                // Show our image inside it.
    //waitKey(600); // Wait for a keystroke in the window
}
Example #19
 bool AGui::init() {
   namedWindow(mainWinTitle.c_str(), WINDOW_AUTOSIZE);
   return true;
 }
Example #20
void ShowImage(Mat image,int type,string WindowName)
{
    namedWindow( WindowName, WINDOW_AUTOSIZE );// Create a window for display.
    image.convertTo(image,type);
    imshow( WindowName, image);
}
Example #21
void FrameAnalyser::analyseObjects(string filename)
{

    ausgabe.open(filename.c_str(), ios::out);

    int frames = 0;
    double zeit = 0;
    char c;
    int time = 1;


    ColoredObjectDetector g(DObject::YellowBuoy);
    ColoredObjectDetector r(DObject::RedBuoy,time);
    namedWindow("Ausgabe",CV_WINDOW_FREERATIO);

    int d[8];
    for (int i = 0; i < 8; i++)
        d[i] = 0;

    for (int var = myStack->size()-1; var > 0; var-=time) {

        clock_t start, end;
        Frame f = myStack->getFrame(var);

        start = clock();
        g.getObjects(f);
        r.getObjects(f);
        end = clock();

        Mat im = f.getImage();

        zeit += end - start;

        imshow("Ausgabe",im);

        cout << endl << "1: boje zu sehen." << endl;
        cout << "2: boje erkannt" << endl;
        cout << "3: boje sicher erkannt" << endl;
        cout << "4: falsche boje erkannt" << endl << endl;

        char c = 0;
        while (c != 32) {
            c = waitKey(0);
            int k = ((int) c) - 48;
            switch(k){
            case 1:
                d[0] = d[0] +1;
                break;
            case 2:
                d[0] = d[0] +1;
                d[1] = d[1] +1;
                break;
            case 3:
                d[0] = d[0] +1;
                d[1] = d[1] +1;
                d[2] = d[2] +1;
                break;
            case 4:
                d[3] = d[3] +1;
            }

//           cout << k << ": " << d[k-1] << "\t";
        }

        cout << endl;

        frames++;


        ausgabe << d[0] << "\t";
        ausgabe << d[1] << "\t";
        ausgabe << d[2] << "\t";
        ausgabe << d[3] << "\t";

        ausgabe << (end-start) << "\t";
        ausgabe << CLOCKS_PER_SEC << endl;
    }


    destroyWindow("Ausgabe");

    ausgabe.close();

    cout << "Frames: " << frames << endl;
    cout << "Zu sehen: " << d[0]  << endl;
    cout << "erkannt: " << d[1]  << endl;
    cout << "sicher erkannt: " << d[2]  << endl;
    cout << "falsch erkannt: " << d[3]  << endl;
    cout << "Clocks per second: " << zeit/frames << endl;
    cout << "Millisekunden: " << zeit/frames/CLOCKS_PER_SEC*1000 << endl;

}
Example #22
Mat ScreenDetector::getTransformationMatrix(Error& error)
{
    bool approxFound = false;

    // convert image to HSV
    cvtColor(img, hsv, CV_BGR2HSV);

    // threshold the image
    inRange(hsv, hsvMin, hsvMax, thresholded);

    // Optimize threshold by reducing noise
    erode(thresholded, thresholded, getStructuringElement(MORPH_ELLIPSE, Size(erodeDilateSize, erodeDilateSize)) );
    dilate( thresholded, thresholded, getStructuringElement(MORPH_ELLIPSE, Size(erodeDilateSize, erodeDilateSize)) );
    dilate( thresholded, thresholded, getStructuringElement(MORPH_ELLIPSE, Size(erodeDilateSize, erodeDilateSize)) );
    erode(thresholded, thresholded, getStructuringElement(MORPH_ELLIPSE, Size(erodeDilateSize, erodeDilateSize)) );
    GaussianBlur(thresholded, thresholded, Size(3,3), 0);

    Mat forContours;
    thresholded.copyTo(forContours);
    // find all contours
    Contours contours;
    Contour approximatedScreen;
    findContours(forContours, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
    int nbContours = contours.size();
    cout << nbContours << " contours found, debug: " << DEBUG << endl;

    if(nbContours == 0)
    {
        error.setError("Unable to find the screen",
                       "The camera doesn't detect any screen or green element."
                       "Please check if your screen is turned on and directed toward the screen");
        return img;
    }

    sort(contours.begin(), contours.end(), contour_compare_area);

    // find the contour with the biggest area that have 4 points when approximated
    for(int i=0; i < nbContours; ++i)
    {
        approxPolyDP(contours.at(i), approximatedScreen, approximateEpsilon * arcLength(contours.at(i), true), true);
        // our screen has 4 points when approximated
        if(approximatedScreen.size() == 4)
        {
            approxFound = true;
            break;
        }
    }

    if(!approxFound)
    {
        error.setError("Unable to find the screen properly",
                       "It seems that the screen is not fully detectable by the camera. Try to reduce light in your room");
        return img;
    }

    if(DEBUG)
    {
        namedWindow("debug", WINDOW_KEEPRATIO);
        namedWindow("thresholded_calibration", WINDOW_KEEPRATIO);
        Mat debug = Mat::zeros(img.rows, img.cols, CV_8UC3);
        polylines(debug, approximatedScreen, true, Scalar(0,0,255), 3);
        imshow("debug", debug);
        imshow("thresholded_calibration", thresholded);
    }

    return transformImage(approximatedScreen);
}
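transformImage is defined elsewhere in this class. A typical perspective rectification of the four approximated corners (a hypothetical sketch, not the class's actual implementation; it assumes the corners arrive in top-left, top-right, bottom-right, bottom-left order):

// Hypothetical sketch of a transformImage-style rectification.
Mat rectifyQuad(const Mat& img, const Contour& quad, Size outSize)
{
    vector<Point2f> src(quad.begin(), quad.end()); // Point -> Point2f
    vector<Point2f> dst;
    dst.push_back(Point2f(0, 0));
    dst.push_back(Point2f((float)outSize.width, 0));
    dst.push_back(Point2f((float)outSize.width, (float)outSize.height));
    dst.push_back(Point2f(0, (float)outSize.height));
    Mat M = getPerspectiveTransform(src, dst); // needs exactly 4 point pairs
    Mat out;
    warpPerspective(img, out, M, outSize);
    return out;
}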
Example #23
//------------------------------------------------------------------------------
String PAN::authenticate(String CWD,String fileoutput){
	Point matchLoc;
	float percentage, threshold;
	float average = 0;
	int count = 0;
	Mat big_image;
	big_image = panimage.img->clone();//big image
	resize(big_image, big_image, Size(2000, 1500));
	if (!big_image.data)
	{
		std::cout << "Error reading images " << std::endl; return"";
	}
	Mat temp;
	if (big_image.channels() >= 2){
		cvtColor(big_image, temp, COLOR_BGR2GRAY);
		//split(temp, temp1);
		big_image = temp.clone(); // only replace the image when a conversion happened
	}
	/*img_1 = temp2.clone();
	resize(img_2, img_2, Size(600, 400));*/
	//-- Step 1: Detect the keypoints using the FAST detector
	vector<KeyPoint> keypoints_big, keypoints_small;
	int minHessian = 200; // unused: FAST does not take a Hessian threshold
	//FeatureDetector * detector = new SURF();
	FastFeatureDetector detector;
	detector.detect(big_image, keypoints_big);
	cout << "big FAST detection done\n\n";

	//-- Step 2: Calculate descriptors (feature vectors) with BRISK
	int Threshl = 10;       // detection threshold
	int Octaves = 3;        // number of pyramid layers keypoints are extracted from
	float PatternScales = 1.0f;
	Mat descriptors_2;
	BRISK BRISKD(Threshl, Octaves, PatternScales);

	//BRISKD.detect(img_1, keypoints_1);
	//BRISKD.detect(img_2, keypoints_2);
	BRISKD.compute(big_image, keypoints_big, descriptors_2);

	cout << "big image: BRISK descriptors done\n\n";



	int i = 0;
	for ( i = 0; i < 7; i++){
		String path(CWD);
		// build the path of the i-th template image: "win10.jpg", "win11.jpg", ...
		String temp = "win1";
		temp = temp + char(i + 48) + ".jpg";
		path = path + temp;
		Mat find = imread(path, CV_LOAD_IMAGE_UNCHANGED);
		//cout << path << "\n\n";
		if (find.data == NULL){ break; }
		//templateMatch(*panimage.img, find, matchLoc, threshold, percentage);
		//-------------------------------------------------------------------------------------

		if (find.channels() >= 2){
			cvtColor(find,find, COLOR_BGR2GRAY);
		}


		//img_1 = temp2.clone();
		resize(find ,find, Size(1200, 600));
		//-- Step 1: Detect the keypoints using the FAST detector
		vector<KeyPoint> keypoints_small;
		detector.detect(find, keypoints_small);
		cout << "template image: FAST detection done\n\n";

		//-- Step 2: Calculate descriptors (feature vectors) with BRISK
		Mat descriptors_small;
		BRISKD.compute(find, keypoints_small, descriptors_small);
		cout << "template image: BRISK descriptors done\n\n";

		//-------------------------------------------------------------------------------------
		

		//-- Step 3: Matching descriptor vectors using FLANN matcher
		//FlannBasedMatcher matcher;

		BFMatcher matcher(NORM_HAMMING); // BRISK descriptors are binary, so use Hamming distance
		std::vector< DMatch > matches;
		matcher.match(descriptors_small, descriptors_2, matches);
		cv::Mat all_matches;
		drawMatches(find, keypoints_small, big_image, keypoints_big, matches, all_matches, cv::Scalar::all(-1), cv::Scalar::all(-1), vector<char>(), cv::DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
		
		namedWindow("BRISK", CV_WINDOW_NORMAL);
			imshow("BRISK", all_matches);
		cv::waitKey(0);
		double max_dist = 0; double min_dist = 800;

		//-- Quick calculation of max and min distances between keypoints
		for (int i = 0; i < descriptors_small.rows; i++)
		{
			double dist = matches[i].distance;
			if (dist < min_dist) min_dist = dist;
			if (dist > max_dist) max_dist = dist;
		}

		//-- Draw only "good" matches (i.e. whose distance is less than 2*min_dist,
		//-- or a small arbitary value ( 0.02 ) in the event that min_dist is very
		//-- small)
		//-- PS.- radiusMatch can also be used here.
		std::vector< DMatch > good_matches;

		for (int i = 0; i < descriptors_small.rows; i++)
		{
			if (matches[i].distance <= 1.2 * min_dist)		{
				good_matches.push_back(matches[i]);
			}
		}

		//-- Draw only "good" matches
		Mat img_matches;
		drawMatches(find, keypoints_small, big_image, keypoints_big,good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);

		////-- Show detected matches
		namedWindow("Good Matches", CV_WINDOW_NORMAL);
		imshow("Good Matches", img_matches);
		waitKey();
		for (int i = 0; i < (int)good_matches.size(); i++)
		{
			//printf("-- Good Match [%d] Keypoint 1: %d  -- Keypoint 2: %d  \n", i, good_matches[i].queryIdx, good_matches[i].trainIdx);
		}
		percentage = (float)((float)good_matches.size() / (float)matches.size()) * 100;

		
		int width, height;
		width = find.size().width; height = find.size().height;
		//cout << i + 1 << "--LOC x=" << matchLoc.x << "  y=" << matchLoc.y << "  % match= " << percentage << "\n";
		if (percentage > 50){
			average += percentage;
			count++;
		}
		fileoutput = fileoutput + to_string(percentage) + "$";
		//cout << percentage << "$";
		//rectangle(find,Rect(matchLoc.x, matchLoc.y, width, height),0,1,4,0); //removed to prevent alteration of the find image
		find.release();
	}
	if (count != 0){
		average /= count;
	}
	else { average = 0; }
	
	//cout << "Authenticity is " << average <<"\n";
	authenticity = average;
	fileoutput = fileoutput + to_string(average) + "$";
	//cout << to_string(average) << "$";
	cout << fileoutput;
	if (i == 0){ cout << "database not loaded"; }
	return fileoutput;
}
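// Hedged alternative (not part of the original code): the 1.2*min_dist
// heuristic above is sensitive to a single lucky match. Lowe's ratio test
// with knnMatch is a common, more robust replacement:
static std::vector<DMatch> ratioTestMatches(const Mat& descSmall, const Mat& descBig)
{
	BFMatcher matcher(NORM_HAMMING); // Hamming distance for binary BRISK descriptors
	std::vector<std::vector<DMatch>> knn;
	matcher.knnMatch(descSmall, descBig, knn, 2); // two nearest neighbours per query
	std::vector<DMatch> good;
	for (size_t m = 0; m < knn.size(); m++)
		if (knn[m].size() == 2 && knn[m][0].distance < 0.75f * knn[m][1].distance)
			good.push_back(knn[m][0]); // keep matches clearly better than the runner-up
	return good;
}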
示例#24
0
void run(const std::string& filename, bool highpass) {
    // A gray image
    cv::Mat_<float> img = cv::imread(filename, CV_LOAD_IMAGE_GRAYSCALE);

    // Pad the image with borders using copyMakeBorder. Use getOptimalDFTSize(A+B-1); see G&W pages 251-252 and 263 and the OpenCV dft tutorial. (Typically A+B-1 ~ 2A is used.)
    int rows = cv::getOptimalDFTSize(2*img.rows);
    int cols = cv::getOptimalDFTSize(2*img.cols);
    int imgRows = img.rows;
    int imgCols = img.cols;
    cv::copyMakeBorder(img,img,0,rows-img.rows,0,cols-img.cols,cv::BORDER_CONSTANT,cv::Scalar(0));

    //Copy the gray image into the first channel of a new 2-channel image of type Mat_<Vec2f>, e.g. using merge(), save it in img_dft
    //The second channel should be all zeros.
   cv::Mat_<float> imgs[] = {img.clone(), cv::Mat_<float>(img.rows, img.cols, 0.0f)};
   cv::Mat_<cv::Vec2f> img_dft;
   cv::merge(imgs, 2, img_dft);

   // Compute DFT
   cv::dft(img_dft, img_dft);

   // Split
   cv::split(img_dft, imgs);

   // Compute magnitude/phase
   cv::Mat_<float> magnitude, phase;
   cv::cartToPolar(imgs[0], imgs[1], magnitude, phase);

   // Shift quadrants for viewability
   dftshift(magnitude);

   // Logarithm of magnitude
   cv::Mat_<float> magnitudel;

   // Output image for HPF
   cv::Mat_<float> imgout;

   if(highpass) {
      // High-pass filter: remove the low frequency parts in the middle of the spectrum
      const int sizef = 50;
      magnitude(cv::Rect(magnitude.cols/2-sizef/2, magnitude.rows/2-sizef/2, sizef, sizef)) = 0.0f;


      // Take logarithm of modified magnitude
      magnitudel = magnitude + 1.0f;
      cv::log(magnitudel, magnitudel);

      // Shift back quadrants of the spectrum
      dftshift(magnitude);

      // Compute complex DFT output from magnitude/phase
      cv::polarToCart(magnitude, phase, imgs[0], imgs[1]);

      // Merge DFT into one image and restore
      cv::merge(imgs, 2, img_dft);
      cv::dft(img_dft, imgout, cv::DFT_INVERSE + cv::DFT_SCALE + cv::DFT_REAL_OUTPUT);

      //Cut away the borders
      imgout = imgout(cv::Rect(0,0,imgCols,imgRows));
   } else {
      // Take logarithm of magnitude
      magnitudel = magnitude + 1.0f;
      cv::log(magnitudel, magnitudel);
   }


   // Show
   cv::normalize(img, img, 0.0, 1.0, CV_MINMAX);
   cv::normalize(magnitudel, magnitudel, 0.0, 1.0, CV_MINMAX);
   cv::normalize(phase, phase, 0.0, 1.0, CV_MINMAX);
	 namedWindow("Input",WINDOW_NORMAL);
	 namedWindow("Magnitude",WINDOW_NORMAL);
	 namedWindow("Output",WINDOW_NORMAL);
   cv::imshow("Input", img);
   cv::imshow("Magnitude", magnitudel);
   if(highpass) {
      cv::normalize(imgout, imgout, 0.0, 1.0, CV_MINMAX);
      cv::imshow("Output", imgout);
   }
   cv::waitKey();
}
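// Hedged sketch: dftshift() is called above but not defined in this snippet.
// A typical quadrant swap (mirroring the OpenCV dft tutorial) that moves the
// zero-frequency component to the centre of the spectrum could be:
void dftshift(cv::Mat_<float>& mag)
{
   const int cx = mag.cols / 2;
   const int cy = mag.rows / 2;
   cv::Mat_<float> q0(mag, cv::Rect(0, 0, cx, cy));   // top-left
   cv::Mat_<float> q1(mag, cv::Rect(cx, 0, cx, cy));  // top-right
   cv::Mat_<float> q2(mag, cv::Rect(0, cy, cx, cy));  // bottom-left
   cv::Mat_<float> q3(mag, cv::Rect(cx, cy, cx, cy)); // bottom-right
   cv::Mat_<float> tmp;
   q0.copyTo(tmp); q3.copyTo(q0); tmp.copyTo(q3);     // swap diagonally
   q1.copyTo(tmp); q2.copyTo(q1); tmp.copyTo(q2);
}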
示例#25
0
vector<vector<double>> calibrator(CvCapture* cap){

	// Ball Threshold values
	int h_min, s_min, v_min, h_max, s_max, v_max, write, variablecount,getconfig;

	namedWindow("Thresh");
	namedWindow("Thresitud");
	Mat frame;
	vector<Mat> frames;
	vector<double> ball;
	vector<double> goal;
	vector<double> lines;
	vector<int> *arrptr;
	bool onlyonce = true;
	vector<vector<double>> values;

	string filename = "config.txt";
	string variable;
	string line_from_config;
	ofstream fout;
	ifstream fin;
	vector<string> line;
	variablecount = 1;

	fin.open(filename);

	getconfig = 0;
	write = 0;
	h_min = 0;
	s_min = 0;
	v_min = 0;
	h_max = 255;
	s_max = 255;
	v_max = 255;

	createTrackbar( "H min", "Thresh", &h_min, SLIDER_MAX, NULL);
	createTrackbar( "H max", "Thresh", &h_max, SLIDER_MAX, NULL);
	createTrackbar( "S min", "Thresh", &s_min, SLIDER_MAX, NULL);
	createTrackbar( "S max", "Thresh", &s_max, SLIDER_MAX, NULL);
	createTrackbar( "V min", "Thresh", &v_min, SLIDER_MAX, NULL);
	createTrackbar( "V max", "Thresh", &v_max, SLIDER_MAX, NULL);
	// WRITE to file
	createTrackbar("WRITE", "Thresh", &write, 1, NULL);
	createTrackbar("QUIT CONFIG", "Thresh", &getconfig, 1, NULL);

	cout<< "Enne ifi" << endl;

	if (fin.is_open())
	{
		cout << "IF" << endl;
		getline(fin,line_from_config);
		while (line_from_config != "")
		{
			cout<<"WHILE"<<endl;
			line = split(line_from_config, " ");
			if (line[0] == "Ball"){
				for (int i = 1; i < line.size(); i++){
					ball.push_back(atoi(line[i].c_str()));
					cout<<line[i]<<endl;
				}
			}

			else if (line[0] == "Goal"){
				for (int i = 1; i < line.size(); i++){
					goal.push_back(atoi(line[i].c_str()));
					cout<<line[i]<<endl;
				}
			}

			else if (line[0] == "Lines"){
				for (int i = 1; i < line.size(); i++){
					lines.push_back(atoi(line[i].c_str()));
					cout<<line[i]<<endl;
				}
			}
			else
			{
				break;
			}



			getline(fin,line_from_config);
		}
		values.push_back(ball);
		values.push_back(goal);
		values.push_back(lines);

	}
	else
	{
		cout<<"File is empty or not opened"<<endl;
	}

	while (true){

	if (write == 1)	{

		if (onlyonce)
		{
			fout.open(filename);
			values.clear();
			onlyonce = false;
		}


		if(variablecount == 1){
			variable = "Ball";
			ball.push_back(h_min);
			ball.push_back(s_min);
			ball.push_back(v_min);
			ball.push_back(h_max);
			ball.push_back(s_max);
			ball.push_back(v_max);
			values.push_back(ball);

		}
		else if(variablecount == 2){
			variable = "Goal";
			goal.push_back(h_min);
			goal.push_back(s_min);
			goal.push_back(v_min);
			goal.push_back(h_max);
			goal.push_back(s_max);
			goal.push_back(v_max);
			values.push_back(goal);

		}
		else if(variablecount == 3){
			variable = "Lines";
			lines.push_back(h_min);
			lines.push_back(s_min);
			lines.push_back(v_min);
			lines.push_back(h_max);
			lines.push_back(s_max);
			lines.push_back(v_max);
			values.push_back(lines);
		}


		fout << variable << " " << h_min << " " << s_min <<
			" " << v_min << " "  << h_max << " " << s_max <<
			" " << v_max << endl;

		cout <<  variable << " " << h_min << " " << s_min <<
			" " << v_min << " "  << h_max << " " << s_max <<
			" " << v_max << endl;

		variablecount = variablecount +1;

		h_min = 0;
		s_min = 0;
		v_min = 0;
		h_max = 255;
		s_max = 255;
		v_max = 255;
		write = 0;


		setTrackbarPos("H min", "Thresh", h_min);
		setTrackbarPos("S min", "Thresh", s_min);
		setTrackbarPos("V min", "Thresh", v_min);
		setTrackbarPos("H max", "Thresh", h_max);
		setTrackbarPos("S max", "Thresh", s_max);
		setTrackbarPos("V max", "Thresh", v_max);
		setTrackbarPos("WRITE", "Thresh", write);
	}

	if (getconfig == 1)
	{
		// "QUIT CONFIG" slider: leave calibration and return the current values
		cvDestroyAllWindows();
		return values;
	}


	// take a frame, threshold it, display the thresholded image
	frame = cvQueryFrame( cap );
	frames = thresholder(frame, h_min, s_min, v_min, h_max, s_max, v_max);

	imshow("Thresh", frames[0]);
	imshow("CALIBRATOR", frames[0]);



	int c = cvWaitKey(10);
	if ((char)c == 27) {
		cvDestroyAllWindows();
		return values;
	}

	}

}
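// Hedged sketch: split() is used above to tokenize config lines but is not
// defined in this snippet. A minimal delimiter-based tokenizer with the same
// call shape could be:
vector<string> split(const string& s, const string& delim)
{
	vector<string> tokens;
	size_t start = 0, end;
	while ((end = s.find(delim, start)) != string::npos)
	{
		if (end > start) tokens.push_back(s.substr(start, end - start));
		start = end + delim.size();
	}
	if (start < s.size()) tokens.push_back(s.substr(start));
	return tokens;
}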
示例#26
0
void MultipleProcess::run()
{

    int FLAGS = CV_GUI_NORMAL | CV_WINDOW_AUTOSIZE;

    if (_input.size() != _process.size())
        return;

    if (_showOutput)
    {
        for (size_t i = 0; i < _input.size(); i++)
        {
            namedWindow(_windowName + to_string(i), FLAGS);
        }

    }
    if (matching)
        namedWindow(_matchinName, FLAGS);
    
    
    vector<Ptr<ProcessListenerWrapper>> wrappers;
    if (_mListener && _process.size())
    {
        for (size_t i = 0; i < _process.size(); i++)
        {
            Ptr<ProcessListenerWrapper> w = new ProcessListenerWrapper(i, _process[i], this);
            wrappers.push_back(w);
            cv::setMouseCallback(_windowName + to_string(i), MultipleProcess::mouseCallback, wrappers[i]);
        }
    }
    if (_mListener && matching)
        cv::setMouseCallback(_matchinName, Processor::mouseCallback, matching);
        
    if (!_input.size() && !_process.size())
        return;


    vector<long> frameN;
    Mat freezeFrame;
    bool freezed = true;
    bool running = true;
    int key = Keys::NONE;

    // initialize the freezeFrame
    bool allSequencesReady = true;
    vector<Mat>  freezedFrames;
    vector<bool> hasFrame;

    for (size_t i = 0; i < _input.size(); i++)
    {
        Mat tmp;
        bool _retrieved = _input[i]->getFrame(tmp, _startFrame);
        allSequencesReady = allSequencesReady && _retrieved;
        freezedFrames.push_back(tmp);
        hasFrame.push_back(_retrieved);
		long frame = 0;
		frame += _startFrame;
		frameN.push_back(frame);
    }

    while (running && allSequencesReady)
    {

        vector<Mat> frame(_input.size()), frameOut(_input.size());
        if (!freezed || key == Keys::n) // playing, or paused and stepping forward
        {
            for( size_t i = 0; i < _input.size(); i++)
            {
                hasFrame[i] = _input[i]->getFrame(frame[i], 1);
                freezedFrames[i] = frame[i];
				frameN[i]++;
            }
        }
        else if (freezed && key == Keys::p) // paused and stepping back one frame
        {
            for( size_t i = 0; i < _input.size(); i++)
            {
                if (frameN[i] > 0)
                {
                    hasFrame[i] = _input[i]->getFrame(frame[i], -1);
                    freezedFrames[i] = frame[i];
                    frameN[i]--;
                }
            }
        }
        else
        {
            for( size_t i = 0; i < _input.size(); i++)
            {
                frame[i] = freezedFrames[i];
            }
        }

        // true while at least one input still delivers frames
        bool anyFrameAvailable = false;
        for (size_t i = 0; i < _input.size(); i++)
        {
            anyFrameAvailable = anyFrameAvailable || hasFrame[i];
        }

        if (anyFrameAvailable)
        {

            for (size_t i = 0; i < _input.size(); i++)
            {

                if (_process.size())
                    _process[i]->operator()(frameN[i], frame[i], frameOut[i]);

                if (_showOutput && !frameOut[i].empty())
                    cv::imshow(_windowName + to_string(i), frameOut[i]);
                if (_output.size())
                    _output[i]->writeFrame(frameOut[i]);
            }
            if (matching)
            {
                Mat tmp, moutput;
                matching->operator()(0, tmp, moutput);
                cv::imshow(_matchinName, moutput);
            }
            key = Keys::NONE;

            try
            {
                key = waitKey(1);
            }
            catch (...)
            {
                //...
            }

            if (key == Keys::ESC)
            {
                running = false;
            }
            if (key == Keys::SPACE || _pause)
            {
                _pause = false;
                for (size_t i = 0; i < _input.size(); i++)
                {
                    freezedFrames[i] = frame[i];
                }
                freezed = !freezed;
            }
            if (_kListener && _process.size() && key != Keys::NONE)
            {
                for (size_t i = 0; i < _input.size(); i++)
                {
                    if (_activeWindow == i)
                        _process[i]->keyboardInput(key);
                }
            }
            
        }
        else
        {
            break;
        }
    }
    
    
    destroyAllWindows();
}
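// Hedged assumption: the Keys constants referenced above are not defined in
// this excerpt. A plausible definition, using the ASCII codes cv::waitKey
// typically returns, would be:
struct Keys
{
    enum { NONE = -1, ESC = 27, SPACE = 32, n = 'n', p = 'p' };
};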
示例#27
0
void* camera(void* arg) {
	//pFormatCtx=(AVFormatContext *)arg;
	char key;
	drawing=false;
	Ball.roll = Ball.pitch = Ball.gaz = Ball.yaw = 0;
	pthread_mutex_init(&mutexVideo, NULL);
	liste.suivant=NULL;
#if output_video == ov_remote_ffmpeg
	pthread_t ii;
	pthread_create(&ii, NULL, getimg, NULL);
#else	
	VideoCapture cap(0); // capture video from the webcam
#endif



#if output_video != ov_remote_ffmpeg

	if (!cap.isOpened()) {
		cout << "Impossible de lire le flux de la camera" << endl;
		return NULL;
	}
	Mat frame;
	cap >> frame;
	fSize.width = frame.cols;
	fSize.height = frame.rows;
#endif

	// Initialize the windows
	namedWindow(winDetected, 1);
	namedWindow(winOutputVideo, 1);

	// Create a black image the same size as our frame
	Mat imgLines = Mat::zeros(fSize.height, fSize.width, CV_8UC3);

	while (true) {

#if output_video != ov_remote_ffmpeg
		bool bSuccess = cap.read(imgOriginal); // grab a new frame
		if (!bSuccess) {
			cout << "Unable to read the video stream" << endl;
			break;
		}
#else
		pthread_mutex_lock(&mutexVideo);
		memcpy(img->imageData, pFrameBGR->data[0], pCodecCtx->width * ((pCodecCtx->height == 368) ? 360 : pCodecCtx->height) * sizeof(uint8_t) * 3);
		pthread_mutex_unlock(&mutexVideo);
		imgOriginal = cv::cvarrToMat(img, true);
#endif
		pthread_t mtId,ocId;
		// Launch the tracking threads
		pthread_create(&mtId, NULL, &matchTemplate, NULL);
		pthread_create(&ocId, NULL, &opencv, NULL);
		
		pthread_join(mtId,NULL);
		pthread_join(ocId,NULL);

		// Combine the results of the two trackers
		Ball.setRealPos();

		// Draw the reference crosses
		imgLines.setTo(Scalar(255, 255, 255));
		drawCross(imgLines, fSize.width / 2, fSize.height / 2, Scalar(0, 0, 255));
		drawCross(imgLines, posX, posY, Scalar(0, 255, 0));

		imgOriginal = imgOriginal & imgLines; // Croise les resultats à la fenetre de sortie //

		// Display the windows
		imshow(winDetected, imgDetection);			// the image with the mask applied
		//imshow(winRepere, imgLines);				// the reference-cross overlay
		imshow(winOutputVideo, imgOriginal);		// the original image
		string Action = "Mouvement a effectuer : ";
		ObjCoord tmp = Ball.getRealPos();
		cout << "x " << tmp.Xcoord << " y " << tmp.Ycoord << " z " << tmp.Zcoord << endl;
		/*
		if(tmp.Zcoord == -1){
			Action += "Recule, "; Ball.pitch = 0.05f;
		}
		else if(tmp.Zcoord == 1){
			Action += "Avance, "; Ball.pitch = -0.05f;
		}
		else
		{
			Ball.pitch = 0;
		}
		*/
		if (tmp.Xcoord <= 35.0 && tmp.Xcoord != 0) {
			Ball.yaw = -0.2f;
			Action += "Gauche ("+ to_string(Ball.yaw)+"%), ";
		} else if (tmp.Xcoord >= 65.0) {
			Ball.yaw = 0.2f;
			Action += "Droite ("+ to_string(Ball.yaw)+"%), ";
		}
		else
		{
			Ball.yaw = 0;	
		}
		if (tmp.Ycoord >= 65.0) {
			Action += "Descendre";  Ball.gaz = -0.2f;
		} else if (tmp.Ycoord <= 35.0 && tmp.Ycoord != 0) {
			Action += "Monter";    Ball.gaz = 0.2f;
		}
		else
		{
			Ball.gaz = 0;
		}
		/*if(Ball.pitch != 0) {
			Ball.roll = Ball.yaw / 2;
			Ball.yaw = 0;
		}*/
		if(tmp.Xcoord == 0 && tmp.Ycoord == 0 && tmp.Zcoord == 0)
		{
			Ball.roll = Ball.pitch = Ball.gaz = Ball.yaw = 0;
			
		}
		if(Ball.pitch == 0)
			AtCmd::sendMovement(0, Ball.roll, Ball.pitch, Ball.gaz, Ball.yaw);
		else
			AtCmd::sendMovement(3, Ball.roll, Ball.pitch, Ball.gaz, Ball.yaw);
		//cout << Action << endl;
		key=waitKey(10);
		if(key == 10)
		{
			enVol=true;
			key=-1;
		}
		else if (key != -1) // any other key press quits
		{
			break;
		}
	}
	stopTracking=true;
	destroyAllWindows();
	return NULL;
}
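// Hedged sketch: drawCross() is called above but not defined in this snippet.
// A minimal version drawing a small cross at (x, y) could be:
void drawCross(Mat& img, int x, int y, Scalar color)
{
	line(img, Point(x - 5, y), Point(x + 5, y), color, 2); // horizontal bar
	line(img, Point(x, y - 5), Point(x, y + 5), color, 2); // vertical bar
}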
/* constructor */
frameHandler::frameHandler(VideoCapture& inputCapture){
  capture = inputCapture;  
  logFile.open("../log/log.txt");
  //create a window to show the result
  namedWindow("Vedio",CV_WINDOW_AUTOSIZE); 
}
示例#29
0
File: RIAR.cpp  Project: osilvam/Practica
int main(int argc, char* argv[])
{
    srand (time(0));    

    if(argc < 4)
    {
        cerr << "ERROR: The number of arguments is incorrect" << endl << "Enter:\targ_1 = user_definition_file\t arg_2 = genetic_encoding_file\t arg_3 = port_number";
        return -1;
    }

    if(system((char*)"mkdir -p NEAT_organisms") == -1)
    {
        cerr << "TRAIN ERROR:\tFailed to create folder 'NEAT_organisms'" << endl;
    }

    if(system("rm -f NEAT_organisms/*") == -1)
    {
        cerr << "TRAIN ERROR:\tFailed to remove files inside of 'NEAT_organisms'" << endl;
    }

    SimFiles * simfile = new SimFiles();
    Fitness * fitness = new Fitness();
    RobotVREP * vrep = new RobotVREP(false, atoi(argv[3]));    
    Retina * retina = new Retina(); 

    // ============= VREP INITIALIZATIONS ============= //
    
    Joint * rightWheel = new Joint((char*)"SCALE", (char*)"motor_der");
    Joint * leftWheel = new Joint((char*)"SCALE", (char*)"motor_izq");
    vrep->addJoint(rightWheel);
    vrep->addJoint(leftWheel);

    VisionSensor * visionSensor = new VisionSensor((char*)"VisionSensor");
    vrep->addVisionSensor(visionSensor);

    Object * centerDummy = new Object((char*)"modi_dummy");
    Object * Modi = new Object((char*)"MODI");
    vrep->addObject(centerDummy);
    vrep->addObject(Modi);
    
    CollisionObject * chasis = new CollisionObject((char*)"Collision_MODI_1#");
    CollisionObject * rueda1 = new CollisionObject((char*)"Collision_MODI_2#");
    CollisionObject * rueda2 = new CollisionObject((char*)"Collision_MODI_3#");
    vrep->addCollisionObject(chasis);
    vrep->addCollisionObject(rueda1);
    vrep->addCollisionObject(rueda2);
    vector < CollisionObject * > structure = {chasis, rueda1, rueda2};

    vector < Object * > cubes;

    // Set random position of Obstacles

    double y0 = -2;

    for(int cp_y = 0; cp_y < 9; cp_y++)
    {   
        double x0 = -2 + 0.25*(cp_y%2);

        for(int cp_x = 0; cp_x < 8 + (cp_y + 1)%2; cp_x++)
        {            
            if(9*cp_y + cp_x != 40)
            {
                stringstream sstm1;
                sstm1 << "Obstacle" << 9*cp_y+cp_x<< "#";

                Object * obstacle = new Object((char*)sstm1.str().c_str());
                vrep->addObject(obstacle);

                double rand1 = rand()%201 - 100;
                double rand2 = rand()%201 - 100;

                vector < double > position;

                position.push_back(x0 + rand1/100*.10);
                position.push_back(y0 + rand2/100*.10);
                position.push_back(0.05);

                vrep->setObjectPosition(obstacle, position);

                cubes.push_back(obstacle);
            }

            x0 = x0 + 0.5;
        }

        y0 = y0 + 0.5;
    }

    // ================================================ //

    // ========== NEAT INITIALIZATIONS =========== //

    vector < double > output(2,0.0);
    vector < double > input(NX*NY+2,0.0);

    Population population(argv[1], argv[2], (char *)"NEAT_RIAR", (char *)"./NEAT_organisms");

    // ================================================ //
    
    namedWindow( "Display window", WINDOW_AUTOSIZE ); // Create a window for display.

    int finalChampionGeneration = 0;
    int finalChampionPopulation = 0;
    double finalChampionFitness = 0.0;

    for(int g = 0; g < population.GENERATIONS; g++)
    {
        fitness->resetGenerationValues();

        int generationChampionPopulation = 0;
        double generationChampionFitness = 0.0;
     
        for(int p = 0; p < population.POPULATION_MAX; p++)
        {
            fitness->resetPopulationValues();

            int sim_time = 0;
            bool flag = true;
            int flag_times = 0;
            double rightVel = 0.0;
            double leftVel = 0.0;

            vrep->setJointTargetVelocity(rightWheel, 0.0);
            vrep->setJointTargetVelocity(leftWheel, 0.0);

            double rand1 = rand()%201 - 100;
            double rand2 = rand()%201 - 100;
            double rand3 = rand()%201 - 100;

            vector < double > position, orientation;

            position.push_back(rand1/100*.10);
            position.push_back(rand2/100*.10);
            position.push_back(0.03011);

            orientation.push_back(0);
            orientation.push_back(0);
            orientation.push_back(rand3/100*M_PI);
            
            vrep->setObjectPosition(Modi, position);
            vrep->setObjectOrientation(Modi, orientation);

            unsigned char * image;
            Mat frameRGB = Mat(NX, NY, CV_8UC3);
            Mat frameGRAY = Mat(NX, NY, CV_8UC1);

            stringstream message1, message2, video_name;            

            message1 << "Generation " << g << " Population " << p;
            vrep->addStatusbarMessage((char*)message1.str().c_str());

            video_name << "G" << g << "P" << p;
            //vrep->changeVideoName((char *)video_name.str().c_str(), simx_opmode_oneshot_wait);

            simfile->openRobotMovementFile(g, p);
            simfile->openRobotMotorVelocityFile(g, p);
            
            clog << "=======  G" << g << " P" << p << "  =======  " << endl;

            vrep->startSimulation(simx_opmode_oneshot_wait);

            timeval tv1, tv2;       
            gettimeofday(&tv1, NULL);            

            while(sim_time < TIME_SIMULATION && flag)
            {            
                image = vrep->getVisionSensorImage(visionSensor);  
                frameRGB.data = image;
                flip(frameRGB, frameRGB, 0);
                cvtColor(frameRGB,frameGRAY,CV_BGR2GRAY);
                
                Mat tmp = frameGRAY;
                Mat frame = tmp;

                tmp = retina->GetImg(frame);

                resize(tmp, frame, Size(0,0) , 6.0, 6.0, (int)INTER_NEAREST );
                imshow( "Display window", frame );
                waitKey(10);

                for(int i = 0; i < NY; i++)
                {
                    for(int j = 0;j < NX; j++)
                    {
                        input.at(i*NX + j) = (double)frame.at<uchar>(i,j)/255*2-1;
                    }
                }
                
                input.at(NX*NY) = (double)((2.0/(MAX_VEL - MIN_VEL))*(rightVel - MIN_VEL) - 1.0);
                input.at(NX*NY + 1) = (double)((2.0/(MAX_VEL - MIN_VEL))*(leftVel - MIN_VEL) - 1.0);

                output = population.organisms.at(p).eval(input);

                rightVel = output.at(0) + rightVel;
                leftVel = output.at(1) + leftVel;

                if(rightVel > MAX_VEL) rightVel = MAX_VEL;
                else if(rightVel < MIN_VEL) rightVel = MIN_VEL;
                if(leftVel > MAX_VEL) leftVel = MAX_VEL;
                else if(leftVel < MIN_VEL) leftVel = MIN_VEL;

                vrep->setJointTargetVelocity(rightWheel,-rightVel);
                vrep->setJointTargetVelocity(leftWheel,leftVel);                                              

                if(sim_time > TIME_INIT_MEASURING)
                {
                    position = vrep->getObjectPosition(centerDummy);
                    orientation = vrep->getObjectOrientation(centerDummy);

                    simfile->addRobotMovementFile((double)sim_time/1000000.0, position, orientation.at(2));
                    simfile->addRobotMotorVelocityFile((double)sim_time/1000000.0, rightVel, leftVel);

                    fitness->measuringValues(position, rightVel, leftVel, vrep->readCollision(structure));

                    if (abs(orientation.at(0)) > 0.78 || abs(orientation.at(1)) > 0.78)
                    {
                        flag_times++;
                        if(flag_times > 10) flag = false;
                    }else
                        flag_times = 0;
                }                         

                usleep(DELTA_TIME - EXECUTION_TIME);
                sim_time += DELTA_TIME;
            }            

            vrep->stopSimulation(simx_opmode_oneshot_wait);

            gettimeofday(&tv2, NULL);
            long int simulationtime = ((tv2.tv_sec - tv1.tv_sec)*1000000L + tv2.tv_usec) - tv1.tv_usec;   

            simfile->closeRobotMovementFile();
            simfile->closeRobotMotorVelocityFile();  

            if (flag)
            {                
                population.organisms.at(p).fitness = fitness->calculateFitness();             
                simfile->addFileResults(fitness->getFitness(), g, p);

                clog << "Fitness:\t" << fitness->getFitness() << endl;
                clog << "Distance:\t" << fitness->getDistance() << endl;
                clog << "Tiempo de simulación:\t" << (double)simulationtime/1000000 << endl;
                clog << endl;            

                message2 << "FITNESS : " << fitness->getFitness();
                vrep->addStatusbarMessage((char*)message2.str().c_str());

                if(generationChampionFitness < fitness->getFitness())
                {
                    generationChampionPopulation = p;
                    generationChampionFitness = fitness->getFitness();
                }
            }
            else
            {
                clog << "OVERTURNING! The simulation has stopped" << endl;
                population.organisms.at(p).fitness = FAILED_FITNESS;
            }                
        }

        simfile->addFileChampion(generationChampionFitness, g, generationChampionPopulation);
        simfile->addFileFitness(fitness->getGenerationFitness(), g);        

        //////////////////////////// SAVE CHAMPION FILES /////////////////////////////////

        stringstream generation_champion_filename;
        generation_champion_filename << "NEAT_organisms/Champion_G" << g << "P" << generationChampionPopulation << ".txt";
        population.organisms.at(generationChampionPopulation).save((char *)generation_champion_filename.str().c_str());

        stringstream cp_gen_champion_movement, cp_gen_champion_motorVelocity;

        cp_gen_champion_movement << "cp simulation_files/movement/movement_G" << g << "P" << generationChampionPopulation << ".txt ./simulation_files/movement/Champion_G" << g << "P" << generationChampionPopulation << ".txt";
        cp_gen_champion_motorVelocity << "cp simulation_files/motorVelocity/motorVelocity_G" << g << "P" << generationChampionPopulation << ".txt ./simulation_files/motorVelocity/Champion_G" << g << "P" << generationChampionPopulation << ".txt";

        if(system((char*)cp_gen_champion_movement.str().c_str()) == -1)
        {
            cerr << "TRAIN ERROR:\tFailed to copy the Champion movement File" << endl;
        }
        else
        {
            if(system("rm -f ./simulation_files/movement/movement_G*.txt") == -1)
            {
                cerr << "TRAIN ERROR:\tFailed to remove useless files" << endl;
            }
        }

        if(system((char*)cp_gen_champion_motorVelocity.str().c_str()) == -1)
        {
            cerr << "TRAIN ERROR:\tFailed to copy the Champion motor velocity File" << endl;
        }
        else
        {
            if(system("rm -f ./simulation_files/motorVelocity/motorVelocity_G*.txt") == -1)
            {
                cerr << "TRAIN ERROR:\tFailed to remove useless files" << endl;
            }
        }

        ///////////////////////////////////////////////////////////////////////////////////

        population.epoch();        

        if(finalChampionFitness < generationChampionFitness)
        {
            finalChampionGeneration = g;
            finalChampionPopulation = generationChampionPopulation;
            finalChampionFitness = generationChampionFitness;
        }
    }

    //////////////////////////// SAVE CHAMPION FILES /////////////////////////////////

    stringstream cp_champion_organism, cp_champion_movement, cp_champion_motorVelocity;
    
    cp_champion_organism << "cp NEAT_organisms/Champion_G" << finalChampionGeneration << "P" << finalChampionPopulation << ".txt ./NEAT_organisms/Champion.txt";
    cp_champion_movement << "cp simulation_files/movement/Champion_G" << finalChampionGeneration << "P" << finalChampionPopulation << ".txt ./simulation_files/movement/Champion.txt";
    cp_champion_motorVelocity << "cp simulation_files/motorVelocity/Champion_G" << finalChampionGeneration << "P" << finalChampionPopulation << ".txt ./simulation_files/motorVelocity/Champion.txt";
        
    if(system((char*)cp_champion_organism.str().c_str()) == -1)
    {
        cerr << "TRAIN ERROR:\tFailed to copy the Champion Organism File" << endl;
    }

    if(system((char*)cp_champion_movement.str().c_str()) == -1)
    {
        cerr << "TRAIN ERROR:\tFailed to copy the Champion Movement File" << endl;
    }

    if(system((char*)cp_champion_motorVelocity.str().c_str()) == -1)
    {
        cerr << "TRAIN ERROR:\tFailed to copy the Champion Motor Velocity File" << endl;
    }
    
    ///////////////////////////////////////////////////////////////////////////////////

    clog << "Fitness champion: " << finalChampionFitness << "\n\n"<< endl;

    delete(vrep);
    delete(simfile);
    delete(fitness);
    
    return(0);
}
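// Hedged refactoring note (not part of the original code): the network inputs
// above are scaled with the same affine map in three places (pixels and both
// wheel velocities); factored out it would read:
static double scaleToUnitRange(double v, double lo, double hi)
{
    // maps v in [lo, hi] linearly onto [-1.0, 1.0]
    return 2.0 * (v - lo) / (hi - lo) - 1.0;
}
// e.g. input.at(NX*NY) = scaleToUnitRange(rightVel, MIN_VEL, MAX_VEL);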
///////////////////////////////////////////////////////
// Panel::CalibrateCamera() Description
///////////////////////////////////////////////////////
void Panel::CalibrateCamera(string sFilePath)
{
	help();

	//! [file_read]
	Settings s;
	const string inputSettingsFile = sFilePath;
	FileStorage fs(inputSettingsFile, FileStorage::READ); // Read the settings
	if (!fs.isOpened())
	{
		cout << "Could not open the configuration file: \"" << inputSettingsFile << "\"" << endl;
//		return -1;
	}
	fs["Settings"] >> s;
	fs.release();                                         // close Settings file
	//! [file_read]

	//FileStorage fout("settings.yml", FileStorage::WRITE); // write config as YAML
	//fout << "Settings" << s;

	if (!s.goodInput)
	{
		cout << "Invalid input detected. Application stopping. " << endl;
//		return -1;
	}

	vector<vector<Point2f> > imagePoints;
	Mat cameraMatrix, distCoeffs;
	Size imageSize;
	int mode = s.inputType == Settings::IMAGE_LIST ? CAPTURING : DETECTION;
	clock_t prevTimestamp = 0;
	const Scalar RED(0, 0, 255), GREEN(0, 255, 0);
	const char ESC_KEY = 27;
	int counter = 1;

	//! [get_input]
	for (;;)
	{
		Mat view;
		bool blinkOutput = false;

		view = s.nextImage();

		//-----  If no more image, or got enough, then stop calibration and show result -------------
		if (mode == CAPTURING && imagePoints.size() >= (size_t)s.nrFrames)
		{
			if (runCalibrationAndSave(s, imageSize, cameraMatrix, distCoeffs, imagePoints))
				mode = CALIBRATED;
			else
				mode = DETECTION;
		}
		if (view.empty())          // If there are no more images stop the loop
		{
			// if calibration threshold was not reached yet, calibrate now
			if (mode != CALIBRATED && !imagePoints.empty())
				runCalibrationAndSave(s, imageSize, cameraMatrix, distCoeffs, imagePoints);
			break;
		}
		//! [get_input]

		imageSize = view.size();  // Format input image.
		if (s.flipVertical)    flip(view, view, 0);

		//! [find_pattern]
		vector<Point2f> pointBuf;

		bool found;
		switch (s.calibrationPattern) // Find feature points on the input format
		{
		case Settings::CHESSBOARD:
			found = findChessboardCorners(view, s.boardSize, pointBuf,
				CALIB_CB_ADAPTIVE_THRESH | CALIB_CB_FAST_CHECK | CALIB_CB_NORMALIZE_IMAGE);
			break;
		case Settings::CIRCLES_GRID:
			found = findCirclesGrid(view, s.boardSize, pointBuf);
			break;
		case Settings::ASYMMETRIC_CIRCLES_GRID:
			found = findCirclesGrid(view, s.boardSize, pointBuf, CALIB_CB_ASYMMETRIC_GRID);
			break;
		default:
			found = false;
			break;
		}
		//! [find_pattern]
		//! [pattern_found]
		if (found)                // If done with success,
		{
			// improve the found corners' coordinate accuracy for chessboard
			if (s.calibrationPattern == Settings::CHESSBOARD)
			{
				Mat viewGray;
				cvtColor(view, viewGray, COLOR_BGR2GRAY);
				cornerSubPix(viewGray, pointBuf, Size(11, 11),
					Size(-1, -1), TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 30, 0.1));
			}

			if (mode == CAPTURING &&  // For camera only take new samples after delay time
				(!s.inputCapture.isOpened() || clock() - prevTimestamp > s.delay*1e-3*CLOCKS_PER_SEC))
			{
				imagePoints.push_back(pointBuf);
				prevTimestamp = clock();
				blinkOutput = s.inputCapture.isOpened();
			}

			// Draw the corners.
			drawChessboardCorners(view, s.boardSize, Mat(pointBuf), found);
		}
		//! [pattern_found]
		//----------------------------- Output Text ------------------------------------------------
		//! [output_text]
		string msg = (mode == CAPTURING) ? "100/100" :
			mode == CALIBRATED ? "Calibrated" : "Press 'g' to start";
		int baseLine = 0;
		Size textSize = getTextSize(msg, 1, 1, 1, &baseLine);
		Point textOrigin(view.cols - 2 * textSize.width - 10, view.rows - 2 * baseLine - 10);

		if (mode == CAPTURING)
		{
			if (s.showUndistorsed)
				msg = format("%d/%d Undist", (int)imagePoints.size(), s.nrFrames);
			else
				msg = format("%d/%d", (int)imagePoints.size(), s.nrFrames);
		}

		putText(view, msg, textOrigin, 1, 1, mode == CALIBRATED ? GREEN : RED);

		if (blinkOutput)
			bitwise_not(view, view);
		//! [output_text]
		//------------------------- Video capture  output  undistorted ------------------------------
		//! [output_undistorted]
		if (mode == CALIBRATED && s.showUndistorsed)
		{
			Mat temp = view.clone();
			undistort(temp, view, cameraMatrix, distCoeffs);
		}
		//! [output_undistorted]
		//------------------------------ Show image and check for input commands -------------------
		//! [await_input]
		
		namedWindow("Image View" + to_string(counter), WINDOW_NORMAL);
		resizeWindow("Image View" + to_string(counter), 640, 480);
		imshow("Image View" + to_string(counter), view);
		char key = (char)waitKey(s.inputCapture.isOpened() ? 50 : s.delay);

		cout << "Image " << to_string(counter) << " Completed" << endl;
		counter++;

		if (key == ESC_KEY)
			break;

		if (key == 'u' && mode == CALIBRATED)
			s.showUndistorsed = !s.showUndistorsed;

		if (s.inputCapture.isOpened() && key == 'g')
		{
			mode = CAPTURING;
			imagePoints.clear();
		}
		//! [await_input]
	}

	// -----------------------Show the undistorted image for the image list ------------------------
	//! [show_results]
	if (s.inputType == Settings::IMAGE_LIST && s.showUndistorsed)
	{
		Mat view, rview, map1, map2;
		initUndistortRectifyMap(cameraMatrix, distCoeffs, Mat(),
			getOptimalNewCameraMatrix(cameraMatrix, distCoeffs, imageSize, 1, imageSize, 0),
			imageSize, CV_16SC2, map1, map2);

		m_mainMap1 = map1;
		m_mainMap2 = map2;

		for (size_t i = 0; i < s.imageList.size(); i++)
		{
			view = imread(s.imageList[i], 1);
			if (view.empty())
				continue;
			remap(view, rview, map1, map2, INTER_LINEAR);
			imshow("Image View", rview);
			char c = (char)waitKey();
			if (c == ESC_KEY || c == 'q' || c == 'Q')
				break;
		}
	}
	//! [show_results]


}
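// Hedged sketch: the statement `fs["Settings"] >> s;` above relies on a free
// read() overload for Settings. In the OpenCV camera-calibration tutorial,
// which this function follows, it is defined along these lines:
static inline void read(const FileNode& node, Settings& x,
						const Settings& default_value = Settings())
{
	if (node.empty())
		x = default_value;          // keep defaults when the node is missing
	else
		x.read(node);               // Settings parses its own fields from the node
}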