Example #1
int main()
{

	if(run_tests_only)
	{
		MyLine3D::runTest();
		return 0;
	}

	//CvMat *camera_inner_calibration_matrix; 
	bool show_surf_example=false;
	bool show_calibration_from_camera_and_undistortion=false;
	if(show_calibration_from_camera_and_undistortion)
	{
		CvMat *object_points_all=0;
		CvMat *image_points_all=0;
		CvMat *points_count_all=0;
		CvMat *camera_matr=0;
		CvMat *distor_coefs=0;
		CvMat *rotation_vecs=0;
		CvMat *transpose_vecs=0;
		vector<CvPoint2D32f> qu_calibr_points;
		IplImage* frameCam1;
		cvNamedWindow("WindowCam1",CV_WINDOW_KEEPRATIO);
		CvCapture *captureCam1=cvCreateCameraCapture(0);
		IplImage *quarterFrame;
		CvPoint2D32f *cornersFounded= new CvPoint2D32f[100];
		int cornersCount=0;
		int result_Found=0;
		// getting snapshots for inner camera calibration from video camera
		bool capture_flag=false;
		while(true)
		{
			frameCam1=cvQueryFrame(captureCam1);
			quarterFrame=cvCreateImage(cvSize((frameCam1->width),(frameCam1->height)),IPL_DEPTH_8U,3);
		
			cvCopy(frameCam1,quarterFrame);
			if(capture_flag)
			{
				result_Found=cvFindChessboardCorners(quarterFrame,cvSize(chess_b_szW,chess_b_szH),cornersFounded,&cornersCount);//,CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS |CV_CALIB_CB_FAST_CHECK);
				cvDrawChessboardCorners(quarterFrame,cvSize(chess_b_szW,chess_b_szH),cornersFounded,cornersCount,result_Found);
				if(result_Found>0)
					AddPointsToInnerCalibrate(qu_calibr_points,cornersFounded,cornersCount);
				capture_flag=false;
				cvShowImage("WindowCam1",quarterFrame);
				if(result_Found>0)
					cvWaitKey(0);
			}
			char c=cvWaitKey(33);
			if(c==27)
				break;
			if(c==32 || c=='y' || c=='Y')
				capture_flag=true;
			cvShowImage("WindowCam1",quarterFrame);
			cvReleaseImage(&quarterFrame);
		
		}
		cvReleaseImage(&quarterFrame);
	
		cvReleaseCapture(&captureCam1);
		cvDestroyWindow("WindowCam1");
	
		PrintAllPointsForInnerCalibrate(qu_calibr_points,chess_b_szW*chess_b_szH);
		InitCvMatPointsParametersForInnerCallibration_part1(qu_calibr_points,chess_b_szW*chess_b_szH,object_points_all,image_points_all,points_count_all,chess_b_szW,chess_b_szH);
		InitOtherCameraParametersForInnerCallibration_part2(qu_calibr_points.size()/(chess_b_szW*chess_b_szH),camera_matr,distor_coefs,rotation_vecs,transpose_vecs);
		double calibration_error_result=cvCalibrateCamera2(object_points_all,
													image_points_all,
													points_count_all,
													cvSize(imgW,imgH),
													camera_matr,
													distor_coefs,
													rotation_vecs,
													transpose_vecs,
													CV_CALIB_FIX_PRINCIPAL_POINT|CV_CALIB_FIX_ASPECT_RATIO|CV_CALIB_ZERO_TANGENT_DIST
													);
		WriteMatrixCoef(camera_matr);
		WriteMatrixCoef(distor_coefs);
		//camera_inner_calibration_matrix=cvCreateMat(3,3,CV_32FC1);
		//cvCopy(camera_matr,camera_inner_calibration_matrix);
		cvSave("camera_calibration_inner.txt",camera_matr,"camera_inner_calibration_matrix");
		cvSave("camera_calibration_dist.txt",distor_coefs,"distor_coefs","coeficients of distortions");
		cout<<"Total Error:"<<calibration_error_result<<endl;
		cout<<"Average Calibration Error :"<<(calibration_error_result)/qu_calibr_points.size()<<endl;
	//undistortion example
		IplImage *frame_cur;
		IplImage *undistor_image;
		cvNamedWindow("cameraUndistor",CV_WINDOW_KEEPRATIO);
		CvCapture *captureCam2=cvCreateCameraCapture(0);
		bool undist_flag=false;
		while(true)
		{
			frame_cur= cvQueryFrame(captureCam2);
			undistor_image=cvCreateImage(cvSize((frame_cur->width),(frame_cur->height)),IPL_DEPTH_8U,3);
			if(undist_flag)
			{
				cvUndistort2(frame_cur,undistor_image,camera_matr,distor_coefs);
			}
			else
			{
				cvCopy(frame_cur,undistor_image);
			}
			cvShowImage("cameraUndistor",undistor_image);
			char c=cvWaitKey(33);
			if(c==27)
				break;
			if(c=='u'||c=='U')
				undist_flag=!undist_flag;

			cvReleaseImage(&undistor_image);

		}
		cvReleaseImage(&undistor_image);
		cvReleaseCapture(&captureCam2);
		cvDestroyWindow("cameraUndistor");
	}//ending undistortion_example
	
	if(show_surf_example)
	{
		//using SURF
		
		initModule_nonfree();// added at 16.04.2013
		CvCapture* capture_cam_3=cvCreateCameraCapture(0);
		cvNamedWindow("SURF from Cam",CV_WINDOW_KEEPRATIO);
		cvCreateTrackbar("Hessian Level","SURF from Cam",0,1000,onTrackbarSlide1);
		IplImage* buf_frame_3=0;
		IplImage* gray_copy=0;
		IplImage* buf_frame_3_copy=0;
	
		CvSeq *kp1,*descr1;
		CvMemStorage *storage=cvCreateMemStorage(0);
	
		CvSURFPoint *surf_pt;
		bool surf_flag=false;
		while(true)
		{
			buf_frame_3=cvQueryFrame(capture_cam_3);
		
			if(surf_flag)
			{
				surf_flag=false;
				gray_copy=cvCreateImage(cvSize((buf_frame_3->width),(buf_frame_3->height)),IPL_DEPTH_8U,1);
				buf_frame_3_copy=cvCreateImage(cvSize((buf_frame_3->width),(buf_frame_3->height)),IPL_DEPTH_8U,3);
			
				cvCvtColor(buf_frame_3,gray_copy,CV_RGB2GRAY);
				//cvSetImageROI(gray_copy,cvRect(280,200,40,40));
				cvExtractSURF(gray_copy,NULL,&kp1,&descr1,storage,cvSURFParams(0.0,0));
				cvReleaseImage(&gray_copy);
				re_draw=true;
			
				while(true)
				{
					if(re_draw)
					{
			
						cvCopy(buf_frame_3,buf_frame_3_copy);
						double pi=acos(-1.0);
						for(int i=0;i<kp1->total;i++)
						{
							surf_pt=(CvSURFPoint*)cvGetSeqElem(kp1,i);
							if(surf_pt->hessian<min_hessian)
								continue;
							int pt_x,pt_y;
							pt_x=(int)(surf_pt->pt.x);
							pt_y=(int)(surf_pt->pt.y);
							int sz=surf_pt->size;
							double rad_angle=(surf_pt->dir*pi)/180;
				
							cvCircle(buf_frame_3_copy,cvPoint(pt_x,pt_y),1/*sz*/,CV_RGB(0,255,0));
							cvLine(buf_frame_3_copy,cvPoint(pt_x,pt_y),cvPoint(pt_x+sz*cosl(rad_angle),pt_y-sz*sinl(rad_angle)),CV_RGB(0,0,255));
						}
						cvShowImage("SURF from Cam",buf_frame_3_copy);
					
					}
					char c=cvWaitKey(33);
					if(c==27)
					{
					
					
						break;
					}
				}
				cvReleaseImage(&buf_frame_3_copy);
			}
			
			cvShowImage("SURF from Cam",buf_frame_3);
			char ch=cvWaitKey(33);
			if(ch==27)
				break;
			if(ch==32)
				surf_flag=true;
		
		}
		if(gray_copy!=0)
			cvReleaseImage(&gray_copy);
		cvReleaseCapture(&capture_cam_3);
		cvDestroyWindow("SURF from Cam");
	}//ending SURF_example
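	// Side note (not part of the original flow): with the C++ 2.4.x nonfree API the same
	// SURF pass can be written more compactly. A sketch; the 400.0 hessian threshold is an
	// assumed tuning value, not taken from this code:
	//
	//   #include <opencv2/nonfree/features2d.hpp>
	//   cv::SURF surf(400.0);
	//   std::vector<cv::KeyPoint> keypoints;
	//   cv::Mat gray, descriptors;
	//   cv::cvtColor(cv::Mat(buf_frame_3), gray, CV_BGR2GRAY);
	//   surf(gray, cv::Mat(), keypoints, descriptors);
	//   // keypoints[i].pt/.size/.angle correspond to the CvSURFPoint fields used above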

	CvFont my_font=cvFont(1,1);
	cvInitFont(&my_font,CV_FONT_HERSHEY_SIMPLEX,1.0,1.0);

	cvNamedWindow("twoSnapshots",CV_WINDOW_KEEPRATIO);
	cvCreateTrackbar("Select LLine","twoSnapshots",0,1000,onTrackbarSlideSelectLine);
	CvCapture *capture_4 = 0;
	
	IplImage* left_img=0;
	IplImage* right_img=0;
	IplImage* cur_frame_buf=0;
	IplImage* gray_img_left=0;
	IplImage* gray_img_right=0;
	IplImage* merged_images=0;
	IplImage* merged_images_copy=0;
	CvMat *fundamentalMatrix = 0;
	vector<KeyPoint> key_points_left;
	Mat descriptors_left; 
	vector<KeyPoint> key_points_right;
	Mat descriptors_right;
	//CvMemStorage *mem_stor=cvCreateMemStorage(0);
	float min_hessian_value=1001.0f;

	double startValueOfFocus = 350;

	char* left_image_file_path = "camera_picture_left.png";
	char* right_image_file_path = "camera_picture_right.png";

	Array left_points, right_points;
	left_points.init(1,1);
	right_points.init(1,1);
	Array forReconstructionLeftPoints, forReconstructionRightPoints;
	forReconstructionLeftPoints.init(1,1);
	forReconstructionRightPoints.init(1,1);

	

	while(true)
	{
		char ch=cvWaitKey(33);
		if(ch==27)
			break;
		// open left and right images
		if(ch == 'o' || ch == 'O')
		{
			openTwoImages(left_image_file_path, right_image_file_path, left_img, right_img );
			MergeTwoImages(left_img,right_img,merged_images);
		}
		// save both left and right images from camera
		if(ch == 's' || ch == 'S')
		{
			if( left_img != 0 )
				cvSaveImage(left_image_file_path, left_img);
			if( right_img != 0)
				cvSaveImage(right_image_file_path, right_img);
		}

		if(ch=='l'||ch=='L')
		{
			if(capture_4 == 0)
			{
				capture_4=cvCreateCameraCapture(0);	
			}
			
			cur_frame_buf=cvQueryFrame(capture_4);
			if(left_img==0)
				left_img=cvCreateImage(cvSize(cur_frame_buf->width,cur_frame_buf->height),IPL_DEPTH_8U,3);
			cvCopy(cur_frame_buf,left_img);

			if(right_img == 0)
			{
				right_img=cvCreateImage(cvSize(cur_frame_buf->width,cur_frame_buf->height),IPL_DEPTH_8U,3);
				cvCopy(cur_frame_buf,right_img);
			}

			MergeTwoImages(left_img,right_img,merged_images);
		}
		if(ch=='r'||ch=='R')
		{
			if(capture_4 == 0)
			{
				capture_4=cvCreateCameraCapture(0);	
			}
			cur_frame_buf=cvQueryFrame(capture_4);
			if(right_img==0)
				right_img=cvCreateImage(cvSize(cur_frame_buf->width,cur_frame_buf->height),IPL_DEPTH_8U,3);
			cvCopy(cur_frame_buf,right_img);

			if(left_img == 0)
			{
				left_img=cvCreateImage(cvSize(cur_frame_buf->width,cur_frame_buf->height),IPL_DEPTH_8U,3);
				cvCopy(cur_frame_buf,left_img);
			}
			MergeTwoImages(left_img,right_img,merged_images);
		}
		if(ch=='b'||ch=='B')
		{
			if(capture_4 == 0)
			{
				capture_4=cvCreateCameraCapture(0);	
			}
			cur_frame_buf=cvQueryFrame(capture_4);
			cvCopy(cur_frame_buf,left_img);
			cvCopy(cur_frame_buf,right_img);
		}
		if((ch=='q'||ch=='Q') && left_img!=0)
		{
			//proceed left
			extractFeaturesFromImage(left_img, min_hessian_value, gray_img_left, key_points_left, descriptors_left);

		}
		if((ch=='w'||ch=='W') && right_img!=0)
		{
			//proceed right
			extractFeaturesFromImage(right_img, min_hessian_value, gray_img_right, key_points_right, descriptors_right);			

		}
		if((ch=='m'||ch=='M') && left_img!=0 && right_img!=0)
		{
			//merge two images in to bigger one
			MergeTwoImages(left_img,right_img,merged_images);
		}
		if((ch=='c'||ch=='C') && merged_images!=0)
		{
			//comparison of two images
			if(fundamentalMatrix != 0)
			{
				cvReleaseMat(& fundamentalMatrix);
				fundamentalMatrix = 0;
			}
			left_to_right_corresponding_points.clear();
			right_to_left_corresponding_points.clear();
			
			GetCorrespondingPointsForSURF(key_points_left,descriptors_left,key_points_right,descriptors_right,left_to_right_corresponding_points,right_to_left_corresponding_points);
		}

		if(ch == 'E' || ch == 'e')
		{
			//drawing lines for corresponding points
			KeyPoint *leftPoint,*rightPoint,*leftPoint2,*rightPoint2;
			int width_part=merged_images->width>>1;
			/*for(int iL=0;iL<left_to_right_corresponding_points.size();iL++)
			{
				leftPoint=(CvSURFPoint*)cvGetSeqElem(key_points_left,left_to_right_corresponding_points[iL].first);
				rightPoint=(CvSURFPoint*)cvGetSeqElem(key_points_right,left_to_right_corresponding_points[iL].second);
				cvLine(merged_images,cvPoint(leftPoint->pt.x,leftPoint->pt.y),cvPoint(rightPoint->pt.x+width_part,rightPoint->pt.y),CV_RGB(255,0,0));
			}*/
			
			int sizeOfAccepptedLeftToRightCorrespondings = left_to_right_corresponding_points.size();
			bool* acceptedLeftToRightCorrespondings = 0;
			getAcceptedCorrespondingsForFindingModelParameters(left_to_right_corresponding_points,
				key_points_left,
				key_points_right,
				fundamentalMatrix,
				acceptedLeftToRightCorrespondings,
				sizeOfAccepptedLeftToRightCorrespondings);

			
			while(true)
			{
				merged_images_copy=cvCreateImage(cvSize(merged_images->width,merged_images->height),merged_images->depth,3);
				cvCopy(merged_images,merged_images_copy);
				int iL=selectedLeftLine;
				int iR=iL;
				if(iL>=left_to_right_corresponding_points.size())
					iL=left_to_right_corresponding_points.size()-1;
				if(iR>=right_to_left_corresponding_points.size())
					iR=right_to_left_corresponding_points.size()-1;
				char str[100]={0};
				if(iL >= 0 )
				{
					bool isLeftToRightLineIsAccepted = acceptedLeftToRightCorrespondings[iL];
				
					// difference value
					sprintf(str,"%f",left_to_right_corresponding_points[iL].comparer_value);
					cvPutText(merged_images_copy,str,cvPoint(0,merged_images_copy->height-40),&my_font,CV_RGB(0,255,0));
					// count of Matches
					sprintf(str,"%d",left_to_right_corresponding_points[iL].counterOfMatches);
					cvPutText(merged_images_copy,str,cvPoint(200,merged_images_copy->height-40),&my_font,CV_RGB(255,255,0));
					// median of compared values
					sprintf(str,"%lf",left_to_right_corresponding_points[iL].medianOfComparedMatches);
					cvPutText(merged_images_copy,str,cvPoint(250,merged_images_copy->height-40),&my_font,CV_RGB(255,0,0));

					// Variance of compared values
					sprintf(str,"V=%lf",left_to_right_corresponding_points[iL].Variance());
					cvPutText(merged_images_copy,str,cvPoint(0,merged_images_copy->height-80),&my_font,CV_RGB(0,255,0));

					// Standard deviation of compared values
					sprintf(str,"SD=%lf",sqrt( left_to_right_corresponding_points[iL].Variance() ));
					cvPutText(merged_images_copy,str,cvPoint(250,merged_images_copy->height-80),&my_font,CV_RGB(0,255,0));

					double SD = sqrt( left_to_right_corresponding_points[iL].Variance() ) ;
					double median = left_to_right_corresponding_points[iL].medianOfComparedMatches;
					double compValue = left_to_right_corresponding_points[iL].comparer_value;
					double mark_1_5 = median - 1.5 * SD - compValue;

					// Mark 1.5
					sprintf(str,"m1.5=%lf", mark_1_5);
					cvPutText(merged_images_copy,str,cvPoint(0,merged_images_copy->height-120),&my_font,CV_RGB(0,255,0));

					sprintf(str,"angle=%lf", left_to_right_corresponding_points[iL].degreesBetweenDeltaVector);
					cvPutText(merged_images_copy,str,cvPoint(0,merged_images_copy->height-150),&my_font,CV_RGB(0,255,0));

					

					leftPoint= &(key_points_left[ left_to_right_corresponding_points[iL].comp_pair.first ]);
					rightPoint=&(key_points_right[ left_to_right_corresponding_points[iL].comp_pair.second ]);
				
					cvLine(merged_images_copy,cvPoint(leftPoint->pt.x,leftPoint->pt.y),cvPoint(rightPoint->pt.x+width_part,rightPoint->pt.y),CV_RGB(0,255,0));

					drawEpipolarLinesOnLeftAndRightImages(merged_images_copy, cvPoint(leftPoint->pt.x,leftPoint->pt.y),
						cvPoint(rightPoint->pt.x,rightPoint->pt.y), fundamentalMatrix);

					CvScalar color = CV_RGB(255, 0, 0);
					if(isLeftToRightLineIsAccepted)
					{
						color = CV_RGB(0,255,0);
					}

					cvCircle(merged_images_copy, cvPoint(leftPoint->pt.x,leftPoint->pt.y), 5, color);
					cvCircle(merged_images_copy, cvPoint(rightPoint->pt.x+width_part,rightPoint->pt.y), 5, color);
				}
				//cvLine(merged_images_copy,cvPoint(leftPoint->pt.x,leftPoint->pt.y),cvPoint(rightPoint->pt.x,rightPoint->pt.y),CV_RGB(255,0,255));
				if(iR >= 0 )
				{
					sprintf(str,"%f",right_to_left_corresponding_points[iR].comparer_value);
					cvPutText(merged_images_copy,str,cvPoint(width_part,merged_images_copy->height-40),&my_font,CV_RGB(255,0,0));
					rightPoint2= &(key_points_right [right_to_left_corresponding_points[iR].comp_pair.first]);
					leftPoint2= &(key_points_left [right_to_left_corresponding_points[iR].comp_pair.second]);
					cvLine(merged_images_copy,cvPoint(leftPoint2->pt.x,leftPoint2->pt.y),cvPoint(rightPoint2->pt.x+width_part,rightPoint2->pt.y),CV_RGB(255,0,0));
				}
				//cvLine(merged_images_copy,cvPoint(leftPoint2->pt.x+width_part,leftPoint2->pt.y),cvPoint(rightPoint2->pt.x+width_part,rightPoint2->pt.y),CV_RGB(255,0,255));
				
				cvShowImage("twoSnapshots",merged_images_copy);
				cvReleaseImage(&merged_images_copy);
				char ch2=cvWaitKey(33);
				if(ch2==27)
					break;
				if(ch2=='z' && selectedLeftLine>0)
				{
					selectedLeftLine--;
				}
				if(ch2=='x' && selectedLeftLine<1000)
				{
					selectedLeftLine++;
				}
				if( (ch2 == 'a' || ch2 == 'A') && iL >= 0 )
				{
					acceptedLeftToRightCorrespondings[iL] = true;
				}
				if( (ch2 == 'd' || ch2 == 'D') && iL >= 0 )
				{
					acceptedLeftToRightCorrespondings[iL] = false;
				}
			}//end of while(true)

			SaveAcceptedCorresspondings(
					left_to_right_corresponding_points,
					right_to_left_corresponding_points,
					key_points_left,
					key_points_right,
					acceptedLeftToRightCorrespondings,
					sizeOfAccepptedLeftToRightCorrespondings
					);
			ConvertAcceptedCorresspondingsToMyArray(left_to_right_corresponding_points,
					right_to_left_corresponding_points,
					key_points_left,
					key_points_right,
					acceptedLeftToRightCorrespondings,
					sizeOfAccepptedLeftToRightCorrespondings,
					left_points,
					right_points
					);


			delete[] acceptedLeftToRightCorrespondings;
		}
		if( ch == 'T' || ch == 't')
		{
			clock_t startTime = clock();

			openTwoImages(left_image_file_path, right_image_file_path, left_img, right_img );
			// proceed left
			extractFeaturesFromImage(left_img, min_hessian_value, gray_img_left, key_points_left, descriptors_left);
			//proceed right
			extractFeaturesFromImage(right_img, min_hessian_value, gray_img_right, key_points_right, descriptors_right);	
			//comparison of two images
			if(fundamentalMatrix != 0)
			{
				cvReleaseMat(& fundamentalMatrix);
				fundamentalMatrix = 0;
			}
			left_to_right_corresponding_points.clear();
			right_to_left_corresponding_points.clear();
			
			GetCorrespondingPointsForSURF(key_points_left,descriptors_left,key_points_right,descriptors_right,left_to_right_corresponding_points,right_to_left_corresponding_points);

			// searching fundamental matrix and corresponding points
			findFundamentalMatrixAndCorrespondingPointsForReconstruction(
				left_to_right_corresponding_points,
				right_to_left_corresponding_points,
				fundamentalMatrix,
				key_points_left,
				key_points_right,
				descriptors_left,
				descriptors_right,
				left_img,
				right_img,
				gray_img_left,
				gray_img_right,
				forReconstructionLeftPoints,
				forReconstructionRightPoints,
				min_hessian_value, 450);
			// selecting points for finding model parameters

			int sizeOfAccepptedLeftToRightCorrespondings = left_to_right_corresponding_points.size();
			bool* acceptedLeftToRightCorrespondings = 0;
			getAcceptedCorrespondingsForFindingModelParameters(left_to_right_corresponding_points,
				key_points_left,
				key_points_right,
				fundamentalMatrix,
				acceptedLeftToRightCorrespondings,
				sizeOfAccepptedLeftToRightCorrespondings);

			ConvertAcceptedCorresspondingsToMyArray(left_to_right_corresponding_points,
					right_to_left_corresponding_points,
					key_points_left,
					key_points_right,
					acceptedLeftToRightCorrespondings,
					sizeOfAccepptedLeftToRightCorrespondings,
					left_points,
					right_points
					);

			delete[] acceptedLeftToRightCorrespondings;

			// start process of determination parameters of model and reconstruction of scene
			cv::Mat mat_left_img(left_img, true);
			cv::Mat mat_right_img(right_img, true);
			mainLevenbergMarkvardt_LMFIT(startValueOfFocus, "currentPLYExportFile", left_points, right_points, 
				mat_left_img, mat_right_img,
				forReconstructionLeftPoints, forReconstructionRightPoints);
			mat_left_img.release();
			mat_right_img.release();


			cout << "Code execution time: "<< double( clock() - startTime ) / (double)CLOCKS_PER_SEC<< " seconds." << endl;
		}
		if( ch == 'I' || ch == 'i')
		{	

			//-- Step 3: Matching descriptor vectors using FLANN matcher
			FlannBasedMatcher matcher;
			std::vector< DMatch > matches;
			matcher.match( descriptors_left, descriptors_right, matches );

			//double max_dist = 0; double min_dist = 100;

			////-- Quick calculation of max and min distances between keypoints
			//for( int i = 0; i < descriptors_left.rows; i++ )
			//{ double dist = matches[i].distance;
			//	if( dist < min_dist ) min_dist = dist;
			//	if( dist > max_dist ) max_dist = dist;
			//}

			//printf("-- Max dist : %f \n", max_dist );
			//printf("-- Min dist : %f \n", min_dist );

			//-- Draw only "good" matches (i.e. whose distance is less than 2*min_dist,
			//-- or a small arbitrary value ( 0.02 ) in the event that min_dist is very
			//-- small)
			//-- PS.- radiusMatch can also be used here.
			//std::vector< DMatch > good_matches;
			
			left_to_right_corresponding_points.clear();
			right_to_left_corresponding_points.clear();

			for( int i = 0; i < descriptors_left.rows; i++ )
			{ 
				//if( matches[i].distance <= max(2*min_dist, 0.02) )
				{
					//good_matches.push_back( matches[i]); 
					left_to_right_corresponding_points.push_back( ComparedIndexes(matches[i].distance, pair<int, int> (i, matches[i].trainIdx)) );
				}
			}
			
			cout<< "Count of good matches :" << left_to_right_corresponding_points.size() << endl;

			stable_sort(left_to_right_corresponding_points.begin(),left_to_right_corresponding_points.end(),my_comparator_for_stable_sort);
		}

		//if( ch == 'K' || ch == 'k')
		//{
		//	CvSURFPoint *leftPoint;
		//	//proceed left
		//	gray_img_left=cvCreateImage(cvSize((left_img->width),(left_img->height)),IPL_DEPTH_8U,1);
		//	cvCvtColor(left_img,gray_img_left,CV_RGB2GRAY);
		//	cvExtractSURF(gray_img_left,NULL,&key_points_left,&descriptors_left,mem_stor,cvSURFParams(min_hessian_value,0));

		//	cv::Mat mat_gray_leftImage(gray_img_left, true);
		//	cvReleaseImage(&gray_img_left);
		//	// proceed right
		//	gray_img_right=cvCreateImage(cvSize((right_img->width),(right_img->height)),IPL_DEPTH_8U,1);
		//	cvCvtColor(right_img,gray_img_right,CV_RGB2GRAY);
		//	cv::Mat mat_gray_rightImage(gray_img_right, true);
		//	cvReleaseImage(&gray_img_right);
		//	vector<Point2f> LK_left_points;
		//	vector<Point2f> LK_right_points;

		//	LK_right_points.resize(key_points_left->total);

		//	for( int i = 0; i < key_points_left->total; i++)
		//	{
		//		leftPoint=(CvSURFPoint*)cvGetSeqElem(key_points_left, i);
		//		LK_left_points.push_back(Point2f( leftPoint->pt.x, leftPoint->pt.y));
		//	}
		//	
		//	vector<uchar> status;
  //          vector<float> err;

		//	cv::calcOpticalFlowPyrLK(
		//		mat_gray_leftImage,
		//		mat_gray_rightImage, 
		//		LK_left_points,
		//		LK_right_points, 
		//		status,
		//		err);
		//	int width_part=merged_images->width>>1;
		//	
		//	float minErr = err[0];

		//	for(int k = 0; k < err.size(); k++)
		//	{
		//		if(status[k] && err[k] < minErr) 
		//		{
		//			minErr = err[k];
		//		}
		//	}

		//	cout<< "Lucas-Kanade min error: " << minErr<< endl;

		//	int i = 0;
		//	merged_images_copy=cvCreateImage(cvSize(merged_images->width,merged_images->height),merged_images->depth,3);
		//	cvCopy(merged_images,merged_images_copy);
		//	for(; i < LK_left_points.size(); ++i)
		//	{
		//		if(err[i] < 5 * minErr && status[i])
		//		{
		//			cvLine(merged_images_copy,cvPoint(LK_left_points[i].x,LK_left_points[i].y),cvPoint(LK_right_points[i].x+width_part,LK_right_points[i].y),
		//					CV_RGB(100 + (( i *3) % 155), 100+ ((i*7)%155), 100+ ((i*13)%155)));
		//		}
		//	}

		//	cvShowImage("twoSnapshots",merged_images_copy);
		//		
		//	while(true)
		//	{

		//		char ch2=cvWaitKey(33);
		//		if(ch2==27)
		//			break;
		//		
		//	}
		//	
		//	cvReleaseImage(&merged_images_copy);

		//	status.clear();
		//	err.clear();
		//	LK_left_points.clear();
		//	LK_right_points.clear();
		//	mat_gray_leftImage.release();
		//	mat_gray_rightImage.release();
		//}

		if( ch == 'F' || ch == 'f')
		{
			findFundamentalMatrixAndCorrespondingPointsForReconstruction(
				left_to_right_corresponding_points,
				right_to_left_corresponding_points,
				fundamentalMatrix,
				key_points_left,
				key_points_right,
				descriptors_left,
				descriptors_right,
				left_img,
				right_img,
				gray_img_left,
				gray_img_right,
				forReconstructionLeftPoints,
				forReconstructionRightPoints,
				min_hessian_value);


		}
		if( ch == 'P' || ch == 'p')
		{
			cv::Mat mat_left_img(left_img, true);
			cv::Mat mat_right_img(right_img, true);
			mainLevenbergMarkvardt_LMFIT(startValueOfFocus, "currentPLYExportFile", left_points, right_points, 
				mat_left_img, mat_right_img,
				forReconstructionLeftPoints, forReconstructionRightPoints);
			mat_left_img.release();
			mat_right_img.release();
		}
		if(merged_images!=0)
		{
			cvShowImage("twoSnapshots",merged_images);
		}
		
	}

	return 0;
}
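The helper findFundamentalMatrixAndCorrespondingPointsForReconstruction used above is not part of this snippet; the core of such a routine is usually a cvFindFundamentalMat call. A minimal sketch, assuming pts1/pts2 are 2xN matrices filled with n matched left/right pixel coordinates (all names here are illustrative, not the author's):

/* sketch: estimate the fundamental matrix from n matched point pairs with RANSAC */
CvMat* estimateF(const CvMat* pts1, const CvMat* pts2, int n)
{
	CvMat* F = cvCreateMat(3, 3, CV_32FC1);
	CvMat* inlier_status = cvCreateMat(1, n, CV_8UC1);
	int found = cvFindFundamentalMat(pts1, pts2, F, CV_FM_RANSAC,
	                                 1.0,   /* max distance to the epipolar line, px */
	                                 0.99,  /* RANSAC confidence */
	                                 inlier_status);
	/* pairs whose inlier_status entry is 1 survive as inliers for reconstruction */
	cvReleaseMat(&inlier_status);
	if(!found) { cvReleaseMat(&F); return 0; }
	return F;
}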
Example #2
int main(int argc, char** argv)
{
	cvNamedWindow("src",0 );
	cvNamedWindow("warp image",0 );
	cvNamedWindow("warp image (grey)",0 );
	cvNamedWindow("Smoothed warped gray",0 );
	cvNamedWindow("threshold image",0 );
	cvNamedWindow("canny",0 );
	cvNamedWindow("final",1 );
		
	CvPoint2D32f srcQuad[4], dstQuad[4];
	CvMat* warp_matrix = cvCreateMat(3,3,CV_32FC1);
	float Z=1;

	dstQuad[0].x = 216; //src Top left
	dstQuad[0].y = 15;
	dstQuad[1].x = 392; //src Top right
	dstQuad[1].y = 6;
	dstQuad[2].x = 12; //src Bottom left
	dstQuad[2].y = 187;
	dstQuad[3].x = 620; //src Bot right
	dstQuad[3].y = 159;

	srcQuad[0].x = 100; //dst Top left
	srcQuad[0].y = 120;
	srcQuad[1].x = 540; //dst Top right
	srcQuad[1].y = 120;
	srcQuad[2].x = 100; //dst Bottom left
	srcQuad[2].y = 360;
	srcQuad[3].x = 540; //dst Bot right
	srcQuad[3].y = 360;

	cvGetPerspectiveTransform(srcQuad, dstQuad,	warp_matrix);
	
	//CvCapture *capture = cvCaptureFromCAM(0);
	/*double fps = cvGetCaptureProperty(capture, CV_CAP_PROP_FPS);
	IplImage* image = cvRetrieveFrame(capture);
	CvSize imgSize;
    imgSize.width = image->width;
    imgSize.height = image->height;
	CvVideoWriter *writer = cvCreateVideoWriter("out.avi", CV_FOURCC('M', 'J', 'P', 'G'), fps, imgSize);*/
	int ik=0;
	while(1)
	{
		//IplImage* img = cvQueryFrame(capture);
		IplImage* img = cvLoadImage( "../../Data/6 Dec/009.jpg", CV_LOAD_IMAGE_COLOR);
		cvShowImage( "src", img );
		//cvWriteFrame(writer, img);
		//cvSaveImage(nameGen(ik++), img, 0);
		
		IplImage* warp_img = cvCloneImage(img);
		CV_MAT_ELEM(*warp_matrix, float, 2, 2) = Z;
		cvWarpPerspective(img, warp_img, warp_matrix, CV_INTER_LINEAR | CV_WARP_INVERSE_MAP | CV_WARP_FILL_OUTLIERS);
		cvShowImage( "warp image", warp_img );

		IplImage* grayimg = cvCreateImage(cvGetSize(warp_img),IPL_DEPTH_8U,1);
		cvCvtColor( warp_img, grayimg, CV_RGB2GRAY );
		cvShowImage( "warp image (grey)", grayimg );
		
		cvSmooth(grayimg, grayimg, CV_GAUSSIAN, 3, 3, 0.0, 0.0);
		cvShowImage( "Smoothed warped gray", grayimg );
		
		IplImage* thresholded_img=simplethreshold(grayimg, 220);
		cvShowImage("threshold image",thresholded_img);

		//grayimg = doCanny( thresholded_img, 50, 100, 3 );
		cvReleaseImage(&grayimg);	// release the old image before re-pointing, otherwise it leaks each frame
		grayimg = cvCloneImage(thresholded_img);
		cvShowImage("canny",grayimg);

		IplImage* finalimg = cvCreateImage(cvGetSize(grayimg),IPL_DEPTH_8U,3);
		CvMemStorage* line_storage=cvCreateMemStorage(0);

		CvSeq* results =  cvHoughLines2(grayimg,line_storage,CV_HOUGH_PROBABILISTIC,10,CV_PI/180*5,350,100,10);
		double angle = 0.0, temp;
		double lengthSqd, wSum=0;
		double xc = 0, yc = 0;
		for( int i = 0; i < results->total; i++ )
		{
			CvPoint* line = (CvPoint*)cvGetSeqElem(results,i);
			cvLine( finalimg, line[0], line[1], CV_RGB(0,0,255), 1, CV_AA, 0 );
			//lengthSqd = (line[0].x - line[1].x)*(line[0].x - line[1].x) + (line[0].y - line[1].y)*(line[0].y - line[1].y);
			wSum += 1;//lengthSqd;
			if(line[0].y > line[1].y)
				temp = atan((line[0].y - line[1].y + 0.0) / (line[0].x - line[1].x));
			else
				temp = atan((line[1].y - line[0].y + 0.0) / (line[1].x - line[0].x));
			if(temp < 0)
				angle += (90 + 180/3.14*temp)/* * lengthSqd*/;
			else
				angle += (180/3.14*temp - 90)/* * lengthSqd*/;
			xc += line[0].x + line[1].x;
			yc += line[0].y + line[1].y;
		}
		angle=angle/wSum;
		//angle+=10;
		printf("total: %d, angle: % f\n", results->total, angle);

		xc /= 2*results->total;
		yc /= 2*results->total;
		double m = (angle != 0) ? 1/tan(angle*3.14/180) : 100;	// 100 represents a very large slope (near vertical)
		m=-m;

		double x1, y1, x2, y2;	// The Center Line
		y1 = 0;
		y2 = finalimg->height;
		x1 = xc + (y1-yc)/m;
		x2 = xc + (y2-yc)/m; 
		cvLine(finalimg, cvPoint(x1, y1), cvPoint(x2, y2), CV_RGB(0,255,0), 1, CV_AA, 0);
		printf("point: %f\t%f\n", xc, yc);

		double lx=0, ly=0, lm=0, lc=0, rx=0, ry=0, rm=0, rc=0;
		for( int i = 0; i < results->total; i++ )
		{
			CvPoint* line = (CvPoint*)cvGetSeqElem(results,i);
			double xm = (line[0].x + line[1].x)/2.0, ym = (line[0].y + line[1].y)/2.0;
			if(ym - yc - m*(xm - xc) > 0)
			{
				lx += xm;
				ly += ym;
				lm += (line[1].y - line[0].y)/(line[1].x - line[0].x+0.0001);
				lc++;
			}
			else
			{
				rx += xm;
				ry += ym;
				rm += (line[1].y - line[0].y)/(line[1].x - line[0].x+0.0001);
				rc++;
			}
		}

		// Averaged left and right line statistics
		lx /= lc;	ly /= lc;	lm /= lc;
		rx /= rc;	ry /= rc;	rm /= rc;
		printf("lins: %f\t%f\t%f\n", lx, ly, lm);
		printf("lins: %f\t%f\t%f\n", rx, ry, rm);
		y1 = 0;
		y2 = finalimg->height-5;
		x1 = lx + (y1-ly)/lm;
		x2 = lx + (y2-ly)/lm; 
		cvLine(finalimg, cvPoint(x1, y1), cvPoint(x2, y2), CV_RGB(255,255,0), 1, CV_AA, 0);

		// The Right Line
		y1 = 0;
		y2 = finalimg->height-5;
		x1 = rx + (y1-ry)/rm;
		x2 = rx + (y2-ry)/rm; 
		cvLine(finalimg, cvPoint(x1, y1), cvPoint(x2, y2), CV_RGB(0,255,255), 1, CV_AA, 0);

		// The Center Point
		CvPoint vpt = cvPoint(finalimg->width/2, 416);
		printf("center point: %d\t%d\n", vpt.x, vpt.y);
		
		// The Dl and Dr
		int dl = vpt.x - lx + (ly-vpt.y+0.0)/lm;
		int dr = (vpt.y-ry+0.0)/rm + rx - vpt.x;
		printf("dl-dr: %d\n", dl-dr);

		cvShowImage("final",finalimg);

		if(dl-dr < SAFEZONE_LL)	// Assume that the bot lies just on the boundary of the safe zone
		{
			navCommand(7, angle);
		}
		else if(dl-dr > SAFEZONE_RL)
		{
			navCommand(-7, angle);
		}
		else
		{
			if((angle < 10) && (angle > -10))
			{
				navCommand(angle, angle);
			}
			else
			{
				navCommand(0, angle);
			}
		}

		cvWaitKey(0);

		/* release per-frame resources; the originals leaked every iteration */
		cvReleaseImage(&img);
		cvReleaseImage(&warp_img);
		cvReleaseImage(&grayimg);
		cvReleaseImage(&thresholded_img);
		cvReleaseImage(&finalimg);
		cvReleaseMemStorage(&line_storage);
	}
}
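simplethreshold is an external helper in this example; a minimal equivalent using cvThreshold (assumed semantics: fixed binary threshold at t, white foreground) would be:

IplImage* simplethreshold(IplImage* gray, int t)
{
	/* allocate a binary output image and threshold the grayscale input */
	IplImage* out = cvCreateImage(cvGetSize(gray), IPL_DEPTH_8U, 1);
	cvThreshold(gray, out, t, 255, CV_THRESH_BINARY);
	return out;
}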
Example #3
int bw_detect_blobs(Tracker *tracker, struct StaticData *data)
{

    /* circular kernel for dilation */
    IplConvKernel *kernel = cvCreateStructuringElementEx(3,3,1,1,CV_SHAPE_ELLIPSE);

    /* temporary image to hold thresholded camera frame */
    IplImage *thresh = cvCreateImage(cvGetSize(tracker->frame),IPL_DEPTH_8U,1);

    /* variables for contour finding */
    CvMemStorage *mem = cvCreateMemStorage(0);
    CvSeq *contour;
    CvMoments moments;
    int it;


    /**
     * preprocessing 
    **/
    /* threshold image, reasonably stable since frame is highly underexposed and LEDs are very bright */
    cvThreshold(tracker->frame,thresh,180,255,CV_THRESH_BINARY);

    /* Dilate image to increase size of responses from thresholding, gives more stable result in contour finding*/
    cvDilate(thresh,thresh,kernel,2);


//  cvShowImage("thresh",thresh);


    /**
     * blob extraction (connected component finding)
    **/
    /* find contours in image, should give one contour for each marker */
    int nc = cvFindContours(thresh,mem,&contour,sizeof(CvContour),CV_RETR_LIST,CV_CHAIN_APPROX_SIMPLE);

//    printf("nc = %d\n",nc);

    it = 0;
    /* if NUM_OF_MARKERS contours detected, compute mean position of each contour */
    if(nc==data->NUM_OF_MARKERS)
    {
        if(contour)
        {
//            cvDrawContours(thresh,contour,cvScalarAll(255),cvScalarAll(0),100);
            CvSeq *c;
            for(c=contour; c!=NULL; c=c->h_next)
            {
                /* compute moments for each contour */
                cvContourMoments(c,&moments);
                /* make sure the contour encloses some area */
                if(moments.m00>0.0)
                {
                    /* compute center of mass -> mean blob position */
                    /* even though the computed position is stored in the marker structs, it doesn't necessarily correspond to that specific marker */
                    tracker->marker[it]->blob_pos.x = moments.m10/moments.m00;
                    tracker->marker[it]->blob_pos.y = moments.m01/moments.m00;
//                    printf("(%f %f)\n",tracker->marker[it]->blob_pos.x,tracker->marker[it]->blob_pos.y);
                }
                else
                {
                    /* for stable marker recognition all markers must have been detected */
                    tracker->state = OFF_TRACK;
                    break;
                }
                it++;

            }
        }
    }
    else
    {
        tracker->state = OFF_TRACK;
        for(int nm=0; nm<data->NUM_OF_MARKERS; ++nm)
        {
            tracker->marker[nm]->pos_is_set = 0;
            tracker->marker[nm]->blob_pos.x = 0;
            tracker->marker[nm]->blob_pos.y = 0;
        } 
    }

    /* clean up memory */
    cvReleaseMemStorage(&mem);
    cvReleaseImage(&thresh);


    return nc;
}
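A sketch of how a caller might drive bw_detect_blobs per frame (the Tracker and StaticData layouts are inferred from the accesses above, not from a published header):

/* usage sketch: feed single-channel, underexposed frames and read back blob centroids */
void track_loop(CvCapture* capture, Tracker* tracker, struct StaticData* data)
{
    while(cvGrabFrame(capture))
    {
        tracker->frame = cvRetrieveFrame(capture);   /* assumed 8-bit single channel */
        int nc = bw_detect_blobs(tracker, data);
        if(nc != data->NUM_OF_MARKERS)
            continue;                                /* state already set to OFF_TRACK */
        /* tracker->marker[i]->blob_pos now holds the marker centroids */
    }
}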
Example #4
int main (int argc, char **argv)
{

	CvCapture *capture = 0;
	IplImage *frame, *frame_copy = 0;
	cascade = (CvHaarClassifierCascade *) cvLoad ("yolo.xml", 0, 0, 0);

	if (!cascade)
	{
		printf ("ERROR: Could not load classifier cascade\n");
		return -1;
	}

	storage = cvCreateMemStorage (0);

	capture = cvCaptureFromCAM (0);

	if (capture){
		int j = 0;

		for (;;){

			FILE *fin;
			int i = 0;
			flag = 0, f = 0;

			if(!cvGrabFrame (capture)){
				break;
			}

			frame = cvRetrieveFrame (capture);

			if (!frame){
				break;
			}

			if (!frame_copy){
				frame_copy = cvCreateImage(
					cvSize (frame->width, frame->height),
					IPL_DEPTH_8U, frame->nChannels);
			}

			system ("ps -e | grep totem > sample.txt");

			fin = fopen ("sample.txt", "r");

			char a[40];
			while (fscanf (fin, "%39s", a) == 1){
				if (strncmp (a, "totem", 5) == 0){
					f = 1;
					break;
				}
				f = 0;
			}

			fclose (fin);

			if (frame->origin == IPL_ORIGIN_TL){
				cvCopy (frame, frame_copy, 0);
			}
			else{
				cvFlip (frame, frame_copy, 0);
			}

  			flag = detect_and_draw (frame_copy);

			if (f == 0)
			{
				printf("no totem playing\n
						please switch off the application from the command centre\n
						or open a video file\n");
				sleep (5);
			}
			else if (flag == 0 && f == 1 && played == 1)
			{
				system ("totem --pause");
				played = 0;
			}
			else if (flag == 1 && f == 1 && played == 0)
			{
				system ("totem --play");
				played = 1;
			}

			if (cvWaitKey (10) >= 0)
				break;

		}
	}

	cvReleaseImage (&frame_copy);
	cvReleaseCapture (&capture);
	cvReleaseMemStorage (&storage);
	return 0;
}
Example #5
int AdaBoost::read_num_class_data(const char* filename, int var_count, CvMat** data, CvMat** responses)
{
	const int M = 1024;
	FILE* f = fopen(filename, "rt");
	CvMemStorage* storage;
	CvSeq* seq;
	char buf[M + 2];
	float* el_ptr;
	CvSeqReader reader;
	int i=0, j=0;

	if(!f)
		return 0;

	el_ptr = new float[var_count + 1];
	storage = cvCreateMemStorage();
	seq	= cvCreateSeq(0, sizeof(*seq), (var_count + 1) * sizeof(float),	storage);

	for(;;)
	{
		char* ptr;

		if(!fgets(buf, M, f) || !strchr(buf, ','))
			break;

		el_ptr[0] = buf[0];
		ptr = buf + 2;

		for(i = 1; i <= var_count; i++)
		{
			int n = 0;
			sscanf(ptr, "%f%n", el_ptr + i, &n);
			ptr += n + 1;
		}

		if (i <= var_count)
			break;

		cvSeqPush(seq, el_ptr);
	}
	fclose(f);

	*data = cvCreateMat(seq->total, var_count, CV_32F);
	*responses = cvCreateMat(seq->total, 1, CV_32F);

	cvStartReadSeq(seq, &reader);

	for (i = 0; i < seq->total; i++)
	{
		const float* sdata = (float*) reader.ptr + 1;
		float* ddata = data[0]->data.fl + var_count * i;
		float* dr = responses[0]->data.fl + i;

		for (j = 0; j < var_count; j++)
			ddata[j] = sdata[j];

		*dr = sdata[-1];
		CV_NEXT_SEQ_ELEM(seq->elem_size, reader);
	}

	cvReleaseMemStorage(&storage);
	delete[] el_ptr;
	return 1;
}
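A sketch of how read_num_class_data is typically consumed (a UCI letter-recognition style CSV with the class label in the first column); the boosting parameters here are illustrative:

void train_boost_sketch()
{
	CvMat *data = 0, *responses = 0;
	AdaBoost ada;   /* hypothetical instance of the class above */
	if(!ada.read_num_class_data("letter-recognition.data", 16, &data, &responses))
		return;

	/* all features are ordered; the response is a categorical class label */
	CvMat* var_type = cvCreateMat(data->cols + 1, 1, CV_8U);
	cvSet(var_type, cvScalarAll(CV_VAR_ORDERED));
	cvSetReal1D(var_type, data->cols, CV_VAR_CATEGORICAL);

	CvBoost boost;
	boost.train(data, CV_ROW_SAMPLE, responses, 0, 0, var_type, 0,
	            CvBoostParams(CvBoost::REAL, 100, 0.95, 5, false, 0));

	cvReleaseMat(&var_type);
	cvReleaseMat(&data);
	cvReleaseMat(&responses);
}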
Example #6
//--------------------------------------------------------------
void testApp::setup(){

	// CAPTURE RESOLUTION
	cw = 720;
	ch = 576;
	
	
	
	sb = 15.0;
	br = 3.0;
	
	// START VALUES
	medianValue		    =  1;
	lineThreshValue     = 50;
	lineMinLengthValue  = 24;
	lineMaxGapValue     =  4;
	cannyThresh1Value   =  5;
	cannyThresh2Value   = 20;
	cannyApertureValue  =  3;
	adaptiveThreshValue = 25;
	approxValue			= 10;
	contourSmoothValue  =  1;
	
	fillsAlphaValue		= 0x20;
	fillsApproxValue    = 10;
	contourAlphaValue	= 0x40;
	approxAlphaValue	= 0x40;
	clearBGAlphaValue   = 0x20;
	
	doFillsApproxValue	= false;

	
	#ifdef _USE_LIVE_VIDEO
        vidGrabber.setVerbose(true);
//		vidGrabber.setDeviceID(3);
        vidGrabber.initGrabber(cw,ch);
	#else
        vidPlayer.loadMovie("fingers.mov");
        vidPlayer.play();
	#endif

    colorImg  .allocate(cw, ch);
	grayImage .allocate(cw, ch);
	hsvImage  .allocate(cw, ch);
	satImage  .allocate(cw, ch);
	trsImage  .allocate(cw, ch);
	cannyImage.allocate(cw, ch);
	medianImg .allocate(cw, ch);

	bLearnBakground = true;
	threshold = 80;
	
	linesStorage = cvCreateMemStorage(0);
	fillsStorage = cvCreateMemStorage(0);
	edgesStorage = cvCreateMemStorage(0);
	approxStorage = cvCreateMemStorage(0);
	
	mode = MODE_PROCESS;
	draw_contours = false;
	draw_approx   = false;
	draw_edges	  = false;
	draw_fills	  = true;
	erase_bg	  = true;
	
	edgeContours = NULL;
	fillContours = NULL;
	lines	     = NULL;
	
	
	
	paperTexture.loadImage("paper6.jpg");
	
	ofSetBackgroundAuto(erase_bg);
	
}
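setup() above only allocates buffers; a minimal companion update() that feeds them might look like this (a sketch under the _USE_LIVE_VIDEO path; the smoothing/Canny parameter wiring is assumed, not taken from the original):

//--------------------------------------------------------------
void testApp::update(){
	vidGrabber.update();
	if (vidGrabber.isFrameNew()){
		colorImg.setFromPixels(vidGrabber.getPixels(), cw, ch);
		grayImage = colorImg;	// color -> grayscale copy
		// median aperture must be odd
		cvSmooth(grayImage.getCvImage(), medianImg.getCvImage(),
		         CV_MEDIAN, medianValue * 2 + 1);
		cvCanny(medianImg.getCvImage(), cannyImage.getCvImage(),
		        cannyThresh1Value, cannyThresh2Value, cannyApertureValue);
		medianImg.flagImageChanged();
		cannyImage.flagImageChanged();
	}
}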
Example #7
int main( int argc, char** argv )
{
	

    contadorBlue = 0;
    contadorGreen = 0;
    contadorRed = 0;

    CvCapture *capture = NULL;
    IplImage  *frame = NULL;
    IplImage  *result = NULL;
    int       key = 0;	/* initialized: it is read in the while condition before assignment */
    char      *filename = (char*)"aGest.xml";


    /* load the classifier
       note that I put the file in the same directory with
       this code */
    cascade = ( CvHaarClassifierCascade* )cvLoad( filename, 0, 0, 0 );

    /* setup memory buffer; needed by the face detector */
    storage = cvCreateMemStorage( 0 );

    /* initialize camera */
    capture = cvCaptureFromCAM( 0 );

    /* always check */
    assert( cascade && storage && capture );
    
    /* open and rezise images to be overlayed */
    IplImage *drumblue = cvLoadImage("./Drums/DrumBlue.png");
    IplImage *drumgreen = cvLoadImage("./Drums/DrumGreen.png");
    IplImage *drumred = cvLoadImage("./Drums/DrumRed.png");
    IplImage *lineblue = cvLoadImage("./Drums/BlueLine.png");
    IplImage *linegreen = cvLoadImage("./Drums/GreenLine.png");
    IplImage *linered = cvLoadImage("./Drums/RedLine.png");
    IplImage *step1 = cvLoadImage("./Drums/Step.png");
    IplImage *step2 = cvLoadImage("./Drums/Step2.png");
    IplImage *arrow1 = cvLoadImage("./Drums/Arrow1.png");
    IplImage *arrow2 = cvLoadImage("./Drums/Arrow2.png");
    IplImage *bien = cvLoadImage("./Drums/Bien.png");
    IplImage *buu = cvLoadImage("./Drums/Buu.png");


    IplImage *rdrumblue = cvCreateImage(cvSize(110,95),drumblue->depth, drumblue->nChannels);
    IplImage *rdrumgreen = cvCreateImage(cvSize(110,95),drumgreen->depth, drumgreen->nChannels);
    IplImage *rdrumred = cvCreateImage(cvSize(110,95),drumred->depth, drumred->nChannels);
    IplImage *rdrumblue2 = cvCreateImage(cvSize(110,95),drumblue->depth, drumblue->nChannels);
    IplImage *rdrumgreen2 = cvCreateImage(cvSize(110,95),drumgreen->depth, drumgreen->nChannels);
    IplImage *rdrumred2 = cvCreateImage(cvSize(110,95),drumred->depth, drumred->nChannels);
    IplImage *rlineblue = cvCreateImage(cvSize(230,80),lineblue->depth, lineblue->nChannels);
    IplImage *rlinegreen = cvCreateImage(cvSize(230,80),linegreen->depth, linegreen->nChannels);
    IplImage *rlinered = cvCreateImage(cvSize(230,80),linered->depth, linered->nChannels);
    IplImage *rlineblue2 = cvCreateImage(cvSize(230,80),lineblue->depth, lineblue->nChannels);
    IplImage *rlinegreen2 = cvCreateImage(cvSize(230,80),linegreen->depth, linegreen->nChannels);
    IplImage *rlinered2 = cvCreateImage(cvSize(230,80),linered->depth, linered->nChannels);
    IplImage *rstep1 = cvCreateImage(cvSize(100,100),step1->depth, step1->nChannels);
    IplImage *rstep2 = cvCreateImage(cvSize(100,100),step2->depth, step2->nChannels);
    IplImage *rarrow1 = cvCreateImage(cvSize(110,70),arrow1->depth, arrow1->nChannels);
    IplImage *rarrow2 = cvCreateImage(cvSize(110,70),arrow2->depth, arrow2->nChannels);
    IplImage *rbien = cvCreateImage(cvSize(60,25),bien->depth, bien->nChannels);
    IplImage *rbuu = cvCreateImage(cvSize(60,25),buu->depth, buu->nChannels);
    

    cvResize(drumblue, rdrumblue);
    cvResize(drumgreen, rdrumgreen);
    cvResize(drumred, rdrumred);
    cvResize(drumblue, rdrumblue2);
    cvResize(drumgreen, rdrumgreen2);
    cvResize(drumred, rdrumred2);
    cvResize(lineblue, rlineblue);
    cvResize(linegreen, rlinegreen);
    cvResize(linered, rlinered);
    cvResize(lineblue, rlineblue2);
    cvResize(linegreen, rlinegreen2);
    cvResize(linered, rlinered2);
    cvResize(step1, rstep1);
    cvResize(step2, rstep2);
    cvResize(arrow1, rarrow1);
    cvResize(arrow2, rarrow2);
    cvResize(bien, rbien);
    cvResize(buu, rbuu);

    cvFlip(rdrumblue2, rdrumblue2,1);
    cvFlip(rdrumgreen2, rdrumgreen2,1);
    cvFlip(rdrumred2, rdrumred2,1);
    cvFlip(rlineblue2, rlineblue2,1);
    cvFlip(rlinegreen2, rlinegreen2,1);
    cvFlip(rlinered2, rlinered2,1);

    /* release memory */
    cvReleaseImage( &drumblue);
    cvReleaseImage( &drumgreen);
    cvReleaseImage( &drumred);
    cvReleaseImage( &lineblue);
    cvReleaseImage( &linegreen);
    cvReleaseImage( &linered );
    cvReleaseImage( &step1 );
    cvReleaseImage( &step2 );
    cvReleaseImage( &arrow1 );
    cvReleaseImage( &arrow2 );
    cvReleaseImage( &bien);
    cvReleaseImage( &buu);

 
    /* create a window */
    cvNamedWindow( "video", 1 );
    
    /* set time and frame variables*/
    initGame = clock ();
    frameN = 0;

    /* set scores*/
    score1 = 0;
    score2 = 0;
    redb = false;
    greenb = false;
    blueb = false;
    redb2 = false;
    greenb2 = false;
    blueb2 = false;
    bienn =0;
    maln =0;

    std::list<int> lista;
    lista.push_front(1);
    lista.push_front(2);
    lista.push_front(3);
    lista.push_front(4);
    lista.push_front(5);


    while( key != 'q' ) {

        /* get a frame */
        //frame: 640,480
        frame = cvQueryFrame( capture );

        /* always check */
        if( !frame ) break;

        /* clone and 'fix' frame */
        cvFlip( frame, frame, 1 );

        GenerateScoreMessage(frame,score1,score2);

        /* detect Hands and draw boxes */
        detectHands( frame, rlineblue2, rlinegreen2, rlinered2, false );
        detectHands( frame, rlineblue, rlinegreen, rlinered, true);

        /* overlay the game play buttons */
        cvLine(frame, cvPoint(320,0), cvPoint(320,480), cvScalar(255,255,0), 2);

        OverlayImage(frame,rdrumblue,cvPoint(0,240),cvScalar(1,1,1,1),cvScalar(1,1,1,1));
        OverlayImage(frame,rdrumgreen,cvPoint(0,315),cvScalar(1,1,1,1),cvScalar(1,1,1,1));
        OverlayImage(frame,rdrumred,cvPoint(0,390),cvScalar(1,1,1,1),cvScalar(1,1,1,1));

        OverlayImage(frame,rdrumblue2,cvPoint(530, 15),cvScalar(1,1,1,1),cvScalar(1,1,1,1));
        OverlayImage(frame,rdrumgreen2,cvPoint(530,90),cvScalar(1,1,1,1),cvScalar(1,1,1,1));
        OverlayImage(frame,rdrumred2,cvPoint(530,165),cvScalar(1,1,1,1),cvScalar(1,1,1,1));

        OverlayImage(frame,rarrow1,cvPoint(0, 23),cvScalar(1,1,1,1),cvScalar(1,1,1,1));
        OverlayImage(frame,rarrow1,cvPoint(0,98),cvScalar(1,1,1,1),cvScalar(1,1,1,1));
        OverlayImage(frame,rarrow1,cvPoint(0,173),cvScalar(1,1,1,1),cvScalar(1,1,1,1));

        OverlayImage(frame,rarrow2,cvPoint(530,248),cvScalar(1,1,1,1),cvScalar(1,1,1,1));
        OverlayImage(frame,rarrow2,cvPoint(530,323),cvScalar(1,1,1,1),cvScalar(1,1,1,1));
        OverlayImage(frame,rarrow2,cvPoint(530,398),cvScalar(1,1,1,1),cvScalar(1,1,1,1));

        drawAndAdvance(frame,rbien, rbuu, rstep1, rstep2 );

//        OverlayImage(frame,rstep1,cvPoint(200,330),cvScalar(1,1,1,1),cvScalar(1,1,1,1));
//        OverlayImage(frame,rstep2,cvPoint(400,330),cvScalar(1,1,1,1),cvScalar(1,1,1,1));

//        OverlayImage(frame,rbien,cvPoint(200,200),cvScalar(1,1,1,1),cvScalar(1,1,1,1));
//        OverlayImage(frame,rbuu,cvPoint(400,200),cvScalar(1,1,1,1),cvScalar(1,1,1,1));

	
       /* display video */
        cvShowImage( "video", frame );

        /* quit if user press 'q' */
        key = cvWaitKey( 10 );
	
	frameN++;
    }
    
	
    /* free memory */
    cvReleaseCapture( &capture );
    cvDestroyWindow( "video" );
    cvReleaseHaarClassifierCascade( &cascade );
    cvReleaseMemStorage( &storage );
    return 0;

}
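OverlayImage is an external helper in this example; a common minimal implementation blends the overlay into the frame at a given location, weighting source and destination by the two CvScalar arguments (assumed semantics, not the author's code):

void OverlayImage(IplImage* src, IplImage* overlay, CvPoint location, CvScalar S, CvScalar D)
{
    /* clip the overlay against the frame and blend per pixel, per channel */
    for (int y = 0; y < overlay->height && y + location.y < src->height; y++)
    {
        for (int x = 0; x < overlay->width && x + location.x < src->width; x++)
        {
            CvScalar over = cvGet2D(overlay, y, x);
            CvScalar base = cvGet2D(src, y + location.y, x + location.x);
            CvScalar merged;
            for (int c = 0; c < 4; c++)
                merged.val[c] = D.val[c] * base.val[c] + S.val[c] * over.val[c];
            cvSet2D(src, y + location.y, x + location.x, merged);
        }
    }
}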
Example #8
//////////////////////
//
//   Rectangle detection
//
//////////////////////
void ColorTracking::draw_square(IplImage* image)
{
	CvMemStorage* storage2 = NULL;			// memory pool
	CvMemStorage* storage3 = NULL;			// memory pool
	storage2 = cvCreateMemStorage(0);		// storage for the contour extraction used in rectangle detection
	storage3 = cvCreateMemStorage(0);		// storage for the detected rectangle candidates

	CvSeq* contours ;						// extracted contours
	CvSeq* result ;							// contours that are plausible rectangles
	
	CvPoint corner[4];						// the four points that will form a rectangle

	rects = NULL;							// rectangles built from the four corner points - used to export the data

	rects = new CvRect [100];				// the number of rectangles is unknown in advance, so cap it at 100
		
	//extract contours
	cvFindContours(image, storage2, &contours, sizeof(CvContour), CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE);

	//build rectangles from the extracted contour information
	for(;contours !=0; contours = contours ->h_next)
	{
		//approximate the polygon vertices
		//cvApproxPoly: contour info, header size, storage, method, precision, 0 = examine only this contour
		result = cvApproxPoly( contours, sizeof(CvContour), storage3, 
			CV_POLY_APPROX_DP, cvContourPerimeter(contours)*0.02, 0 );	

		//treat the contour as a rectangle when it has 4 vertices and encloses a large enough area
		//the convexity check cvCheckContourConvexity(result) is omitted for now
		if( result->total == 4 &&
					cvContourArea(result,CV_WHOLE_SEQ,0) > 500)
		{

			//set the starting point
			CvPoint* st = (CvPoint*)cvGetSeqElem(result, 0);

			///////first corner - the point farthest from an arbitrary starting point
			double fMaxDist = 0.0;

			for(int i = 1; i < result->total; i++)
			{
				CvPoint* pt = (CvPoint *)cvGetSeqElem(result, i);

				double fDist = sqrt((double)( ( st->x - pt->x) * (st->x - pt->x)
					+ (st->y - pt->y) * (st->y - pt->y) ));

				if(fDist > fMaxDist)
				{
					corner[0] = *pt;
					fMaxDist = fDist;
				}
			}

			///////second corner - the point farthest from the first
			fMaxDist = 0.0;

			for(int i = 1; i < result->total; i++)
			{
				CvPoint* pt = (CvPoint *)cvGetSeqElem(result, i);

				double fDist = sqrt((double)( ( corner[0].x - pt->x) * (corner[0].x - pt->x)
					+ (corner[0].y - pt->y) * (corner[0].y - pt->y) ));

				if(fDist > fMaxDist)
				{
					corner[1] = *pt;
					fMaxDist = fDist;
				}
			}

			////////third corner - the point farthest from the midpoint of the first two
			fMaxDist = 0.0;
			
			for(int i = 1; i < result->total; i++)
			{
				
				CvPoint* pt = (CvPoint *)cvGetSeqElem(result, i);

				int tempx;
				int tempy;

				//find the midpoint between the first and second corners
				//coordinates may be negative, so determine the larger value before subtracting
				
				//x coordinate
				if(corner[0].x >= corner[1].x)
				{
					
					tempx = corner[0].x - (corner[0].x - corner[1].x) / 2;
				
				}
				else if(corner[0].x < corner[1].x)
				{
					
					tempx = corner[0].x + (corner[1].x - corner[0].x) / 2;
				
				}

				//y coordinate
				if(corner[0].y >= corner[1].y)
				{

					tempy = corner[0].y - (corner[0].y - corner[1].y) / 2;

				}
				else if(corner[0].y < corner[1].y)
				{

					tempy = corner[0].y + (corner[1].y - corner[0].y) / 2;
		
				}

				//find the contour point farthest from that midpoint
				double fDist = sqrt((double)( ( tempx - pt->x) * (tempx - pt->x)
					+ (tempy - pt->y) * (tempy - pt->y) ));

				if(fDist > fMaxDist)
				{
					corner[2] = *pt;
					fMaxDist = fDist;

				}
			}

			//////fourth corner
			
			//use the triangle-area formula on the coordinate plane to maximize the quadrilateral's area:
			//split the quad into three triangles formed by the three known corners and each remaining
			//contour point (x,y), and take the point that maximizes the summed area as the fourth corner.

			int x1 = corner[0].x;
			int y1 = corner[0].y;

			int x2 = corner[1].x;
			int y2 = corner[1].y;
			
			int x3 = corner[2].x;
			int y3 = corner[2].y;

			int nMaxDim = 0;

			for(int j = 0; j < result->total; j++ )
			{
				CvPoint* pt = (CvPoint *)cvGetSeqElem(result, j);

				int x = pt->x;
				int y = pt->y;

				int nDim = abs( ( x1 * y2 + x2 * y + x  * y1 ) - ( x2 * y1 + x  * y2 + x1 * y  ) )
						  +abs( ( x1 * y  + x  * y3+ x3 * y1 ) - ( x  * y1 + x3 * y  + x1 * y3 ) )
						  +abs( ( x  * y2 + x2 * y3+ x3 * y  ) - ( x2 * y  + x3 * y2 + x  * y3 ) );

				if(nDim > nMaxDim)
				{
					corner[3] = *pt;
					nMaxDim = nDim;
				}
			}

			//first -> second
			cvLine(m_orig_img, cvPoint(corner[0].x, corner[0].y), cvPoint(corner[1].x, corner[1].y), CV_RGB(255,120,0), 3);
			//first -> third
			cvLine(m_orig_img, cvPoint(corner[0].x, corner[0].y), cvPoint(corner[2].x, corner[2].y), CV_RGB(255,120,0), 3);
			//fourth -> second
			cvLine(m_orig_img, cvPoint(corner[3].x, corner[3].y), cvPoint(corner[1].x, corner[1].y), CV_RGB(255,120,0), 3);
			//fourth -> third
			cvLine(m_orig_img, cvPoint(corner[3].x, corner[3].y), cvPoint(corner[2].x, corner[2].y), CV_RGB(255,120,0), 3);

			//draw the corner points
			cvCircle(m_orig_img, cvPoint(corner[0].x, corner[0].y), 5, CV_RGB(255,255,255), -1);
			cvCircle(m_orig_img, cvPoint(corner[1].x, corner[1].y), 5, CV_RGB(255,255,255), -1);
			cvCircle(m_orig_img, cvPoint(corner[2].x, corner[2].y), 5, CV_RGB(255,255,255), -1);
			cvCircle(m_orig_img, cvPoint(corner[3].x, corner[3].y), 5, CV_RGB(255,255,255), -1);
	
			//compute the rectangle size
			int mWidth = abs(corner[3].x - corner[0].x);
			int mHeight = abs(corner[3].y - corner[0].y);

			//store it in the CvRect array to keep it
			rects[rect_cnt].x = corner[0].x;
			rects[rect_cnt].y = corner[0].y;
			rects[rect_cnt].width = mWidth;
			rects[rect_cnt].height = mHeight;

			//advance the index
			rect_cnt++;

			//stop when the prepared array is full
			if(rect_cnt > 99 )
			{
				break;
			}
		}

		//clear the per-contour sequences
		cvClearSeq(contours);
		cvClearSeq(result);
	}
	//release used memory
	cvReleaseMemStorage(&storage2);
	cvReleaseMemStorage(&storage3);

}
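The fourth-corner search above uses the shoelace formula: for a triangle with vertices (x1,y1), (x2,y2), (x,y), twice the unsigned area is |(x1*y2 + x2*y + x*y1) - (x2*y1 + x*y2 + x1*y)|, and nDim sums this quantity over the three triangles spanned by the candidate point and the corners already found. A small helper makes the intent explicit (illustrative refactor, same arithmetic):

// twice the unsigned area of triangle (ax,ay)-(bx,by)-(cx,cy), shoelace formula
static int twiceTriangleArea(int ax, int ay, int bx, int by, int cx, int cy)
{
	return abs((ax * by + bx * cy + cx * ay) - (bx * ay + cx * by + ax * cy));
}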
Example #9
IplImage *WorkingFrame=NULL;
IplImage *frame1 = NULL;
IplImage *Frame_at_t = NULL;
IplImage *Frame_at_t_dt = NULL;
IplImage *eig_image = NULL;
IplImage *temp_image = NULL;
IplImage *pyramid1 = NULL;
IplImage *frameone = NULL;
IplImage *frametwo = NULL;
IplImage *dots = NULL;

int p=1;

IplImage *pyramid2 = NULL;
CvSeq* first_contour, *contours2;
CvMemStorage* storage = cvCreateMemStorage();	
double Result, Result2;
CvRect rect;

static int array[2]={0,0};

int* findhand(CvCapture *webcam) {
	
	//---Initialise Variables for Optical Flow---//
	CvSize OF_window = cvSize(3,3);						//Setup the size of the window of each pyramid level
	int no_of_points = 15000;
	CvPoint2D32f Frame_t_points[15000];
	CvPoint2D32f Frame_t_dt_points[15000];
	char optical_flow_found_feature[15000];
	float optical_flow_feature_error[15000];
	CvTermCriteria optical_flow_termination_criteria = cvTermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, .3 );
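
	/* The original snippet is cut off here. A minimal continuation (a sketch, assuming
	   Frame_at_t and Frame_at_t_dt already hold two consecutive 8-bit grayscale frames
	   of the same size, with eig_image/temp_image/pyramid1/pyramid2 allocated to match)
	   would pick strong corners in the first frame and track them into the second: */
	int found_count = no_of_points;
	cvGoodFeaturesToTrack(Frame_at_t, eig_image, temp_image,
	                      Frame_t_points, &found_count,
	                      0.01, 5.0, NULL, 3, 0, 0.04);
	cvCalcOpticalFlowPyrLK(Frame_at_t, Frame_at_t_dt,
	                       pyramid1, pyramid2,
	                       Frame_t_points, Frame_t_dt_points, found_count,
	                       OF_window, 5,
	                       optical_flow_found_feature, optical_flow_feature_error,
	                       optical_flow_termination_criteria, 0);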
Example #10
ReturnType ColorTracking::onExecute()
{
	// fetch the image from the input port
	opros_any *pData = ImageIn.pop();
	RawImage result;
	// vector for the data output port
	std::vector<PositionDataType> data;

	if(pData != NULL){
		
		// get the image from the port
		RawImage Image = ImageIn.getContent(*pData);
		RawImageData *RawImage = Image.getImage();

		// get the size of the current frame
		m_in_width = RawImage->getWidth();
		m_in_height = RawImage->getHeight();

		// allocate the image buffers for the source frame
		if(m_orig_img == NULL){
			m_orig_img = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_8U, 3);
		}
		if(m_dest_img == NULL){
			m_dest_img = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_8U, 3);
		}
		if(m_hsv_img == NULL){
			m_hsv_img = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_8U, 3);

		}
		if(m_gray_img == NULL){
			m_gray_img = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_8U, 1);
		}

		//copy the raw frame data into the source image with memcpy
		memcpy(m_orig_img->imageData, RawImage->getData(), RawImage->getSize());

		//convert to HSV
		cvCvtColor(m_orig_img, m_hsv_img, CV_BGR2HSV);

		//narrow the HSV range, then extract a binary image for the configured color
		color_config(m_hsv_img, m_color);
		
		//clean up the binary image
		image_filter(m_gray_img);

		//reset the detection counters
		circle_cnt = 0;
		rect_cnt = 0;
		
		//allocate memory for the detected circles
		storage0 = cvCreateMemStorage(0);
		
		//draw circles
		draw_circle(m_gray_img);
		
		//draw rectangles
		draw_square(m_gray_img);

//// DataOut
		//if at least one shape was detected
		if(circles || rects != NULL)
		{
			//circle data exists
			if(circles)
			{
				//for each detected circle
				for(int k = 0; k < circles->total; k++)
				{
					float* cir;
					int circle_x, circle_y;
					double radi;

					//cir holds the parameters of circle k from the circles sequence;
					//the center coordinates and the radius are stored in that order
					cir = (float*)cvGetSeqElem(circles, k);
					circle_x    = cvRound(cir[0]);     //center x coordinate
					circle_y    = cvRound(cir[1]);     //center y coordinate
					radi        = (double)cvRound(cir[2]);     //radius

					PositionDataType base;
					base.setName("circle");
					base.setX(circle_x);
					base.setY(circle_y);
					base.setRadian(radi);

					base.setHeight(NULL);
					base.setWidth(NULL);

					data.push_back(base);
			
				}

			}

			//rectangle data exists
			if(rects != NULL)
			{
				for(int j = 0; j < rect_cnt; j++)
				{
					int rect_x, rect_y, rect_width, rect_height;

					rect_x = rects[j].x;
					rect_y = rects[j].y;
					rect_width = rects[j].width;
					rect_height = rects[j].height;

					PositionDataType base;
					base.setName("rectangle");
					base.setX(rect_x);
					base.setY(rect_y);
					base.setHeight(rect_height);
					base.setWidth(rect_width);

					base.setRadian(NULL);

					data.push_back(base);
				}
			}

			PositionOut.push(data);
		}
			
//// ImageOut
		// get the image pointer of the RawImage
		RawImageData *pimage = result.getImage();
		
		// resize to the input image size and channel count
		pimage->resize(m_orig_img->width, m_orig_img->height, m_orig_img->nChannels);
		
		// total size of the image in bytes (pixels * channels)
		int size = m_orig_img->width * m_orig_img->height * m_orig_img->nChannels;
		
		// pointer to the output pixel buffer
		unsigned char *ptrdata = pimage->getData();
		
		// memcpy the current frame into the output image
		memcpy(ptrdata, m_orig_img->imageData, size);

		// push to the output port
		opros_any mdata = result;
		ImageOut.push(result);	// send
		delete pData;
	}

	return OPROS_SUCCESS;
}
Example #11
//////////////////////
//
//   Circle detection
//
//////////////////////
void ColorTracking::draw_circle(IplImage* image)
{
	
	CvSeq* m_circle = NULL;					// circle data
	CvMemStorage* storage1 = NULL;			// memory pool

	//allocate memory for the detected circles
	storage1 = cvCreateMemStorage(0);

	//variable holding the circle count
	circle_cnt = 0;

	//resolution of the accumulator used to find circle centers:
	//1 = same size as the input image, 2 = accumulator at half the input width/height
	double dp = 1.5; 
	double min_dist = 300;				//minimum distance between centers of detected circles; smaller values yield more circles
	double cannyThreshold = 100;		//upper threshold for the internal cvCanny call
	double accThreshold = 50;			//accumulator threshold for circle centers
	int min_radius = 50;				//minimum radius
	int max_radius = 150;				//maximum radius
	int cx = 0, cy = 0;
		
	//source image, memory storage, method, accumulator resolution, minimum distance between adjacent circles
	m_circle = cvHoughCircles(image, storage1, CV_HOUGH_GRADIENT,
		dp,min_dist, cannyThreshold, accThreshold, min_radius, max_radius);

	//if at least one circle was found
	if(m_circle->total >0 )
	{
		//copy into the globally declared sequence used to export the data
		circles = cvCloneSeq(m_circle, storage0);

		//draw the circles
		for(int k = 0; k < m_circle->total; k++)
		{
			float* circle;
			int radius;

			//circle holds the parameters of circle k from the m_circle sequence;
			//the center coordinates and the radius are stored in that order
			circle = (float*)cvGetSeqElem(m_circle, k);
			cx     = cvRound(circle[0]);     //center x coordinate
			cy     = cvRound(circle[1]);     //center y coordinate
			radius = cvRound(circle[2]);     //radius

			//draw the circle
			
			if(radius > min_radius && radius < max_radius)
			{
				//center point
				cvCircle(m_orig_img, cvPoint(cx, cy), 3, CV_RGB(240,0,255), -1, 8, 0);

				//detected circle outline
				cvCircle(m_orig_img, cvPoint(cx, cy), radius, CV_RGB(255,255,255), 3, 8, 0);

			}	
		}
	}
	else // no circles were found
	{
		circles = NULL;
		circle_cnt = 0;
	}

	cvReleaseMemStorage(&storage1);
}
Example #12
    void update(double time,
                uint32_t* out,
                const uint32_t* in)
    {
        if (!cascade) {
            cvSetNumThreads(cvRound(threads * 100));
            if (classifier.length() > 0) {
                cascade = (CvHaarClassifierCascade*) cvLoad(classifier.c_str(), 0, 0, 0 );
                if (!cascade)
                    fprintf(stderr, "ERROR: Could not load classifier cascade %s\n", classifier.c_str());
                storage = cvCreateMemStorage(0);
            }
            else {
                memcpy(out, in, size * 4);
                return;
            }
        }

        // sanitize parameters
        search_scale = CLAMP(search_scale, 0.11, 1.0);
        neighbors = CLAMP(neighbors, 0.01, 1.0);

        // copy input image to OpenCV
        if( !image )
            image = cvCreateImage(cvSize(width, height), IPL_DEPTH_8U, 4);
        memcpy(image->imageData, in, size * 4);
        
        // only re-detect periodically to control performance and reduce shape jitter
        int recheckInt = abs(cvRound(recheck * 1000));
        if ( recheckInt > 0 && count % recheckInt )
        {
            // skip detect
            count++;
//            fprintf(stderr, "draw-only counter %u\n", count);
        }
        else
        {
            count = 1;   // reset the recheck counter
            if (objects) // reset the list of objects
                cvClearSeq(objects);
            
            double elapsed = (double) cvGetTickCount();

            objects = detect();

            // use detection time to throttle frequency of re-detect vs. redraw (automatic recheck)
            elapsed = cvGetTickCount() - elapsed;
            elapsed = elapsed / ((double) cvGetTickFrequency() * 1000.0);

            // Automatic recheck is requested by passing a negative parameter value;
            // this is undocumented, but it works in practice.
            if (recheck < 0 && cvRound( elapsed / (1000.0 / (recheckInt + 1)) ) <= recheckInt)
                    count += recheckInt - cvRound( elapsed / (1000.0 / (recheckInt + 1)));
//            fprintf(stderr, "detection time = %gms counter %u\n", elapsed, count);
        }
        
        draw();
        
        // copy filtered OpenCV image to output
        memcpy(out, image->imageData, size * 4);
        cvReleaseImage(&image);
    }
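// Note: update() above relies on a CLAMP helper that is not part of this listing.
// A minimal sketch of what it presumably looks like (the name and argument order are
// taken from the calls above; the implementation itself is an assumption):
#define CLAMP(v, lo, hi) ((v) < (lo) ? (lo) : ((v) > (hi) ? (hi) : (v)))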
static int aMatchContourTrees(void)
{
    CvSeqBlock contour_blk1, contour_blk2;
    CvContour contour_h1, contour_h2;
    CvContourTree *tree1, *tree2;
    CvMemStorage *storage;   /*   storage for contour and tree writing */
    int block_size = 10000;

    CvRandState state;
    double lower, upper;
    int seed;
    float fr;
    int type_seq;
    int method;
    int nPoints1 = 12, nPoints2 = 12;
    int xc,yc,a1 = 10, b1 = 20, a2 = 10, b2 =20, fi = 0;
    int xmin,ymin,xmax,ymax;
    double error_test,rezult, eps_rez = 0.8;
    double pi = 3.1415926;
    double threshold = 1.e-7;
    double threshold2 = 5.;
    int i;
    int code = TRS_OK;

    int width=256,height=256;
    CvPoint *cp1,*cp2;

    /* read tests params */

    if (!trsiRead(&nPoints1,"20","Number of points first contour"))
        return TRS_UNDEF;
    if (!trsiRead(&nPoints2,"20","Number of points second contour"))
        return TRS_UNDEF;

    if(nPoints1>0&&nPoints2>0)
    {
        if (!trsiRead(&a1,"10","first radius of the first ellipse"))
            return TRS_UNDEF;
        if (!trsiRead(&b1,"20","second radius of the first ellipse"))
            return TRS_UNDEF;
        if (!trsiRead(&a2,"15","first radius of the second ellipse"))
            return TRS_UNDEF;
        if (!trsiRead(&b2,"30","second radius of the second ellipse"))
            return TRS_UNDEF;
        if (!trsiRead(&fi,"0","rotation angle of the second ellipse"))
            return TRS_UNDEF;

        if (!trsdRead(&upper,"3","noise amplitude"))
            return TRS_UNDEF;

        xc = (int)(width/2.);
        yc = (int)(height/2.);
        xmin = width;
        ymin = height;
        xmax = 0;
        ymax = 0;

        cp1 = (CvPoint*) trsmAlloc(nPoints1*sizeof(CvPoint));
        cp2 = (CvPoint*) trsmAlloc(nPoints2*sizeof(CvPoint));

        for(i=0; i<nPoints1; i++)
        {
            cp1[i].x = (int)(a1*cos(2*pi*i/nPoints1))+xc;
            cp1[i].y = (int)(b1*sin(2*pi*i/nPoints1))+yc;
            if(xmin> cp1[i].x) xmin = cp1[i].x;
            if(xmax< cp1[i].x) xmax = cp1[i].x;
            if(ymin> cp1[i].y) ymin = cp1[i].y;
            if(ymax< cp1[i].y) ymax = cp1[i].y;
        }

        if(xmax>width||xmin<0||ymax>height||ymin<0) return TRS_FAIL;

        lower = -upper;
        /*     upper = 3;*/
        seed = 345753;
        cvRandInit(&state, (float)lower,(float)upper, seed );
        for(i=0; i<nPoints2; i++)
        {
            cvbRand( &state, &fr, 1 );
            cp2[i].x =(int)fr+(int)(a2*cos(2*pi*i/nPoints2)*cos(2*pi*fi/360.))-
                      (int)(b2*sin(2*pi*i/nPoints2)*sin(2*pi*fi/360.))+xc;
            cvbRand( &state, &fr, 1 );
            cp2[i].y =(int)fr+(int)(a2*cos(2*pi*i/nPoints2)*sin(2*pi*fi/360.))+
                      (int)(b2*sin(2*pi*i/nPoints2)*cos(2*pi*fi/360.))+yc;

            if(xmin> cp2[i].x) xmin = cp2[i].x;
            if(xmax< cp2[i].x) xmax = cp2[i].x;
            if(ymin> cp2[i].y) ymin = cp2[i].y;
            if(ymax< cp2[i].y) ymax = cp2[i].y;
        }
        if(xmax>width||xmin<0||ymax>height||ymin<0) return TRS_FAIL;

        /*   initialize the contours */
        type_seq = CV_SEQ_POLYGON;
        cvMakeSeqHeaderForArray( type_seq, sizeof(CvContour), sizeof(CvPoint),
                                 (char*)cp1, nPoints1, (CvSeq*)&contour_h1, &contour_blk1);

        cvMakeSeqHeaderForArray( type_seq, sizeof(CvContour), sizeof(CvPoint),
                                 (char*)cp2, nPoints2, (CvSeq*)&contour_h2, &contour_blk2);

        /*  create the contour trees */
        storage = cvCreateMemStorage( block_size );

        tree1 = cvCreateContourTree ((CvSeq*)&contour_h1, storage, threshold);
        tree2 = cvCreateContourTree ((CvSeq*)&contour_h2, storage, threshold);


        /*  contour matching */
        error_test = 0.;
        method = 1;

        rezult = cvMatchContourTrees (tree1, tree2, (CvContourTreesMatchMethod)method,threshold2);
        error_test+=rezult;

        if(error_test > eps_rez ) code = TRS_FAIL;
        else code = TRS_OK;

        trsWrite( ATS_CON | ATS_LST | ATS_SUM, "contours matching error_test =%f \n",
                  error_test);

        cvReleaseMemStorage ( &storage );

        trsFree (cp2);
        trsFree (cp1);

    }


    /*    _getch();     */
    return code;
}
Example #14
0
  void Gesture1::trackMarker (IplImage* destImg, CvPoint _r, CvPoint _b, CvPoint _g, CvPoint _y) {
    
    // find tissue box!
    CvPoint* objPoints = objectDetector->detect(destImg);

    // draw
    world->Step(1.0F/6.0F, 10, 10);
    cvLine(destImg, cvPoint(0,HEIGHT), cvPoint(1000,HEIGHT), CV_RGB(0,255,0), 3);
    for (b2Body* b = world->GetBodyList(); b; b = b->GetNext()) {
      //printf("**draw body\n");
      Box2DData* userData = (Box2DData*)b->GetUserData();
      if (userData != NULL) {
        if (strcmp(userData->type, "Circle") == 0) {
          //b2Vec2 v = b->GetWorldCenter();
          b2Vec2 v = b->GetPosition();
          //printf("** x=%f y=%f r=%f\n", v.x, v.y, userData->radius);
          CvPoint center = cvPoint(v.x*WORLD_SCALE, v.y*WORLD_SCALE);
          cvCircle(destImg, center, userData->radius*WORLD_SCALE, CV_RGB(255,0,0), -1);
        } else if (strcmp(userData->type, "Box") == 0) {
          world->DestroyBody(b);
        }
      }      
    }
    if (objPoints != NULL) {
      printf("construct body\n");
      b2PolygonShape cs;
      b2Vec2 vertices[4] = {
        b2Vec2((float)(objPoints[0].x)/WORLD_SCALE, (float)(objPoints[0].y)/WORLD_SCALE),
        b2Vec2((float)(objPoints[1].x)/WORLD_SCALE, (float)(objPoints[1].y)/WORLD_SCALE),
        b2Vec2((float)(objPoints[2].x)/WORLD_SCALE, (float)(objPoints[2].y)/WORLD_SCALE),
        b2Vec2((float)(objPoints[3].x)/WORLD_SCALE, (float)(objPoints[3].y)/WORLD_SCALE)
      };
      cs.Set(vertices, 4);
      b2BodyDef bd;
      //bd.type = b2_staticBody;
      Box2DData* obj = new Box2DData();
      strcpy(obj->type, "Box");
      bd.userData = obj;
      b2Body* body1 = world->CreateBody(&bd);
      body1->CreateFixture(&cs, 0.0f);
    }

    if (_r.x < 0) return;
    Point2D r = toPoint2D(_r);
    
    // if marker is not moving for a while, reset the path
    int len = path.size();
    if (len > KEEP_MAX) {
      path.erase(path.begin());
    }
    int nearCount = 0;
    int actual = min(KEEP_COUNT, len);
    
    /*
     for(int i=0; i<actual; i++){
     Point2D p = path[len-1-i];
     double d = dist(p, r);
     //printf("dist=%f\n", d);
     if (d < NEAR_THRESHOLD) ++nearCount;
     }
     if (nearCount > (double)actual * DONT_MOVE_THRESHOLD_RATE) {
     // marker is not moving, so clear the path
     printf("cleared\n");
     path.clear();
     }
     */
    
    path.push_back(r);

    // decide if we should recognize
    time_t current;
    time(&current);
    double interval = difftime(current, lastTime);
    printf("interval=%f\n", interval);
    if (interval < INTERVAL_SEC) return;

    len = path.size();
    if (len < 5) return;

    RecognitionResult res = g.recognize(path);
    printf("%s:%f\n", res.name.c_str(), res.score);
    if (res.name == "Circle" && res.score > SCORE_THRESHOLD) {
      printf("##circle detect##\n");
      // convert the vector<Point2D> path to a CvSeq of CvPoint
      CvSeqWriter writer;
      CvMemStorage* storage = cvCreateMemStorage(0);
      cvStartWriteSeq( CV_32SC2, sizeof(CvSeq), sizeof(CvPoint), storage, &writer);
      for (int i=0; i<len; i++) {
        CvPoint pt = toCvPoint(path[i]);
        CV_WRITE_SEQ_ELEM(pt, writer);
      }
      CvSeq* seq = cvEndWriteSeq(&writer);
      CvBox2D ellipse = cvFitEllipse2(seq);
      float radius = std::min(ellipse.size.width, ellipse.size.height)/(4.0F*WORLD_SCALE);
      cvEllipseBox(destImg, ellipse, CV_RGB(0,255,255), -1);

      // add Box2D object
      {
        b2CircleShape cs;
        cs.m_radius = radius;
        printf(" x=%f y=%f radius:%f\n", ellipse.center.x/WORLD_SCALE, ellipse.center.y/WORLD_SCALE, radius);
        b2BodyDef bd;
        bd.type = b2_dynamicBody;
        bd.position.Set(ellipse.center.x/WORLD_SCALE, ellipse.center.y/WORLD_SCALE);
        Box2DData* obj = new Box2DData();
        strcpy(obj->type, "Circle");
        obj->radius = radius;
        bd.userData = obj;
        b2Body* body1 = world->CreateBody(&bd);
        b2FixtureDef fixtureDef;
        fixtureDef.shape = &cs;
        fixtureDef.density = 1.0f;
        fixtureDef.friction = 0.3f;
        fixtureDef.restitution = 0.6f;
        body1->CreateFixture(&fixtureDef);
      }

      time(&lastTime);

      //cvEllipseBox(destImg, ellipse, CV_RGB(125,125,255));
    }
  }
Example #15
0
int main( int argc, char** argv )
{
    char* filename = argc >= 2 ? argv[1] : (char*)"fruits.jpg";
    CvRNG rng = cvRNG(-1);

    if( (img0 = cvLoadImage(filename,1)) == 0 )
        return 0;

    printf( "Hot keys: \n"
            "\tESC - quit the program\n"
            "\tr - restore the original image\n"
            "\tw or SPACE - run watershed algorithm\n"
            "\t\t(before running it, roughly mark the areas on the image)\n"
            "\t  (before that, roughly outline several markers on the image)\n" );
    
    cvNamedWindow( "image", 1 );
    cvNamedWindow( "watershed transform", 1 );

    img = cvCloneImage( img0 );
    img_gray = cvCloneImage( img0 );
    wshed = cvCloneImage( img0 );
    marker_mask = cvCreateImage( cvGetSize(img), 8, 1 );
    markers = cvCreateImage( cvGetSize(img), IPL_DEPTH_32S, 1 );
    cvCvtColor( img, marker_mask, CV_BGR2GRAY );
    cvCvtColor( marker_mask, img_gray, CV_GRAY2BGR );

    cvZero( marker_mask );
    cvZero( wshed );
    cvShowImage( "image", img );
    cvShowImage( "watershed transform", wshed );
    cvSetMouseCallback( "image", on_mouse, 0 );

    for(;;)
    {
        int c = cvWaitKey(0);

        if( (char)c == 27 )
            break;

        if( (char)c == 'r' )
        {
            cvZero( marker_mask );
            cvCopy( img0, img );
            cvShowImage( "image", img );
        }

        if( (char)c == 'w' || (char)c == ' ' )
        {
            CvMemStorage* storage = cvCreateMemStorage(0);
            CvSeq* contours = 0;
            CvMat* color_tab;
            int i, j, comp_count = 0;
            //cvSaveImage( "wshed_mask.png", marker_mask );
            //marker_mask = cvLoadImage( "wshed_mask.png", 0 );
            cvFindContours( marker_mask, storage, &contours, sizeof(CvContour),
                            CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE );
            cvZero( markers );
            for( ; contours != 0; contours = contours->h_next, comp_count++ )
            {
                cvDrawContours( markers, contours, cvScalarAll(comp_count+1),
                                cvScalarAll(comp_count+1), -1, -1, 8, cvPoint(0,0) );
            }

            color_tab = cvCreateMat( 1, comp_count, CV_8UC3 );
            for( i = 0; i < comp_count; i++ )
            {
                uchar* ptr = color_tab->data.ptr + i*3;
                ptr[0] = (uchar)(cvRandInt(&rng)%180 + 50);
                ptr[1] = (uchar)(cvRandInt(&rng)%180 + 50);
                ptr[2] = (uchar)(cvRandInt(&rng)%180 + 50);
            }

            {
            double t = (double)cvGetTickCount();
            cvWatershed( img0, markers );
            t = (double)cvGetTickCount() - t;
            printf( "exec time = %gms\n", t/(cvGetTickFrequency()*1000.) );
            }

            // paint the watershed image
            for( i = 0; i < markers->height; i++ )
                for( j = 0; j < markers->width; j++ )
                {
                    int idx = CV_IMAGE_ELEM( markers, int, i, j );
                    uchar* dst = &CV_IMAGE_ELEM( wshed, uchar, i, j*3 );
                    if( idx == -1 )
                        dst[0] = dst[1] = dst[2] = (uchar)255;
                    else if( idx <= 0 || idx > comp_count )
                        dst[0] = dst[1] = dst[2] = (uchar)0; // should not get here
                    else
                    {
                        uchar* ptr = color_tab->data.ptr + (idx-1)*3;
                        dst[0] = ptr[0]; dst[1] = ptr[1]; dst[2] = ptr[2];
                    }
                }

            cvAddWeighted( wshed, 0.5, img_gray, 0.5, 0, wshed );
            cvShowImage( "watershed transform", wshed );
            cvReleaseMemStorage( &storage );
            cvReleaseMat( &color_tab );
        }
    }
    return 0;
}
int main(int argc, char* argv[])
{
	printf("Press Esc-Key to Exit Process.\n");

	RASPIVID_CONFIG * config = new RASPIVID_CONFIG();
	if(!config){
		printf("failed to create RASPIDVID_CONFIG.\n");
		return -1;
	}
	config->width=static_cast<int>(WIN_WIDTH);
	config->height=static_cast<int>(WIN_HEIGHT);
	config->bitrate=0;	// zero: leave as default
	config->framerate=0;
	config->monochrome=0;

	cvNamedWindow( DISP_WIN , CV_WINDOW_AUTOSIZE );
	RaspiCamCvCapture* capture = NULL;

	capture = raspiCamCvCreateCameraCapture2( 0, config );
	if(config){
		delete config;
		config = NULL;
	}
	if(!capture){
		printf("failed to create capture\n");
		return -1;
	}
	// Set the capture size.
	double w = WIN_WIDTH;
	double h = WIN_HEIGHT;
	raspiCamCvSetCaptureProperty (capture, RPI_CAP_PROP_FRAME_WIDTH, w);
	raspiCamCvSetCaptureProperty (capture, RPI_CAP_PROP_FRAME_HEIGHT, h);

	// Load the frontal face detector
	CvHaarClassifierCascade* cvHCC = (CvHaarClassifierCascade*)cvLoad(CASCADE, NULL,NULL,NULL);

	// Prepare the memory storage needed for detection
	CvMemStorage* cvMStr = cvCreateMemStorage(0);

	while(1){
		IplImage* frame = raspiCamCvQueryFrame(capture);
		if(!frame){
			printf("failed to query frame.\n");
			break;
		}
		// Detect target objects in the frame
		CvSeq* face = cvHaarDetectObjects(	  frame
											, cvHCC
											, cvMStr
											, 1.2
											, 2
											, CV_HAAR_DO_CANNY_PRUNING
											, minsiz
											, minsiz
		);
		if(!face){
			printf("failed to detect objects.\n");
			break;
		}

		int i=0;
		for(i = 0; i < face->total; i++) {
			// Get the face rectangle from the detection results
			CvRect* faceRect = (CvRect*)cvGetSeqElem(face, i);
			if(!faceRect){
				printf("failed to get Face-Rect.\n");
				break;
			}
			
			// Draw a rectangle based on the detected face position
			cvRectangle(	  frame
							, cvPoint(faceRect->x, faceRect->y)
							, cvPoint(faceRect->x + faceRect->width, faceRect->y + faceRect->height)
							, CV_RGB(255, 0 ,0)
							, 2
							, CV_AA
							, 0
			);
		}
		cvShowImage( DISP_WIN, frame);
		char c = cvWaitKey(DELAY_MSEC);
		if( c==27 ){ // ESC-Key
			break;
		}
		sleep(0);
	}

	// Release the memory storage
	cvReleaseMemStorage(&cvMStr);

	// Release the cascade classifier
	cvReleaseHaarClassifierCascade(&cvHCC);

	raspiCamCvReleaseCapture(&capture);
	cvDestroyWindow(DISP_WIN);

	return 0;
}
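// Supporting definitions assumed by the example above (paths and sizes are
// illustrative; adjust them to your setup):
#define WIN_WIDTH  640.0
#define WIN_HEIGHT 480.0
#define DISP_WIN   "RaspiCamPreview"
#define DELAY_MSEC 33
#define CASCADE    "/usr/share/opencv/haarcascades/haarcascade_frontalface_default.xml"
static const CvSize minsiz = cvSize(80, 80);   // minimum object size for cvHaarDetectObjects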
Example #17
0
char* char_ext(IplImage* imagen,basicOCR ocr  )
{
	
	//cvNamedWindow("temp");
	//cvShowImage("temp",imagen);
	//cvWaitKey(0);
	//char* plate=NULL;
	char* no=(char*)malloc(20*sizeof(char));
//------------------------------------------------------------------------------------
    //NUMBER ISOLATION

	//Create needed images
	smooth= cvCreateImage(cvGetSize(imagen), imagen->depth, 1);
	threshold= cvCreateImage(cvGetSize(imagen), imagen->depth, 1);
	open_morf= cvCreateImage(cvGetSize(imagen), imagen->depth, 1);
	
	//Init variables for countours
	contour = 0;
	contourLow = 0;
	//Create storage needed for contour detection
	CvMemStorage* storage = cvCreateMemStorage(0);
	//Smooth image
	cvSmooth(imagen, smooth, CV_GAUSSIAN, 3, 0, 0, 0);
	
	CvScalar avg;
	CvScalar avgStd;
	cvAvgSdv(smooth, &avg, &avgStd, NULL);
	//printf("Avg: %f\nStd: %f\n", avg.val[0], avgStd.val[0]);
	//threshold image
	cvThreshold(smooth, threshold, (int)avg.val[0]+4*(int)(avgStd.val[0]/8), 255, CV_THRESH_BINARY_INV);
	//Morphological filters
	cvErode(threshold, open_morf, NULL,1); 
	cvDilate(open_morf, open_morf, NULL,1); 
	//Duplicate image for contour detection
	img_contornos=cvCloneImage(open_morf);
	
	//Search contours in the preprocessed image
	cvFindContours( img_contornos, storage, &contour, sizeof(CvContour),
			CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE, cvPoint(0, 0) );
	//Optimize contours, reduce points
	contourLow=cvApproxPoly(contour, sizeof(CvContour), storage,CV_POLY_APPROX_DP,1,1);
//-----------------------------------------------------------------------------------------------------------
//-----------------------------------------------------------------------------------------------------------
//NUMBER RECOGNITION
	CvRect rect;
	int carea=0,area=0;
	int count=0;
    int match;	
    int w,h;
    w=imagen->width;
    h=imagen->height;
	area=(w)*(h);
//	printf("area : %d, %d %d\n",area,w,h);
	//printf("\n%d\n",area/26);
	
	char name[6];
	//static int width;
	for( ; contourLow != 0; contourLow = contourLow->h_next )
	{		

		rect=cvBoundingRect(contourLow,0);
		cvSetImageROI(smooth,rect);
		IplImage *temp22=cvCreateImage(cvSize(rect.width,rect.height),IPL_DEPTH_8U,1);
		IplImage *temp23=cvCreateImage(cvSize(rect.width,rect.height),IPL_DEPTH_8U,1);
		cvCopy(smooth,temp22,NULL);
		carea=rect.width*rect.height;

		/*if((rect.width>rect.height)||(carea>(area/6))||(carea<(area/25)))
		{	
		    cvReleaseImage(&temp22);
		    continue;
		}*/
		
		if((carea<(area/4))&&(carea>(area/25)))
		{
			static int width = temp22->width;
			sprintf(name,"char%d",count);
			cvNamedWindow(name);
			cvMoveWindow(name,840 - count*3*width,10);
			cvThreshold(temp22, temp23, (int)avg.val[0]+4*(int)(avgStd.val[0]/8), 255, CV_THRESH_BINARY);

			cvShowImage(name,temp23);
			cvWaitKey(500);
			match=ocr.classify(temp23,0);
			if(match<=25)
			    no[count]=97+match;
			else
			    no[count]=48+match-26;

			count++;

		}

		cvReleaseImage(&temp22);
		cvReleaseImage(&temp23);		
		cvResetImageROI(smooth);
	}
	cvWaitKey(0);

	no[count]='\0';
	rev(no,count);
	//strcpy(plate,no);
	//printf("\n%d\n",count);
//-------------------------------------------------------------------------------------------------------------------------------------
	//cvReleaseImage(&imagen_color);
	cvReleaseImage(&imagen);
	cvReleaseImage(&open_morf);
	cvReleaseImage(&img_contornos);	
	return no;
}
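// char_ext() above calls a rev() helper that is not part of this listing. The
// contours are walked in reverse image order, so the recognized string is built
// backwards; rev() presumably flips it in place. A minimal sketch (the
// implementation is an assumption):
void rev(char* s, int n)
{
	for (int i = 0, j = n - 1; i < j; i++, j--) {
		char t = s[i];
		s[i] = s[j];
		s[j] = t;
	}
}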
Example #18
0
int main()
{
	int c = 0, i = 0;
    CvCapture* capture = cvCaptureFromCAM(0);

	if(!cvQueryFrame(capture))
	{
		printf("Video capture failed, please check the camera.");
	}
	else
	{
		printf("Video camera capture status: OK");
	}

    CvSize sz = cvGetSize(cvQueryFrame( capture));
	
	height = sz.height;
    width = sz.width;
    step = sz.width;
    
    IplImage* src = cvCreateImage( sz, 8, 3 );
	IplImage* hsv_image = cvCreateImage( sz, 8, 3);
	IplImage* hsv_mask = cvCreateImage( sz, 8, 1);
	IplImage* handview = cvCreateImage(sz, 8, 1);
	CvScalar  hsv_min = cvScalar(5, 70, 0, 0);
	CvScalar  hsv_max = cvScalar(20, 150, 255, 0); //H-> 0-20

	while( c != 27)
	{
		//printf("%d\t\t",framecount);
		src = cvQueryFrame( capture);
		cvCvtColor(src, hsv_image, CV_BGR2HSV);
		cvInRangeS (hsv_image, hsv_min, hsv_max, hsv_mask);
		cvSmooth(hsv_mask, handview, CV_MEDIAN, 5, 0, 0, 0);
		cvDilate(handview, handview, NULL, 3);
		//cvDilate(hsv_mask, handview, NULL, 1);
		//cvErode(handview, handview, NULL, 1);
		//cvDilate(handview, handview, NULL, 1);
		
		CvMemStorage* storage = cvCreateMemStorage(0);
		CvSeq* contour = 0;
		cvFindContours(handview, storage, &contour, sizeof(CvContour), CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE );
		cvZero(handview);
		
		for( ; contour != 0; contour = contour->h_next )
		{
			// replace CV_FILLED with 1 to see the outlines         
			double area = cvContourArea( contour, CV_WHOLE_SEQ, 0);
			if(area > 500)
			{
				cvDrawContours( handview, contour, cvScalar( 255 ), cvScalar( 255 ), 0, 0, 8 );
				//max = area;
			}
		}
		//cvShowImage("handview", handview);
		
		tips(handview);

		// release the per-frame contour storage (it was allocated every iteration)
		cvReleaseMemStorage(&storage);

		//cvNamedWindow( "hsv-msk",1);
		//cvShowImage( "hsv-msk", hsv_mask); //hsv_mask->origin = 1; 
		
		for(i=0; i<tipcount; i++)
		{
			if(posmax == i)
				cvCircle(src, cvPoint(tips_position[posmax][1], tips_position[posmax][0]), 3, cvScalar(0,255,0), 2, 8, 0);	
			else
				cvCircle(src, cvPoint(tips_position[i][1], tips_position[i][0]), 3, cvScalar(255,0,0), 2, 8, 0);
			

			if(speed[i][1] > 1 && speed[i][2] > 1 && (int)speed[i][5] == 1 && oldtips[(oldtipflag+1)%FRAMERUN][i][2] == 1)
			{	
				cvCircle(src, cvPoint(speed[i][4], speed[i][3]), 5, cvScalar(0,0,255), 3, 8, 0);
				speed[i][1] = 0;
				speed[i][2] = 0;
				speed[i][5] = 0;
				//printf("check\t");
			}
			else if(speed[i][1] > 1 && speed[i][2] > 1  && oldtips[(oldtipflag+1)%FRAMERUN][i][2] == -1)
			{	
				//cvCircle(src, cvPoint(speed[posmax][4], speed[posmax][3]), 9, cvScalar(0,0,255), 3, 8, 0);
				speed[i][1] = speed[i][2];
				speed[i][2] = 0;
				speed[i][5] = 0;
				//printf("check\t");
			}
		}
					
		//printf("%d\t%d\t%d", (int)speed[3][1], (int)speed[3][2], (int)speed[3][5]);
			
		//printf("\n");
		/*if(speed[posmax][1] > 1 && speed[posmax][2] > 1 && oldtips[(oldtipflag+1)%FRAMERUN][posmax][2] == 1)
		{	
			cvCircle(src, cvPoint(speed[posmax][4], speed[posmax][3]), 5, cvScalar(0,0,255), 3, 8, 0);
			speed[posmax][1]=0;
			speed[posmax][2]=0;
			//printf("check\t");
		}
		else if(speed[posmax][1] > 1 && speed[posmax][2] > 1 && oldtips[(oldtipflag+1)%FRAMERUN][posmax][2] == -1)
		{	
			//cvCircle(src, cvPoint(speed[posmax][4], speed[posmax][3]), 5, cvScalar(0,0,255), 3, 8, 0);
			speed[posmax][1]=speed[posmax][2];
			speed[posmax][2]=0;
			//printf("check\t");
		}*/
		
		//sprintf(framename, "./frames/frame%d.jpg", framecount++);
		//cvSaveImage(framename, src);
			
		//maxspeed = posmax;
		oldtipflag = (oldtipflag+1)%FRAMERUN;	
		
		//printf("%d\t%f\t3\t%f\n", posmax, speed[posmax][0], speed[3][0]);
		cvNamedWindow( "src",1);
		cvShowImage( "src", src);

		c = cvWaitKey( 10);
	}

	cvReleaseCapture( &capture);
	cvReleaseImage(&hsv_image);
	cvReleaseImage(&hsv_mask);
	cvReleaseImage(&handview);
	cvDestroyAllWindows();

	return 0;
}
// Main function, defines the entry point for the program.
int main( int argc, char** argv )
{

    // Structure for getting video from camera or avi
    CvCapture* capture = 0;

    // Images to capture the frame from video or camera or from file
    IplImage *frame, *frame_copy = 0;

    // Used for calculations
    int optlen = strlen("--cascade=");

    // Input file name for avi or image file.
    const char* input_name;

    // Check for the correct usage of the command line
    if( argc > 1 && strncmp( argv[1], "--cascade=", optlen ) == 0 )
    {
        cascade_name = argv[1] + optlen;
        input_name = argc > 2 ? argv[2] : 0;
    }
    else
    {
        fprintf( stderr,
        "Usage: facedetect --cascade=\"<cascade_path>\" [filename|camera_index]\n" );
        return -1;
        /*input_name = argc > 1 ? argv[1] : 0;*/
    }

    // Load the HaarClassifierCascade
    cascade = (CvHaarClassifierCascade*)cvLoad( cascade_name, 0, 0, 0 );
    
    // Check whether the cascade has loaded successfully. Else report an error and quit
    if( !cascade )
    {
        fprintf( stderr, "ERROR: Could not load classifier cascade\n" );
        return -1;
    }
    
    // Allocate the memory storage
    storage = cvCreateMemStorage(0);
    
    // Find whether to detect the object from file or from camera.
    if( !input_name || (isdigit(input_name[0]) && input_name[1] == '\0') )
        capture = cvCaptureFromCAM( !input_name ? 0 : input_name[0] - '0' );
    else
        capture = cvCaptureFromAVI( input_name ); 

    // Create a new named window with title: result
    cvNamedWindow( "result", 1 );

    // Find if the capture is loaded successfully or not.

    // If loaded successfully, then:
    if( capture )
    {
        // Capture from the camera.
        for(int i=0;;i++)
        {
            // Capture the frame and load it in IplImage
            if( !cvGrabFrame( capture ))
                break;
            frame = cvRetrieveFrame( capture );

            // If the frame does not exist, quit the loop
            if( !frame )
                break;
            
            // Allocate framecopy as the same size of the frame
            if( !frame_copy )
                frame_copy = cvCreateImage( cvSize(frame->width,frame->height),
                                            IPL_DEPTH_8U, frame->nChannels );

            // Check the origin of image. If top left, copy the image frame to frame_copy. 
            if( frame->origin == IPL_ORIGIN_TL )
                cvCopy( frame, frame_copy, 0 );
            // Else flip and copy the image
            else
                cvFlip( frame, frame_copy, 0 );
            
            // Call the function to detect and draw the face
            char fname[50];
	    sprintf(fname, "camera_stream%05d.jpg", i);
	    detect_and_draw( frame_copy, fname );

            // Wait for a while before proceeding to the next frame
            if( cvWaitKey( 10 ) >= 0 )
                break;
        }

        // Release the images, and capture memory
        cvReleaseImage( &frame_copy );
        cvReleaseCapture( &capture );
    }

    // If the capture is not loaded successfully, then:
    else
    {
        // Assume the image to be lena.jpg, or the input_name specified
        const char* filename = input_name ? input_name : (char*)"lena.jpg";

        // Load the image from that filename
        IplImage* image = cvLoadImage( filename, 1 );

        // If the image is loaded successfully, then:
        if( image )
        {
            // Detect and draw the face
	  detect_and_draw( image, filename );

            // Wait for user input
            cvWaitKey(0);
            // Release the image memory
            cvReleaseImage( &image );
        }
        else
        {
            /* assume it is a text file containing the
               list of the image filenames to be processed - one per line */
            FILE* f = fopen( filename, "rt" );
            if( f )
            {
                char buf[1000+1];

                // Get the line from the file
                while( fgets( buf, 1000, f ) )
                {

                    // Remove the spaces if any, and clean up the name
                    int len = (int)strlen(buf);
                    while( len > 0 && isspace(buf[len-1]) )
                        len--;
                    buf[len] = '\0';

                    // Load the image from the filename present in the buffer
                    image = cvLoadImage( buf, 1 );

                    // If the image was loaded successfully, then:
                    if( image )
                    {
                        // Detect and draw the face from the image
		      detect_and_draw( image, buf );
                        
                        // Wait for the user input, and release the memory
                        //cvWaitKey(0);
                        cvReleaseImage( &image );
                    }
                }
                // Close the file
                fclose(f);
            }
        }

    }
    
    // Destroy the window previously created with filename: "result"
    cvDestroyWindow("result");
    printf("Done.\n");
    // return 0 to indicate successful execution of the program
    return 0;
}
Example #20
0
FrameAcquirer::FrameAcquirer(SinGen *s) 
{
	this->sg = s;
    
    // create all necessary instances
    cvNamedWindow (WINDOW_NAME, CV_WINDOW_AUTOSIZE);
    CvCapture * camera = cvCreateCameraCapture (CV_CAP_ANY);
    CvMemStorage* storage = cvCreateMemStorage(0);
    assert (storage);

    // you do own an iSight, don't you ?!?
    if (! camera)
        abort ();


    if(useMotion == false){
      // get an initial frame and duplicate it for later work
      IplImage *  current_frame = cvQueryFrame (camera);
      IplImage *  gray_image    = cvCreateImage(cvSize (current_frame->width, current_frame->height), IPL_DEPTH_8U, 1);
      IplImage *  gray_image2    = cvCreateImage(cvSize (current_frame->width, current_frame->height), IPL_DEPTH_8U, 1);
      assert (current_frame && gray_image);
      
      // as long as there are images ...
      while (current_frame = cvQueryFrame (camera))
      {
  	
          // convert to gray and downsize
        cvCvtColor (current_frame, gray_image, CV_BGR2GRAY);
        cvConvert( gray_image, gray_image2);
          
  	float vals[NUM_WAVES];
  	pixelate(gray_image,vals);
  	this->sg->setAmplitudes(vals);
         
  
          // just show the image
          cvAddWeighted( gray_image2, 0.5, gray_image, 0.5, 0.5, gray_image);
          cvShowImage (WINDOW_NAME, gray_image);
  
          // cvShowImage (WINDOW_NAME, current_frame);
          // wait a tenth of a second for keypress and window drawing
          int key = cvWaitKey (30);
          if (key == 'q' || key == 'Q')
              break;
      }

    }else{

      IplImage *  current_frame = cvQueryFrame (camera);
      IplImage *  gray_image    = cvCreateImage(cvSize (current_frame->width, current_frame->height), IPL_DEPTH_8U, 1);
      IplImage *  avg_img = cvCreateImage(cvSize (current_frame->width, current_frame->height), IPL_DEPTH_32F, 1);
      IplImage *  gavg_img = cvCreateImage(cvSize (current_frame->width, current_frame->height), IPL_DEPTH_8U, 1);
      IplImage *  diff_img = cvCreateImage(cvSize (current_frame->width, current_frame->height), IPL_DEPTH_8U, 1);
      IplImage *  diff_img2 = cvCreateImage(cvSize (current_frame->width, current_frame->height), IPL_DEPTH_8U, 1);
      IplImage *  diff_img3 = cvCreateImage(cvSize (current_frame->width, current_frame->height), IPL_DEPTH_8U, 1);
      
      // as long as there are images ...
      while (current_frame = cvQueryFrame (camera))
      {
  
          // convert to gray and downsize
          cvCvtColor (current_frame, gray_image, CV_BGR2GRAY);
  
          cvSmooth( gray_image, gray_image);
          
          cvRunningAvg( gray_image, avg_img, .250, NULL);
  
          cvConvert( avg_img, gavg_img );
  
          cvAbsDiff( gray_image, gavg_img, diff_img );    
       
          cvConvert( diff_img, diff_img2 );

  	  float vals[NUM_WAVES];
          pixelate(diff_img,vals);
  	  this->sg->setAmplitudes(vals);

          if(useMotionAndLight){
            pixelate(gray_image,vals);
            for(int i = 0; i < NUM_WAVES; i++){
              vals[i] *= C8;
            }
  	    this->sg->setFrequencies(vals);
          
            cvAddWeighted( diff_img2, 0.5, gray_image, 0.5, 0.5, diff_img);
            cvShowImage ( WINDOW_NAME, diff_img);
          }else{
            cvAddWeighted( diff_img, 0.5, diff_img2, 0.5, 0.5, diff_img);
            cvShowImage ( WINDOW_NAME, diff_img);

          }
          

          int key = cvWaitKey (30);
          if (key == 'q' || key == 'Q')
              break;
      }
  
    }
    
    // be nice and return no error
    return;
}
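// FrameAcquirer above depends on a pixelate() helper and a NUM_WAVES constant that
// are defined elsewhere in its project. Judging from the calls, pixelate() reduces a
// grayscale frame to NUM_WAVES per-region intensity values that drive the sine
// generator. A minimal sketch under that assumption (the strip layout, the value of
// NUM_WAVES, and the [0,1] normalization are guesses):
#define NUM_WAVES 8
void pixelate(IplImage* gray, float vals[NUM_WAVES])
{
    int strip = gray->width / NUM_WAVES;
    for (int i = 0; i < NUM_WAVES; i++) {
        cvSetImageROI(gray, cvRect(i * strip, 0, strip, gray->height));
        vals[i] = (float)(cvAvg(gray, NULL).val[0] / 255.0);   // mean brightness of the strip
        cvResetImageROI(gray);
    }
}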
void connected_Components(IplImage *mask, int poly1_hull0, float perimScale, int *num, CvRect *bbs, CvPoint *centers)
{
	static CvMemStorage*	mem_storage	= NULL;
	static CvSeq*			contours	= NULL;
	//CLEAN UP RAW MASK
	cvMorphologyEx( mask, mask, NULL, NULL, CV_MOP_OPEN, CVCLOSE_ITR );
	cvMorphologyEx( mask, mask, NULL, NULL, CV_MOP_CLOSE, CVCLOSE_ITR );

	//FIND CONTOURS AROUND ONLY BIGGER REGIONS
	if( mem_storage==NULL ) mem_storage = cvCreateMemStorage(0);
    else cvClearMemStorage(mem_storage);

	CvContourScanner scanner = cvStartFindContours(mask,mem_storage,sizeof(CvContour),CV_RETR_EXTERNAL,CV_CHAIN_APPROX_SIMPLE);
	CvSeq* c;
	int numCont = 0;
	while( (c = cvFindNextContour( scanner )) != NULL )
	{
		double len = cvContourPerimeter( c );
		double q = (mask->height + mask->width) /perimScale;   //calculate the perimeter length threshold
		if( len < q ) //Get rid of the blob if its perimeter is too small
		{
			cvSubstituteContour( scanner, NULL );
		}
		else //Smooth its edges if it is large enough
		{
			CvSeq* c_new;
			if(poly1_hull0) //Polygonal approximation of the segmentation
	            c_new = cvApproxPoly(c,sizeof(CvContour),mem_storage,CV_POLY_APPROX_DP, CVCONTOUR_APPROX_LEVEL,0);
			else //Convex Hull of the segmentation
				c_new = cvConvexHull2(c,mem_storage,CV_CLOCKWISE,1);
            cvSubstituteContour( scanner, c_new );
			numCont++;
        }
	}
	contours = cvEndFindContours( &scanner );

	// PAINT THE FOUND REGIONS BACK INTO THE IMAGE
	cvZero( mask );
	IplImage *maskTemp;
	//CALC CENTER OF MASS AND OR BOUNDING RECTANGLES
	if(num != NULL)
	{
		int N = *num, numFilled = 0, i=0;
		CvMoments moments;
		double M00, M01, M10;
		maskTemp = cvCloneImage(mask);
		for(i=0, c=contours; c != NULL; c = c->h_next,i++ )
		{
			if(i < N) //Only process up to *num of them
			{
				cvDrawContours(maskTemp,c,CV_CVX_WHITE, CV_CVX_WHITE,-1,CV_FILLED,8);
				//Find the center of each contour
				if(centers != NULL)
				{
					cvMoments(maskTemp,&moments,1);
					M00 = cvGetSpatialMoment(&moments,0,0);
					M10 = cvGetSpatialMoment(&moments,1,0);
					M01 = cvGetSpatialMoment(&moments,0,1);
					centers[i].x = (int)(M10/M00);
					centers[i].y = (int)(M01/M00);
				}
				//Bounding rectangles around blobs
				if(bbs != NULL)
				{
					bbs[i] = cvBoundingRect(c);
				}
				cvZero(maskTemp);
				numFilled++;
			}
			//Draw filled contours into mask
			cvDrawContours(mask,c,CV_CVX_WHITE,CV_CVX_WHITE,-1,CV_FILLED,8); //draw to central mask
		} //end looping over contours
		*num = numFilled;
		cvReleaseImage( &maskTemp);
	}
	else
	{
		for( c=contours; c != NULL; c = c->h_next )
		{
			cvDrawContours(mask,c,CV_CVX_WHITE, CV_CVX_BLACK,-1,CV_FILLED,8);
		}
	}
}
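// connected_Components() above uses a few constants and color macros defined
// elsewhere in the sample it comes from ("Learning OpenCV"). Plausible definitions,
// with the book's default values quoted from memory (treat them as assumptions):
#define CVCLOSE_ITR 1                              // morphology iterations for mask cleanup
#define CVCONTOUR_APPROX_LEVEL 2                   // accuracy of cvApproxPoly, in pixels
#define CV_CVX_WHITE CV_RGB(0xff, 0xff, 0xff)      // fill color for kept regions
#define CV_CVX_BLACK CV_RGB(0x00, 0x00, 0x00)      // color used to paint holes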
Example #22
0
int main(int argc, char *argv[])
{
	int c;
	IplImage* color_img;
	IplImage* hsv_img;
	IplImage* h_img;
	IplImage* gray;
	int flags = CV_WINDOW_AUTOSIZE;
	CvCapture* cv_cap = cvCaptureFromCAM(CAMERA_0);         // Capture from CAMERA 0
	int h = 180;
	int t1 = 3, t2 = 5;
	CvScalar min = CV_RGB(h-15,100,0);
	CvScalar max = CV_RGB(h+15,256,256);

	/* Create ellipse to despeckle hsv.  */
	IplConvKernel* ellipse = cvCreateStructuringElementEx(10, 10, 1, 1,
			CV_SHAPE_ELLIPSE, NULL);
	/* For X, Y, And Area */
	CvMoments moments;

	/* For contours  */
	CvMemStorage* storage = cvCreateMemStorage(0);
	CvSeq* contours = 0;

	double area, m01, m10;
	if (!cv_cap)
		goto fail;

	cvNamedWindow("Webcam Video", flags);                        // create window
	cvNamedWindow("hsv Video", flags);                        // create window
	cvNamedWindow("Contour Video", flags);                        // create window
	cvCreateTrackbar("Hue", "hsv Video", &h, 256, set_h);
	cvCreateTrackbar("countour1", "Contour Video", &t1, 256, set_h);
	cvCreateTrackbar("countour2", "Contour Video", &t2, 256, set_h);
	for(;;) {
		color_img = cvQueryFrame(cv_cap);               // get frame
		if(color_img != 0) {
			hsv_img = cvCreateImage(cvGetSize(color_img), IPL_DEPTH_8U, 3);
			gray = cvCreateImage(cvGetSize(hsv_img), IPL_DEPTH_8U, 1);
		}
		cvCvtColor(color_img, gray, CV_RGB2GRAY);
		cvCvtColor(color_img, hsv_img, CV_BGR2HSV);
		h_img = cvCreateImage(cvGetSize(hsv_img), IPL_DEPTH_8U, 1);


		/* HSV */
		min = CV_RGB(h-20,10,10);
		max = CV_RGB(h+20,256,256);

		/* Remove anything not in the hue range. */
		cvInRangeS(hsv_img, min, max, h_img);
		/* Remove noise, or at least make the blotches bigger? */
		cvErode(h_img, h_img, ellipse,1);
		cvDilate(h_img, h_img, ellipse,1);

		/* Calculate moments to figure out if the object is present */
		cvMoments(h_img, &moments, 1);

		area = cvGetSpatialMoment(&moments, 0,0);
		m01 = cvGetSpatialMoment(&moments, 0,1);
		m10 = cvGetSpatialMoment(&moments, 1,0);

		if (area > 17000) {
			int x = m10/area;
			int y = m01/area;
			printf("x = %d, y = %d (area = %f)\n", x, y, area);
		}

		/* Draw contours */
		cvCanny(gray, gray, (double)t1, (double)t2, 3);
		//cvDilate(gray, gray, 0, 1);
		cvDilate(gray, gray, ellipse, 1);
		cvFindContours(gray, storage, &contours, sizeof(CvContour), CV_RETR_TREE, CV_CHAIN_APPROX_NONE, cvPoint(0,0));
		cvDrawContours(color_img, contours, CV_RGB(254,0,0), CV_RGB(0,255,0), 10, 1, CV_AA, cvPoint(0,0));
		/* Display images */
		cvShowImage("hsv Video", h_img); // show frame
		cvShowImage("Contour Video", gray); // show frame
		cvShowImage("Webcam Video", color_img); // show frame

		c = cvWaitKey(KS_WAIT);                         // wait KS_WAIT ms or for key stroke
		if(c == 27)
			break;                                  // if ESC, break and quit
	}
	/* clean up */
	cvReleaseCapture( &cv_cap );
	cvDestroyWindow("Webcam Video");
	return 0;
fail:
	printf("capture from cam failed\n");
	return -1;
}
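// Supporting definitions assumed by the example above (values are illustrative):
#define CAMERA_0 0                      // index passed to cvCaptureFromCAM
#define KS_WAIT  10                     // per-frame cvWaitKey timeout in ms
static void set_h(int pos) { }          // trackbar callback; the bound ints are re-read each frame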
Example #23
0
bool E_LaughingMan::Init(ImgFile_Ptr pFile)
{
	m_faces = NULL;
	IImgLayer_weakPtr _sl = pFile->GetSelectLayer();
	if(_sl.expired()){
		OutputError::Alert(IE_ERROR_SELECT_LAYER_DONT_EXIST);
		return false;
	}

	IImgLayer_Ptr sl = _sl.lock();
	if(sl->GetLayerType() != IE_LAYER_TYPE::NORMAL_LAYER){
		OutputError::Alert(IE_ERROR_CANT_EDIT_LAYER);
		return false;
	}
	m_pEditLayer = std::dynamic_pointer_cast<ImgLayer>(sl);

	RECT rect, mask_rect;
	m_pEditLayer->GetLayerRect(&rect);

	// Get the mask bounds
	if(pFile->GetMaskState() != IE_MASK_STATE_ID::INVALID){
		pFile->GetMaskRect(&mask_rect);
		// Intersect the layer rect with the mask rect
		AndRect(&rect, &mask_rect, &rect);
	}

	CvSize sizeOfImage;
	sizeOfImage.width = rect.right - rect.left;
	sizeOfImage.height = rect.bottom - rect.top;

	m_pEditLayerHandle = (EditLayerHandle*)pFile->CreateImgFileHandle( IFH_EDIT_LAYER );
	m_pEditLayerHandle->SetHandleName( DISPLAY_NAME );
	pFile->DoImgFileHandle( m_pEditLayerHandle );

	m_pEditNode = m_pEditLayerHandle->CreateEditNode(&rect);
	m_pEditNode->blt_rect = rect;

	IplImage* bgraImg;
	IplImage* editImg;
	IplImage* grayImg;
	IplImage* editedImg;

	bgraImg = cvCreateImage(
		cvSize(rect.right - rect.left, rect.bottom - rect.top),
		IPL_DEPTH_8U,
		4);
	editImg = cvCreateImage(
		cvSize(rect.right - rect.left, rect.bottom - rect.top),
		IPL_DEPTH_8U,
		3);
	grayImg = cvCreateImage(
		cvSize(rect.right - rect.left, rect.bottom - rect.top),
		IPL_DEPTH_8U,
		1);
	editedImg = cvCreateImage(
		cvSize(rect.right - rect.left, rect.bottom - rect.top),
		IPL_DEPTH_8U,
		3);

	// Copy the active region's pixels out of the layer
	m_pEditLayer->CopyLayerImage(
		bgraImg,
		0,
		0,
		rect.left,
		rect.top,
		rect.right - rect.left,
		rect.bottom - rect.top);

	ucvCvtColor(bgraImg, editImg, CV_BGRA2BGR);
	cvCvtColor(editImg, grayImg, CV_BGR2GRAY);
	cvEqualizeHist(grayImg, grayImg);

	m_cascade = (CvHaarClassifierCascade*)cvLoad(CASCADE_FILE_PATH, 0, 0, 0);
	if (m_cascade == NULL) {
		return false;
	}

	m_storage = cvCreateMemStorage(0);
	cvClearMemStorage(m_storage);

    m_faces = cvHaarDetectObjects(grayImg, m_cascade, m_storage,
                                       1.1, 2, 0,//CV_HAAR_DO_CANNY_PRUNING,
                                       cvSize(30, 30) );


	cvReleaseImage(&bgraImg);
	cvReleaseImage(&editImg);
    cvReleaseImage(&grayImg);
    cvReleaseImage(&editedImg);

	return true;
}
Example #24
0
// parameters:
//  img - input video frame
//  dst - resultant motion picture
//  args - optional parameters
void  update_mhi( IplImage* img, IplImage* dst, int diff_threshold )
{
    double timestamp = (double)clock()/CLOCKS_PER_SEC; // get current time in seconds
    CvSize size = cvSize(img->width,img->height); // get current frame size
    int i, idx1 = last, idx2;
    IplImage* silh;
    CvSeq* seq;
    CvRect comp_rect;
    double count;
    double angle;
    CvPoint center;
    double magnitude;          
    CvScalar color;

    // allocate images at the beginning or
    // reallocate them if the frame size is changed
    if( !mhi || mhi->width != size.width || mhi->height != size.height ) {
        if( buf == 0 ) {
            buf = (IplImage**)malloc(N*sizeof(buf[0]));
            memset( buf, 0, N*sizeof(buf[0]));
        }
        
        for( i = 0; i < N; i++ ) {
            cvReleaseImage( &buf[i] );
            buf[i] = cvCreateImage( size, IPL_DEPTH_8U, 1 );
            cvZero( buf[i] );
        }
        cvReleaseImage( &mhi );
        cvReleaseImage( &orient );
        cvReleaseImage( &segmask );
        cvReleaseImage( &mask );
        
        mhi = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        cvZero( mhi ); // clear MHI at the beginning
        orient = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        segmask = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        mask = cvCreateImage( size, IPL_DEPTH_8U, 1 );
    }

    cvCvtColor( img, buf[last], CV_BGR2GRAY ); // convert frame to grayscale

    idx2 = (last + 1) % N; // index of (last - (N-1))th frame
    last = idx2;

    silh = buf[idx2];
    cvAbsDiff( buf[idx1], buf[idx2], silh ); // get difference between frames
    
    cvThreshold( silh, silh, diff_threshold, 1, CV_THRESH_BINARY ); // and threshold it
    cvUpdateMotionHistory( silh, mhi, timestamp, MHI_DURATION ); // update MHI

    // convert MHI to blue 8u image
    cvCvtScale( mhi, mask, 255./MHI_DURATION,
                (MHI_DURATION - timestamp)*255./MHI_DURATION );
    cvZero( dst );
    cvCvtPlaneToPix( mask, 0, 0, 0, dst );

    // calculate motion gradient orientation and valid orientation mask
    cvCalcMotionGradient( mhi, mask, orient, MAX_TIME_DELTA, MIN_TIME_DELTA, 3 );
    
    if( !storage )
        storage = cvCreateMemStorage(0);
    else
        cvClearMemStorage(storage);
    
    // segment motion: get sequence of motion components
    // segmask is marked motion components map. It is not used further
    seq = cvSegmentMotion( mhi, segmask, storage, timestamp, MAX_TIME_DELTA );

    // iterate through the motion components,
    // One more iteration (i == -1) corresponds to the whole image (global motion)
    for( i = -1; i < seq->total; i++ ) {

        if( i < 0 ) { // case of the whole image
            comp_rect = cvRect( 0, 0, size.width, size.height );
            color = CV_RGB(255,255,255);
            magnitude = 100;
        }
        else { // i-th motion component
            comp_rect = ((CvConnectedComp*)cvGetSeqElem( seq, i ))->rect;
            if( comp_rect.width + comp_rect.height < 100 ) // reject very small components
                continue;
            color = CV_RGB(255,0,0);
            magnitude = 30;
        }

        // select component ROI
        cvSetImageROI( silh, comp_rect );
        cvSetImageROI( mhi, comp_rect );
        cvSetImageROI( orient, comp_rect );
        cvSetImageROI( mask, comp_rect );

        // calculate orientation
        angle = cvCalcGlobalOrientation( orient, mask, mhi, timestamp, MHI_DURATION);
        angle = 360.0 - angle;  // adjust for images with top-left origin

        count = cvNorm( silh, 0, CV_L1, 0 ); // calculate number of points within silhouette ROI

        cvResetImageROI( mhi );
        cvResetImageROI( orient );
        cvResetImageROI( mask );
        cvResetImageROI( silh );

        // check for the case of little motion
        if( count < comp_rect.width*comp_rect.height * 0.05 )
            continue;

        // draw a clock with arrow indicating the direction
        center = cvPoint( (comp_rect.x + comp_rect.width/2),
                          (comp_rect.y + comp_rect.height/2) );

        cvCircle( dst, center, cvRound(magnitude*1.2), color, 3, CV_AA, 0 );
        cvLine( dst, center, cvPoint( cvRound( center.x + magnitude*cos(angle*CV_PI/180)),
                cvRound( center.y - magnitude*sin(angle*CV_PI/180))), color, 3, CV_AA, 0 );
    }
}
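// update_mhi() above is written to be driven one frame at a time. The capture loop
// below is a hypothetical driver in the style of OpenCV's motempl sample (the
// function name, window name, and the threshold value of 30 are illustrative):
int run_motion_demo(void)
{
    CvCapture* capture = cvCaptureFromCAM(0);
    IplImage* motion = 0;
    if( !capture )
        return -1;
    cvNamedWindow( "Motion", 1 );
    for(;;)
    {
        IplImage* frame = cvQueryFrame( capture );
        if( !frame )
            break;
        if( !motion )
        {
            motion = cvCreateImage( cvSize(frame->width, frame->height), 8, 3 );
            cvZero( motion );
            motion->origin = frame->origin;
        }
        update_mhi( frame, motion, 30 );   // 30 = frame-difference threshold
        cvShowImage( "Motion", motion );
        if( cvWaitKey(10) >= 0 )
            break;
    }
    cvReleaseCapture( &capture );
    cvDestroyWindow( "Motion" );
    return 0;
}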
int main(int argc, char** argv)
{
  std::cout << "Using OpenCV " << CV_MAJOR_VERSION << "." << CV_MINOR_VERSION << "." << CV_SUBMINOR_VERSION << std::endl;
  
  CvCapture *capture;
  IplImage  *frame;
  int input_resize_percent = 100;
  
  if(argc < 3)
  {
    std::cout << "Usage " << argv[0] << " cascade.xml /home/msit/dataCollection/sftp://[email protected]/home/msit/dataCollection/slavePi2_RW1600_RH1200_TT60_FR15_06_03_2016_17_01_45_618527.h264" << std::endl;
    return 0;
  }

  if(argc == 4)
  {
    input_resize_percent = atoi(argv[3]);
    std::cout << "Resizing to: " << input_resize_percent << "%" << std::endl;
  }

  cascade = (CvHaarClassifierCascade*) cvLoad(argv[1], 0, 0, 0);
  storage = cvCreateMemStorage(0);
  capture = cvCaptureFromAVI(argv[2]);

  assert(cascade && storage && capture);

  cvNamedWindow("video", 1);

  IplImage* frame1 = cvQueryFrame(capture);
  frame = cvCreateImage(cvSize((int)((frame1->width*input_resize_percent)/100) , (int)((frame1->height*input_resize_percent)/100)), frame1->depth, frame1->nChannels);

  int key = 0;
  do
  {
    frame1 = cvQueryFrame(capture);

    if(!frame1)
      break;

    cvResize(frame1, frame);

    detect(frame);

    key = cvWaitKey(33);

    if(key == KEY_SPACE)
      key = cvWaitKey(0);

    if(key == KEY_ESC)
      break;

  }while(1);

  cvDestroyAllWindows();
  cvReleaseImage(&frame);
  cvReleaseCapture(&capture);
  cvReleaseHaarClassifierCascade(&cascade);
  cvReleaseMemStorage(&storage);

  return 0;
}
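// The video loop above calls a detect() helper that is not part of this listing.
// Based on the globals it uses (cascade, storage) and the "video" window created in
// main(), a plausible sketch:
void detect(IplImage* frame)
{
  cvClearMemStorage(storage);
  CvSeq* objects = cvHaarDetectObjects(frame, cascade, storage,
                                       1.1, 2, CV_HAAR_DO_CANNY_PRUNING,
                                       cvSize(30, 30));
  for(int i = 0; i < (objects ? objects->total : 0); i++)
  {
    // outline each detected object
    CvRect* r = (CvRect*)cvGetSeqElem(objects, i);
    cvRectangle(frame, cvPoint(r->x, r->y),
                cvPoint(r->x + r->width, r->y + r->height),
                CV_RGB(255, 0, 0), 2, 8, 0);
  }
  cvShowImage("video", frame);
}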
Example #26
0
File: AAM_IC.cpp Project: kod3r/cv
//============================================================================
void AAM_IC::Train(const file_lists& pts_files, 
				   const file_lists& img_files, 
				   double scale /* = 1.0 */, 
				   double shape_percentage /* = 0.975 */, 
				   double texture_percentage /* = 0.975 */)
{
	if(pts_files.size() != img_files.size())
	{
		fprintf(stderr, "ERROE(%s, %d): #Shapes != #Images\n",
			__FILE__, __LINE__);
		exit(0);
	}

	printf("################################################\n");
	printf("Build Inverse Compositional Image Alignmennt Model...\n");

	std::vector<AAM_Shape> AllShapes;
	for(int ii = 0; ii < pts_files.size(); ii++)
	{
		AAM_Shape Shape;
		bool flag = Shape.ReadAnnotations(pts_files[ii]);
		if(!flag)
		{
			IplImage* image = cvLoadImage(img_files[ii].c_str(), -1);
			Shape.ScaleXY(image->width, image->height);
			cvReleaseImage(&image);
		}
		AllShapes.push_back(Shape);
	}

	//building shape and texture distribution model
	printf("Build point distribution model...\n");
	__shape.Train(AllShapes, scale, shape_percentage);
	
	printf("Build warp information of mean shape mesh...");
	__Points = cvCreateMat (1, __shape.nPoints(), CV_32FC2);
	__Storage = cvCreateMemStorage(0);
	__paw.Train(__shape.GetMeanShape(), __Points, __Storage);
	printf("[%d by %d, triangles #%d, pixels #%d*3]\n",
		__paw.Width(), __paw.Height(), __paw.nTri(), __paw.nPix());

	printf("Build texture distribution model...\n");
	__texture.Train(pts_files, img_files, __paw, texture_percentage, true);

	//calculate gradient of texture
	printf("Calculating texture gradient...\n");
	CvMat* dTx = cvCreateMat(1, __texture.nPixels(), CV_64FC1);
	CvMat* dTy = cvCreateMat(1, __texture.nPixels(), CV_64FC1);
	CalcTexGrad(__texture.GetMean(), dTx, dTy);
	
	// save gradient image
	//mkdir("Modes");
	__paw.SaveWarpTextureToImage("Modes/dTx.jpg", dTx);
	__paw.SaveWarpTextureToImage("Modes/dTy.jpg", dTy);
	
	//calculate warp Jacobian at base shape
	printf("Calculating warp Jacobian...\n");
	CvMat* Jx = cvCreateMat(__paw.nPix(), __shape.nModes()+4, CV_64FC1);
	CvMat* Jy = cvCreateMat(__paw.nPix(), __shape.nModes()+4, CV_64FC1);
	CalcWarpJacobian(Jx,Jy);
	
	//calculate modified steepest descent image
	printf("Calculating steepest descent images...\n");
	CvMat* SD = cvCreateMat(__shape.nModes()+4, __texture.nPixels(), CV_64FC1);
	CalcModifiedSD(SD, dTx, dTy, Jx, Jy);

	//calculate inverse Hessian matrix
	printf("Calculating Hessian inverse matrix...\n");
	CvMat* H = cvCreateMat(__shape.nModes()+4, __shape.nModes()+4, CV_64FC1);
	CalcHessian(H, SD);

	//calculate update matrix (multiply inverse Hessian by modified steepest descent image)
	__G = cvCreateMat(__shape.nModes()+4, __texture.nPixels(), CV_64FC1);
	cvMatMul(H, SD, __G);

	//release
	cvReleaseMat(&Jx);
	cvReleaseMat(&Jy);
	cvReleaseMat(&dTx);
	cvReleaseMat(&dTy);
	cvReleaseMat(&SD);
	cvReleaseMat(&H);

	//allocate memory for the online fitting data
	__update_s0 = cvCreateMat(1, __shape.nPoints()*2, CV_64FC1);
	__inv_pq = cvCreateMat(1, __shape.nModes()+4, CV_64FC1);
	__warp_t = cvCreateMat(1, __texture.nPixels(), CV_64FC1);
	__error_t = cvCreateMat(1, __texture.nPixels(), CV_64FC1);
	__search_pq = cvCreateMat(1, __shape.nModes()+4, CV_64FC1);
	__delta_pq = cvCreateMat(1, __shape.nModes()+4, CV_64FC1);
	__current_s = cvCreateMat(1, __shape.nPoints()*2, CV_64FC1);
	__update_s = cvCreateMat(1, __shape.nPoints()*2, CV_64FC1);
	__lamda  = cvCreateMat(1, __texture.nModes(), CV_64FC1);

	printf("################################################\n\n");
}
int mushroom_read_database( const char* filename, CvMat** data, CvMat** missing, CvMat** responses )
{
    const int M = 1024;
    FILE* f = fopen( filename, "rt" );
    CvMemStorage* storage;
    CvSeq* seq;
    char buf[M+2], *ptr;
    float* el_ptr;
    CvSeqReader reader;
    int i, j, var_count = 0;

    if( !f )
        return 0;

    // read the first line and determine the number of variables
    if( !fgets( buf, M, f ))
    {
        fclose(f);
        return 0;
    }

    for( ptr = buf; *ptr != '\0'; ptr++ )
        var_count += *ptr == ',';
    assert( ptr - buf == (var_count+1)*2 );

    // create temporary memory storage to store the whole database
    el_ptr = new float[var_count+1];
    storage = cvCreateMemStorage();
    seq = cvCreateSeq( 0, sizeof(*seq), (var_count+1)*sizeof(float), storage );

    for(;;)
    {
        for( i = 0; i <= var_count; i++ )
        {
            int c = buf[i*2];
            el_ptr[i] = c == '?' ? -1.f : (float)c;
        }
        if( i != var_count+1 )
            break;
        cvSeqPush( seq, el_ptr );
        if( !fgets( buf, M, f ) || !strchr( buf, ',' ) )
            break;
    }
    fclose(f);

    // allocate the output matrices and copy the base there
    *data = cvCreateMat( seq->total, var_count, CV_32F );
    *missing = cvCreateMat( seq->total, var_count, CV_8U );
    *responses = cvCreateMat( seq->total, 1, CV_32F );

    cvStartReadSeq( seq, &reader );

    for( i = 0; i < seq->total; i++ )
    {
        const float* sdata = (float*)reader.ptr + 1;
        float* ddata = data[0]->data.fl + var_count*i;
        float* dr = responses[0]->data.fl + i;
        uchar* dm = missing[0]->data.ptr + var_count*i;

        for( j = 0; j < var_count; j++ )
        {
            ddata[j] = sdata[j];
            dm[j] = sdata[j] < 0;
        }
        *dr = sdata[-1];
        CV_NEXT_SEQ_ELEM( seq->elem_size, reader );
    }

    cvReleaseMemStorage( &storage );
    delete[] el_ptr;
    return 1;
}
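// Usage sketch for mushroom_read_database(). The file name follows the UCI mushroom
// dataset that the OpenCV sample ships against; treat it as an assumption:
void mushroom_demo(void)
{
    CvMat *data = 0, *missing = 0, *responses = 0;
    if( mushroom_read_database( "agaricus-lepiota.data", &data, &missing, &responses ) )
    {
        printf( "loaded %d samples with %d variables each\n", data->rows, data->cols );
        /* ... train a classifier on data/missing/responses here ... */
        cvReleaseMat( &data );
        cvReleaseMat( &missing );
        cvReleaseMat( &responses );
    }
}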
//The function detects the hand from input frame and draws a rectangle around the detected portion of the frame
void detect_and_draw( IplImage* img )
{

    // Create memory for calculations
    static CvMemStorage* storage = 0;

    // Create a new Haar classifier
    static CvHaarClassifierCascade* cascade = 0;

    // Sets the scale with which the rectangle is drawn with
    int scale = 1;

    // Create two points to represent the hand locations
    CvPoint pt1, pt2;

    // Looping variable
    int i; 

    // Load the HaarClassifierCascade (only on the first call; it is cached in the static above)
    if( !cascade )
        cascade = (CvHaarClassifierCascade*)cvLoad( cascade_name, 0, 0, 0 );
    
    // Check whether the cascade has loaded successfully. Else report an error and quit
    if( !cascade )
    {
        fprintf( stderr, "ERROR: Could not load classifier cascade\n" );
        return;
    }
    
    // Allocate the memory storage (once; it is cleared before every detection below)
    if( !storage )
        storage = cvCreateMemStorage(0);

    // Create a new named window with title: result
    cvNamedWindow( "result", 1 );

    // Clear the memory storage which was used before
    cvClearMemStorage( storage );

    // Find whether the cascade is loaded, to find the hands. If yes, then:
    if( cascade )
    {

        // There can be more than one hand in an image. So create a growable sequence of hands.
        // Detect the objects and store them in the sequence
        CvSeq* hands = cvHaarDetectObjects( img, cascade, storage,
                                            1.1, 2, CV_HAAR_DO_CANNY_PRUNING,
                                            cvSize(40, 40) );

        // Loop the number of hands found.
        for( i = 0; i < (hands ? hands->total : 0); i++ )
        {
           // Create a new rectangle for drawing the hand
            CvRect* r = (CvRect*)cvGetSeqElem( hands, i );

            // Find the dimensions of the hand,and scale it if necessary
            pt1.x = r->x*scale;
            pt2.x = (r->x+r->width)*scale;
            pt1.y = r->y*scale;
            pt2.y = (r->y+r->height)*scale;

            // Draw the rectangle in the input image
            cvRectangle( img, pt1, pt2, CV_RGB(230,20,232), 3, 8, 0 );
        }
    }

    // Show the image in the window named "result"
    cvShowImage( "result", img );

   
}
static int aGestureRecognition(void)
{       
    IplImage *image, *imagew, *image_rez, *mask_rez, *image_hsv, *img_p[2],*img_v,
             *init_mask_ver = 0, *final_mask_ver = 0;
    CvPoint3D32f *pp, p;

    CvPoint pt;
    CvSize2D32f fsize;
    CvPoint3D32f center, cf;
    IplImage *image_mask, *image_maskw;
    
    CvSize size;
    CvHistogram *hist, *hist_mask;

    int width, height;
    int k_points, k_indexs;
    int warpFlag, interpolate;

    int hdim[2] = {20, 20};
    
    double coeffs[3][3], rect[2][2], rez = 0, eps_rez = 2.5, rez_h;
    float *thresh[2];
    float hv[3];
    
    float reps, aeps, ww;
    float line[6], in[3][3], h[3][3];
    float cx, cy, fx, fy;

    static char num[4]; 
    
    char *name_image;  
    char *name_range_image;
    char *name_verify_data;
    char *name_init_mask_very;
    char *name_final_mask_very;

    CvSeq *numbers;
    CvSeq *points;
    CvSeq *indexs;
        
    CvMemStorage *storage;
    CvRect hand_roi, hand_roi_trans;
    
    int i,j, lsize, block_size = 1000, flag;
    int code;

    FILE *filin, *fil_ver;

/* read tests params */

    code = TRS_OK;

/*  define input information    */
    strcpy (num, "001"); 

    lsize = strlen(data_path)+12;
    name_verify_data = (char*)trsmAlloc(lsize);
    name_range_image = (char*)trsmAlloc(lsize);
    name_image = (char*)trsmAlloc(lsize);

    name_init_mask_very = (char*)trsmAlloc(lsize);
    name_final_mask_very = (char*)trsmAlloc(lsize);

/*  define input range_image file path        */
    strcpy(name_range_image, data_path);
    strcat(name_range_image, "rpts");
    strcat(name_range_image, num);
    strcat(name_range_image, ".txt");

/*  define input image file path        */
    strcpy(name_image, data_path);
    strcat(name_image, "real");
    strcat(name_image, num);
    strcat(name_image, ".bmp");

/*  define verify data file path        */
    strcpy(name_verify_data, data_path);
    strcat(name_verify_data, "very");
    strcat(name_verify_data, num);
    strcat(name_verify_data, ".txt");

/*  define verify init mask file path    */
    strcpy(name_init_mask_very, data_path);
    strcat(name_init_mask_very, "imas");
    strcat(name_init_mask_very, num);
    strcat(name_init_mask_very, ".bmp");

/*  define verify final mask file path    */
    strcpy(name_final_mask_very, data_path);
    strcat(name_final_mask_very, "fmas");
    strcat(name_final_mask_very, num);
    strcat(name_final_mask_very, ".bmp");

    filin = fopen(name_range_image,"r");
    fil_ver = fopen(name_verify_data,"r");

    fscanf( filin, "\n%d %d\n", &width, &height);
    printf("width=%d height=%d  reading testing data...", width,height);

    OPENCV_CALL( storage = cvCreateMemStorage ( block_size ) );
    OPENCV_CALL( points = cvCreateSeq( CV_SEQ_POINT3D_SET, sizeof(CvSeq),
                            sizeof(CvPoint3D32f), storage ) );
    OPENCV_CALL (indexs = cvCreateSeq( CV_SEQ_POINT_SET, sizeof(CvSeq),
                            sizeof(CvPoint), storage ) );

    pp = 0;
    
/*  read input image from file   */   
    image = atsCreateImageFromFile( name_image );
    if(image == NULL)   {code = TRS_FAIL; goto m_exit;}

/*  read input 3D points from input file        */
    for (i = 0; i < height; i++)
    {
        for (j = 0; j < width; j++)    
        {
            fscanf( filin, "%f %f %f\n", &p.x, &p.y, &p.z);
            if(/*p.x != 0 || p.y != 0 ||*/ p.z != 0)
            {
                OPENCV_CALL(cvSeqPush(points, &p));
                pt.x = j; pt.y = i;
                OPENCV_CALL(cvSeqPush(indexs, &pt));
                               
            }
        }
    }

    k_points = points->total;
    k_indexs = indexs->total;

/*   convert sequence to array          */
    pp = (CvPoint3D32f*)trsmAlloc(k_points * sizeof(CvPoint3D32f));
    OPENCV_CALL(cvCvtSeqToArray(points, pp ));

/*  find 3D-line      */

    reps = (float)0.1;
    aeps = (float)0.1;
    ww = (float)0.08;

    OPENCV_CALL( cvFitLine3D(pp, k_points, CV_DIST_WELSCH, &ww, reps, aeps, line ));
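    /* line now holds the fitted 3D line packed as six floats: a direction
       vector followed by a point on the line (the same layout cvFitLine
       uses for 3D input) */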

/*  find hand location      */
    flag = -1;
    fsize.width = fsize.height = (float)0.22;  //   (hand size in m)

    numbers = NULL;
    OPENCV_CALL( cvFindHandRegion (pp, k_points, indexs,line, fsize,
                      flag,&center,storage, &numbers));

/*   read verify data    */
    fscanf( fil_ver, "%f %f %f\n", &cf.x, &cf.y, &cf.z);
    rez+= cvSqrt((center.x - cf.x)*(center.x - cf.x)+(center.y - cf.y)*(center.y - cf.y)+
         (center.z - cf.z)*(center.z - cf.z))/3.;
    
/*    create hand mask            */
    size.height = height;
    size.width = width;
    OPENCV_CALL( image_mask = cvCreateImage(size, IPL_DEPTH_8U, 1) ); 

    OPENCV_CALL( cvCreateHandMask(numbers, image_mask, &hand_roi) );

/*  read verify initial image mask                  */
    init_mask_ver = atsCreateImageFromFile( name_init_mask_very );
    if(init_mask_ver == NULL)   {code = TRS_FAIL; goto m_exit;}
    
    rez+= iplNorm(init_mask_ver, image_mask, IPL_L2) / (width*height+0.);

/*  calculate homographic transformation matrix            */
    cx = (float)(width / 2.);
    cy = (float)(height / 2.);
    fx = fy = (float)571.2048;

/* define intrinsic camera parameters                      */
    in[0][1] = in[1][0] = in[2][0] = in[2][1] = 0;
    in[0][0] = fx; in[0][2] = cx;
    in[1][1] = fy; in[1][2] = cy;
    in[2][2] = 1;

    OPENCV_CALL( cvCalcImageHomography(line, &center, in, h) );
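    /* h now holds the 3x3 homography of the arm plane, computed from the
       fitted 3D line, the hand center and the intrinsic matrix */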
    
    rez_h = 0;
    for(i=0;i<3;i++)
    {
        fscanf( fil_ver, "%f %f %f\n", &hv[0], &hv[1], &hv[2]);
        for(j=0;j<3;j++)
        {
            rez_h+=(hv[j] - h[i][j])*(hv[j] - h[i][j]);
        }
    }
    rez+=sqrt(rez_h)/9.;

/*   image unwarping         */
    size.width = image->width; 
    size.height = image->height; 
    OPENCV_CALL( imagew = cvCreateImage(size, IPL_DEPTH_8U,3) );
    OPENCV_CALL( image_maskw = cvCreateImage(size, IPL_DEPTH_8U,1) );

    iplSet(image_maskw, 0);

    cvSetImageROI(image, hand_roi);
    cvSetImageROI(image_mask, hand_roi);

/* convert homographic transformation matrix from float to double      */
    for(i=0;i<3;i++)
        for(j=0;j<3;j++)
            coeffs[i][j] = (double)h[i][j];

/*  get bounding rectangle for image ROI         */
    iplGetPerspectiveBound(image, coeffs, rect);

    width = (int)(rect[1][0] - rect[0][0]);
    height = (int)(rect[1][1] - rect[0][1]);
    hand_roi_trans.x = (int)rect[0][0];hand_roi_trans.y = (int)rect[0][1];
    hand_roi_trans.width = width; hand_roi_trans.height = height;

    cvMaxRect(&hand_roi, &hand_roi_trans, &hand_roi);
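    /* hand_roi now bounds both the original and the transformed hand
       regions, so one common ROI covers the source and the warped image */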
    iplSetROI((IplROI*)image->roi, 0, hand_roi.x, hand_roi.y,
               hand_roi.width,hand_roi.height);
    iplSetROI((IplROI*)image_mask->roi, 0, hand_roi.x, hand_roi.y,
                hand_roi.width,hand_roi.height);

    warpFlag = IPL_WARP_R_TO_Q;
/*    interpolate = IPL_INTER_CUBIC;   */
/*    interpolate = IPL_INTER_NN;      */
    interpolate = IPL_INTER_LINEAR;
    iplWarpPerspective(image, imagew, coeffs, warpFlag, interpolate);
    iplWarpPerspective(image_mask, image_maskw, coeffs, warpFlag, IPL_INTER_NN);  
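    /* the mask is warped with nearest-neighbour interpolation so it stays
       strictly binary; linear interpolation would smear its edges into
       intermediate gray values */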
    
/*  set new image and mask ROI after transformation        */
    iplSetROI((IplROI*)imagew->roi,0, (int)rect[0][0], (int)rect[0][1],(int)width,(int)height);
    iplSetROI((IplROI*)image_maskw->roi,0, (int)rect[0][0], (int)rect[0][1],(int)width,(int)height);

/*  copy image ROI to new image and resize        */
    size.width = width; size.height = height;
    image_rez = cvCreateImage(size, IPL_DEPTH_8U,3);
    mask_rez = cvCreateImage(size, IPL_DEPTH_8U,1);
 
    iplCopy(imagew,image_rez);
    iplCopy(image_maskw,mask_rez);
    
/* convert result image from RGB to HSV               */
    image_hsv = iplCreateImageHeader(3, 0, IPL_DEPTH_8U, "HSV", "HSV",
                                   IPL_DATA_ORDER_PIXEL, IPL_ORIGIN_TL,IPL_ALIGN_DWORD,
                                   image_rez->width, image_rez->height, NULL, NULL, NULL, NULL);
    iplAllocateImage(image_hsv, 0, 0 ); 
    strcpy(image_rez->colorModel, "RGB");
    strcpy(image_rez->channelSeq, "RGB");
    image_rez->roi = NULL;

    iplRGB2HSV(image_rez, image_hsv);

/* convert to three images planes                      */
    img_p[0] = cvCreateImage(size, IPL_DEPTH_8U,1);
    img_p[1] = cvCreateImage(size, IPL_DEPTH_8U,1);
    img_v = cvCreateImage(size, IPL_DEPTH_8U,1);

    cvCvtPixToPlane(image_hsv, img_p[0], img_p[1], img_v, NULL);
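    /* only the H and S planes feed the 2D histograms below; the V plane is
       split out as well but not used any further */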
   
/*  calculate histograms                */
    hist = cvCreateHist ( 2, hdim, CV_HIST_ARRAY);
    hist_mask = cvCreateHist ( 2, hdim, CV_HIST_ARRAY);
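    /* two 20x20-bin histograms over the (H, S) pair: one for the whole image
       and one restricted to the hand mask */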

/*  set histogram bin thresholds         */
    thresh[0] = (float*) trsmAlloc(2*sizeof(float));
    thresh[1] = (float*) trsmAlloc(2*sizeof(float));

    thresh[0][0] = thresh[1][0] = -0.5;
    thresh[0][1] = thresh[1][1] = 255.5;
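    /* uniform bin ranges spanning the full 8-bit value range in both
       dimensions, shared by the two histograms */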
    cvSetHistThresh( hist, thresh, 1);
    cvSetHistThresh( hist_mask, thresh, 1);

    cvCalcHist(img_p, hist, 0);
        
    cvCalcHistMask(img_p, mask_rez, hist_mask, 0);
            
    cvCalcProbDensity(hist, hist_mask, hist_mask);
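    /* hist_mask now holds the ratio histogram: for each (H, S) bin the
       masked count divided by the whole-image count, i.e. the probability
       that a pixel with those values belongs to the hand */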

    cvCalcBackProject( img_p, mask_rez, hist_mask ); 
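    /* back projection overwrites mask_rez with the ratio-histogram value of
       every pixel's (H, S) pair, producing the hand-likelihood map that is
       verified against the reference mask below */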

/*  read verify final image mask                  */
    final_mask_ver = atsCreateImageFromFile( name_final_mask_very );
    if(final_mask_ver == NULL)   {code = TRS_FAIL; goto m_exit;}

    rez+= iplNorm(final_mask_ver, mask_rez, IPL_L2) / (width*height+0.);

    trsWrite( ATS_CON | ATS_SUM, "\n gesture recognition \n");
    trsWrite( ATS_CON | ATS_SUM, "result testing error = %f \n",rez);

    if(rez > eps_rez) code = TRS_FAIL;
    else code = TRS_OK;
    
m_exit:    

    cvReleaseImage(&image_mask);
    cvReleaseImage(&mask_rez);
    cvReleaseImage(&image_rez);
    atsReleaseImage(final_mask_ver);
    atsReleaseImage(init_mask_ver);

    cvReleaseImage(&imagew);
    cvReleaseImage(&image_maskw); 

    cvReleaseImage(&img_p[0]);
    cvReleaseImage(&img_p[1]);
    cvReleaseImage(&img_v);
 
    cvReleaseHist( &hist);
    cvReleaseHist( &hist_mask);
    
    cvReleaseMemStorage ( &storage );

    trsFree(pp);
    if( thresh[0] ) trsFree(thresh[0]);
    if( thresh[1] ) trsFree(thresh[1]);
    trsFree(name_final_mask_very);
    trsFree(name_init_mask_very);
    trsFree(name_image);
    trsFree(name_range_image);
    trsFree(name_verify_data);

    if( filin ) fclose(filin);
    if( fil_ver ) fclose(fil_ver);

    
/*    _getch();       */
    return code;
}
Example #30
int main( int argc, char** argv )
{
    CvMemStorage* storage = 0;
    CvHaarClassifierCascade* cascade = 0;
    CvCapture* capture = 0;
    IplImage *frame, *frame_copy = 0;
    const char* cascade_name = "E:\\opencv\\data\\haarcascades\\haarcascade_frontalface_alt2.xml";
 
    cascade = (CvHaarClassifierCascade*)cvLoad( cascade_name, 0, 0, 0 );

    /* cascade_name points to a string literal, so there is nothing to free */
    cascade_name = NULL;
 
    if( !cascade )
    {
        fprintf( stderr, "ERROR: Could not load classifier cascade\n" );
        return -1;
    }

    storage = cvCreateMemStorage(0);
    capture = cvCaptureFromCAM( 0 );

    cvNamedWindow( "result", 1 );
 
    if( capture )
    {
        for(;;)
        {
            if( !cvGrabFrame( capture ))
                break;

            frame = cvRetrieveFrame( capture );
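            /* cvRetrieveFrame returns a pointer to an internal buffer that
               must not be modified or released, hence the deep copy below */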

            if( !frame )
                break;
            if( !frame_copy )
                frame_copy = cvCreateImage( cvSize(frame->width,frame->height),
                                            IPL_DEPTH_8U, frame->nChannels );
            if( frame->origin == IPL_ORIGIN_TL )
                cvCopy( frame, frame_copy, 0 );
            else
                cvFlip( frame, frame_copy, 0 );
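            /* the copy/flip above ensures the detector always gets an
               upright, top-left-origin image it may draw on freely */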

            detect( frame_copy, storage, cascade);
 
            if( cvWaitKey( 10 ) >= 0 )
                break;
        }

        cvReleaseImage( &frame_copy );
        cvReleaseCapture( &capture );
    }
    else
    {
        fprintf( stderr, "ERROR: Could not find a camera!\n" );
    }

    /* release the cascade and storage even when no camera was found */
    cvReleaseHaarClassifierCascade( &cascade );
    cvReleaseMemStorage( &storage );

    cvDestroyWindow("result");
 
    return 0;
}
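
The detect() helper called inside the grab loop is not defined in this example. As a rough sketch of what such a helper usually does with the OpenCV 1.x C API (run the cascade over the frame, draw the hits, and show them in the "result" window), it might look like the code below; the scale factor, neighbor count, pruning flag, and minimum window size are illustrative assumptions, not values taken from the original.

void detect( IplImage* img, CvMemStorage* storage, CvHaarClassifierCascade* cascade )
{
    int i;

    /* reuse the storage from main; previous detections are discarded */
    cvClearMemStorage( storage );

    /* run the classifier over the whole frame (parameters are assumed) */
    CvSeq* faces = cvHaarDetectObjects( img, cascade, storage,
                                        1.1, 2, CV_HAAR_DO_CANNY_PRUNING,
                                        cvSize(30, 30) );

    /* draw a rectangle around every detection */
    for( i = 0; i < (faces ? faces->total : 0); i++ )
    {
        CvRect* r = (CvRect*)cvGetSeqElem( faces, i );
        cvRectangle( img, cvPoint( r->x, r->y ),
                     cvPoint( r->x + r->width, r->y + r->height ),
                     CV_RGB(255,0,0), 3, 8, 0 );
    }

    /* "result" is the window created in main */
    cvShowImage( "result", img );
}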