Example #1
LLSD LLModel::Decomposition::asLLSD() const
{
	LLSD ret;
	
	if (mBaseHull.empty() && mHull.empty())
	{ //nothing to write
		return ret;
	}

	//write decomposition block
	// ["physics_convex"]["HullList"] -- list of 8 bit integers, each entry represents a hull with specified number of points
	// ["physics_convex"]["Position"] -- list of 16-bit integers to be decoded to given domain, encoded 3D points
	// ["physics_convex"]["BoundingVerts"] -- list of 16-bit integers to be decoded to given domain, encoded 3D points representing a single hull approximation of given shape
	
	//get minimum and maximum
	LLVector3 min;
	
	if (mHull.empty())
	{  
		min = mBaseHull[0];
	}
	else
	{
		min = mHull[0][0];
	}

	LLVector3 max = min;

	LLSD::Binary hulls(mHull.size());

	U32 total = 0;

	for (U32 i = 0; i < mHull.size(); ++i)
	{
		U32 size = mHull[i].size();
		total += size;
		hulls[i] = (U8) (size);

		for (U32 j = 0; j < mHull[i].size(); ++j)
		{
			update_min_max(min, max, mHull[i][j]);
		}
	}

	for (U32 i = 0; i < mBaseHull.size(); ++i)
	{
		update_min_max(min, max, mBaseHull[i]);	
	}

	ret["Min"] = min.getValue();
	ret["Max"] = max.getValue();

	LLVector3 range = max-min;

	if (!hulls.empty())
	{
		ret["HullList"] = hulls;
	}

	if (total > 0)
	{
		LLSD::Binary p(total*3*2);

		U32 vert_idx = 0;
		
		for (U32 i = 0; i < mHull.size(); ++i)
		{
			std::set<U64> valid;

			llassert(!mHull[i].empty());

			for (U32 j = 0; j < mHull[i].size(); ++j)
			{
				U64 test = 0;
				const F32* src = mHull[i][j].mV;

				for (U32 k = 0; k < 3; k++)
				{
					//convert to 16-bit normalized across domain
					U16 val = (U16) (((src[k]-min.mV[k])/range.mV[k])*65535);

					if(valid.size() < 3)
					{
						switch (k)
						{
							case 0: test = test | (U64) val; break;
							case 1: test = test | ((U64) val << 16); break;
							case 2: test = test | ((U64) val << 32); break;
						};

						valid.insert(test);
					}
					
					U8* buff = (U8*) &val;
					//write to binary buffer
					p[vert_idx++] = buff[0];
					p[vert_idx++] = buff[1];

					//makes sure we haven't run off the end of the array
					llassert(vert_idx <= p.size());
				}
			}

			//must have at least 3 unique points
			llassert(valid.size() > 2);
		}

		ret["Positions"] = p;
	}

	//llassert(!mBaseHull.empty());

	if (!mBaseHull.empty())
	{
		LLSD::Binary p(mBaseHull.size()*3*2);

		U32 vert_idx = 0;
		for (U32 j = 0; j < mBaseHull.size(); ++j)
		{
			const F32* v = mBaseHull[j].mV;

			for (U32 k = 0; k < 3; k++)
			{
				//convert to 16-bit normalized across domain
				U16 val = (U16) (((v[k]-min.mV[k])/range.mV[k])*65535);

				U8* buff = (U8*) &val;
				//write to binary buffer
				p[vert_idx++] = buff[0];
				p[vert_idx++] = buff[1];

				if (vert_idx > p.size())
				{
					LL_ERRS() << "Index out of bounds" << LL_ENDL;
				}
			}
		}
		
		ret["BoundingVerts"] = p;
	}

	return ret;
}
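For reference, the quantization above can be inverted when the block is read back: each stored 16-bit component is rescaled from [0, 65535] into the [Min, Max] domain written alongside it. A minimal decoding sketch, using plain C++ types instead of the viewer's U16/F32 typedefs (decode_component is a hypothetical helper, not part of the original source):

#include <cstdint>

// Hypothetical helper: inverts the 16-bit normalization used in asLLSD() above.
// 'val' is one stored component; domain_min/domain_max come from ret["Min"]/ret["Max"].
static float decode_component(uint16_t val, float domain_min, float domain_max)
{
	float range = domain_max - domain_min;
	// 0 maps back to domain_min, 65535 maps back to domain_max
	return domain_min + (static_cast<float>(val) / 65535.0f) * range;
}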
Example #2
void RecognitionDemos( Mat& full_image, Mat& template1, Mat& template2, Mat& template1locations, Mat& template2locations, VideoCapture& bicycle_video, Mat& bicycle_background, Mat& bicycle_model, VideoCapture& people_video, CascadeClassifier& cascade, Mat& numbers, Mat& good_orings, Mat& bad_orings, Mat& unknown_orings )
{
	Timestamper* timer = new Timestamper();

	// Principal Components Analysis
	PCASimpleExample();
    char ch = cvWaitKey();
	cvDestroyAllWindows();

	PCAFaceRecognition();
    ch = cvWaitKey();
	cvDestroyAllWindows();

	// Statistical Pattern Recognition
	Mat gray_numbers,binary_numbers;
	cvtColor(numbers, gray_numbers, CV_BGR2GRAY);
	threshold(gray_numbers,binary_numbers,128,255,THRESH_BINARY_INV);
    vector<vector<Point>> contours;
	vector<Vec4i> hierarchy;
	findContours(binary_numbers,contours,hierarchy,CV_RETR_TREE,CV_CHAIN_APPROX_NONE);
	Mat contours_image = Mat::zeros(binary_numbers.size(), CV_8UC3);
	contours_image = Scalar(255,255,255);
	// Do some processing on all contours (objects and holes!)
	vector<RotatedRect> min_bounding_rectangle(contours.size());
	vector<vector<Point>> hulls(contours.size());
	vector<vector<int>> hull_indices(contours.size());
	vector<vector<Vec4i>> convexity_defects(contours.size());
	vector<Moments> contour_moments(contours.size());
	for (int contour_number=0; (contour_number<(int)contours.size()); contour_number++)
	{
		if (contours[contour_number].size() > 10)
		{
			min_bounding_rectangle[contour_number] = minAreaRect(contours[contour_number]);
			convexHull(contours[contour_number], hulls[contour_number]);
			convexHull(contours[contour_number], hull_indices[contour_number]);
			convexityDefects( contours[contour_number], hull_indices[contour_number], convexity_defects[contour_number]);
			contour_moments[contour_number] = moments( contours[contour_number] );
		}
	}
	for (int contour_number=0; (contour_number>=0); contour_number=hierarchy[contour_number][0])
	{
		if (contours[contour_number].size() > 10)
		{
        Scalar colour( rand()&0x7F, rand()&0x7F, rand()&0x7F );
        drawContours( contours_image, contours, contour_number, colour, CV_FILLED, 8, hierarchy );
		char output[500];
		double area = contourArea(contours[contour_number])+contours[contour_number].size()/2+1;
		// Process any holes (removing their area from the area of the enclosing contour)
		for (int hole_number=hierarchy[contour_number][2]; (hole_number>=0); hole_number=hierarchy[hole_number][0])
		{
			area -= (contourArea(contours[hole_number])-contours[hole_number].size()/2+1);
			Scalar colour( rand()&0x7F, rand()&0x7F, rand()&0x7F );
 			drawContours( contours_image, contours, hole_number, colour, CV_FILLED, 8, hierarchy );
			sprintf(output,"Area=%.0f", contourArea(contours[hole_number])-contours[hole_number].size()/2+1);
			Point location( contours[hole_number][0].x +20, contours[hole_number][0].y +5 );
			putText( contours_image, output, location, FONT_HERSHEY_SIMPLEX, 0.4, colour );
		}
		// Draw the minimum bounding rectangle
		Point2f bounding_rect_points[4];
		min_bounding_rectangle[contour_number].points(bounding_rect_points);
		line( contours_image, bounding_rect_points[0], bounding_rect_points[1], Scalar(0, 0, 127));
		line( contours_image, bounding_rect_points[1], bounding_rect_points[2], Scalar(0, 0, 127));
		line( contours_image, bounding_rect_points[2], bounding_rect_points[3], Scalar(0, 0, 127));
		line( contours_image, bounding_rect_points[3], bounding_rect_points[0], Scalar(0, 0, 127));
		float bounding_rectangle_area = min_bounding_rectangle[contour_number].size.area();
		// Draw the convex hull
        drawContours( contours_image, hulls, contour_number, Scalar(127,0,127) );
		// Highlight any convexities
		int largest_convexity_depth=0;
		for (int convexity_index=0; convexity_index < (int)convexity_defects[contour_number].size(); convexity_index++)
		{
			if (convexity_defects[contour_number][convexity_index][3] > largest_convexity_depth)
				largest_convexity_depth = convexity_defects[contour_number][convexity_index][3];
			if (convexity_defects[contour_number][convexity_index][3] > 256*2)
			{
				line( contours_image, contours[contour_number][convexity_defects[contour_number][convexity_index][0]], contours[contour_number][convexity_defects[contour_number][convexity_index][2]], Scalar(0,0, 255));
				line( contours_image, contours[contour_number][convexity_defects[contour_number][convexity_index][1]], contours[contour_number][convexity_defects[contour_number][convexity_index][2]], Scalar(0,0, 255));
			}
		}
		double hu_moments[7];
		HuMoments( contour_moments[contour_number], hu_moments );
		sprintf(output,"Perimeter=%d, Area=%.0f, BArea=%.0f, CArea=%.0f", contours[contour_number].size(),area,min_bounding_rectangle[contour_number].size.area(),contourArea(hulls[contour_number]));
		Point location( contours[contour_number][0].x, contours[contour_number][0].y-3 );
		putText( contours_image, output, location, FONT_HERSHEY_SIMPLEX, 0.4, colour );
		sprintf(output,"HuMoments = %.2f, %.2f, %.2f", hu_moments[0],hu_moments[1],hu_moments[2]);
		Point location2( contours[contour_number][0].x+100, contours[contour_number][0].y-3+15 );
		putText( contours_image, output, location2, FONT_HERSHEY_SIMPLEX, 0.4, colour );
		}
	}
	imshow("Shape Statistics", contours_image );
	char c = cvWaitKey();
	cvDestroyAllWindows();

	// Support Vector Machine
	imshow("Good - original",good_orings);
	imshow("Defective - original",bad_orings);
	imshow("Unknown - original",unknown_orings);
	SupportVectorMachineDemo(good_orings,"Good",bad_orings,"Defective",unknown_orings);
	c = cvWaitKey();
	cvDestroyAllWindows();

	// Template Matching
	Mat display_image, correlation_image;
	full_image.copyTo( display_image );
	double min_correlation, max_correlation;
	Mat matched_template_map;
	int result_columns =  full_image.cols - template1.cols + 1;
	int result_rows = full_image.rows - template1.rows + 1;
	correlation_image.create( result_rows, result_columns, CV_32FC1 );
	timer->reset();
	double before_tick_count = static_cast<double>(getTickCount());
	matchTemplate( full_image, template1, correlation_image, CV_TM_CCORR_NORMED );
	double after_tick_count = static_cast<double>(getTickCount());
	double duration_in_ms = 1000.0*(after_tick_count-before_tick_count)/getTickFrequency();
	minMaxLoc( correlation_image, &min_correlation, &max_correlation );
	FindLocalMaxima( correlation_image, matched_template_map, max_correlation*0.99 );
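	// FindLocalMaxima (helper not shown in this listing) presumably marks, in matched_template_map,
	// the correlation peaks that are local maxima above the given threshold (here 99% of the global maximum).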
	timer->recordTime("Template Matching (1)");
	Mat matched_template_display1;
	cvtColor(matched_template_map, matched_template_display1, CV_GRAY2BGR);
	Mat correlation_window1 = convert_32bit_image_for_display( correlation_image, 0.0 );
	DrawMatchingTemplateRectangles( display_image, matched_template_map, template1, Scalar(0,0,255) );
	double precision, recall, accuracy, specificity, f1;
	Mat template1locations_gray;
	cvtColor(template1locations, template1locations_gray, CV_BGR2GRAY);
	CompareRecognitionResults( matched_template_map, template1locations_gray, precision, recall, accuracy, specificity, f1 );
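	// Assumed definitions behind CompareRecognitionResults (the helper is not shown in this listing);
	// with TP/FP/TN/FN counted by comparing matched_template_map against the ground-truth locations:
	//   precision   = TP / (TP + FP)
	//   recall      = TP / (TP + FN)
	//   accuracy    = (TP + TN) / (TP + TN + FP + FN)
	//   specificity = TN / (TN + FP)
	//   f1          = 2 * precision * recall / (precision + recall)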
	char results[400];
	Scalar colour( 255, 255, 255);
	sprintf( results, "precision=%.2f", precision);
	Point location( 7, 213 );
	putText( display_image, "Results (1)", location, FONT_HERSHEY_SIMPLEX, 0.4, colour );
	location.y += 13;
	putText( display_image, results, location, FONT_HERSHEY_SIMPLEX, 0.4, colour );
	sprintf( results, "recall=%.2f", recall);
	location.y += 13;
	putText( display_image, results, location, FONT_HERSHEY_SIMPLEX, 0.4, colour );
	sprintf( results, "accuracy=%.2f", accuracy);
	location.y += 13;
	putText( display_image, results, location, FONT_HERSHEY_SIMPLEX, 0.4, colour );
	sprintf( results, "specificity=%.2f", specificity);
	location.y += 13;
	putText( display_image, results, location, FONT_HERSHEY_SIMPLEX, 0.4, colour );
	sprintf( results, "f1=%.2f", f1);
	location.y += 13;
	putText( display_image, results, location, FONT_HERSHEY_SIMPLEX, 0.4, colour );
  
	result_columns =  full_image.cols - template2.cols + 1;
	result_rows = full_image.rows - template2.rows + 1;
	correlation_image.create( result_rows, result_columns, CV_32FC1 );
	timer->ignoreTimeSinceLastRecorded();
	matchTemplate( full_image, template2, correlation_image, CV_TM_CCORR_NORMED );
	minMaxLoc( correlation_image, &min_correlation, &max_correlation );
	FindLocalMaxima( correlation_image, matched_template_map, max_correlation*0.99 );
	timer->recordTime("Template Matching (2)");
	Mat matched_template_display2;
	cvtColor(matched_template_map, matched_template_display2, CV_GRAY2BGR);
	Mat correlation_window2 = convert_32bit_image_for_display( correlation_image, 0.0 );
	DrawMatchingTemplateRectangles( display_image, matched_template_map, template2, Scalar(0,0,255) );
	timer->putTimes(display_image);
	Mat template2locations_gray;
	cvtColor(template2locations, template2locations_gray, CV_BGR2GRAY);
	CompareRecognitionResults( matched_template_map, template2locations_gray, precision, recall, accuracy, specificity, f1 );
	sprintf( results, "precision=%.2f", precision);
	location.x = 123;
	location.y = 213;
	putText( display_image, "Results (2)", location, FONT_HERSHEY_SIMPLEX, 0.4, colour );
	location.y += 13;
	putText( display_image, results, location, FONT_HERSHEY_SIMPLEX, 0.4, colour );
	sprintf( results, "recall=%.2f", recall);
	location.y += 13;
	putText( display_image, results, location, FONT_HERSHEY_SIMPLEX, 0.4, colour );
	sprintf( results, "accuracy=%.2f", accuracy);
	location.y += 13;
	putText( display_image, results, location, FONT_HERSHEY_SIMPLEX, 0.4, colour );
	sprintf( results, "specificity=%.2f", specificity);
	location.y += 13;
	putText( display_image, results, location, FONT_HERSHEY_SIMPLEX, 0.4, colour );
	sprintf( results, "f1=%.2f", f1);
	location.y += 13;
	putText( display_image, results, location, FONT_HERSHEY_SIMPLEX, 0.4, colour );
	Mat correlation_display1, correlation_display2;
	cvtColor(correlation_window1, correlation_display1, CV_GRAY2BGR);
	cvtColor(correlation_window2, correlation_display2, CV_GRAY2BGR);

	Mat output1 = JoinImagesVertically(template1,"Template (1)",correlation_display1,"Correlation (1)",4);
	Mat output2 = JoinImagesVertically(output1,"",matched_template_display1,"Local maxima (1)",4);
	Mat output3 = JoinImagesVertically(template2,"Template (2)",correlation_display2,"Correlation (2)",4);
	Mat output4 = JoinImagesVertically(output3,"",matched_template_display2,"Local maxima (2)",4);
	Mat output5 = JoinImagesHorizontally( full_image, "Original Image", output2, "", 4 );
	Mat output6 = JoinImagesHorizontally( output5, "", output4, "", 4 );
	Mat output7 = JoinImagesHorizontally( output6, "", display_image, "", 4 );
	imshow( "Template matching result", output7 );
	c = cvWaitKey();
	cvDestroyAllWindows();

	// Chamfer Matching
    Mat model_gray,model_edges,model_edges2;
	cvtColor(bicycle_model, model_gray, CV_BGR2GRAY);
	threshold(model_gray,model_edges,127,255,THRESH_BINARY);
	Mat current_frame;
	bicycle_video.set(CV_CAP_PROP_POS_FRAMES,400);  // Just in case the video has already been used.
	bicycle_video >> current_frame;
	bicycle_background = current_frame.clone();
	bicycle_video.set(CV_CAP_PROP_POS_FRAMES,500); 
	timer->reset();
	int count = 0;
	while (!current_frame.empty() && (count < 8))
    {
		Mat result_image = current_frame.clone();
		count++;
		Mat difference_frame, difference_gray, current_edges;
		absdiff(current_frame,bicycle_background,difference_frame);
		cvtColor(difference_frame, difference_gray, CV_BGR2GRAY);
		Canny(difference_frame, current_edges, 100, 200, 3);

		vector<vector<Point> > results;
		vector<float> costs;
		threshold(model_gray,model_edges,127,255,THRESH_BINARY);
		Mat matching_image, chamfer_image, local_minima;
		timer->ignoreTimeSinceLastRecorded();
		threshold(current_edges,current_edges,127,255,THRESH_BINARY_INV);
		distanceTransform( current_edges, chamfer_image, CV_DIST_L2 , 3);
		timer->recordTime("Chamfer Image");
		ChamferMatching( chamfer_image, model_edges, matching_image );
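		// ChamferMatching (helper not shown in this listing) presumably slides model_edges over the
		// chamfer (distance-transform) image and, at each offset, sums the distances under the model's
		// edge pixels, so low values in matching_image indicate good fits.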
		timer->recordTime("Matching");
		FindLocalMinima( matching_image, local_minima, 500.0 );
		timer->recordTime("Find Minima");
		DrawMatchingTemplateRectangles( result_image, local_minima, model_edges, Scalar( 255, 0, 0 ) );
		Mat chamfer_display_image = convert_32bit_image_for_display( chamfer_image );
		Mat matching_display_image = convert_32bit_image_for_display( matching_image );
		//timer->putTimes(result_image);
		Mat current_edges_display, local_minima_display, model_edges_display, colour_matching_display_image, colour_chamfer_display_image;
		cvtColor(current_edges, current_edges_display, CV_GRAY2BGR);
		cvtColor(local_minima, local_minima_display, CV_GRAY2BGR);
		cvtColor(model_edges, model_edges_display, CV_GRAY2BGR);
		cvtColor(matching_display_image, colour_matching_display_image, CV_GRAY2BGR);
		cvtColor(chamfer_display_image, colour_chamfer_display_image, CV_GRAY2BGR);

		Mat output1 = JoinImagesVertically(current_frame,"Video Input",current_edges_display,"Edges from difference", 4);
		Mat output2 = JoinImagesVertically(output1,"",model_edges_display,"Model", 4);
		Mat output3 = JoinImagesVertically(bicycle_background,"Static Background",colour_chamfer_display_image,"Chamfer image", 4);
		Mat output4 = JoinImagesVertically(output3,"",colour_matching_display_image,"Degree of fit", 4);
		Mat output5 = JoinImagesVertically(difference_frame,"Difference",result_image,"Result", 4);
		Mat output6 = JoinImagesVertically(output5,"",local_minima_display,"Local minima", 4);
		Mat output7 = JoinImagesHorizontally( output2, "", output4, "", 4 );
		Mat output8 = JoinImagesHorizontally( output7, "", output6, "", 4 );
		imshow("Chamfer matching", output8);
		c = waitKey(1000);  // This makes the image appear on screen
		bicycle_video >> current_frame;
	}
	c = cvWaitKey();
	cvDestroyAllWindows();

	// Cascade of Haar classifiers (most often shown for face detection).
    VideoCapture camera;
	camera.open(1);
	camera.set(CV_CAP_PROP_FRAME_WIDTH, 320);
	camera.set(CV_CAP_PROP_FRAME_HEIGHT, 240);
    if( camera.isOpened() )
	{
		timer->reset();
		Mat current_frame;
		do {
			camera >> current_frame;
			if( current_frame.empty() )
				break;
			vector<Rect> faces;
			timer->ignoreTimeSinceLastRecorded();
			Mat gray;
			cvtColor( current_frame, gray, CV_BGR2GRAY );
			equalizeHist( gray, gray );
			cascade.detectMultiScale( gray, faces, 1.1, 2, CV_HAAR_SCALE_IMAGE, Size(30, 30) );
			timer->recordTime("Haar Classifier");
			for( int count = 0; count < (int)faces.size(); count++ )
				rectangle(current_frame, faces[count], cv::Scalar(255,0,0), 2);
			//timer->putTimes(current_frame);
			imshow( "Cascade of Haar Classifiers", current_frame );
			c = waitKey(10);  // This makes the image appear on screen
        } while (c == -1);
	}
}
Example #3
Mat CameraInteraction::Testmm(Mat frame){




            vector<vector<Point> > contours;


            //Update the current background model and get the foreground
            if(backgroundFrame>0)
            { bg(frame,fore); backgroundFrame--; }
            else
            { bg(frame,fore,0); }
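            //NOTE: bg, fore, back, backgroundFrame and palm_centers are class members not shown in this
            //listing; bg appears to be a cv::BackgroundSubtractorMOG2 (its operator() and getBackgroundImage
            //calls match that interface), and a learning rate of 0 freezes the model once the initial
            //background frames have been captured.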

            //Get background image to display it
            bg.getBackgroundImage(back);


            //Enhance edges in the foreground by applying erosion and dilation
            erode(fore,fore,Mat());
            dilate(fore,fore,Mat());


            //Find the contours in the foreground
            findContours(fore,contours,CV_RETR_EXTERNAL,CV_CHAIN_APPROX_NONE);
            for(int i=0;i<contours.size();i++)
                //Ignore all small insignificant areas
                if(contourArea(contours[i])>=5000)
                {
                    //Draw contour
                    vector<vector<Point> > tcontours;
                    tcontours.push_back(contours[i]);
                    drawContours(frame,tcontours,-1,cv::Scalar(0,0,255),2);

                    //Detect Hull in current contour
                    vector<vector<Point> > hulls(1);
                    vector<vector<int> > hullsI(1);
                    convexHull(Mat(tcontours[0]),hulls[0],false);
                    convexHull(Mat(tcontours[0]),hullsI[0],false);
                    drawContours(frame,hulls,-1,cv::Scalar(0,255,0),2);

                    //Find minimum area rectangle to enclose hand
                    RotatedRect rect=minAreaRect(Mat(tcontours[0]));

                    //Find Convex Defects
                    vector<Vec4i> defects;
                    if(hullsI[0].size()>0)
                    {
                        Point2f rect_points[4]; rect.points( rect_points );
                        for( int j = 0; j < 4; j++ )
                            line( frame, rect_points[j], rect_points[(j+1)%4], Scalar(255,0,0), 1, 8 );
                        Point rough_palm_center;
                        convexityDefects(tcontours[0], hullsI[0], defects);
                        if(defects.size()>=3)
                        {
                            vector<Point> palm_points;
                            for(int j=0;j<defects.size();j++)
                            {
                                int startidx=defects[j][0]; Point ptStart( tcontours[0][startidx] );
                                int endidx=defects[j][1]; Point ptEnd( tcontours[0][endidx] );
                                int faridx=defects[j][2]; Point ptFar( tcontours[0][faridx] );
                                //Sum up all the hull and defect points to compute average
                                rough_palm_center+=ptFar+ptStart+ptEnd;
                                palm_points.push_back(ptFar);
                                palm_points.push_back(ptStart);
                                palm_points.push_back(ptEnd);
                            }

                            //Get the palm center by first averaging all the defect points; this is the rough palm center.
                            //Then choose the 3 closest points and take the center and radius of the circle they define, which is the palm center
                            //(a sketch of the circleFromPoints and dist helpers follows this listing).
                            rough_palm_center.x/=defects.size()*3;
                            rough_palm_center.y/=defects.size()*3;
                            Point closest_pt=palm_points[0];
                            vector<pair<double,int> > distvec;
                            for(int i=0;i<palm_points.size();i++)
                                distvec.push_back(make_pair(dist(rough_palm_center,palm_points[i]),i));
                            sort(distvec.begin(),distvec.end());

                            //Keep choosing 3 points till you find a circle with a valid radius,
                            //as there is a high chance that the closest points are collinear or so close together that they form a very large circle
                            pair<Point,double> soln_circle;
                            for(int i=0;i+2<distvec.size();i++)
                            {
                                Point p1=palm_points[distvec[i+0].second];
                                Point p2=palm_points[distvec[i+1].second];
                                Point p3=palm_points[distvec[i+2].second];
                                soln_circle=circleFromPoints(p1,p2,p3);//Final palm center,radius
                                if(soln_circle.second!=0)
                                    break;
                            }

                            //Find avg palm centers for the last few frames to stabilize its centers, also find the avg radius
                            palm_centers.push_back(soln_circle);
                            if(palm_centers.size()>10)
                                palm_centers.erase(palm_centers.begin());

                            Point palm_center;
                            double radius=0;
                            for(int i=0;i<palm_centers.size();i++)
                            {
                                palm_center+=palm_centers[i].first;
                                radius+=palm_centers[i].second;
                            }
                            palm_center.x/=palm_centers.size();
                            palm_center.y/=palm_centers.size();
                            radius/=palm_centers.size();

                            //Draw the palm center and the palm circle
                            //The size of the palm gives the depth of the hand
                            circle(frame,palm_center,5,Scalar(144,144,255),3);
                            circle(frame,palm_center,radius,Scalar(144,144,255),2);

                            //Detect fingers by finding points that form an almost isosceles triangle with certain thresholds
                            int no_of_fingers=0;
                            for(int j=0;j<defects.size();j++)
                            {
                                int startidx=defects[j][0]; Point ptStart( tcontours[0][startidx] );
                                int endidx=defects[j][1]; Point ptEnd( tcontours[0][endidx] );
                                int faridx=defects[j][2]; Point ptFar( tcontours[0][faridx] );
                                //X o--------------------------o Y
                                double Xdist=sqrt(dist(palm_center,ptFar));
                                double Ydist=sqrt(dist(palm_center,ptStart));
                                double length=sqrt(dist(ptFar,ptStart));

                                double retLength=sqrt(dist(ptEnd,ptFar));
                                //Play with these thresholds to improve performance
                                if(length<=3*radius&&Ydist>=0.4*radius&&length>=10&&retLength>=10&&max(length,retLength)/min(length,retLength)>=0.8)
                                    if(min(Xdist,Ydist)/max(Xdist,Ydist)<=0.8)
                                    {
                                        if((Xdist>=0.1*radius&&Xdist<=1.3*radius&&Xdist<Ydist)||(Ydist>=0.1*radius&&Ydist<=1.3*radius&&Xdist>Ydist))
                                            line( frame, ptEnd, ptFar, Scalar(0,255,0), 1 ),no_of_fingers++;
                                    }


                            }

                            no_of_fingers=min(5,no_of_fingers);
                            qDebug()<<"NO OF FINGERS: "<<no_of_fingers;
                            //mouseTo(palm_center.x,palm_center.y);//Move the cursor corresponding to the palm
                            if(no_of_fingers<4)//If no of fingers is <4 , click , else release
//                                mouseClick();
                                qDebug()<<"Test";
                            else
//                                mouseRelease();
                                qDebug()<<"Hola";

                        }
                    }

                }
            if(backgroundFrame>0)
                putText(frame, "Recording Background", cvPoint(30,30), FONT_HERSHEY_COMPLEX_SMALL, 0.8, cvScalar(200,200,250), 1, CV_AA);
//            imshow("Framekj",frame);
//            imshow("Background",back);
return frame;

}
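The example above relies on two helpers that are not included in the listing, dist() and circleFromPoints(). A plausible sketch consistent with how they are called (dist returning the squared distance, since callers wrap it in sqrt(), and circleFromPoints returning a center/radius pair with radius 0 for degenerate input) is given below; treat it as an assumption rather than the original code:

#include <cmath>
#include <utility>
#include <opencv2/core/core.hpp>

// Hypothetical helpers assumed by Testmm() above (not part of the original listing).

// Squared Euclidean distance -- callers apply sqrt() to obtain the actual distance.
double dist(cv::Point a, cv::Point b)
{
    return (double)(a.x - b.x) * (a.x - b.x) + (double)(a.y - b.y) * (a.y - b.y);
}

// Circumcircle (center, radius) of three points; a radius of 0 signals nearly collinear
// input, which the caller uses to reject the candidate triple.
std::pair<cv::Point, double> circleFromPoints(cv::Point p1, cv::Point p2, cv::Point p3)
{
    double offset = p2.x * p2.x + p2.y * p2.y;
    double bc = (p1.x * p1.x + p1.y * p1.y - offset) / 2.0;
    double cd = (offset - p3.x * p3.x - p3.y * p3.y) / 2.0;
    double det = (p1.x - p2.x) * (p2.y - p3.y) - (p2.x - p3.x) * (p1.y - p2.y);
    if (std::fabs(det) < 1e-9)
        return std::make_pair(cv::Point(0, 0), 0.0);
    double cx = (bc * (p2.y - p3.y) - cd * (p1.y - p2.y)) / det;
    double cy = (cd * (p1.x - p2.x) - bc * (p2.x - p3.x)) / det;
    double radius = std::sqrt((cx - p1.x) * (cx - p1.x) + (cy - p1.y) * (cy - p1.y));
    return std::make_pair(cv::Point((int)cx, (int)cy), radius);
}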
Example #4
void SupportVectorMachineDemo(Mat& class1_samples, char* class1_name, Mat& class2_samples, char* class2_name, Mat& unknown_samples)
{
    float labels[MAX_SAMPLES];
    float training_data[MAX_SAMPLES][2];
	CvSVM SVM;

    // Image for visual representation of (2-D) feature space
    int width = MAX_FEATURE_VALUE+1, height = MAX_FEATURE_VALUE+1;
    Mat feature_space = Mat::zeros(height, width, CV_8UC3);

	int number_of_samples = 0;
	// Loops three times:
	//  1st time - extracts feature values for class 1
	//  2nd time - extracts feature values for class 2 AND trains SVM
	//  3rd time - extracts feature values for unknowns AND predicts their classes using SVM
	for (int current_class = 1; current_class<=UNKNOWN_CLASS; current_class++)
	{
		Mat gray_image,binary_image;
		if (current_class == 1)
			cvtColor(class1_samples, gray_image, CV_BGR2GRAY);
		else if (current_class == 2)
			cvtColor(class2_samples, gray_image, CV_BGR2GRAY);
		else cvtColor(unknown_samples, gray_image, CV_BGR2GRAY);        
		threshold(gray_image,binary_image,128,255,THRESH_BINARY_INV);

	    vector<vector<Point>> contours;
		vector<Vec4i> hierarchy;
		findContours(binary_image,contours,hierarchy,CV_RETR_TREE,CV_CHAIN_APPROX_NONE);
		Mat contours_image = Mat::zeros(binary_image.size(), CV_8UC3);
		contours_image = Scalar(255,255,255);
		// Do some processing on all contours (objects and holes!)
		vector<vector<Point>> hulls(contours.size());
		vector<vector<int>> hull_indices(contours.size());
		vector<vector<Vec4i>> convexity_defects(contours.size());
		vector<Moments> contour_moments(contours.size());
		for (int contour_number=0; (contour_number>=0); contour_number=hierarchy[contour_number][0])
		{
			if (contours[contour_number].size() > 10)
			{
				convexHull(contours[contour_number], hulls[contour_number]);
				convexHull(contours[contour_number], hull_indices[contour_number]);
				convexityDefects( contours[contour_number], hull_indices[contour_number], convexity_defects[contour_number]);
				contour_moments[contour_number] = moments( contours[contour_number] );
				// Draw the shape and features
				Scalar colour( rand()&0x7F, rand()&0x7F, rand()&0x7F );
				drawContours( contours_image, contours, contour_number, colour, CV_FILLED, 8, hierarchy );
				char output[500];
				double area = contourArea(contours[contour_number])+contours[contour_number].size()/2+1;
				// Draw the convex hull
				drawContours( contours_image, hulls, contour_number, Scalar(127,0,127) );
				// Highlight any convexities
				int largest_convexity_depth=0;
				for (int convexity_index=0; convexity_index < (int)convexity_defects[contour_number].size(); convexity_index++)
				{
					if (convexity_defects[contour_number][convexity_index][3] > largest_convexity_depth)
						largest_convexity_depth = convexity_defects[contour_number][convexity_index][3];
					if (convexity_defects[contour_number][convexity_index][3] > 256*2)
					{
						line( contours_image, contours[contour_number][convexity_defects[contour_number][convexity_index][0]], contours[contour_number][convexity_defects[contour_number][convexity_index][2]], Scalar(0,0, 255));
						line( contours_image, contours[contour_number][convexity_defects[contour_number][convexity_index][1]], contours[contour_number][convexity_defects[contour_number][convexity_index][2]], Scalar(0,0, 255));
					}
				}
				// Compute moments and a measure of the deepest convexity
				double hu_moments[7];
				HuMoments( contour_moments[contour_number], hu_moments );
				double diameter = ((double) contours[contour_number].size())/PI;
				double convexity_depth = ((double) largest_convexity_depth)/256.0;
				double convex_measure = convexity_depth/diameter;
				int class_id = current_class;
				float feature[2] = { (float) convex_measure*((float) MAX_FEATURE_VALUE), (float) hu_moments[0]*((float) MAX_FEATURE_VALUE) };
				if (feature[0] > ((float) MAX_FEATURE_VALUE)) feature[0] = ((float) MAX_FEATURE_VALUE);
				if (feature[1] > ((float) MAX_FEATURE_VALUE)) feature[1] = ((float) MAX_FEATURE_VALUE);
				if (current_class == UNKNOWN_CLASS)
				{
					// Try to predict the class
					Mat sampleMat = (Mat_<float>(1,2) << feature[0], feature[1]);
					float prediction = SVM.predict(sampleMat);
					class_id = (prediction == 1.0) ? 1 : (prediction == -1.0) ? 2 : 0;
				}
				const char* current_class_name = (class_id==1) ? class1_name : (class_id==2) ? class2_name : "Unknown";

				sprintf(output,"Class=%s, Features %.2f, %.2f", current_class_name, feature[0]/((float) MAX_FEATURE_VALUE), feature[1]/((float) MAX_FEATURE_VALUE));
				Point location( contours[contour_number][0].x-40, contours[contour_number][0].y-3 );
				putText( contours_image, output, location, FONT_HERSHEY_SIMPLEX, 0.4, colour );
				if (current_class == UNKNOWN_CLASS)
				{
				}
				else if (number_of_samples < MAX_SAMPLES)
				{
					labels[number_of_samples] = (float) ((current_class == 1) ? 1.0 : -1.0);
					training_data[number_of_samples][0] = feature[0];
					training_data[number_of_samples][1] = feature[1];
					number_of_samples++;
				}
			}
		}
		if (current_class == 1)
		{
			Mat temp_output = contours_image.clone();
			imshow(class1_name, temp_output );
		}
		else if (current_class == 2)
		{
			Mat temp_output2 = contours_image.clone();
			imshow(class2_name, temp_output2 );

			// Now that features for both classes have been determined, train the SVM
			Mat labelsMat(number_of_samples, 1, CV_32FC1, labels);
			Mat trainingDataMat(number_of_samples, 2, CV_32FC1, training_data);
			// Set up SVM's parameters
			CvSVMParams params;
			params.svm_type    = CvSVM::C_SVC;
			params.kernel_type = CvSVM::POLY;
			params.degree = 1;
			params.term_crit   = cvTermCriteria(CV_TERMCRIT_ITER, 100, 1e-6);
			// Train the SVM
			SVM.train(trainingDataMat, labelsMat, Mat(), Mat(), params);

			// Show the SVM classifier for all possible feature values
			Vec3b green(192,255,192), blue (255,192,192);
			// Show the decision regions given by the SVM
			for (int i = 0; i < feature_space.rows; ++i)
				for (int j = 0; j < feature_space.cols; ++j)
				{
					Mat sampleMat = (Mat_<float>(1,2) << j,i);
					float prediction = SVM.predict(sampleMat);
					if (prediction == 1)
						feature_space.at<Vec3b>(i,j) = green;
					else if (prediction == -1)
					    feature_space.at<Vec3b>(i,j)  = blue;
				}
			// Show the training data (as dark circles)
			for(int sample=0; sample < number_of_samples; sample++)
				if (labels[sample] == 1.0)
					circle( feature_space, Point((int) training_data[sample][0], (int) training_data[sample][1]), 3, Scalar( 0, 128, 0 ), -1, 8);
				else circle( feature_space, Point((int) training_data[sample][0], (int) training_data[sample][1]), 3, Scalar( 128, 0, 0 ), -1, 8);
			// Highlight the support vectors (in red)
			int num_support_vectors = SVM.get_support_vector_count();
			for (int support_vector_index = 0; support_vector_index < num_support_vectors; ++support_vector_index)
			{
				const float* v = SVM.get_support_vector(support_vector_index);
				circle( feature_space,  Point( (int) v[0], (int) v[1]),   3,  Scalar(0, 0, 255));
			}
			imshow("SVM feature space", feature_space);
		}
		else if (current_class == 3)
		{
			imshow("Classification of unknowns", contours_image );
		}
	}
}