Code Example #1
File: characteranalysis.cpp  Project: Witek-/openalpr
vector<bool> CharacterAnalysis::filterBetweenLines(Mat img, vector<vector<Point> > contours, vector<Vec4i> hierarchy, vector<Point> outerPolygon, vector<bool> goodIndices)
{
    static float MIN_AREA_PERCENT_WITHIN_LINES = 0.88;

    vector<bool> includedIndices(contours.size(), false);


    if (outerPolygon.size() == 0)
        return includedIndices;

    vector<Point> validPoints;

    // Figure out the line height
    LineSegment topLine(outerPolygon[0].x, outerPolygon[0].y, outerPolygon[1].x, outerPolygon[1].y);
    LineSegment bottomLine(outerPolygon[3].x, outerPolygon[3].y, outerPolygon[2].x, outerPolygon[2].y);

    float x = ((float) img.cols) / 2;
    Point midpoint = Point(x, bottomLine.getPointAt(x));
    Point acrossFromMidpoint = topLine.closestPointOnSegmentTo(midpoint);
    float lineHeight = distanceBetweenPoints(midpoint, acrossFromMidpoint);

    // Create a white mask for the area inside the polygon
    Mat outerMask = Mat::zeros(img.size(), CV_8U);
    Mat innerArea = Mat::zeros(img.size(), CV_8U);
    fillConvexPoly(outerMask, outerPolygon.data(), outerPolygon.size(), Scalar(255,255,255));


    for (int i = 0; i < contours.size(); i++)
    {
        if (goodIndices[i] == false)
            continue;

        // paint this contour, filled, in white onto the innerArea mask
        drawContours(innerArea, contours,
                     i, // draw this contour
                     cv::Scalar(255,255,255), // in white
                     CV_FILLED,
                     8,
                     hierarchy,
                     0
                    );


        bitwise_and(innerArea, outerMask, innerArea);


        vector<vector<Point> > tempContours;
        findContours(innerArea, tempContours,
                     CV_RETR_EXTERNAL, // retrieve only the external contours
                     CV_CHAIN_APPROX_SIMPLE ); // compress contour segments to their end points

        double totalArea = contourArea(contours[i]);
        double areaBetweenLines = 0;

        for (int tempContourIdx = 0; tempContourIdx < tempContours.size(); tempContourIdx++)
        {
            areaBetweenLines += contourArea(tempContours[tempContourIdx]);

        }


        if (areaBetweenLines / totalArea >= MIN_AREA_PERCENT_WITHIN_LINES)
        {
            includedIndices[i] = true;
        }

        innerArea.setTo(Scalar(0,0,0));
    }

    return includedIndices;
}
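A minimal sketch of how this filter might be driven, assuming a CharacterAnalysis instance named analysis and a binarized plate image thresholded (both hypothetical, as are the corner coordinates); the corner order matches how the function indexes outerPolygon (elements 0 and 1 span the top line, 3 and 2 the bottom line):

vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
findContours(thresholded.clone(), contours, hierarchy,   // clone: findContours modifies its input
             CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE);

vector<Point> outerPolygon;                // illustrative plate-line corners
outerPolygon.push_back(Point(10, 20));     // top-left
outerPolygon.push_back(Point(200, 18));    // top-right
outerPolygon.push_back(Point(202, 60));    // bottom-right
outerPolygon.push_back(Point(12, 62));     // bottom-left

vector<bool> good(contours.size(), true);  // start with every contour marked good
good = analysis.filterBetweenLines(thresholded, contours, hierarchy, outerPolygon, good);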
Code Example #2
int ConvexityClassifier::Convexity_Computing(Mat &segmentedHand) {
    
    Mat out;
    vector<Point> contours,polygon;
    vector<Vec4i> hierarchy;
    vector<vector<Point> > contours_points;
    Scalar color(rand()&255, rand()&255, rand()&255);
    
    //cout << "FIND_CONTOURS_POINTS" << endl;
    /*Looking for Contours Points*/
    findContours( segmentedHand, contours_points, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE );
    
    //cout << "BIGGEST_CONTOURS" << endl;
    /*Convert vector<vector<Point>> to vector<Point> by keeping only the biggest contour*/
    contours = BiggestContour (contours_points);
    
    /*Approximation of Hands Contours by Polygon*/
    //cout << "POLY_APPROX" << endl;
    approxPolyDP(contours,polygon,15,true);
    contours = polygon;
    
    /*Finding the center of palm*/
    //cout << "MIN_AREA_RECT" << endl;
    RotatedRect Palm = minAreaRect(contours);
    float Palm_Radius;
    if( Palm.size.height <= Palm.size.width )
        Palm_Radius = Palm.size.height / 2;
    else
        Palm_Radius = Palm.size.width / 2;
        
    vector<int> index_hull_points(contours_points.size());
    vector<Point> convexityDefects(contours_points.size());
    vector<Point> Concave_points;
    vector<int> Convex_points;
    //cout << "CONVEX_HULL" << endl;
    convexHull(contours,index_hull_points,false,false); //Find the index of Convex points
    
    /*Convexity defects, adapted from the OpenCV C API*/
    vector<Point>& contour = contours;
    vector<Point>& convexDefects = convexityDefects;
    vector<int>& hull = index_hull_points;
    //cout << "FIND_CONVEXITY_DEFECTS" << endl;
    findConvexityDefects(contour,hull,convexDefects);
    
    /*Checking the results*/
    //cout << "ALL Concave points: " << convexDefects.size() << endl;
    //cout << "ALL Convex points: " << hull.size() << endl;
    
    /*Filtering Concave points*/
    //cout << "FILTERING_CONCAVE_POINTS" << endl;
    Concave_points = Filtering_Concave_Point( convexDefects , Palm );
       
    /*Filtering Convex points*/
    //cout << "FILTERING_CONVEX_POINTS" << endl;
    Convex_points = Filtering_Convex_Point( hull , contour , Palm );
    
    //cout << "First Filter Convex points: " << Convex_points.size() << endl;
       
    vector<int> tmp;
    /*Isolating the interesting convex points*/
    //cout << "ISOLATING_CONVEX_POINTS" << endl;
    tmp = Isolating_Convex_Point( Convex_points , contour );
    
    //cout << "Second Filter Convex points: " << tmp.size() << endl;
    
    vector<int> result;
    float min_distance = Palm.center.y - Palm_Radius;
    /*Isolate convex points by the average radius of the palm*/
    //cout << "ISOLATING_BY_AVERAGE" << endl;
    result = Isolating_Convex_Point_byAverage( contour , Concave_points , min_distance , tmp );
    
    //cout << "Convex points: " << result.size() << endl;
    
    //cout << "Concave points: " << Concave_points.size() << endl;
    
    float min_distance2 = Palm.center.y - (Palm_Radius * 2);
    /*Compute result*/
    float result_digital_numbers;
    //cout << "COMPUTE_RESULT" << endl;
    result_digital_numbers = Compute_Result( contour , Concave_points , result , min_distance2 );
    //cout<< "********************************" << endl;
    //cout<< "SIZE: " << segmentedHand.size() << endl;
    //cout<< "********************************" << endl;
    
    /*Drawing Convex of polygon*/
    for(int i = 0; i < contours_points.size() ; i++)
    {
        drawContours( segmentedHand, contours_points, i, color, 1, 8, vector<Vec4i>(), 0, Point() );
    }
    
    /*Display*/
//    imshow("contour",segmentedHand);
  //  waitKey(0);
    
    return result_digital_numbers; // note: the float result is truncated to int here
}
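The snippet above hand-ports the legacy C convexity-defect routine (findConvexityDefects). Since OpenCV 2.4 the C++ API exposes cv::convexityDefects directly; a minimal sketch of the equivalent step, where the 10-pixel depth cut-off is an illustrative choice rather than this project's filter:

#include <opencv2/imgproc/imgproc.hpp>
#include <vector>

// Return the concave (defect) points of a contour via the C++ API.
std::vector<cv::Point> concavePoints(const std::vector<cv::Point>& contour)
{
    std::vector<int> hullIdx;
    cv::convexHull(contour, hullIdx, false);      // hull as indices into contour

    std::vector<cv::Vec4i> defects;               // [start, end, farthest, depth*256]
    if (hullIdx.size() > 3)                       // convexityDefects needs a non-degenerate hull
        cv::convexityDefects(contour, hullIdx, defects);

    std::vector<cv::Point> concave;
    for (size_t d = 0; d < defects.size(); d++) {
        float depth = defects[d][3] / 256.0f;     // depth is stored in fixed point
        if (depth > 10.0f)                        // illustrative cut-off
            concave.push_back(contour[defects[d][2]]);
    }
    return concave;
}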
Code Example #3
void edgeProcessing(Mat src, Mat &dst, Rect roi)
{
    Mat imgGray;
    cvtColor(src, imgGray, CV_BGR2GRAY);
    Mat imgThresh;
    //adaptiveThreshold(imgGray, imgThresh, 255, CV_ADAPTIVE_THRESH_GAUSSIAN_C, 1, 11, 9);
    double otsu_thres = threshold(imgGray, imgThresh, 0, 255, CV_THRESH_BINARY | CV_THRESH_OTSU);
    double highThresh = otsu_thres;
    //	threshold(grayImage1, grayImage, highThresh*0.2, 255, CV_THRESH_BINARY);
    cv::Canny(imgGray, imgThresh, highThresh*0.3, highThresh, 3);
   // imshow("Thresh", imgThresh);
    Mat img_bin = imgThresh(roi).clone();
    vector<vector<Point> > contours;
    vector<Vec4i> hierarchy;
    
    cv::findContours(img_bin, contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));
    /// Draw contours
    int xmax, ymax, xmin, ymin;
    int xleft, xright, ytop, ybottom; // note: ytop tracks the maximum y, ybottom the minimum y
    Mat drawing = Mat::zeros(src(roi).size(), CV_8UC3);
    int width = drawing.cols;
    int height = drawing.rows;
    int x=20,y=20;
    int maxhL=0,imaxL=-1,maxhR=0,imaxR=-1;
    
    for (int i = 0; i< contours.size(); i++)
    {
        int count = contours[i].size();
        if (count>100)
        {
            for (int t = 0; t < count; t++)
            {
                if (t == 0){
                    xleft = xright = contours[i][t].x;
                    ytop = ybottom = contours[i][t].y;
                }
                if (contours[i][t].x > xright){
                    xright = contours[i][t].x;
                }
                if (contours[i][t].x < xleft)
                {
                    xleft = contours[i][t].x;
                }
                if (contours[i][t].y > ytop)
                {
                    ytop = contours[i][t].y;
                }
                if (contours[i][t].y < ybottom)
                {
                    ybottom = contours[i][t].y;
                }
                xmin = xleft; xmax = xright;
                ymin = ybottom; ymax = ytop;
            }
            int w = xmax - xmin, h = ymax - ymin;
            int s = w*h;
            // left-edge candidate: centre x in [width/10, 2*width/5], centre y in the middle half, with minimum area and height
            if (((xmax + xmin) / 2)>width / 10 && ((xmax + xmin) / 2) < width *2/ 5 && (ymax + ymin) / 2<height * 3 / 4 && (ymax + ymin) / 2>height / 4 && s>height&&h>height/4)
            {
                if (h > maxhL)
                {
                    maxhL = h; imaxL = i;
                }
            }
            // right-edge candidate: centre x in the right half, same vertical and size constraints
            if (((xmax + xmin) / 2)>width / 2 && ((xmax + xmin) / 2) < width  && (ymax + ymin) / 2<height * 3 / 4 && (ymax + ymin) / 2>height / 4 && s>height&&h>height / 4)
            {
                if (h > maxhR)
                {
                    maxhR = h; imaxR = i;
                }
            }
        }
    }
    Scalar color = Scalar(0, 0, 255);
    if (imaxL != -1) drawContours(drawing, contours, imaxL, color, 2, 8, hierarchy, 3, Point());
    if (imaxR != -1) drawContours(drawing, contours, imaxR, color, 2, 8, hierarchy, 3, Point());
    
    /// Show in a window
   // namedWindow("Contours", CV_WINDOW_AUTOSIZE);
    imshow("Contours", drawing);
    
    Mat img_bin_gray;
    cvtColor(drawing, img_bin_gray, CV_BGR2GRAY);
    threshold(img_bin_gray, img_bin_gray, 0, 255, CV_THRESH_BINARY | CV_THRESH_OTSU);
    
    
    int erosion_size = 5;
    cv::Mat element = cv::getStructuringElement(MORPH_RECT,
                                                Size(2 * erosion_size + 1, 2 * erosion_size + 1),
                                                Point(erosion_size, erosion_size));
    dilate(img_bin_gray, img_bin_gray, element);
    erode(img_bin_gray, img_bin_gray, element);
    //imshow("thres2", img_bin_gray);
    dst = img_bin_gray;
}
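The Otsu-then-Canny step above is worth isolating: threshold() with CV_THRESH_OTSU returns the threshold it computed even when the binarized image is discarded, and that value can seed Canny's hysteresis thresholds. A minimal sketch of just that idea (the 0.3 low/high ratio is the one this snippet uses):

#include <opencv2/imgproc/imgproc.hpp>

// Derive Canny hysteresis thresholds from Otsu's global threshold.
cv::Mat edgesFromOtsu(const cv::Mat& gray)
{
    cv::Mat discard, edges;
    double high = cv::threshold(gray, discard, 0, 255,
                                cv::THRESH_BINARY | cv::THRESH_OTSU);
    cv::Canny(gray, edges, 0.3 * high, high, 3);
    return edges;
}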
Code Example #4
void SupportVectorMachineDemo(Mat& class1_samples, char* class1_name, Mat& class2_samples, char* class2_name, Mat& unknown_samples)
{
    float labels[MAX_SAMPLES];
    float training_data[MAX_SAMPLES][2];
	CvSVM SVM;

    // Image for visual representation of (2-D) feature space
    int width = MAX_FEATURE_VALUE+1, height = MAX_FEATURE_VALUE+1;
    Mat feature_space = Mat::zeros(height, width, CV_8UC3);

	int number_of_samples = 0;
	// Loops three times:
	//  1st time - extracts feature values for class 1
	//  2nd time - extracts feature values for class 2 AND trains SVM
	//  3rd time - extracts feature values for unknowns AND predicts their classes using SVM
	for (int current_class = 1; current_class<=UNKNOWN_CLASS; current_class++)
	{
		Mat gray_image,binary_image;
		if (current_class == 1)
			cvtColor(class1_samples, gray_image, CV_BGR2GRAY);
		else if (current_class == 2)
			cvtColor(class2_samples, gray_image, CV_BGR2GRAY);
		else cvtColor(unknown_samples, gray_image, CV_BGR2GRAY);        
		threshold(gray_image,binary_image,128,255,THRESH_BINARY_INV);

	    vector<vector<Point>> contours;
		vector<Vec4i> hierarchy;
		findContours(binary_image,contours,hierarchy,CV_RETR_TREE,CV_CHAIN_APPROX_NONE);
		Mat contours_image = Mat::zeros(binary_image.size(), CV_8UC3);
		contours_image = Scalar(255,255,255);
		// Do some processing on all contours (objects and holes!)
		vector<vector<Point>> hulls(contours.size());
		vector<vector<int>> hull_indices(contours.size());
		vector<vector<Vec4i>> convexity_defects(contours.size());
		vector<Moments> contour_moments(contours.size());
		for (int contour_number=0; (!contours.empty()) && (contour_number>=0); contour_number=hierarchy[contour_number][0])
		{
			if (contours[contour_number].size() > 10)
			{
				convexHull(contours[contour_number], hulls[contour_number]);
				convexHull(contours[contour_number], hull_indices[contour_number]);
				convexityDefects( contours[contour_number], hull_indices[contour_number], convexity_defects[contour_number]);
				contour_moments[contour_number] = moments( contours[contour_number] );
				// Draw the shape and features
				Scalar colour( rand()&0x7F, rand()&0x7F, rand()&0x7F );
				drawContours( contours_image, contours, contour_number, colour, CV_FILLED, 8, hierarchy );
				char output[500];
				double area = contourArea(contours[contour_number])+contours[contour_number].size()/2+1; // boundary-corrected area (computed but unused)
				// Draw the convex hull
				drawContours( contours_image, hulls, contour_number, Scalar(127,0,127) );
				// Highlight any convexities
				int largest_convexity_depth=0;
				for (int convexity_index=0; convexity_index < (int)convexity_defects[contour_number].size(); convexity_index++)
				{
					if (convexity_defects[contour_number][convexity_index][3] > largest_convexity_depth)
						largest_convexity_depth = convexity_defects[contour_number][convexity_index][3];
					if (convexity_defects[contour_number][convexity_index][3] > 256*2)
					{
						line( contours_image, contours[contour_number][convexity_defects[contour_number][convexity_index][0]], contours[contour_number][convexity_defects[contour_number][convexity_index][2]], Scalar(0,0, 255));
						line( contours_image, contours[contour_number][convexity_defects[contour_number][convexity_index][1]], contours[contour_number][convexity_defects[contour_number][convexity_index][2]], Scalar(0,0, 255));
					}
				}
				// Compute moments and a measure of the deepest convexity
				double hu_moments[7];
				HuMoments( contour_moments[contour_number], hu_moments );
				double diameter = ((double) contours[contour_number].size())/PI;
				double convexity_depth = ((double) largest_convexity_depth)/256.0;
				double convex_measure = convexity_depth/diameter;
				int class_id = current_class;
				float feature[2] = { (float) convex_measure*((float) MAX_FEATURE_VALUE), (float) hu_moments[0]*((float) MAX_FEATURE_VALUE) };
				if (feature[0] > ((float) MAX_FEATURE_VALUE)) feature[0] = ((float) MAX_FEATURE_VALUE);
				if (feature[1] > ((float) MAX_FEATURE_VALUE)) feature[1] = ((float) MAX_FEATURE_VALUE);
				if (current_class == UNKNOWN_CLASS)
				{
					// Try to predict the class
					Mat sampleMat = (Mat_<float>(1,2) << feature[0], feature[1]);
					float prediction = SVM.predict(sampleMat);
					class_id = (prediction == 1.0) ? 1 : (prediction == -1.0) ? 2 : 0;
				}
				const char* current_class_name = (class_id==1) ? class1_name : (class_id==2) ? class2_name : "Unknown";

				sprintf(output,"Class=%s, Features %.2f, %.2f", current_class_name, feature[0]/((float) MAX_FEATURE_VALUE), feature[1]/((float) MAX_FEATURE_VALUE));
				Point location( contours[contour_number][0].x-40, contours[contour_number][0].y-3 );
				putText( contours_image, output, location, FONT_HERSHEY_SIMPLEX, 0.4, colour );
				if (current_class == UNKNOWN_CLASS)
				{
				}
				else if (number_of_samples < MAX_SAMPLES)
				{
					labels[number_of_samples] = (float) ((current_class == 1) ? 1.0 : -1.0);
					training_data[number_of_samples][0] = feature[0];
					training_data[number_of_samples][1] = feature[1];
					number_of_samples++;
				}
			}
		}
		if (current_class == 1)
		{
			Mat temp_output = contours_image.clone();
			imshow(class1_name, temp_output );
		}
		else if (current_class == 2)
		{
			Mat temp_output2 = contours_image.clone();
			imshow(class2_name, temp_output2 );

			// Now that features for both classes have been determined, train the SVM
			Mat labelsMat(number_of_samples, 1, CV_32FC1, labels);
			Mat trainingDataMat(number_of_samples, 2, CV_32FC1, training_data);
			// Set up SVM's parameters
			CvSVMParams params;
			params.svm_type    = CvSVM::C_SVC;
			params.kernel_type = CvSVM::POLY;
			params.degree = 1;
			params.term_crit   = cvTermCriteria(CV_TERMCRIT_ITER, 100, 1e-6);
			// Train the SVM
			SVM.train(trainingDataMat, labelsMat, Mat(), Mat(), params);

			// Show the SVM classifier for all possible feature values
			Vec3b green(192,255,192), blue (255,192,192);
			// Show the decision regions given by the SVM
			for (int i = 0; i < feature_space.rows; ++i)
				for (int j = 0; j < feature_space.cols; ++j)
				{
					Mat sampleMat = (Mat_<float>(1,2) << j,i);
					float prediction = SVM.predict(sampleMat);
					if (prediction == 1)
						feature_space.at<Vec3b>(i,j) = green;
					else if (prediction == -1)
					    feature_space.at<Vec3b>(i,j)  = blue;
				}
			// Show the training data (as dark circles)
			for(int sample=0; sample < number_of_samples; sample++)
				if (labels[sample] == 1.0)
					circle( feature_space, Point((int) training_data[sample][0], (int) training_data[sample][1]), 3, Scalar( 0, 128, 0 ), -1, 8);
				else circle( feature_space, Point((int) training_data[sample][0], (int) training_data[sample][1]), 3, Scalar( 128, 0, 0 ), -1, 8);
			// Highlight the support vectors (in red)
			int num_support_vectors = SVM.get_support_vector_count();
			for (int support_vector_index = 0; support_vector_index < num_support_vectors; ++support_vector_index)
			{
				const float* v = SVM.get_support_vector(support_vector_index);
				circle( feature_space,  Point( (int) v[0], (int) v[1]),   3,  Scalar(0, 0, 255));
			}
			imshow("SVM feature space", feature_space);
		}
		else if (current_class == 3)
		{
			imshow("Classification of unknowns", contours_image );
		}
	}
}
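CvSVM and CvSVMParams are the legacy OpenCV 2.x ML API. A rough sketch of the same setup against the OpenCV 3+ cv::ml interface is below, assuming the same row-per-sample training matrix; note that classification labels become a CV_32S Mat in the new API:

#include <opencv2/ml.hpp>

// Train a degree-1 polynomial-kernel C-SVC, mirroring the params above.
cv::Ptr<cv::ml::SVM> trainSvm(const cv::Mat& trainingDataMat, const cv::Mat& labelsMat32S)
{
    cv::Ptr<cv::ml::SVM> svm = cv::ml::SVM::create();
    svm->setType(cv::ml::SVM::C_SVC);
    svm->setKernel(cv::ml::SVM::POLY);
    svm->setDegree(1);
    svm->setTermCriteria(cv::TermCriteria(cv::TermCriteria::MAX_ITER, 100, 1e-6));
    svm->train(trainingDataMat, cv::ml::ROW_SAMPLE, labelsMat32S);
    return svm; // prediction stays svm->predict(sampleMat) on a CV_32F row
}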
Code Example #5
void ImageProcessor::process(MultispectralImage frame)
{
	MultispectralImage frame8Bit;
	QList<imgDesc> results;

	quint8 i;

	Mat filterMask;
	Mat maskedFCImage;
	double maxVal = 0;
	double maxTemp = 0.0;

	Mat temp;
	double spread;

	Mat motionMask;

	errorOccurred = false;

	lockConfig.lockForRead();

	//main processing tasks
	//*********************

	//subtract dark image, if enabled
	if(myConfig.calibration.subtractDark && !frame.getDarkSubtracted())
	{
		for(i = 1; i < frame.getChannelCount(); i++)
		{
			//subtract dark image from current image
			Mat tmp;
			cv::subtract(frame.getImageByChannelNumber(i),
						 frame.getDarkImage(),
						 tmp);

			//set result as new channel image
			frame.setChannelImage(frame.getWavebands().at(i), tmp);
		}

		frame.setDarkSubtracted(true);
	}

	//perform skin detection by using quotient filters, if enabled
	if(myConfig.detectSkinByQuotient && (myConfig.quotientFilters.size() > 0))
	{
		//clear result list
		skinDetectionResults.clear();

		//signal processing of all filters
		emit doSkinDetection(frame);
	}

	//if image depth is more than 8bit, image has to be resampled to be displayed
	if(frame.getDepth() > 8)
	{
		//if automatic contrast is enabled, find the brightest spot in all channels
		if(myConfig.contrastAutomatic)
		{
			//iterate through all bands (except dark) to find maximum value
			for(i = 1; i < frame.getChannelCount(); i++)
			{
				minMaxLoc(frame.getImageByChannelNumber(i), NULL, &maxTemp);
				if ( maxTemp > maxVal )
				{
					maxVal = maxTemp;
				}
			}

			//subtract contrast dark offset from maximum
			maxVal -= myConfig.contrastOffset;

			//slowly increase or decrease contrast value
			if((maxVal / myConfig.contrastValue) < 220)
			{
				myConfig.contrastValue -= (myConfig.contrastValue - (maxVal / 255)) / 10;
			}
			else if((maxVal / myConfig.contrastValue) > 250)
			{
				myConfig.contrastValue += ((maxVal / 255) - myConfig.contrastValue) / 10;
			}
		}

		//calculate spread factor
		spread = 1.0 / (double)myConfig.contrastValue;

		//configure GUI image object
		frame8Bit.setSize(frame.getWidth(), frame.getHeight());
		frame8Bit.setDepth(8);

		//scale down every band
		for (i = 0; i < frame.getChannelCount(); i++)
		{
			//subtract contrast offset, if enabled
			Mat tempOffset;
			if(myConfig.contrastOffset > 0)
			{
				subtract(frame.getImageByChannelNumber(i),
						 Scalar(myConfig.contrastOffset),
						 tempOffset);
			}
			else
			{
				tempOffset = frame.getImageByChannelNumber(i);
			}

			//convert to 8 bit using spread factor
			tempOffset.convertTo(temp, CV_8U, spread); // was the raw value 8; CV_8U states the intent directly
			frame8Bit.setChannelImage(frame.getWavebands().at(i), temp.clone());
		}
	}
	else
	{
		frame8Bit = frame;
	}

	//detect edges
	if(myConfig.edgeDetection)
	{
		QMapIterator<qint16, Mat> it(frame8Bit.getImages());
		while(it.hasNext())
		{
			it.next();

			Mat edges = doEdgeDetection(it.value(), myConfig.edgeThreshold);

			struct imgDesc edgeResult;
			edgeResult.desc = QString("Edges %1nm").arg(it.key());
			edgeResult.img = edges;
			results.append(edgeResult);
		}
	}

	//Estimate distance (in separate thread)
	if (myConfig.estimateDistance)
	{
		//make edge mask on selected image
		Mat edges;
		if(autoSelectCannyImage) //automatically select sharpest band image for edge detection
		{
			Canny(frame8Bit.getImageByChannelNumber(lastSharpestBand), edges, cannyLowThresh, cannyHighThresh);
		}
		else //use band image selected by the user (in GUI)
		{
			Canny(frame8Bit.getImageByChannelNumber(cannyImage), edges, cannyLowThresh, cannyHighThresh);
		}

		//emit signals to distance estimation thread
		distEstimationResults.clear();
		emit setDistEstimParams((int)myConfig.sharpMetric, edges, myConfig.sharpnessNbrhdSize, medianKernel);
		emit doDistanceEstimation(frame8Bit);

		//wait for thread to finish
		while (!errorOccurred && distEstimationResults.size() < 1) //frame8Bit.getChannelCount()-1)
		{
			QCoreApplication::processEvents();
		}
		if(errorOccurred)
		{
			emit errorProcessing(ImageSourceException("Error in task: estimateDistanceByChromAberr."));
			return;
		}

		//append distance estimation result to results in order to display them
		if(!distEstimationResults.empty())
		{
			//get 8 bit image from 1st list entry (at position 0)
			results.append(distEstimationResults.at(0));
		}
	}

	//wait for threads to finish:
	//***************************

	//wait until all threads are finished, get results and delete them

	if(myConfig.detectSkinByQuotient && (myConfig.quotientFilters.size() > 0))
	{
		maskedFCImage = Mat::zeros(frame8Bit.getDarkImage().rows,
								   frame8Bit.getDarkImage().cols, CV_8UC3);

		//wait until all threads are finished and get results
		while(!errorOccurred &&
			  (myConfig.quotientFilters.size() > skinDetectionResults.size()))
		{
			QCoreApplication::processEvents(QEventLoop::AllEvents);
		}
		if(errorOccurred)
		{
			emit errorProcessing(ImageSourceException("Error in task: detectSkinByQuotients."));
			return;
		}
		//multiply (cut) the filter masks
		filterMask = skinDetectionResults.at(0);
		for(i = 1; i < skinDetectionResults.size(); i++ )
		{
			multiply(filterMask, skinDetectionResults.at(i),
					 filterMask, 1.0);
		}

		//remove positive pixels with motion artifacts
		if(myConfig.suppressMotion && (lastFrame.getChannelCount() == frame.getChannelCount()))
		{
			motionMask = Mat::ones(maskedFCImage.rows, maskedFCImage.cols, CV_8UC1);

			for(i= 0; i < frame.getChannelCount(); i++)
			{
				Mat diffF, threshF, thresh;
				Mat curF, prevF;

				//get frame channels and convert to float
				frame.getImageByChannelNumber(i).convertTo(curF, CV_32F);
				lastFrame.getImageByChannelNumber(i).convertTo(prevF, CV_32F);

				//calculate absolute difference between current and previous frame
				absdiff(curF, prevF, diffF);

				//threshold the absolute difference
				threshold(diffF, threshF, myConfig.motionThreshold, 1.0, THRESH_BINARY_INV);

				//convert to 8 bit unsigned
				threshF.convertTo(thresh, CV_8U);

				//update motion mask with new thresholded difference mask
				multiply(motionMask, thresh, motionMask);
			}

			//now multiply motion mask with filter mask to remove positive filter results
			//where there was motion detected
			multiply(motionMask, filterMask, filterMask);

			//add motion mask to results
			struct imgDesc motionResult;
			motionResult.desc = "Motion";
			threshold(motionMask, motionResult.img, 0, 255, THRESH_BINARY_INV) ;
			results.append(motionResult);
		}

		//Morph result:
		if(myConfig.morphResult)
		{
			Mat element(4,4,CV_8U,Scalar(1));
			morphologyEx(filterMask, filterMask, MORPH_OPEN, element);
		}

		//set mask on top of (8bit) false colour image
		bitwise_or(maskedFCImage,
				   frame8Bit.getFalseColorImage(myConfig.falseColorChannels),
				   maskedFCImage, filterMask);

		if(myConfig.showMaskContours)
		{
			vector<vector<Point> > contours;
			Scalar green = CV_RGB(0,255,0);
			//CvScalar blue = CV_RGB(0,0,255);

			//findContours modifies its input, so pass a copy to keep filterMask intact for the steps below
			findContours(filterMask.clone(), contours,
						 CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);

			drawContours(maskedFCImage, contours, -1, green, 2, 8);
		}

		struct imgDesc skinMask;
		struct imgDesc skinResult;

		skinMask.desc = "QF Mask";
		threshold(filterMask, skinMask.img, 0, 255, THRESH_BINARY) ;
		results.append(skinMask);

		skinResult.desc = "Masked FC Image";
		skinResult.img = maskedFCImage;
		results.append(skinResult);
	}

	lockConfig.unlock();

	emit finishedProcessing(frame, frame8Bit, results);

	lastFrame = frame;
}
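The motion-suppression loop above (absdiff, inverted threshold, multiply) is a self-contained pattern: pixels that changed more than a threshold between frames are zeroed out of the mask. A minimal sketch of that kernel, assuming single-channel float frames and an 8-bit 0/1 mask as in the method above:

#include <opencv2/imgproc/imgproc.hpp>

// Knock moving pixels out of 'mask' (CV_8UC1 with values 0/1).
void suppressMotion(const cv::Mat& curF, const cv::Mat& prevF,
                    double motionThreshold, cv::Mat& mask)
{
    cv::Mat diffF, threshF, thresh;
    cv::absdiff(curF, prevF, diffF);               // per-pixel |current - previous|
    cv::threshold(diffF, threshF, motionThreshold,
                  1.0, cv::THRESH_BINARY_INV);     // 1.0 where static, 0.0 where moving
    threshF.convertTo(thresh, CV_8U);
    cv::multiply(mask, thresh, mask);              // zero the moving pixels
}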
Code Example #6
	std::vector<std::vector<std::vector<cv::Point>>> MultiContourObjectDetector::findApproxContours(
		cv::Mat image,
		bool performOpening,
		bool findBaseShape)
	{

		// CREATE ACTIVE ZONE 80% AND 50% ---------------------

		Point centre(image.size().width / 2, image.size().height / 2);

		int deleteHeight = image.size().height * _deleteFocus;
		int deleteWidth = image.size().width * _deleteFocus;
		int deleteX = centre.x - deleteWidth / 2;
		int deleteY = centre.y - deleteHeight / 2;

		int attenuationHeight = image.size().height * _attenuationFocus;
		int attenuationWidth = image.size().width * _attenuationFocus;
		int attenuationX = centre.x - attenuationWidth / 2;
		int attenuationY = centre.y - attenuationHeight / 2;

		Rect erase(deleteX, deleteY, deleteWidth, deleteHeight);
		_deleteRect = erase;

		Rect ease(attenuationX, attenuationY, attenuationWidth, attenuationHeight);
		_attenuationRect = ease;
		// ----------------------------------------

		bool imageTooBig = false;

		Mat newImage;

		if (image.size().height <= 400 || image.size().width <= 400)
		{
			Mat pickColor = image(Rect((image.size().width / 2) - 1, image.size().height - 2, 2, 2));
			Scalar color = mean(pickColor);

			int increment = 2;
			newImage = Mat(Size(image.size().width + increment, image.size().height + increment), image.type());
			newImage = color;

			Point nc(newImage.size().width / 2, newImage.size().height / 2);
			int incH = image.size().height;
			int incW = image.size().width;
			int incX = nc.x - incW / 2;
			int incY = nc.y - incH / 2;

			image.copyTo(newImage(Rect(incX, incY, incW, incH)));
		}
		else
		{
			imageTooBig = true;
			newImage = image;
		}

		Size imgSize = newImage.size();
		Mat gray(imgSize, CV_8UC1);
		Mat thresh(imgSize, CV_8UC1);

		if (newImage.channels() >= 3)
			cvtColor(newImage, gray, CV_BGR2GRAY);
		else
			newImage.copyTo(gray);

		int minThreshold = mean(gray)[0]; // initialized here: it was read uninitialized when performOpening was false

		if (performOpening)
		{
			// PERFORM OPENING (Erosion --> Dilation)

			int erosion_size = 3;
			int dilation_size = 3;

			if (imageTooBig)
			{
				erosion_size = 5;
				dilation_size = 5;
			}

			Mat element = getStructuringElement(0, Size(2 * erosion_size, 2 * erosion_size), Point(erosion_size, erosion_size));
			erode(gray, gray, element);
			dilate(gray, gray, element);

			minThreshold = mean(gray)[0];

			if (minThreshold < 90)
				minThreshold = 60;
			else if (minThreshold >= 90 && minThreshold < 125)
				minThreshold = 100;
		}


		threshold(gray, thresh, minThreshold, 255, THRESH_BINARY);

#ifdef DEBUG_MODE
		imshow("Threshold", thresh);
#endif

		vector<vector<Point>> contours;
		vector<Vec4i> hierarchy;
		vector<Point> hull, approx;

		map<int, vector<vector<Point>>> hierachedContours;
		map<int, vector<vector<Point>>> approxHContours;

		findContours(thresh, contours, hierarchy, CV_RETR_TREE, CHAIN_APPROX_NONE);


#ifdef DEBUG_MODE
		Mat tempI(image.size(), CV_8UC1);
		tempI = Scalar(0);
		drawContours(tempI, contours, -1, cv::Scalar(255), 1, CV_AA);

		imshow("Contours", tempI);
#endif


		vector<vector<Point>> temp;

		// CATALOG BY HIERARCHY LOOP
		for (int i = 0; i < contours.size(); i++)
		{

#ifdef DEBUG_MODE
			tempI = Scalar(0);
			temp.clear();
			temp.push_back(contours[i]);
			drawContours(tempI, temp, -1, cv::Scalar(255), 1, CV_AA);
#endif
			int parent = hierarchy[i][3];
			if (parent == -1)
			{
				if (hierachedContours.count(i) == 0)
				{
					// this top-level contour has not been recorded yet

					hierachedContours.insert(pair<int, vector<vector<Point>>>(i, vector<vector<Point>>()));
					hierachedContours[i].push_back(contours[i]);
				}
				else
				{
					// already recorded
					continue;
				}
			}
			else
			{
				if (hierachedContours.count(parent) == 0)
				{
					// parent contour not recorded yet
					hierachedContours.insert(pair<int, vector<vector<Point>>>(parent, vector<vector<Point>>()));
					hierachedContours[parent].push_back(contours[parent]);
				}
				}
				hierachedContours[parent].push_back(contours[i]);
			}
		}


		int minPoint, maxPoint;
		minPoint = _minContourPoints - _minContourPoints / 2.1;
		maxPoint = _minContourPoints + _minContourPoints / 1.5;


		// APPROX LOOP

		for (map<int, vector<vector<Point>>>::iterator it = hierachedContours.begin(); it != hierachedContours.end(); it++)
		{

			if (it->second[0].size() < 400)
				continue;

#ifdef DEBUG_MODE
			tempI = Scalar(0);
			drawContours(tempI, it->second, -1, cv::Scalar(255), 1, CV_AA);
#endif

			if (it == hierachedContours.begin() && it->second.size() < _aspectedContours)
				continue;

			for (int k = 0; k < it->second.size(); k++)
			{
				if (it->second[k].size() < _minContourPoints)
				{
					if (k == 0) // parent
						break;
					else        // child
						continue;
				}

				convexHull(it->second[k], hull, false);

				double epsilon = it->second[k].size() * 0.003;
				approxPolyDP(it->second[k], approx, epsilon, true);

#ifdef DEBUG_MODE			
				tempI = Scalar(0);
				vector<vector<Point>> temp;
				temp.push_back(approx);
				drawContours(tempI, temp, -1, cv::Scalar(255), 1, CV_AA);
#endif

				// REMOVE TOO EXTERNAL SHAPES -------------

				if (imageTooBig)
				{
					Rect bounding = boundingRect(it->second[k]);

#ifdef DEBUG_MODE
					rectangle(tempI, _deleteRect, Scalar(255));
					rectangle(tempI, bounding, Scalar(255));
#endif

					bool isInternal = bounding.x > _deleteRect.x &&
						bounding.y > _deleteRect.y &&
						bounding.x + bounding.width < _deleteRect.x + _deleteRect.width &&
						bounding.y + bounding.height < _deleteRect.y + _deleteRect.height;


					if (!isInternal)
					{
						if (k == 0)
							break;
					}
				}

				// --------------------------------------------------

				if (!findBaseShape)
				{
					if (hull.size() < minPoint || hull.size() > maxPoint)
					{
						if (k == 0) // parent
							break;
						else        // child
							continue;
					}

				}


				if (k == 0)
				{
					approxHContours.insert(pair<int, vector<vector<Point>>>(it->first, vector<vector<Point>>()));
					approxHContours.at(it->first).push_back(approx);
				}
				else
				{
					approxHContours[it->first].push_back(approx);
				}
			}
		}

		int maxSize = 0,
			maxID = 0;

		vector<vector<vector<Point>>> lookupVector;
		for (map<int, vector<vector<Point>>>::iterator it = approxHContours.begin(); it != approxHContours.end(); it++)
		{
			if (it->second.size() <= 1)
				continue;

			if (findBaseShape)
			{
				int totSize = 0;
				for (int k = 0; k < it->second.size(); k++)
				{
					totSize += it->second[k].size();
				}

				if (totSize > maxSize)
				{
					maxSize = totSize;
					maxID = it->first;
				}
			}
			else
			{
				lookupVector.push_back(it->second);
			}

		}

		if (findBaseShape)
		{
			lookupVector.push_back(approxHContours.at(maxID));
		}

		return lookupVector;
	}
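One detail worth flagging in the approximation loop: the approxPolyDP epsilon above is scaled by the contour's point count (it->second[k].size() * 0.003). A more common convention ties epsilon to the contour's perimeter, which does not depend on how densely the contour is sampled; a sketch of that variant, with 1% of the arc length as an illustrative ratio:

#include <opencv2/imgproc/imgproc.hpp>
#include <vector>

// Perimeter-relative approximation tolerance.
std::vector<cv::Point> approxByPerimeter(const std::vector<cv::Point>& contour)
{
    std::vector<cv::Point> approx;
    double epsilon = 0.01 * cv::arcLength(contour, true);
    cv::approxPolyDP(contour, approx, epsilon, true);
    return approx;
}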
Code Example #7
vector<Point> Camera::Follow() //previously returned void
{
    //float* contour=new float[height*width]();
    vector<float> temp = ToArray(Mat::zeros(height, width, CV_8UC1 ));
    float* output = &temp[0]; // points into temp; the 'new float[...]' previously assigned here leaked
    cout << "[START] Active Contour " << endl;
    
    // Follow the camera
    cout << "Following the camera ...";
    cout << flush;
    Point center(-1,-1); Vec3b hsv; Mat mask, gray, HSV; Scalar lowerb, upperb;
    int erosion_size = 2, dilation_size = 10;
    Mat erodeElement = getStructuringElement(MORPH_RECT, Size(2 * erosion_size + 1, 2 * erosion_size + 1), Point(erosion_size, erosion_size) );
    Mat dilateElement = getStructuringElement(MORPH_RECT, Size(2 * dilation_size + 1, 2 * dilation_size + 1), Point(dilation_size, dilation_size) );
    vector<float> frameArray, maskArray;
    vector<vector<Point> > contours; // function scope: returned at the end
    Mat ROI = Mat::zeros( height, width, CV_8UC1 );
    int count = 0; double sum = 0; 
    
    //while(waitKey(1) == -1) {// to be removed
        /*if (capture.read(frame) == NULL) {
            cout << "[ERROR] frame not read" << endl;
            return;
        }*/ //frame is already set
        clock_t startTime = clock(); // compute the time
        cvtColor(frame, gray, COLOR_RGB2GRAY);        
        cvtColor(frame, HSV, COLOR_RGB2HSV);
        
        setMouseCallback("Frame", onMouse);
        
        if( drawing_box ) 
            draw_box(&frame, roi);
        
        if(clicked) {
            // Init mask
            if(!haveMask) {
                // Take hsv from mouse
                center = initCenter;
                hsv = HSV.at<Vec3b>(center.y,center.x);
                haveMask = true;
                //cout << "HSV: " << hsv << endl;
                lowerb = Scalar(hsv.val[0] - 30, hsv.val[1] - 50, hsv.val[2] - 50);
                upperb = Scalar(hsv.val[0] + 30, hsv.val[1] + 50, hsv.val[2] + 50);
                //cout << "lowerb: " << lowerb << endl;
                //cout << "upperb: " << upperb << endl;  
                ROI = Mat::zeros( height, width, CV_8UC1 );
                rectangle( ROI, roi.tl(), roi.br(), Scalar(255), -1);
                
                sum = 0; count = 0; //benchmark
                
            }
            
            // Create the mask
            
            inRange(HSV, lowerb , upperb, mask);
            dilate(mask, mask, dilateElement);
            mask = mask.mul(ROI);
            
            //imshow("mask", mask);
            
            frameArray = ToArray(gray);
            maskArray = ToArray(mask);
            ActiveContour(&frameArray[0], output, contour, &maskArray[0], width, height, roi.br().y);  
            
            Mat OUT = ToMat(output, height, width);
            OUT.convertTo(OUT, CV_8UC1);
            //imshow("Output", OUT);
            
            contours.clear(); // declared at function scope so it survives to the return
            vector<Vec4i> hierarchy;
            findContours(OUT, contours, hierarchy, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE, Point(0, 0));
            
            vector<Rect> boundRect( contours.size() );
            
            for( int i = 0; i < contours.size(); i++ )
            { 
                boundRect[i] = boundingRect( Mat(contours[i]) );
            }
            
            /// Draw polygonal contour + bonding rects + circles
            Mat drawing = Mat::zeros( height, width, CV_8UC3 );
            circle(frame, center, 5, Scalar(0,0,255), 5);
            
            for( int i = 0; i< contours.size(); i++ )
            {
                
                if(boundRect[i].contains(center)) {
                    
                    drawContours( frame, contours, i, Scalar(255,255,255), 1, 8, vector<Vec4i>(), 0, Point() );
                    rectangle( frame, boundRect[i].tl(), boundRect[i].br(), Scalar(0,255,0), 2, 8, 0 );
                    
                    // Center
                    center.x = boundRect[i].tl().x + boundRect[i].size().width/2;
                    center.y = boundRect[i].tl().y + boundRect[i].size().height/2;
                    
                    int x = boundRect[i].size().width;
                    int y = boundRect[i].size().height;
                    int v = (int)(sqrt((x/(y+EPS))*area));
                    int h = (int)(area/v);
                    int deltax = (int)((h-y)/2);
                    int deltay = (int)((v-x)/2);
                    int tlx = boundRect[i].tl().x -deltax;
                    int tly = boundRect[i].tl().y -deltay;
                    int brx = boundRect[i].br().x +deltax;
                    int bry = boundRect[i].br().y +deltay;
                    tlx = (tlx < 0) ? 0 : tlx;
                    brx = (brx > width) ? width : brx;
                    tly = (tly < 0) ? 0 : tly;
                    bry = (bry > height) ? height : bry;                   
                    roi = Rect(Point(tlx,tly),Point(brx,bry));
                    ROI = Mat::zeros( height, width, CV_8UC1 );
                    rectangle( ROI, roi.tl(), roi.br(), Scalar(255), -1);
                    rectangle( frame, roi.tl(), roi.br(), Scalar(0,0,255), 2, 8, 0 );
                    //imshow("ROI", ROI);
                    break;
                }
            }
        }
        imshow("Frame", frame);
        sum += double( clock() - startTime ) / (double)CLOCKS_PER_SEC;
        count++;
        
//  }
    cout << sum / count << endl << flush;
    return contours.empty() ? vector<Point>() : contours.front();
}
Code Example #8
File: main.cpp  Project: guptask/ngn2_cchmc
/* Process the images inside each directory */
bool processDir(std::string path, std::string image_name, std::string metrics_file) {

    /* Create the data output file for images that were processed */
    std::ofstream data_stream;
    data_stream.open(metrics_file, std::ios::app);
    if (!data_stream.is_open()) {
        std::cerr << "Could not open the data output file." << std::endl;
        return false;
    }

    // Create the output directory
    std::string out_directory = path + "result/";
    struct stat st = {0};
    if (stat(out_directory.c_str(), &st) == -1) {
        mkdir(out_directory.c_str(), 0700);
    }
    out_directory = out_directory + image_name + "/";
    st = {0};
    if (stat(out_directory.c_str(), &st) == -1) {
        mkdir(out_directory.c_str(), 0700);
    }

    // Count the number of images
    std::string dir_name = path + "jpg/" + image_name + "/";
    DIR *read_dir = opendir(dir_name.c_str());
    if (!read_dir) {
        std::cerr << "Could not open directory '" << dir_name << "'" << std::endl;
        return false;
    }
    struct dirent *dir = NULL;
    uint8_t z_count = 0;
    bool collect_name_pattern = false;
    std::string end_pattern;
    while ((dir = readdir(read_dir))) {
        if (!strcmp (dir->d_name, ".") || !strcmp (dir->d_name, "..")) continue;
        if (!collect_name_pattern) {
            std::string delimiter = "c1+";
            end_pattern = dir->d_name;
            size_t pos = end_pattern.find(delimiter);
            end_pattern.erase(0, pos);
            collect_name_pattern = true;
        }
        z_count++;
    }

    std::vector<cv::Mat>    blue_list(NUM_Z_LAYERS_COMBINED), 
                            green_list(NUM_Z_LAYERS_COMBINED), 
                            red_list(NUM_Z_LAYERS_COMBINED);
    for (uint8_t z_index = 1; z_index <= z_count; z_index++) {

        // Create the input filename and rgb stream output filenames
        std::string in_filename;
        if (z_count < 10) {
            in_filename = dir_name + image_name + 
                                        "_z" + std::to_string(z_index) + end_pattern;
        } else {
            if (z_index < 10) {
                in_filename = dir_name + image_name + 
                                        "_z0" + std::to_string(z_index) + end_pattern;
            } else if (z_index < 100) {
                in_filename = dir_name + image_name + 
                                        "_z" + std::to_string(z_index) + end_pattern;
            } else { // assuming number of z plane layers will never exceed 99
                std::cerr << "Does not support more than 99 z layers currently" << std::endl;
                return false;
            }
        }

        // Extract the bgr streams for each input image
        cv::Mat img = cv::imread(in_filename.c_str(), -1);
        if (img.empty()) return false;

        // Original image
        std::string out_original = out_directory + "zlayer_" + 
                                        std::to_string(z_index) + "_a_original.jpg";
        if (DEBUG_FLAG) cv::imwrite(out_original.c_str(), img);

        std::vector<cv::Mat> channel(3);
        cv::split(img, channel);
        blue_list[(z_index-1)%NUM_Z_LAYERS_COMBINED]  = channel[0];
        green_list[(z_index-1)%NUM_Z_LAYERS_COMBINED] = channel[1];
        red_list[(z_index-1)%NUM_Z_LAYERS_COMBINED]   = channel[2];

        // Continue collecting layers if needed
        //if (z_index%NUM_Z_LAYERS_COMBINED && (z_index != z_count)) continue;
        if (z_index < NUM_Z_LAYERS_COMBINED) continue;

        data_stream << image_name << ","
                    << std::to_string(z_index - NUM_Z_LAYERS_COMBINED + 1) << ","
                    << std::to_string(z_index) << ",";

        // Merge some layers together
        cv::Mat blue  = cv::Mat::zeros(channel[0].size(), CV_8UC1);
        cv::Mat green = cv::Mat::zeros(channel[1].size(), CV_8UC1);
        cv::Mat red   = cv::Mat::zeros(channel[1].size(), CV_8UC1);
        for (unsigned int merge_index = 0; 
                    merge_index < NUM_Z_LAYERS_COMBINED; merge_index++) {
            bitwise_or(blue, blue_list[merge_index], blue);
            bitwise_or(green, green_list[merge_index], green);
            bitwise_or(red, red_list[merge_index], red);
        }

        /** Gather BGR channel information needed for feature extraction **/

        /* Enhance layers */

        // Red channel
        cv::Mat red_enhanced;
        if(!enhanceImage(red, ChannelType::RED, &red_enhanced)) return false;
        std::string out_red = out_directory + "zlayer_" + 
                                        std::to_string(z_index) + "_red_enhanced.jpg";
        if (DEBUG_FLAG) cv::imwrite(out_red.c_str(), red_enhanced);

        // Purple channel
        cv::Mat purple_enhanced;
        if(!enhanceImage(red, ChannelType::PURPLE, &purple_enhanced)) return false;
        //cv::Mat red_enhanced_negative = cv::Mat::zeros(red_enhanced.size(), CV_8UC1);
        //bitwise_not(red_enhanced, red_enhanced_negative);
        //bitwise_and(purple_enhanced, red_enhanced_negative, purple_enhanced);
        std::string out_purple = out_directory + "zlayer_" + 
                                        std::to_string(z_index) + "_purple_enhanced.jpg";
        if (DEBUG_FLAG) cv::imwrite(out_purple.c_str(), purple_enhanced);

        // Blue channel
        cv::Mat blue_enhanced;
        if(!enhanceImage(blue, ChannelType::BLUE, &blue_enhanced)) return false;
        //cv::Mat purple_enhanced_negative = cv::Mat::zeros(purple_enhanced.size(), CV_8UC1);
        //bitwise_not(purple_enhanced, purple_enhanced_negative);
        //bitwise_and(blue_enhanced, purple_enhanced_negative, blue_enhanced);
        std::string out_blue = out_directory + "zlayer_" + 
                                        std::to_string(z_index) + "_blue_enhanced.jpg";
        if (DEBUG_FLAG) cv::imwrite(out_blue.c_str(), blue_enhanced);


        /* Segment */

        // Blue channel
        cv::Mat blue_segmented;
        std::vector<std::vector<cv::Point>> contours_blue;
        std::vector<cv::Vec4i> hierarchy_blue;
        std::vector<HierarchyType> blue_contour_mask;
        std::vector<double> blue_contour_area;
        contourCalc(    blue_enhanced,
                        MIN_NUCLEUS_SIZE,
                        &blue_segmented, 
                        &contours_blue,
                        &hierarchy_blue, 
                        &blue_contour_mask,
                        &blue_contour_area  );
        std::vector<std::vector<cv::Point>> contours_blue_filtered;
        filterCells(    ChannelType::BLUE, 
                        blue_enhanced, 
                        contours_blue, 
                        blue_contour_mask, 
                        &contours_blue_filtered );

        // Red channel
        cv::Mat red_segmented;
        std::vector<std::vector<cv::Point>> contours_red;
        std::vector<cv::Vec4i> hierarchy_red;
        std::vector<HierarchyType> red_contour_mask;
        std::vector<double> red_contour_area;
        contourCalc(    red_enhanced,
                        1.0,
                        &red_segmented, 
                        &contours_red,
                        &hierarchy_red, 
                        &red_contour_mask,
                        &red_contour_area  );


        /* Classify the cells */
        std::vector<std::vector<cv::Point>> contours_neural_soma;
        std::vector<std::vector<cv::Point>> contours_neural_nuclei, contours_astrocytes;
        cv::Mat purple_intersection = cv::Mat::zeros(purple_enhanced.size(), CV_8UC1);
        for (size_t i = 0; i < contours_blue_filtered.size(); i++) {
            std::vector<cv::Point> purple_contour;
            cv::Mat temp;
            if (findCellSoma( contours_blue_filtered[i], purple_enhanced, &temp, &purple_contour )) {
                contours_neural_soma.push_back(purple_contour);
                contours_neural_nuclei.push_back(contours_blue_filtered[i]);
                bitwise_or(purple_intersection, temp, purple_intersection);
                cv::Mat temp_not;
                bitwise_not(temp, temp_not);
                bitwise_and(purple_enhanced, temp_not, purple_enhanced);
            } else {
                contours_astrocytes.push_back(contours_blue_filtered[i]);
            }
        }


        /** Collect the metrics **/

        /* Cells */

        data_stream << contours_blue_filtered.size() << ",";

        float mean_dia = 0.0, stddev_dia = 0.0;
        float mean_aspect_ratio = 0.0, stddev_aspect_ratio = 0.0;
        float mean_error_ratio = 0.0, stddev_error_ratio = 0.0;

        // Characterize neural nuclei
        separationMetrics(  contours_neural_nuclei, 
                            &mean_dia, 
                            &stddev_dia, 
                            &mean_aspect_ratio, 
                            &stddev_aspect_ratio, 
                            &mean_error_ratio, 
                            &stddev_error_ratio
                        );
        data_stream << contours_neural_nuclei.size() << "," 
                    << mean_dia << "," 
                    << stddev_dia << "," 
                    << mean_aspect_ratio << "," 
                    << stddev_aspect_ratio << "," 
                    << mean_error_ratio << "," 
                    << stddev_error_ratio << ",";

        // Characterize the soma size
        separationMetrics(  contours_neural_soma, 
                            &mean_dia, 
                            &stddev_dia, 
                            &mean_aspect_ratio, 
                            &stddev_aspect_ratio, 
                            &mean_error_ratio, 
                            &stddev_error_ratio
                        );
        data_stream << mean_dia << "," 
                    << stddev_dia << "," 
                    << mean_aspect_ratio << "," 
                    << stddev_aspect_ratio << "," 
                    << mean_error_ratio << "," 
                    << stddev_error_ratio << ",";

        // Characterize the astrocyte nuclei
        separationMetrics(  contours_astrocytes, 
                            &mean_dia, 
                            &stddev_dia, 
                            &mean_aspect_ratio, 
                            &stddev_aspect_ratio, 
                            &mean_error_ratio, 
                            &stddev_error_ratio
                        );
        data_stream << contours_astrocytes.size() << "," 
                    << mean_dia << "," 
                    << stddev_dia << "," 
                    << mean_aspect_ratio << "," 
                    << stddev_aspect_ratio << "," 
                    << mean_error_ratio << "," 
                    << stddev_error_ratio << ",";


        /* Synapses */
        std::string red_output;
        binArea(red_contour_mask, red_contour_area, &red_output);
        data_stream << red_output << ",";

        data_stream << std::endl;


        /** Display analyzed images **/

        // Initialize
        cv::Mat drawing_blue  = blue;
        cv::Mat drawing_green = green;
        cv::Mat drawing_red   = red;

        // Draw soma
        for (size_t i = 0; i < contours_neural_soma.size(); i++) {
            drawContours(drawing_blue, contours_neural_soma, i, 255, 1, 8);
            drawContours(drawing_green, contours_neural_soma, i, 255, 1, 8);
            drawContours(drawing_red, contours_neural_soma, i, 255, 1, 8);
        }

        // Draw synapses: erase them from the blue/green layers and fill them in red
        for (size_t i = 0; i < contours_red.size(); i++) {
            drawContours(drawing_blue, contours_red, i, 0, -1, 8);  // was thickness 0 (invalid); -1 fills, matching the red layer
            drawContours(drawing_green, contours_red, i, 0, -1, 8);
            drawContours(drawing_red, contours_red, i, 255, -1, 8);
        }

        // Merge the modified red, blue and green layers
        std::vector<cv::Mat> merge_analyzed;
        merge_analyzed.push_back(drawing_blue);
        merge_analyzed.push_back(drawing_green);
        merge_analyzed.push_back(drawing_red);
        cv::Mat color_analyzed;
        cv::merge(merge_analyzed, color_analyzed);

        // Draw the analyzed image
        std::vector<int> compression_params;
        compression_params.push_back(CV_IMWRITE_JPEG_QUALITY);
        compression_params.push_back(100); // JPEG quality is capped at 100
        cv::imwrite("/tmp/img.jpg", color_analyzed, compression_params);
        std::string out_analyzed = out_directory + "zlayer_" + 
                                        std::to_string(z_index) + "_analyzed.tif";
        std::string cmd = "convert -quiet /tmp/img.jpg " + out_analyzed;
        system(cmd.c_str());
        system("rm /tmp/img.jpg");
    }
    closedir(read_dir);
    data_stream.close();

    return true;
}
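The zero-padding branches above (choosing between the "_z" and "_z0" prefixes) can be collapsed with a formatted stream. A sketch of that alternative, keeping the same naming scheme and the same 99-layer limit the code assumes:

#include <iomanip>
#include <sstream>
#include <string>

// Build "<dir><name>_z<index><suffix>", zero-padding the index to two
// digits whenever the total layer count needs it (1..99).
std::string zLayerFilename(const std::string& dir, const std::string& name,
                           unsigned z_index, unsigned z_count,
                           const std::string& suffix)
{
    int width = (z_count < 10) ? 1 : 2;
    std::ostringstream ss;
    ss << dir << name << "_z" << std::setw(width) << std::setfill('0')
       << z_index << suffix;
    return ss.str();
}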
Code Example #9
void OpenniFilter::cloud_cb_ (const pcl::PointCloud<pcl::PointXYZRGBA>::ConstPtr &cloud)
{
    if (!viewer.wasStopped())
    {
        if (cloud->isOrganized())
        {
            // initialize all the Mats to store intermediate steps
            int cloudHeight = cloud->height;
            int cloudWidth = cloud->width;
            rgbFrame = Mat(cloudHeight, cloudWidth, CV_8UC3);
            drawing = Mat(cloudHeight, cloudWidth, CV_8UC3);     // passing NULL as the data pointer left these Mats unallocated
            grayFrame = Mat(cloudHeight, cloudWidth, CV_8UC1);
            hsvFrame = Mat(cloudHeight, cloudWidth, CV_8UC3);
            contourMask = Mat(cloudHeight, cloudWidth, CV_8UC1);

            if (!cloud->empty())
            {
                for (int h = 0; h < rgbFrame.rows; h ++)
                {
                    for (int w = 0; w < rgbFrame.cols; w++)
                    {
                        pcl::PointXYZRGBA point = cloud->at(w, cloudHeight-h-1);
                        Eigen::Vector3i rgb = point.getRGBVector3i();
                        rgbFrame.at<Vec3b>(h,w)[0] = rgb[2];
                        rgbFrame.at<Vec3b>(h,w)[1] = rgb[1];
                        rgbFrame.at<Vec3b>(h,w)[2] = rgb[0];
                    }
                }

                // do the filtering 
                int xPos = 0;
                int yPos = 0;
                mtx.lock();
                xPos = mouse_x;
                yPos = mouse_y;
                mtx.unlock();

                // color filtering based on what is chosen by users
                cvtColor(rgbFrame, hsvFrame, CV_RGB2HSV);
                Vec3b pixel = hsvFrame.at<Vec3b>(yPos, xPos); // at(row, col), i.e. (y, x)

                int hueLow = pixel[0] < iHueDev ? pixel[0] : pixel[0] - iHueDev;
                int hueHigh = pixel[0] > 255 - iHueDev ? pixel[0] : pixel[0] + iHueDev;
                // inRange(hsvFrame, Scalar(hueLow, pixel[1]-20, pixel[2]-20), Scalar(hueHigh, pixel[1]+20, pixel[2]+20), grayFrame);
                inRange(hsvFrame, Scalar(hueLow, iLowS, iLowV), Scalar(hueHigh, iHighS, iHighV), grayFrame);

                // removes small objects from the foreground by morphological opening
                erode(grayFrame, grayFrame, getStructuringElement(MORPH_ELLIPSE, Size(5,5)));
                dilate(grayFrame, grayFrame, getStructuringElement(MORPH_ELLIPSE, Size(5,5)));

                // morphological closing (removes small holes from the foreground)
                dilate(grayFrame, grayFrame, getStructuringElement(MORPH_ELLIPSE, Size(5,5)));
                erode(grayFrame, grayFrame, getStructuringElement(MORPH_ELLIPSE, Size(5,5)));

                // gets contours from the grayFrame and keeps those above a minimum area
                Mat cannyOutput;
                vector<vector<Point> > contours;
                vector<Vec4i> hierarchy;
                int thresh = 100;
                Canny(grayFrame, cannyOutput, thresh, thresh * 2, 3);
                findContours(cannyOutput, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));
                int defaultContourArea = 1000; // 1000 seems to work fine in most cases... cannot prove this
                vector<vector<Point> > newContours;
                for (int i = 0; i < contours.size(); i++)
                {
                    double area = contourArea(contours[i], false);
                    if (area > defaultContourArea)
                        newContours.push_back(contours[i]);
                }

                // draws every contour that passed the area filter
                drawing = Mat::zeros(cannyOutput.size(), CV_8UC3);
                for (int i = 0; i < newContours.size(); i++)
                    drawContours(drawing, newContours, i, Scalar(255, 255, 255), CV_FILLED, 8, hierarchy, 0, Point());

                // gets the filter by setting everything within the contour to be 1. 
                inRange(drawing, Scalar(1, 1, 1), Scalar(255, 255, 255), contourMask);

                // filters the point cloud based on contourMask
                // again go through the point cloud and filter out unnecessary points
                pcl::PointCloud<pcl::PointXYZRGBA>::Ptr resultCloud (new pcl::PointCloud<pcl::PointXYZRGBA>);
                pcl::PointXYZRGBA newPoint;
                for (int h = 0; h < contourMask.rows; h ++)
                {
                    for (int w = 0; w < contourMask.cols; w++)
                    {
                        if (contourMask.at<uchar>(h,w) > 0)
                        {
                            newPoint = cloud->at(w,h);
                            resultCloud->push_back(newPoint);
                        }
                    }
                }

                if (xPos == 0 && yPos == 0)
                    viewer.showCloud(cloud);
                else
                    viewer.showCloud(resultCloud);
                
                imshow("tracker", rgbFrame);
                imshow("filtered result", contourMask);
                char key = waitKey(1);
                if (key == 27) 
                {
                    interface->stop();
                    return;
                }
            }
            else
                cout << "Warning: Point Cloud is empty" << endl;
        }
        else
            cout << "Warning: Point Cloud is not organized" << endl;
    }
}
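The filtering above keeps every contour larger than a fixed 1000-pixel area; if the goal were instead the single largest blob (as the original comment suggested), the selection would look like this sketch:

#include <opencv2/imgproc/imgproc.hpp>
#include <vector>

// Index of the largest-area contour, or -1 if there are none.
int largestContourIndex(const std::vector<std::vector<cv::Point> >& contours)
{
    int bestIdx = -1;
    double bestArea = 0.0;
    for (size_t i = 0; i < contours.size(); i++) {
        double area = cv::contourArea(contours[i]);
        if (area > bestArea) { bestArea = area; bestIdx = (int)i; }
    }
    return bestIdx;
}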
Code Example #10
void ForbiddenPlanet::drawBoothScreen(ofxCvColorImage webCamImg, ofRectangle faceBoundingBox)
{
    // increase top and bottom of facebounding box by 35%
    float newHeight = faceBoundingBox.height * 1.35f;
    float heightDiff = newHeight - faceBoundingBox.height;
    
    float newY = faceBoundingBox.y - heightDiff/2.0;
    
    faceBoundingBox.y = newY;
    faceBoundingBox.height = newHeight;
    
    
    fboBuffer.begin();
    ofClear(0,0,0);
    
    float sizeMultX = 1.5f;//(float)BOOTH_SCREEN_SIZE_X / (float)webCamImg.width;
    float sizeMultY = 1.5f;//(float)BOOTH_SCREEN_SIZE_Y / (float)webCamImg.height;
    
    
    
    ofSetColor(255,255,255);
    ofSetLineWidth(2.5f);
    
    _backdropImg.draw(BOOTH_SCREEN_POS_X,0,BOOTH_SCREEN_SIZE_X,BOOTH_SCREEN_SIZE_Y);
    ofNoFill();
    ofSetColor(255,0,50);
    
    // Find position of face
    ofPoint faceCentre = faceBoundingBox.getCenter();
    
    float faceCentreDiffY = BOOTH_SCREEN_SIZE_Y/2.0-25.0f-faceCentre.y;
    float faceCentreDiffX = BOOTH_SCREEN_SIZE_X/2.0-faceCentre.x;
    
    float faceSizeDiff = 384.000000 / faceBoundingBox.width;
    
    float w = faceSizeDiff * (float)BOOTH_SCREEN_SIZE_X;
    float h = faceSizeDiff * (float)BOOTH_SCREEN_SIZE_Y * 1.25f;
    
    float xPos = (BOOTH_SCREEN_SIZE_X/2) - FACE_TARGET_POS_X + faceCentreDiffX;
    float yPos = (BOOTH_SCREEN_SIZE_Y/2) - FACE_TARGET_POS_Y + faceCentreDiffY;
    
    ofRectangle faceBounds;
    
    float currentMult = 0.25f;
    float prevMult = 0.75f;
    if(_lastBounds->x != 0 && _lastBounds->y != 0 && _lastBounds->width != 0 && _lastBounds->height != 0)
    {
        // blend 25% of the new bounds with 75% of the previous ones to damp jitter
        printf("%f %f %f %f\n",_lastBounds->x,_lastBounds->y,_lastBounds->width,_lastBounds->height);
        faceBounds = ofRectangle(xPos * currentMult + _lastBounds->x * prevMult,
                                 yPos * currentMult + _lastBounds->y * prevMult,
                                 w * currentMult + _lastBounds->width * prevMult,
                                 h * currentMult + _lastBounds->height * prevMult);
    }
    else
        faceBounds = ofRectangle(xPos,yPos,w,h);
    

    // Use face bounds to chop out contour image
    //printf("roi: %f %f %f %f\n",faceBoundingBox.x,faceBoundingBox.y,faceBoundingBox.width,faceBoundingBox.height);
    if(faceBoundingBox.width > 0 && faceBoundingBox.height > 0)
    {
        ofxCvGrayscaleImage grayConvert;
        grayConvert = webCamImg;
        
        
        ofImage img;
        img.setFromPixels(grayConvert.getPixels(), grayConvert.width, grayConvert.height, OF_IMAGE_GRAYSCALE);
        img.resize(1280,720);
        img.crop(faceBoundingBox.x,faceBoundingBox.y,faceBoundingBox.width,faceBoundingBox.height);
   
        
        //grayConvert.resize(640,480);
        ofPushMatrix();
        ofTranslate(15,0);
        ofSetColor(200,0,75);
        drawContours(0.4 + abs(sin(_contrast))*0.6f,img,sizeMultX,sizeMultY,faceBoundingBox);
        
        ofTranslate(5,7);
        drawContours(1.0f-0.4 + abs(sin(_contrast))*0.6f,img,sizeMultX,sizeMultY,faceBoundingBox);
        ofPopMatrix();
        
    }
    _contrast += 0.025f;
    
    ofFill();
    ofSetColor(255,255,255);
    //glBlendFunc(GL_SRC_ALPHA, GL_ONE);
    _laserVideo.draw(BOOTH_SCREEN_POS_X,0,BOOTH_SCREEN_SIZE_X,BOOTH_SCREEN_SIZE_Y);
    //glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
    
    fboBuffer.end();
    fboBuffer.draw(BOOTH_SCREEN_POS_X,0,BOOTH_SCREEN_SIZE_X,BOOTH_SCREEN_SIZE_Y);
    
    /*
    if(isShowBoothOnProjector())
        fboBuffer.draw(BOOTH_SCREEN_SIZE_X - BOOTH_SCREEN_SIZE_X/4,BOOTH_SCREEN_SIZE_Y   - BOOTH_SCREEN_SIZE_Y/4,BOOTH_SCREEN_SIZE_X/4,BOOTH_SCREEN_SIZE_Y/4);
    else
        _mainVideo.draw(BOOTH_SCREEN_SIZE_X - BOOTH_SCREEN_SIZE_X/4,BOOTH_SCREEN_SIZE_Y   - BOOTH_SCREEN_SIZE_Y/4,BOOTH_SCREEN_SIZE_X/4,BOOTH_SCREEN_SIZE_Y/4);
    */
    
    
    _font.drawString("Outside Feed:", OUTSIDE_FEED_TEXT_POS_X, OUTSIDE_FEED_TEXT_POS_Y);
    
    _font2.drawString("LIVE",LIVE_TEXT_POS_X,LIVE_TEXT_POS_Y);
    
    printf("show booth on projector: %i\n",isShowBoothOnProjector());
    if(isShowBoothOnProjector())
    {
        ofSetColor(0,0,0);
        ofRect(LIVE_ICON_POS_X,LIVE_ICON_POS_Y,_liveIcon.getWidth()/4,_liveIcon.getHeight()/4);
        ofSetColor(255,255,255);
        _liveIcon.draw(LIVE_ICON_POS_X,LIVE_ICON_POS_Y,_liveIcon.getWidth(),_liveIcon.getHeight());
        fboBuffer.draw(BOOTH_PREVIEW_WINDOW_POS_X,BOOTH_PREVIEW_WINDOW_POS_Y,BOOTH_PREVIEW_WINDOW_SIZE_X,BOOTH_PREVIEW_WINDOW_SIZE_Y);
    }else
    {
        _notLiveIcon.draw(LIVE_ICON_POS_X,LIVE_ICON_POS_Y);
        _mainVideo.draw(BOOTH_PREVIEW_WINDOW_POS_X,BOOTH_PREVIEW_WINDOW_POS_Y,BOOTH_PREVIEW_WINDOW_SIZE_X,BOOTH_PREVIEW_WINDOW_SIZE_Y);
    }
    drawEmotionGuideLineText();
}
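
The 25%/75% blend above is plain exponential smoothing of the face rectangle. A minimal standalone sketch, with an illustrative Rect2f struct standing in for ofRectangle:

// Sketch: damp frame-to-frame jitter by blending new bounds with the previous ones.
struct Rect2f { float x, y, w, h; };

Rect2f smoothBounds(const Rect2f& current, const Rect2f& previous,
                    float currentMult = 0.25f, float prevMult = 0.75f)
{
    return { current.x * currentMult + previous.x * prevMult,
             current.y * currentMult + previous.y * prevMult,
             current.w * currentMult + previous.w * prevMult,
             current.h * currentMult + previous.h * prevMult };
}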
コード例 #11
0
Result CalSpermCount::calNum(Mat img)
{
	Mat tmp, tmp_back;
//	int col = 800, row = 600;
	Size dsize = Size(nshowcol, nshowrow);
	Mat image2show = Mat(dsize,CV_8U);

	GaussianBlur(img,img,cv::Size(5,5), 1.5);

	if(nShowMidRst)
	    ShowImage("after filtering", img);
	//cvSmooth(img, img, CV_MEDIAN, 3, 0, 0, 0); // median filter to suppress small noise

	//Mat element(9,9,CV_8U,Scalar(1));
	Mat element = getStructuringElement(MORPH_RECT,Size(10,10));
	erode(img, tmp,element);
    //ShowImage("erode",tmp);

	dilate(tmp,tmp_back,element);
	::resize(tmp_back, image2show,dsize);
	//cvNamedWindow("dilate");
	//imshow("dilate", image2show);
	//ShowImage("dilate",tmp_back);

	morphologyEx(img,dst_gray,MORPH_TOPHAT,element);
	//morphologyEx(img,dst_gray,MORPH_BLACKHAT,cv::Mat());
	//dst_gray = img;

	// draw the histogram
	Histgram1D hist;
	hist.stretch(dst_gray,0.01f);
	if(nShowMidRst)
	    ShowImage("Histogram", hist.getHistogramImage(dst_gray, 1));
	Mat tmpImage;

	//::equalizeHist(dst_gray,tmpImage);
	//ShowImage("拉伸后", tmpImage);
	//ShowImage("拉伸后Histogram",  hist.getHistogramImage(tmpImage, 1));

	if(nShowMidRst)
	{
		ShowImage("Picture EQ", hist.stretch(dst_gray, 50));
		ShowImage("after draw, Histogram",  hist.getHistogramImage(hist.stretch(dst_gray,50), 1));
	}

	dst_gray =  hist.stretch(dst_gray, 50);
	//dst_gray =  ::equalizeHist(dst_gray,dst_gray);


	//dst_gray = tmp_back  - img  ;
	//dst_gray = img  ;
	::resize(dst_gray, image2show,dsize);
#if 0 // No GUI
	if(nShowMidRst)
	    cvNamedWindow(WINDOWNAME);

	::createTrackbar("filter pattern",WINDOWNAME,&g_nThresholdType,4,on_Theshold);
	createTrackbar("theshold value",WINDOWNAME,&g_nThresholdValue,255,on_Theshold);
#endif
	// initialize the custom threshold callback
	on_Theshold(0,0);

	//adaptiveThreshold(dst_gray,dstimgbw,255,adaptive_method,CV_THRESH_BINARY,blocksize,0);
    //cvAdaptiveThreshold(img, dst_bw,255,adaptive_method, // adaptive threshold; blocksize must be odd
    // CV_THRESH_BINARY,blocksize,offset);


//	::resize(dstimgbw, image2show,dsize);
//	cvNamedWindow("src2");
//	imshow("src2", image2show);
//		waitKey(0);

	vector<vector<Point> > contours;

	findContours(dstimgbw,contours,CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);

	Mat rst(img.size(),CV_8U,Scalar(0));
	drawContours(rst,contours,-1,255,1);

	int cmin = minArea;
	int cmax = maxArea;
	vector<vector<Point> >::iterator itc = contours.begin();
	int i = 0;
	result.LittleNum = 0;
	result.AimNum = 0;
	result.LargeNum = 0;
	fallcount = contours.size();
	while(itc!=contours.end())
	{

		if(itc->size() < cmin )  // note: filters on point count, not contourArea
		{
			result.LittleNum ++;
			itc = contours.erase(itc);
			//i++;
		}
		else if(itc->size() > cmax)
		{
			itc = contours.erase(itc);
			result.LargeNum ++;
		}
		else
		{
			result.AimNum ++;
		    cout <<i<<" = "<< itc->size()<<endl;
			i++;
			++itc;
		}
	}

	Mat rst2(img.size(),CV_8UC3,Scalar(0,0,0));
	drawContours(rst2,contours,-1,cv::Scalar(255,255,255),1);

	char sz1[MAX_PATH],sz2[MAX_PATH],sz3[MAX_PATH];
	char sz4[MAX_PATH];
	char szError[MAX_PATH] = " ";

	fcount = fratio * result.AimNum;


	if(result.LittleNum/fallcount > fminRatio  || result.LargeNum/fallcount > fmaxRatio)
			sprintf(szError,"Sample is dirty");

	sprintf(sz1,"Aim Num = %d",   result.AimNum);
	sprintf(sz2,"Little Num=%d",  result.LittleNum);
	sprintf(sz3,"Large Num=%d",   result.LargeNum);
	sprintf(sz4,"Count =%3.3f",   fcount);

	putText(rst2,sz1,cv::Point(10,10),cv::FONT_HERSHEY_PLAIN, 1.0,   cv::Scalar(255,0,0),     2);
	putText(rst2,sz2,cv::Point(10,30),cv::FONT_HERSHEY_PLAIN, 1.0,   cv::Scalar(255,0,0),     2);
	putText(rst2,sz3,cv::Point(10,50),cv::FONT_HERSHEY_PLAIN, 1.0,   cv::Scalar(255,0,0),     2);
	putText(rst2,sz4,cv::Point(10,70),cv::FONT_HERSHEY_PLAIN, 1.0,   cv::Scalar(255,0,255),     2);
	putText(rst2,szError,cv::Point(10,90),cv::FONT_HERSHEY_PLAIN, 1.0,   cv::Scalar(0,0,255),     2);

	cout << "Aim Num = "<<result.AimNum<<endl;
	cout << "Little Num = "<<result.LittleNum<<endl;
	cout << "Large Num = "<<result.LargeNum<<endl;
	cout << "Count = "<<fcount<<endl;
	cout << "all = "<<contours.size()<<endl;

	ShowImage("result",rst2);

	//waitKey(0);
	return result;
}
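
The size filter above compares the number of contour points against cmin/cmax. A minimal sketch of the same erase-while-iterating pattern keyed on contourArea() instead; assumes OpenCV 3+, and the function name and counters are illustrative:

#include <opencv2/opencv.hpp>
#include <vector>

void filterByArea(std::vector<std::vector<cv::Point>>& contours,
                  double minArea, double maxArea,
                  int& tooSmall, int& tooLarge, int& kept)
{
    tooSmall = tooLarge = kept = 0;
    for (auto it = contours.begin(); it != contours.end(); )
    {
        double a = cv::contourArea(*it);
        if (a < minArea)      { ++tooSmall; it = contours.erase(it); } // erase returns the next iterator
        else if (a > maxArea) { ++tooLarge; it = contours.erase(it); }
        else                  { ++kept; ++it; }
    }
}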
コード例 #12
0
ファイル: shape_finder.cpp プロジェクト: jacol12345/PIRO
void camera_contours_display(int num, Straightener & straight) {
	int c;
		IplImage* color_img;
		CvCapture* cv_cap = cvCaptureFromCAM(num);
		cvNamedWindow("Video", 0); // create window
		resizeWindow("Video", 700,700);
		for(;;) {
			color_img = cvQueryFrame(cv_cap); // get frame
			if(color_img != 0) {
				Mat cam_mat(color_img);
				Mat result;
				cam_mat.copyTo(result);

				if(straight.doAll(cam_mat, result)) {
					///Apply blur
					blur(result, result, Size(3,3));
					///Apply Canny to destination Matrix
					Canny(result, result, 50, 50, 3);
					/// Vectors for storing contours
					vector<vector<Point> > contours; //contours of the paper sheet
					vector<vector<Point> > approx_contours; //approx contours of the paper sheet
					vector<Vec4i> hierarchy;
					int erosion_type = 2;
					int erosion_size = 3;
					Mat element = getStructuringElement(erosion_type,
														Size( 2*erosion_size + 1, 2*erosion_size+1),
														Point( erosion_size, erosion_size));
					dilate(result, result, element); // dilation, despite the 'erosion_*' names above
					/// Trim 10 px from each side to avoid detecting the paper borders
					result = result(Rect(10, 10, result.cols-20, result.rows-20));
					findContours(result, contours, hierarchy, CV_RETR_CCOMP, CV_CHAIN_APPROX_NONE, Point(0, 0));
					/// Draw contours
					Mat drawing = Mat::zeros( result.size(), CV_8UC3 );
					/// https://github.com/Itseez/opencv/blob/master/samples/cpp/contours2.cpp
//					approx_contours.resize(contours.size());
					for(unsigned int i = 0; i < contours.size(); i++) {
						/// Area of more than 20 and no parent
						if(contourArea(contours[i]) > 20 && hierarchy[i][3] == -1) {
							vector<Point> tmp_contour;
							approxPolyDP(Mat(contours[i]), tmp_contour, 3, true);
							approx_contours.push_back(tmp_contour);
						}
					}
					for(unsigned int i=0; i < approx_contours.size(); i++) {
						Scalar color;
						if(approx_contours[i].size() == 4) {
							color = Scalar( 255, 255, 255);
							drawContours( drawing, approx_contours, i, color, 1, 8, noArray(), 0, Point() );
						}
						else {
							color = Scalar( 0, 255, 0);
							drawContours( drawing, approx_contours, i, color, 1, 8, noArray(), 0, Point() );
						}
					}
					imshow("Video", drawing);
				}
			}
			c = cvWaitKey(10); // wait 10 ms or for key stroke
			if(c == 27)
				break; // if ESC, break and quit
		}
		/* clean up */
		cvReleaseCapture( &cv_cap );
		cvDestroyWindow("Video");
}
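
A minimal sketch of the quadrilateral test above, assuming OpenCV 3+: approximate each contour and keep 4-vertex convex polygons. The 3 px epsilon matches the example; the convexity check is an added assumption:

#include <opencv2/opencv.hpp>
#include <vector>

std::vector<std::vector<cv::Point>> findQuads(
    const std::vector<std::vector<cv::Point>>& contours)
{
    std::vector<std::vector<cv::Point>> quads;
    for (const auto& c : contours)
    {
        std::vector<cv::Point> approx;
        cv::approxPolyDP(c, approx, 3, true); // 3 px tolerance, closed curve
        if (approx.size() == 4 && cv::isContourConvex(approx))
            quads.push_back(approx);
    }
    return quads;
}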
コード例 #13
0
ファイル: Detect.cpp プロジェクト: pybae/HandMade
std::pair<vector<Point>, std::pair<Point,double>> Detect::operator() (Mat& frame, Mat& raw, int count)
{
    vector<Point> tips;

    // first find the curves in the image
    vector<vector<Point>> polyCurves = getPolyCurves(frame);

    //std::cout << polyCurves.size() << std::endl;

    // Find max inscribed circle for the 0th polycurve and draw it.
    std::pair<Point, double> maxCircle = findMaxInscribedCircle(polyCurves, raw);

    circle(raw, maxCircle.first, maxCircle.second, cv::Scalar(220,75,20), 1, CV_AA);

    // Good PolyCurve is with 3.5 * max inscribed radius
    vector<vector<Point>> goodPolyCurves;
    getRegionOfInterest(goodPolyCurves, polyCurves, maxCircle);

    // draw good poly curve
    drawContours(raw,goodPolyCurves, -1 , cv::Scalar(0,255,0), 2);

    // Find min enclosing circle on goodPolyCurve and draw it
    std::pair<Point2f, double> minCircle = findMinEnclosingCircle(goodPolyCurves);
    circle(raw, minCircle.first, minCircle.second, cv::Scalar(220,75,20), 1, CV_AA);

    // now find the convex hull for each polyCurve
    if (goodPolyCurves.size() < 1) {
        return std::pair<vector<Point>, std::pair<Point, double>>(tips, maxCircle);
    }

    // Get convex hulls
    vector<vector<int>> hullIndices = getConvexHulls(goodPolyCurves);

    vector<vector<Point>> hullPoints;
    for(int i = 0; i < goodPolyCurves.size(); i++) {
        if (goodPolyCurves[i].size() > 0) {
            vector<Point> hullPoint;
            convexHull(goodPolyCurves[i], hullPoint);
            hullPoints.push_back(hullPoint);
        }
    }

    // Draw the convex hulls
    drawContours(raw, hullPoints, -1, cv::Scalar(255, 0, 0), 2);

    
    for (int i = 0; i < 1; ++i)  // only the first good poly curve is processed
    {
        vector<Vec4i> defects;
        
        // find convexity defects for each poly curve and draw them
        convexityDefects(goodPolyCurves[i], hullIndices[i], defects);

        defects = filterDefects(defects);
        vector<Point> defectEnds;

        for (int j = 0; j < defects.size(); ++j) {
            Vec4i& defect = defects[j];

            int startIdx = defect[0];
            int endIdx = defect[1];
            int farIdx = defect[2];

            Point start = goodPolyCurves[i][startIdx];
            Point end = goodPolyCurves[i][endIdx];
            Point far = goodPolyCurves[i][farIdx];
            if(euclideanDist(far, start) > maxCircle.second) {
                defectEnds.push_back(start);
                defectEnds.push_back(end);
            }
        }

        tips = findFingerTips(defectEnds, maxCircle, raw);
    }

    flip(raw, raw, 1);
    imshow("hand", raw);

    // movie code (we should return the frame to HandMade and put movie code there with the others.)
    if(makeMoviesD) {
	    char buffer[30];
	    sprintf(buffer, "detected/detected_%03d.jpg", count++);
	    imwrite(buffer, raw);
	}

    return std::pair<vector<Point>, std::pair<Point, double>>(tips, maxCircle);
}
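
A minimal sketch of the defect extraction used above, assuming OpenCV 3+: hull indices feed convexityDefects, and each defect encodes start/end/far indices plus a fixed-point depth. defectFarPoints is an illustrative name:

#include <opencv2/opencv.hpp>
#include <vector>

std::vector<cv::Point> defectFarPoints(const std::vector<cv::Point>& curve)
{
    std::vector<cv::Point> far;
    if (curve.size() < 4) return far; // defects need at least 4 points

    std::vector<int> hullIdx;
    cv::convexHull(curve, hullIdx, false, false); // indices into 'curve', not points

    std::vector<cv::Vec4i> defects;
    if (hullIdx.size() > 3)
        cv::convexityDefects(curve, hullIdx, defects);

    for (const cv::Vec4i& d : defects)
        far.push_back(curve[d[2]]); // d = {startIdx, endIdx, farIdx, depth*256}
    return far;
}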
コード例 #14
0
ファイル: main.cpp プロジェクト: VitaliiKinakh/OpenCV
void testMatch(cv::Mat original, cv::Mat templ)
{
	cv::Mat src, dst;

	src = original.clone();

	cv::Mat binI(src.size(), CV_8U);
	cv::Mat binT(templ.size(), CV_8U);

	// create colorful images
	cv::Mat rgb(original.size(), CV_8UC3);
	cv::cvtColor(src, rgb, CV_GRAY2BGR);
	cv::Mat rgbT(templ.size(), CV_8UC3);
	cv::cvtColor(templ, rgbT, CV_GRAY2BGR);

	// get edges
	Canny(src, binI, 50, 200);
	Canny(templ, binT, 50, 200);

	// show edges
	cv::namedWindow("cannyI", CV_WINDOW_FREERATIO);
	imshow("cannyI", binI);

	cv::namedWindow("cannyT", 2);
	imshow("cannyT", binT);

	// save contours on image
	std::vector<std::vector<cv::Point>> storage;

	// find contours on main image
	findContours(binI, storage, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE);

	int counter;
	// draw contours
	if (!storage.empty()) {
		for (counter = 0; counter < storage.size(); ++counter) {
			drawContours(rgb, storage, counter, cv::Scalar(0, 255, 0) , 1, 8, std::vector<cv::Vec4i>(), 0, cv::Point());

		}
	}
	// show them
	cv::namedWindow("cont", CV_WINDOW_FREERATIO);
	imshow("cont", rgb);

	// save contours on template
	std::vector<std::vector<cv::Point>> storageT;

	// find contours on template
	findContours(binT, storageT, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE);

	std::vector<std::vector<cv::Point>> seqT;
	double perimT = 0;

	if (!storageT.empty()) {
		// keep only the longest contour (the template outline)
		for (auto c : storageT) {
			double perim = cv::arcLength(c, true);
			if (perim > perimT) {
				perimT = perim;
				seqT.assign(1, c);
			}
		}
	}
	// draw the found contour
	drawContours(rgbT, seqT, 0, cv::Scalar(36, 201, 197), 1, 8, std::vector<cv::Vec4i>(), 0, cv::Point());
	//cv::namedWindow("contT", CV_WINDOW_FREERATIO);
	//imshow("contT", rgbT);


	std::vector<std::vector<cv::Point>> seqM;
	double matchM = 1000;
	// iterate through storage to find the best fit
	counter = 0;
	if (!storage.empty()) {
		cv::Moments m;
		int cX;
		int cY;
		double match0;
		// looking for best fit using moments
		// temp variable to detect best fit
		for (auto c : storage) {
			m = moments(c);
			if (m.m00 == 0) continue; // skip degenerate contours to avoid division by zero
			cX = int(m.m10 / m.m00);
			cY = int(m.m01 / m.m00);
			match0 = matchShapes(c, seqT[0], CV_CONTOURS_MATCH_I3, 0);
			if (match0 < 0.1) {
				seqM.push_back(c);
			}
			printf("[i] %d match: %.2f\n", ++counter, match0);
			putText(rgb, std::to_string(counter), cv::Point(cX, cY), cv::FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(0, 0, 255), 2);
		}
	}
	// draw the matches that were found
	for (int i = 0; i < seqM.size(); ++i) {
		drawContours(rgb, seqM, i, cv::Scalar(0, 0, 255), 5, 8, std::vector<cv::Vec4i>(), 0, cv::Point());
	}
	cv::namedWindow("find", CV_WINDOW_FREERATIO);
	imshow("find", rgb);

	// wait for any key pressed
	cv::waitKey(0);

	cv::destroyAllWindows();
}
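
A minimal sketch of the moment-based matching step above, assuming OpenCV 3+: lower matchShapes scores mean more similar outlines, and the 0.1 cut-off mirrors the example:

#include <opencv2/opencv.hpp>
#include <vector>

std::vector<int> matchAgainstTemplate(
    const std::vector<std::vector<cv::Point>>& candidates,
    const std::vector<cv::Point>& templContour)
{
    std::vector<int> hits;
    for (size_t i = 0; i < candidates.size(); i++)
    {
        double score = cv::matchShapes(candidates[i], templContour,
                                       cv::CONTOURS_MATCH_I3, 0);
        if (score < 0.1) // smaller score = better shape match
            hits.push_back((int)i);
    }
    return hits;
}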
コード例 #15
0
std::vector<int> LightplaneCalibrator::AddLightImage(
    std::vector<cv::Mat>& srcs_light) {

  std::vector<int> return_value;
  std::vector<cv::Point>::iterator begin_100, end_900, itr;
  ims_points_.clear();
  for (int i = 0; i < srcs_light.size(); i++) {
    std::vector<cv::Point> pts; std::vector<cv::Point2f> ptfs;
    cv::Mat im = srcs_light[i], gray;
    if (im.channels() == 3) {
      cvtColor(im, gray, CV_BGR2GRAY);
    }
    else {
      gray = im;
    }
    cam_->UndistorImage(gray, gray);
    medianBlur(gray, gray, 11);
    cv::Mat threshold_out;
    cv::threshold(gray, threshold_out, 25, 255, CV_THRESH_BINARY);
    cv::Mat dilateStructure = cv::getStructuringElement(
      cv::MorphShapes::MORPH_RECT, cv::Size(15, 15));

    dilate(threshold_out, threshold_out, dilateStructure, cv::Point(-1, -1));
#ifdef _DEBUG_JIANG_  // for debug
    cv::namedWindow("threshold_medianBlur_out");
    cv::imshow("threshold_medianBlur_out", threshold_out);
    /*imwrite("threshold_medianBlur_out.bmp",threshold_out);*/
    cv::waitKey(200);
#endif
    std::vector<std::vector<cv::Point> > contours;
    std::vector<cv::Vec4i> hierarchy;
    findContours(threshold_out, contours, hierarchy, CV_RETR_EXTERNAL,
      CV_CHAIN_APPROX_NONE, cv::Point(0, 0));

    /// Draw contours
    cv::Mat drawing = cv::Mat::zeros(threshold_out.size(), CV_8U);
    for (size_t i = 0; i < contours.size(); i++) {
      if (contours[i].size() > 200) {
        //Scalar color = Scalar( rng.uniform(0, 255), rng.uniform(0,255), rng.uniform(0,255) );
        drawContours(drawing, contours, (int)i, cv::Scalar(255),
          CV_FILLED, 8, hierarchy, 0, cv::Point());

      }
    }
    cv::Mat  thining_output;
    Thinning(drawing, thining_output);
#ifdef _DEBUG_JIANG_
    cv::imshow("thining_output", thining_output);
    cv::waitKey(200);
#endif
    findNonZero(thining_output, pts);
    // findNonZero returns points in row-major order, so y is non-decreasing;
    // initialize both iterators so they are valid even if no point qualifies
    begin_100 = pts.begin();
    end_900 = pts.begin();
    for (itr = pts.begin(); itr != pts.end(); ++itr) {
      if ((*itr).y <= 100) {
        begin_100 = itr;
      }
      if ((*itr).y <= 900) {
        end_900 = itr;
      }
    }
    ptfs.assign(begin_100, end_900);
    ims_points_.push_back(ptfs);
  }
  //std::ofstream log;
  //log.open("ims_points.txt");
  //for (int i = 0; i < ims_points_.size(); i++) {
  //  for (int j = 0; j < ims_points_[i].size(); j++) {
  //    log << ims_points_[i][j].x << " " << ims_points_[i][j].y << " " << i + 1 
  //        << std::endl;

  //  }
  //  log << std::endl;
  //}
  return return_value;
}
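
Thinning() above is a custom routine that is not shown here. A common lightweight alternative, sketched under that assumption, is to take the intensity centroid of each row of the binary laser mask:

#include <opencv2/opencv.hpp>
#include <vector>

std::vector<cv::Point2f> rowCentroids(const cv::Mat& binary /* CV_8U */)
{
    std::vector<cv::Point2f> line;
    for (int y = 0; y < binary.rows; y++)
    {
        const uchar* row = binary.ptr<uchar>(y);
        double sumX = 0; int n = 0;
        for (int x = 0; x < binary.cols; x++)
            if (row[x]) { sumX += x; ++n; }
        if (n > 0) // one sub-pixel centre per row that contains laser pixels
            line.push_back(cv::Point2f((float)(sumX / n), (float)y));
    }
    return line;
}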
コード例 #16
0
  void CharacterAnalysis::analyze()
  {
    timespec startTime;
    getTimeMonotonic(&startTime);

    if (config->always_invert)
      bitwise_not(pipeline_data->crop_gray, pipeline_data->crop_gray);

    pipeline_data->clearThresholds();
    pipeline_data->thresholds = produceThresholds(pipeline_data->crop_gray, config);

    timespec contoursStartTime;
    getTimeMonotonic(&contoursStartTime);

    pipeline_data->textLines.clear();

    for (unsigned int i = 0; i < pipeline_data->thresholds.size(); i++)
    {
      TextContours tc(pipeline_data->thresholds[i]);

      allTextContours.push_back(tc);
    }

    if (config->debugTiming)
    {
      timespec contoursEndTime;
      getTimeMonotonic(&contoursEndTime);
      cout << "  -- Character Analysis Find Contours Time: " << diffclock(contoursStartTime, contoursEndTime) << "ms." << endl;
    }
    //Mat img_equalized = equalizeBrightness(img_gray);

    timespec filterStartTime;
    getTimeMonotonic(&filterStartTime);

    for (unsigned int i = 0; i < pipeline_data->thresholds.size(); i++)
    {
      this->filter(pipeline_data->thresholds[i], allTextContours[i]);

      if (config->debugCharAnalysis)
        cout << "Threshold " << i << " had " << allTextContours[i].getGoodIndicesCount() << " good indices." << endl;
    }

    if (config->debugTiming)
    {
      timespec filterEndTime;
      getTimeMonotonic(&filterEndTime);
      cout << "  -- Character Analysis Filter Time: " << diffclock(filterStartTime, filterEndTime) << "ms." << endl;
    }

    PlateMask plateMask(pipeline_data);
    plateMask.findOuterBoxMask(allTextContours);

    pipeline_data->hasPlateBorder = plateMask.hasPlateMask;
    pipeline_data->plateBorderMask = plateMask.getMask();

    if (plateMask.hasPlateMask)
    {
      // Filter out bad contours now that we have an outer box mask...
      for (unsigned int i = 0; i < pipeline_data->thresholds.size(); i++)
      {
        filterByOuterMask(allTextContours[i]);
      }
    }

    int bestFitScore = -1;
    int bestFitIndex = -1;
    for (unsigned int i = 0; i < pipeline_data->thresholds.size(); i++)
    {

      int segmentCount = allTextContours[i].getGoodIndicesCount();

      if (segmentCount > bestFitScore)
      {
        bestFitScore = segmentCount;
        bestFitIndex = i;
        bestThreshold = pipeline_data->thresholds[i];
        bestContours = allTextContours[i];
      }
    }

    if (this->config->debugCharAnalysis)
      cout << "Best fit score: " << bestFitScore << " Index: " << bestFitIndex << endl;

    if (bestFitScore <= 1)
    {
      pipeline_data->disqualified = true;
      pipeline_data->disqualify_reason = "Low best fit score in characteranalysis";
      return;
    }

    //getColorMask(img, allContours, allHierarchy, charSegments);

    if (this->config->debugCharAnalysis)
    {
      Mat img_contours = bestContours.drawDebugImage(bestThreshold);

      displayImage(config, "Matching Contours", img_contours);
    }

    LineFinder lf(pipeline_data);
    vector<vector<Point> > linePolygons = lf.findLines(pipeline_data->crop_gray, bestContours);

    vector<TextLine> tempTextLines;
    for (unsigned int i = 0; i < linePolygons.size(); i++)
    {
      vector<Point> linePolygon = linePolygons[i];

      LineSegment topLine = LineSegment(linePolygon[0].x, linePolygon[0].y, linePolygon[1].x, linePolygon[1].y);
      LineSegment bottomLine = LineSegment(linePolygon[3].x, linePolygon[3].y, linePolygon[2].x, linePolygon[2].y);

      vector<Point> textArea = getCharArea(topLine, bottomLine);

      TextLine textLine(textArea, linePolygon, pipeline_data->crop_gray.size());

      tempTextLines.push_back(textLine);
    }

    filterBetweenLines(bestThreshold, bestContours, tempTextLines);

    // Sort the lines from top to bottom.
    std::sort(tempTextLines.begin(), tempTextLines.end(), sort_text_line);

    // Now that we've filtered a few more contours, re-do the text area.
    for (unsigned int i = 0; i < tempTextLines.size(); i++)
    {
      vector<Point> updatedTextArea = getCharArea(tempTextLines[i].topLine, tempTextLines[i].bottomLine);
      vector<Point> linePolygon = tempTextLines[i].linePolygon;
      if (updatedTextArea.size() > 0 && linePolygon.size() > 0)
      {
        pipeline_data->textLines.push_back(TextLine(updatedTextArea, linePolygon, pipeline_data->crop_gray.size()));
      }

    }

    if (config->auto_invert)
      pipeline_data->plate_inverted = isPlateInverted();
    else
      pipeline_data->plate_inverted = config->always_invert;

    if (config->debugGeneral)
      cout << "Plate inverted: " << pipeline_data->plate_inverted << endl;


    if (pipeline_data->textLines.size() > 0)
    {
      int confidenceDrainers = 0;
      int charSegmentCount = this->bestContours.getGoodIndicesCount();
      if (charSegmentCount == 1)
        confidenceDrainers += 91;
      else if (charSegmentCount < 5)
        confidenceDrainers += (5 - charSegmentCount) * 10;

      // Use the angle for the first line -- assume they'll always be parallel for multi-line plates
      int absangle = abs(pipeline_data->textLines[0].topLine.angle);
      if (absangle > config->maxPlateAngleDegrees)
        confidenceDrainers += 91;
      else if (absangle > 1)
        confidenceDrainers += (config->maxPlateAngleDegrees - absangle) ;

      // If a multiline plate has only one line, disqualify
      if (pipeline_data->isMultiline && pipeline_data->textLines.size() < 2)
      {
        if (config->debugCharAnalysis)
          std::cout << "Did not detect multiple lines on multi-line plate" << std::endl;
        confidenceDrainers += 95;
      }

      if (confidenceDrainers >= 90)
      {
        pipeline_data->disqualified = true;
        pipeline_data->disqualify_reason = "Low confidence in characteranalysis";
      }
      else
      {
        float confidence = 100 - confidenceDrainers;
        pipeline_data->confidence_weights.setScore("CHARACTER_ANALYSIS_SCORE", confidence, 1.0);
      }
    }
    else
    {
        pipeline_data->disqualified = true;
        pipeline_data->disqualify_reason = "No text lines found in characteranalysis";
    }

    if (config->debugTiming)
    {
      timespec endTime;
      getTimeMonotonic(&endTime);
      cout << "Character Analysis Time: " << diffclock(startTime, endTime) << "ms." << endl;
    }

    // Draw debug dashboard
    if (this->pipeline_data->config->debugCharAnalysis && pipeline_data->textLines.size() > 0)
    {
      vector<Mat> tempDash;
      for (unsigned int z = 0; z < pipeline_data->thresholds.size(); z++)
      {
        Mat tmp(pipeline_data->thresholds[z].size(), pipeline_data->thresholds[z].type());
        pipeline_data->thresholds[z].copyTo(tmp);
        cvtColor(tmp, tmp, CV_GRAY2BGR);

        tempDash.push_back(tmp);
      }

      Mat bestVal(this->bestThreshold.size(), this->bestThreshold.type());
      this->bestThreshold.copyTo(bestVal);
      cvtColor(bestVal, bestVal, CV_GRAY2BGR);

      for (unsigned int z = 0; z < this->bestContours.size(); z++)
      {
        Scalar dcolor(255,0,0);
        if (this->bestContours.goodIndices[z])
          dcolor = Scalar(0,255,0);
        drawContours(bestVal, this->bestContours.contours, z, dcolor, 1);
      }
      tempDash.push_back(bestVal);
      displayImage(config, "Character Region Step 1 Thresholds", drawImageDashboard(tempDash, bestVal.type(), 3));
    }
  }
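
A minimal sketch of the "best threshold" vote above, assuming OpenCV 3+: score each binarization by how many plausibly character-sized contours it yields. The area bounds are illustrative assumptions, not openalpr's values:

#include <opencv2/opencv.hpp>
#include <vector>

int bestThresholdIndex(const std::vector<cv::Mat>& thresholds,
                       double minArea = 40, double maxArea = 5000)
{
    int bestIdx = -1, bestCount = -1;
    for (size_t i = 0; i < thresholds.size(); i++)
    {
        cv::Mat work = thresholds[i].clone(); // findContours may modify its input
        std::vector<std::vector<cv::Point>> contours;
        cv::findContours(work, contours, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE);

        int count = 0;
        for (const auto& c : contours)
        {
            double a = cv::contourArea(c);
            if (a >= minArea && a <= maxArea) ++count;
        }
        if (count > bestCount) { bestCount = count; bestIdx = (int)i; }
    }
    return bestIdx; // -1 if the input list was empty
}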
コード例 #17
0
	std::vector<std::vector<std::vector<cv::Point>>> MultiContourObjectDetector::processContours(
		std::vector<std::vector<std::vector<cv::Point>>> approxContours,
		double hammingThreshold,
		double correlationThreshold,
		int* numberOfObject)
	{
		vector<vector<vector<Point>>> objects;
		double attenuation = 0;

		for (int i = 0; i < approxContours.size(); i++)
		{
			if (approxContours[i].size() != _baseShape.size())
				continue;
			attenuation = 0;

#ifdef DEBUG_MODE
			Mat tempI(Size(1000, 1000), CV_8UC1);
			tempI = Scalar(0);
			drawContours(tempI, approxContours[i], -1, cv::Scalar(255), 1, CV_AA);
#endif

			double totCorrelation = 0,
				totHamming = 0;

			Moments m = moments(approxContours[i][0], true);
			int cx = int(m.m10 / m.m00);
			int cy = int(m.m01 / m.m00);

			Point c(cx, cy);

			if (!(c.x >= _attenuationRect.x &&
				c.y >= _attenuationRect.y &&
				c.x <= (_attenuationRect.x + _attenuationRect.width) &&
				c.y <= (_attenuationRect.y + _attenuationRect.height)))
				attenuation = 5;

			// C and H with external contour
			vector<Point> externalKeypoints;
			Utility::findCentroidsKeypoints(approxContours[i][0], externalKeypoints, Utility::CentroidDetectionMode::THREE_LOOP);
			totCorrelation += (Utility::correlationWithBase(externalKeypoints, _baseKeypoints[0]) - attenuation);

			totHamming += (Utility::calculateContourPercentageCompatibility(approxContours[i][0], _baseShape[0]) - attenuation);

			// looking for the contour with the best centroid and shape match

			for (int j = 1; j < approxContours[i].size(); j++)
			{
				attenuation = 0;

				Moments m = moments(approxContours[i][j], true);
				int cx = int(m.m10 / m.m00);
				int cy = int(m.m01 / m.m00);

				Point c(cx, cy);

				if (!(c.x >= _attenuationRect.x &&
					c.y >= _attenuationRect.y &&
					c.x <= (_attenuationRect.x + _attenuationRect.width) &&
					c.y <= (_attenuationRect.y + _attenuationRect.height)))
					attenuation = 5;


				double maxCorrelation = std::numeric_limits<double>::lowest(),
					maxHamming = std::numeric_limits<double>::lowest();

				for (int k = 1; k < _baseShape.size(); k++)
				{
					vector<Point> internalKeypoints;
					Utility::findCentroidsKeypoints(approxContours[i][j], internalKeypoints, Utility::CentroidDetectionMode::THREE_LOOP);
					maxCorrelation = max(maxCorrelation, Utility::correlationWithBase(internalKeypoints, _baseKeypoints[k]));

					maxHamming = max(maxHamming, Utility::calculateContourPercentageCompatibility(approxContours[i][j], _baseShape[k]));
				}

				totCorrelation += (maxCorrelation - attenuation);
				totHamming += (maxHamming - attenuation);
			}

			totCorrelation /= approxContours[i].size();
			totHamming /= approxContours[i].size();

			cout << "Middle Correlation " << to_string(i) << " with base ---> " << totCorrelation << endl;
			cout << "Middle Hamming distance" << to_string(i) << " with base ---> " << totHamming << endl;

			if (totCorrelation >= correlationThreshold && totHamming >= hammingThreshold)
				objects.push_back(approxContours[i]);
		}

		*numberOfObject = objects.size();

		return objects;
	}
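
A minimal sketch of the centroid computation both loops above rely on, with the m.m00 == 0 guard the example omits (a degenerate contour would otherwise divide by zero):

#include <opencv2/opencv.hpp>
#include <vector>

bool contourCentroid(const std::vector<cv::Point>& contour, cv::Point& out)
{
    cv::Moments m = cv::moments(contour, true);
    if (m.m00 == 0) return false; // empty or degenerate contour
    out = cv::Point((int)(m.m10 / m.m00), (int)(m.m01 / m.m00));
    return true;
}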
コード例 #18
0
ファイル: visionNode.cpp プロジェクト: Roboy/mocap
void VisionNode::CameraCallback(CCamera *cam, const void *buffer, int buffer_length) {
    cv::Mat myuv(HEIGHT + HEIGHT / 2, WIDTH, CV_8UC1, (unsigned char *) buffer);
    cv::cvtColor(myuv, img, CV_YUV2RGBA_NV21);
    cv::cvtColor(img, img_gray, CV_RGBA2GRAY);

    communication::MarkerPosition markerPosition;
    markerPosition.header.stamp = ros::Time::now();
    static uint next_id = 0;
    markerPosition.header.seq = next_id++;
    markerPosition.cameraID = ID;

    static uint counter = 0;
    t2 = std::chrono::high_resolution_clock::now();
    time_span = std::chrono::duration_cast<std::chrono::milliseconds>(t2 - t1);
    markerPosition.fps = (double)counter/time_span.count();
    counter++;

    if(time_span.count()>30){ // reset every 30 seconds
        counter = 0;
        t1 = std::chrono::high_resolution_clock::now();
        std_msgs::Int32 msg;
        msg.data = ID;
        cameraID_pub->publish(msg);
    }

    cv::Mat filtered_img;
    cv::threshold(img_gray, filtered_img, threshold_value, 255, 3); // type 3 = THRESH_TOZERO (keep bright pixels)

    // find contours in result, which hopefully correspond to a found object
    vector <vector<cv::Point>> contours;
    vector <cv::Vec4i> hierarchy;
    findContours(filtered_img, contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE,
                 cv::Point(0, 0));

    // filter out tiny useless contours
    double min_contour_area = 10;
    for (auto it = contours.begin(); it != contours.end();) {
        if (contourArea(*it) < min_contour_area) {
            it = contours.erase(it);
        }
        else {
            ++it;
        }
    }

    // publish the markerPositions
    vector<cv::Point2f> centers(contours.size());
    vector<float> radius(contours.size());
    for (int idx = 0; idx < contours.size(); idx++) {
        minEnclosingCircle(contours[idx], centers[idx], radius[idx]);
        communication::Vector2 pos;
        pos.x = WIDTH - centers[idx].x;
        pos.y = centers[idx].y;
        markerPosition.marker_position.push_back(pos);
    }
    //imshow("camera", img);
    //waitKey(1);
    markerPosition.markerVisible=contours.size();
    marker_position_pub->publish(markerPosition);

    if(publish_video_flag && counter%3==0){
        // get centers and publish
        for (int idx = 0; idx < contours.size(); idx++) {
            drawContours(img_gray, contours, idx, cv::Scalar(0, 0, 0), 4, 8, hierarchy, 0,
                         cv::Point());
        }
        cv_bridge::CvImage cvImage;
        img_gray.copyTo(cvImage.image);
        sensor_msgs::Image msg;
        cvImage.toImageMsg(msg);
        msg.encoding = "mono8";
       	msg.header = markerPosition.header;
        video_pub->publish(msg);
   }
}
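
A minimal sketch of the marker localisation above, assuming OpenCV 3+: cull tiny contours, then take one minEnclosingCircle centre per survivor. markerCenters and the default area are illustrative:

#include <opencv2/opencv.hpp>
#include <vector>

std::vector<cv::Point2f> markerCenters(const cv::Mat& binary, double minArea = 10)
{
    cv::Mat work = binary.clone(); // findContours may modify its input
    std::vector<std::vector<cv::Point>> contours;
    cv::findContours(work, contours, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE);

    std::vector<cv::Point2f> centers;
    for (const auto& c : contours)
    {
        if (cv::contourArea(c) < minArea) continue; // same small-area cull as above
        cv::Point2f center; float radius;
        cv::minEnclosingCircle(c, center, radius);
        centers.push_back(center);
    }
    return centers;
}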
コード例 #19
0
ファイル: colorfilter.cpp プロジェクト: Blejzer/openalpr
// Gets the hue/sat/val for areas that we believe are license plate characters
// Then uses that to filter the whole image and provide a mask.
void ColorFilter::findCharColors()
{
  int MINIMUM_SATURATION = 45;
  
  if (this->debug)
    cout << "ColorFilter::findCharColors" << endl;
  
  //charMask.copyTo(this->colorMask);
  this->colorMask = Mat::zeros(charMask.size(), CV_8U);
  bitwise_not(this->colorMask, this->colorMask);
  
  Mat erodedCharMask(charMask.size(), CV_8U);
  Mat element = getStructuringElement( 1,
				  Size( 2 + 1, 2+1 ),
				  Point( 1, 1 ) );
  erode(charMask, erodedCharMask, element);

  vector<vector<Point> > contours;
  vector<Vec4i> hierarchy;
  findContours(erodedCharMask, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE);

    
  
  vector<float> hMeans, sMeans, vMeans;
  vector<float> hStdDevs, sStdDevs, vStdDevs;
      
  for (int i = 0; i < contours.size(); i++)
  {
    if (hierarchy[i][3] != -1)
      continue;
      
      Mat singleCharMask = Mat::zeros(hsv.size(), CV_8U);
      
      drawContours(singleCharMask, contours,
	  i, // draw this contour
	  cv::Scalar(255,255,255), // in 
	  CV_FILLED, 
	  8, 
	  hierarchy
      ); 
      
      // get rid of the outline by drawing a 1 pixel width black line
      drawContours(singleCharMask, contours,
	  i, // draw this contour
	  cv::Scalar(0,0,0), // in 
	  1, 
	  8, 
	  hierarchy
      ); 
      
      
	  
      
      //drawAndWait(&singleCharMask);

      Scalar mean;
      Scalar stddev;
      meanStdDev(hsv, mean, stddev, singleCharMask);
      
      if (this->debug)
      {
	cout << "ColorFilter " << setw(3) << i << ". Mean:  h: " << setw(7) << mean[0] << " s: " << setw(7) <<mean[1] << " v: " << setw(7) << mean[2] 
			      << " | Std: h: " << setw(7) <<stddev[0] << " s: " << setw(7) <<stddev[1] << " v: " << stddev[2] << endl;
      }

      if (mean[0] == 0 && mean[1] == 0 && mean[2] == 0)
	continue;
			    
      hMeans.push_back(mean[0]);
      sMeans.push_back(mean[1]);
      vMeans.push_back(mean[2]);
      hStdDevs.push_back(stddev[0]);
      sStdDevs.push_back(stddev[1]);
      vStdDevs.push_back(stddev[2]);
      

  }
  
  if (hMeans.size() == 0)
    return;
  
  int bestHueIndex = this->getMajorityOpinion(hMeans, .65, 30);
  int bestSatIndex = this->getMajorityOpinion(sMeans, .65, 35);
  int bestValIndex = this->getMajorityOpinion(vMeans, .65, 30);
  
  
  if (sMeans[bestSatIndex] < MINIMUM_SATURATION)
    return;
  
  
  bool doHueFilter = false, doSatFilter = false, doValFilter = false;
  float hueMin, hueMax;
  float satMin, satMax;
  float valMin, valMax;
  
  if (this->debug)
    cout << "ColorFilter Winning indices:" << endl;
  if (bestHueIndex != -1)
  {
    doHueFilter = true;
    hueMin = hMeans[bestHueIndex] - (2 * hStdDevs[bestHueIndex]);
    hueMax = hMeans[bestHueIndex] + (2 * hStdDevs[bestHueIndex]);
    
    if (abs(hueMin - hueMax) < 20)
    {
      hueMin = hMeans[bestHueIndex] - 20;
      hueMax = hMeans[bestHueIndex] + 20;
    }
    
    if (hueMin < 0)
      hueMin = 0;
    if (hueMax > 180)
      hueMax = 180;
    
    if (this->debug)
      cout << "ColorFilter Hue: " << bestHueIndex << " : " << setw(7) << hMeans[bestHueIndex] << " -- " << hueMin << "-" << hueMax << endl;
  }
  if (bestSatIndex != -1)
  {
    doSatFilter = true;
    
    satMin = sMeans[bestSatIndex] - (2 * sStdDevs[bestSatIndex]);
    satMax = sMeans[bestSatIndex] + (2 * sStdDevs[bestSatIndex]);
    
    if (abs(satMin - satMax) < 20)
    {
      satMin = sMeans[bestSatIndex] - 20;
      satMax = sMeans[bestSatIndex] + 20;
    }
    
    if (satMin < 0)
      satMin = 0;
    if (satMax > 255)
      satMax = 255;
    
    if (this->debug)
      cout << "ColorFilter Sat: " << bestSatIndex << " : " << setw(7) << sMeans[bestSatIndex] << " -- " << satMin << "-" << satMax << endl;
  }
  if (bestValIndex != -1)
  {
    doValFilter = true;
    
    valMin = vMeans[bestValIndex] - (1.5 * vStdDevs[bestValIndex]);
    valMax = vMeans[bestValIndex] + (1.5 * vStdDevs[bestValIndex]);
    
    if (abs(valMin - valMax) < 20)
    {
      valMin = vMeans[bestValIndex] - 20;
      valMax = vMeans[bestValIndex] + 20;
    }
    
    if (valMin < 0)
      valMin = 0;
    if (valMax > 255)
      valMax = 255;
    
    if (this->debug)
      cout << "ColorFilter Val: " << bestValIndex << " : " << setw(7) << vMeans[bestValIndex] << " -- " << valMin << "-" << valMax  << endl;
  }
  


  Mat imgDebugHueOnly = Mat::zeros(hsv.size(), hsv.type());
  Mat imgDebug = Mat::zeros(hsv.size(), hsv.type());
  Mat imgDistanceFromCenter = Mat::zeros(hsv.size(), CV_8U);
  Mat debugMask = Mat::zeros(hsv.size(), CV_8U);
  bitwise_not(debugMask, debugMask);
  
  for (int row = 0; row < charMask.rows; row++)
  {
    for (int col = 0; col < charMask.cols; col++)
    {
      int h = (int) hsv.at<Vec3b>(row, col)[0];
      int s = (int) hsv.at<Vec3b>(row, col)[1];
      int v = (int) hsv.at<Vec3b>(row, col)[2];
      
      bool hPasses = true;
      bool sPasses = true;
      bool vPasses = true;

      int vDistance = abs(v - vMeans[bestValIndex]);
      
      imgDebugHueOnly.at<Vec3b>(row, col)[0] = h;
      imgDebugHueOnly.at<Vec3b>(row, col)[1] = 255;
      imgDebugHueOnly.at<Vec3b>(row, col)[2] = 255;
      
      imgDebug.at<Vec3b>(row, col)[0] = 255;
      imgDebug.at<Vec3b>(row, col)[1] = 255;
      imgDebug.at<Vec3b>(row, col)[2] = 255;
      
      if (doHueFilter && (h < hueMin || h > hueMax))
      {
	hPasses = false;
	imgDebug.at<Vec3b>(row, col)[0] = 0;
	debugMask.at<uchar>(row, col) = 0;
      }
      if (doSatFilter && (s < satMin || s > satMax))
      {
	sPasses = false;
	imgDebug.at<Vec3b>(row, col)[1] = 0;
      }
      if (doValFilter && (v < valMin || v > valMax))
      {
	vPasses = false;
	imgDebug.at<Vec3b>(row, col)[2] = 0;
      }
      
      //if (pixelPasses)
      //  colorMask.at<uchar>(row, col) = 255;
      //else
      //imgDebug.at<Vec3b>(row, col)[0] = hPasses & 255;
      //imgDebug.at<Vec3b>(row, col)[1] = sPasses & 255;
      //imgDebug.at<Vec3b>(row, col)[2] = vPasses & 255;
      
      if ((hPasses) ||  (hPasses && sPasses))//(hPasses && vPasses) || (sPasses && vPasses) ||
	this->colorMask.at<uchar>(row, col) = 255;
      else
	this->colorMask.at<uchar>(row, col) = 0;
      
      
      if ((hPasses && sPasses) || (hPasses && vPasses) || (sPasses && vPasses))
      {
	vDistance = pow(vDistance, 0.9);
      }
      else
      {
	vDistance = pow(vDistance, 1.1);
      }
      if (vDistance > 255)
	vDistance = 255;
      imgDistanceFromCenter.at<uchar>(row, col) = vDistance;
    }
  }
  

  
  vector<Mat> debugImagesSet;
  
  if (this->debug)
  {
    debugImagesSet.push_back(addLabel(charMask, "Character mask"));
    //debugImagesSet1.push_back(erodedCharMask);
    Mat maskCopy(colorMask.size(), colorMask.type());
    colorMask.copyTo(maskCopy);
    debugImagesSet.push_back(addLabel(maskCopy, "color Mask Before"));
  }
  
  
  Mat bigElement = getStructuringElement( 1,
				  Size( 3 + 1, 3+1 ),
				  Point( 1, 1 ) );
  
  Mat smallElement = getStructuringElement( 1,
				Size( 1 + 1, 1+1 ),
				Point( 1, 1 ) );
  
  morphologyEx(this->colorMask, this->colorMask, MORPH_CLOSE, bigElement);
  //dilate(this->colorMask, this->colorMask, bigElement);
  
  Mat combined(charMask.size(), charMask.type());
  bitwise_and(charMask, colorMask, combined);
  
  if (this->debug)
  {
    debugImagesSet.push_back(addLabel(colorMask, "Color Mask After"));

    debugImagesSet.push_back(addLabel(combined, "Combined"));
    
    //displayImage(config, "COLOR filter Mask", colorMask);
    debugImagesSet.push_back(addLabel(imgDebug, "Color filter Debug"));
    
    cvtColor(imgDebugHueOnly, imgDebugHueOnly, CV_HSV2BGR);
    debugImagesSet.push_back(addLabel(imgDebugHueOnly, "Color Filter Hue"));
    
    equalizeHist(imgDistanceFromCenter, imgDistanceFromCenter);
    debugImagesSet.push_back(addLabel(imgDistanceFromCenter, "COLOR filter Distance"));
    
    debugImagesSet.push_back(addLabel(debugMask, "COLOR Hues off"));
  
  
    Mat dashboard = drawImageDashboard(debugImagesSet, imgDebugHueOnly.type(), 3);
    displayImage(config, "Color Filter Images", dashboard);
  }

}
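
A minimal sketch of the mean ± 2σ banding used above, assuming OpenCV 3+: measure HSV statistics under a mask with meanStdDev, then clamp to OpenCV's valid ranges before calling inRange. hsvBandFromMask is an illustrative name:

#include <opencv2/opencv.hpp>
#include <algorithm>

void hsvBandFromMask(const cv::Mat& hsv, const cv::Mat& mask,
                     cv::Scalar& lo, cv::Scalar& hi)
{
    cv::Scalar mean, stddev;
    cv::meanStdDev(hsv, mean, stddev, mask); // stats over masked pixels only
    for (int c = 0; c < 3; c++)
    {
        double maxVal = (c == 0) ? 180.0 : 255.0; // hue tops out at 180 in OpenCV
        lo[c] = std::max(0.0,    mean[c] - 2 * stddev[c]);
        hi[c] = std::min(maxVal, mean[c] + 2 * stddev[c]);
    }
}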
コード例 #20
0
std::pair<int, int> GetThresholdedImage(
	cv::Mat& img_BGR, cv::Mat& img_THR,
	int& HueLow, int& HueHigh, int& SatLow, int& SatHigh, int& ValueLow, int& ValueHigh,
	cv::Scalar Color, int *nr_pixels_ptr,
	int HueLow2, int HueHigh2, int SatLow2, int SatHigh2, int ValueLow2, int ValueHigh2)
{
	std::cout << "GetThresholdedImage starting" << std::endl;
	cv::RNG rng(12345);
	// Convert the image into an HSV image
	cv::Mat img_HSV; cv::cvtColor(img_BGR, img_HSV, CV_BGR2HSV);

	if (manual)
	{
		cv::inRange(img_HSV, cv::Scalar(lowerH, lowerS, lowerV),
		            cv::Scalar(upperH, upperS, upperV), img_THR);
	}
	else
	{
		cv::Mat img_THR1;
		cv::Mat img_THR2;
		cv::inRange(img_HSV, cv::Scalar(HueLow, SatLow, ValueLow),
		            cv::Scalar(HueHigh, SatHigh, ValueHigh), img_THR1);

		if (HueLow2 != -1   &&
		    HueHigh2 != -1  &&
		    SatLow2 != -1   &&
		    SatHigh2 != -1  &&
		    ValueLow2 != -1 &&
		    ValueHigh2 != -1)
		{
			// Optional arguments for second thresholds are set
			cv::inRange(img_HSV, cv::Scalar(HueLow2, SatLow2, ValueLow2),
			            cv::Scalar(HueHigh2, SatHigh2, ValueHigh2), img_THR2);
			cv::bitwise_or(img_THR1, img_THR2, img_THR);
		}
		else
		{
			img_THR = img_THR1;
		}
	}

	int kernel_size = 5;
	cv::Mat kernel = cv::Mat::ones( kernel_size, kernel_size, CV_32F ) / (float)(kernel_size * kernel_size);
	cv::dilate(img_THR, img_THR, kernel, cv::Point(-1,-1), 3, cv::BORDER_CONSTANT, cv::morphologyDefaultBorderValue());
	cv::erode (img_THR, img_THR, kernel, cv::Point(-1,-1), 4, cv::BORDER_CONSTANT, cv::morphologyDefaultBorderValue());
	// cv::floodFill(img_THR, cv::Point(0, 0), cv::Scalar(0), NULL, cv::Scalar(20), cv::Scalar(20), 4);

	// Detect edges using canny
	cv::Mat canny_output; int thresh = 100;
	cv::Canny(img_THR, canny_output, thresh, thresh * 2, 3);
	// Find contours
	std::vector<std::vector<cv::Point> > contours;
	std::vector<cv::Vec4i> hierarchy;
	cv::findContours(canny_output, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, cv::Point(0, 0));
	// Aproximate contours
	std::vector<std::vector<cv::Point> > approxContours;
	approxContours.resize(contours.size());
	// Draw contours
	for (unsigned int i = 0; i < contours.size(); i++)
	{
		cv::Scalar color(rand()&255, rand()&255, rand()&255);
		// cv::drawContours(img_BGR, contours, i, color, CV_FILLED, 8, hierarchy );
		cv::drawContours(img_THR, contours, i,   255, CV_FILLED, 8, hierarchy );
	}
	cv::medianBlur(img_THR, img_THR, 5);

	// Blur image
	cv::GaussianBlur(img_THR, img_THR, cv::Size(7,7), 15000, 15000, cv::BORDER_DEFAULT);

	// Detect edges using Threshold
	cv::threshold(img_THR, img_THR, 100, 250, cv::THRESH_BINARY);

	// Find contours
	findContours(img_THR, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, cv::Point(0, 0));

	// Find the convex hull object for each contour
	std::vector<std::vector<cv::Point> > hull(contours.size());
	for(unsigned int i = 0; i < contours.size(); i++)
	{
		convexHull(cv::Mat(contours[i]), hull[i], false);
	}

	// Draw contours + hull results
	for(unsigned int i = 0; i< contours.size(); i++)
	{
		cv::Scalar color = cv::Scalar(rng.uniform(0,255), rng.uniform(0,255), rng.uniform(0,255));
		drawContours(img_BGR, contours, i, color, 1, 8, std::vector<cv::Vec4i>(), 0, cv::Point());
		drawContours(img_THR,     hull, i, color, 1, 8, std::vector<cv::Vec4i>(), 0, cv::Point());
	}

	// for (unsigned int i = 0; i < contours.size(); i++)
	// {
	// 	approxPolyDP(cv::Mat(contours[i]), approxContours[i], 4, 1);
	// 	drawContours(img_BGR, contours      , i, CV_RGB(rand()&255, rand()&255, rand()&255) );
	// 	// drawContours(img_BGR, approxContours, i, CV_RGB(rand()&255, rand()&255, rand()&255) );
	// }

	// cv::Mat draw_contour = cv::Mat::zeros(canny_output.size(), CV_8UC3);
	// for (unsigned int i = 0; i < contours.size(); i++)
	// {
	// 	cv::Scalar color = cv::Scalar(rng.uniform(0, 255), rng.uniform(0,255), rng.uniform(0,255));
	// 	cv::drawContours(img_BGR, contours, i, color, 2, 8, hierarchy, 0, cv::Point());
	// }

	// Detect blobs
	std::vector<cv::KeyPoint> keyPoints;
	blobDetector->detect(img_THR, keyPoints);
	// Draw keypoints
	cv::drawKeypoints(img_BGR, keyPoints, img_BGR,
	                  CV_RGB(rand()&255, rand()&255, rand()&255),
	                  cv::DrawMatchesFlags::DEFAULT);

	float X_obj = 0; float Y_obj = 0;

	std::cout << "*Keypoints " << keyPoints.size() << "  *Contours  " << contours.size()  << std::endl;
	for (unsigned int i = 0; i < keyPoints.size(); i++) // check the logic of this for loop
	{
		float X = keyPoints[i].pt.x;
		float Y = keyPoints[i].pt.y;
		float R = keyPoints[i].size;
		int intR = (int)R;
		if (intR > *nr_pixels_ptr) *nr_pixels_ptr = intR;
		circle(img_BGR, cv::Point(X, Y), R + 5, Color, 8, 0);
		X_obj += X; Y_obj += Y;
		std::cout << "  i: " << i << "  (X -|- Y) : (" << X << " -|- " << Y << ") Radius: " << R << std::endl;
	}
	if (!keyPoints.empty())
	{
		X_obj /= keyPoints.size();
		Y_obj /= keyPoints.size();
	}

	std::pair<int, int> return_value(-1, -1);

	if (keyPoints.size() != 0)
	{
		X_obj_old = X_obj; Y_obj_old = Y_obj;
		return_value.first = X_obj;
		return_value.second = Y_obj;
		circle(img_BGR, cv::Point(X_obj, Y_obj), 5, CV_RGB(255,255,255), 4, 8, 0);
	}
	else
	{
		std::cout << "******************** NO BLOBS FOUND ********************" << std::endl;
		circle(img_BGR, cv::Point(X_obj_old, Y_obj_old), 5, CV_RGB(255,255,255), 4, 8, 0);
	}

	// std::cout << "Reached end of GetThresholdedImage" << std::endl;

	// sleep(5);

	return return_value;
}
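
A minimal sketch of the optional second threshold band above, assuming OpenCV 3+; this is the usual trick for hues such as red that wrap around 0/180 in OpenCV's HSV space, and the concrete bounds are illustrative:

#include <opencv2/opencv.hpp>

cv::Mat thresholdTwoBands(const cv::Mat& bgr)
{
    cv::Mat hsv, low, high, mask;
    cv::cvtColor(bgr, hsv, cv::COLOR_BGR2HSV);
    cv::inRange(hsv, cv::Scalar(0,   70, 50), cv::Scalar(10,  255, 255), low);
    cv::inRange(hsv, cv::Scalar(170, 70, 50), cv::Scalar(180, 255, 255), high);
    cv::bitwise_or(low, high, mask); // union of the two hue bands
    return mask;
}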
コード例 #21
0
int main(int argc, char** argv) {
	cv::VideoCapture stream(0); // open video stream from any video source
	int count = 0;
	int frame_width = stream.get(CV_CAP_PROP_FRAME_WIDTH);
	int frame_height = stream.get(CV_CAP_PROP_FRAME_HEIGHT);

	VideoWriter outputVideo("salient_video.avi", CV_FOURCC('M', 'J', 'P', 'G'), 20, Size(frame_width, frame_height), true);
	while(stream.isOpened())
	{
	cv::Mat inputImage;
	if (!stream.read(inputImage)) // try to read a frame
		break;

	Mat finalImage;
	Mat * ptr = NULL;
	pthread_t intensityThread, colorThread;
   long totaltime = timestamp();
	
	//long intTime = timestamp();
	IntensityImg = Get_Intensity_Image(inputImage);
	pthread_create(&intensityThread, NULL, intensity_processing, (void *) ptr);
	//pthread_join(intensityThread, NULL);
	//long intFinal = timestamp() - intTime;
	//cout << "Intensity Map Time: " << intFinal << "\n";
	
	ptr = &inputImage;
	//long colTime = timestamp();
	pthread_create(&colorThread, NULL, color_processing, (void *) ptr);
	//pthread_join(colorThread, NULL);
	//long colFinal = timestamp() - colTime;
	//cout << "Color Map Time: " << colFinal << "\n";
	
	//long orTime = timestamp();
	Mat AggOr = getGaborImage();
	normalize(AggOr, AggOr, 0, 255, NORM_MINMAX, -1);
	//long orFinal = timestamp() - orTime;
	//cout << "Orientation Map Time: " << orFinal << "\n";

        pthread_join(intensityThread, NULL);
        pthread_join(colorThread, NULL);

	
	finalImage = (AggInt + AggColor + AggOr) / 3;
	normalize(finalImage, finalImage, 0, 255, NORM_MINMAX, -1);

	for (int bCtr = 0; bCtr < 4; bCtr++) {
		pyrUp(finalImage, finalImage);
	}
	
	long finaltime = timestamp() - totaltime;
	cout << "Total Time: " << finaltime << "\n";
	Mat contImg;
	inRange(finalImage, 130, 255, contImg);
	vector < vector<Point> > contours;
	vector < Vec4i > hierarchy;

	findContours(contImg, contours, hierarchy, CV_RETR_CCOMP,
			CV_CHAIN_APPROX_SIMPLE);
	// walk the top level of the contour hierarchy; guard against an empty result
	for (int i = 0; !contours.empty() && i >= 0; i = hierarchy[i][0]) {
		Scalar color(rand() & 255, rand() & 255, rand() & 255);
		drawContours(inputImage, contours, i, color, 3, 8, hierarchy);
	}


   
   outputVideo.write(inputImage);
  
}
	return 0;
}
コード例 #22
0
int main(int argc, char** argv)
{
long totaltime, intTime, intTime_o, colTime, colTime_o , orTime, orTime_o;
if(argc != 2)
{
	cout << "No image"<<endl;
	return -1;
}
cout<<"Loading Image: ";
cout<< argv[1]<<endl;
Mat inputImage = imread(argv[1], CV_LOAD_IMAGE_COLOR);

if(!inputImage.data)
{
	cout <<"Invalid Image"<<endl;
	return -1;
}

Mat IntensityImg, finalImage;
for(int counter = 0; counter < 1; counter++)
{
totaltime = timestamp();

intTime = timestamp();
IntensityImg = Get_Intensity_Image(inputImage);
vector<Mat> Intensity_Maps = Pyr_CenSur(IntensityImg);
Mat AggInt = aggregateMaps(Intensity_Maps);
normalize(AggInt, AggInt, 0, 255, NORM_MINMAX, -1);
intTime_o = timestamp() - intTime;

colTime = timestamp();
vector<Mat> color_map;
color_map = Normalize_color(inputImage, IntensityImg);
vector<Mat> RGBYMap(6); 
for(int i = 0; i < 6; i++)
	addWeighted(color_map[i], 0.5, color_map[i+6], 0.5, 0, RGBYMap[i], -1);
Mat AggColor = aggregateMaps(RGBYMap);
normalize(AggColor, AggColor, 0, 255, NORM_MINMAX, -1);
colTime_o = timestamp() - colTime;

orTime = timestamp();
Mat AggOr;
AggOr = getGaborImage(IntensityImg);
normalize(AggOr, AggOr, 0, 255, NORM_MINMAX, -1);
orTime_o = timestamp() - orTime;

finalImage = (AggInt + AggColor + AggOr) /3;
normalize(finalImage, finalImage, 0, 255, NORM_MINMAX, -1);

for(int bCtr = 0; bCtr<4; bCtr++)
{
	pyrUp(finalImage, finalImage);
}

cout <<"Intensity Time: "<< (intTime_o) << "\n";
cout <<"Color Time: "<< (colTime_o) << "\n";
cout <<"Orientation Time: "<< (orTime_o) << "\n";
cout <<"Total Time: "<< (timestamp() - totaltime) << "\n";
}



Mat contImg;
inRange(finalImage, 160, 230, contImg);
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;

findContours(contImg, contours, hierarchy, CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE);
// walk the top level of the contour hierarchy; guard against an empty result
for(int i = 0; !contours.empty() && i >= 0; i = hierarchy[i][0])
{
	Scalar color(rand()&255, rand()&255, rand()&255);
	drawContours(inputImage, contours, i, color, 3, 8, hierarchy);
}

imwrite("Salient_Image.jpg" , inputImage);

waitKey(0);
return 0;
}
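
Both saliency examples above walk the top level of the contour hierarchy with i = hierarchy[i][0]. A minimal sketch of that traversal, with the empty-result guard the loop needs:

#include <opencv2/opencv.hpp>
#include <vector>

void drawTopLevel(cv::Mat& canvas,
                  const std::vector<std::vector<cv::Point>>& contours,
                  const std::vector<cv::Vec4i>& hierarchy)
{
    if (contours.empty()) return; // hierarchy[0] would be out of range
    for (int i = 0; i >= 0; i = hierarchy[i][0]) // hierarchy[i][0] = next sibling
        cv::drawContours(canvas, contours, i, cv::Scalar(0, 255, 0), 3, 8, hierarchy);
}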
コード例 #23
0
void RecognitionDemos( Mat& full_image, Mat& template1, Mat& template2, Mat& template1locations, Mat& template2locations, VideoCapture& bicycle_video, Mat& bicycle_background, Mat& bicycle_model, VideoCapture& people_video, CascadeClassifier& cascade, Mat& numbers, Mat& good_orings, Mat& bad_orings, Mat& unknown_orings )
{
	Timestamper* timer = new Timestamper();

	// Principal Components Analysis
	PCASimpleExample();
    char ch = cvWaitKey();
	cvDestroyAllWindows();

	PCAFaceRecognition();
    ch = cvWaitKey();
	cvDestroyAllWindows();

	// Statistical Pattern Recognition
	Mat gray_numbers,binary_numbers;
	cvtColor(numbers, gray_numbers, CV_BGR2GRAY);
	threshold(gray_numbers,binary_numbers,128,255,THRESH_BINARY_INV);
    vector<vector<Point>> contours;
	vector<Vec4i> hierarchy;
	findContours(binary_numbers,contours,hierarchy,CV_RETR_TREE,CV_CHAIN_APPROX_NONE);
	Mat contours_image = Mat::zeros(binary_numbers.size(), CV_8UC3);
	contours_image = Scalar(255,255,255);
	// Do some processing on all contours (objects and holes!)
	vector<RotatedRect> min_bounding_rectangle(contours.size());
	vector<vector<Point>> hulls(contours.size());
	vector<vector<int>> hull_indices(contours.size());
	vector<vector<Vec4i>> convexity_defects(contours.size());
	vector<Moments> contour_moments(contours.size());
	for (int contour_number=0; (contour_number<(int)contours.size()); contour_number++)
	{
		if (contours[contour_number].size() > 10)
		{
			min_bounding_rectangle[contour_number] = minAreaRect(contours[contour_number]);
			convexHull(contours[contour_number], hulls[contour_number]);
			convexHull(contours[contour_number], hull_indices[contour_number]);
			convexityDefects( contours[contour_number], hull_indices[contour_number], convexity_defects[contour_number]);
			contour_moments[contour_number] = moments( contours[contour_number] );
		}
	}
	for (int contour_number=0; (contour_number>=0); contour_number=hierarchy[contour_number][0])
	{
		if (contours[contour_number].size() > 10)
		{
        Scalar colour( rand()&0x7F, rand()&0x7F, rand()&0x7F );
        drawContours( contours_image, contours, contour_number, colour, CV_FILLED, 8, hierarchy );
		char output[500];
		double area = contourArea(contours[contour_number])+contours[contour_number].size()/2+1;
		// Process any holes (removing their area from the area of the enclosing contour)
		for (int hole_number=hierarchy[contour_number][2]; (hole_number>=0); hole_number=hierarchy[hole_number][0])
		{
			area -= (contourArea(contours[hole_number])-contours[hole_number].size()/2+1);
			Scalar colour( rand()&0x7F, rand()&0x7F, rand()&0x7F );
 			drawContours( contours_image, contours, hole_number, colour, CV_FILLED, 8, hierarchy );
			sprintf(output,"Area=%.0f", contourArea(contours[hole_number])-contours[hole_number].size()/2+1);
			Point location( contours[hole_number][0].x +20, contours[hole_number][0].y +5 );
			putText( contours_image, output, location, FONT_HERSHEY_SIMPLEX, 0.4, colour );
		}
		// Draw the minimum bounding rectangle
		Point2f bounding_rect_points[4];
		min_bounding_rectangle[contour_number].points(bounding_rect_points);
		line( contours_image, bounding_rect_points[0], bounding_rect_points[1], Scalar(0, 0, 127));
		line( contours_image, bounding_rect_points[1], bounding_rect_points[2], Scalar(0, 0, 127));
		line( contours_image, bounding_rect_points[2], bounding_rect_points[3], Scalar(0, 0, 127));
		line( contours_image, bounding_rect_points[3], bounding_rect_points[0], Scalar(0, 0, 127));
		float bounding_rectangle_area = min_bounding_rectangle[contour_number].size.area();
		// Draw the convex hull
        drawContours( contours_image, hulls, contour_number, Scalar(127,0,127) );
		// Highlight any convexities
		int largest_convexity_depth=0;
		for (int convexity_index=0; convexity_index < (int)convexity_defects[contour_number].size(); convexity_index++)
		{
			if (convexity_defects[contour_number][convexity_index][3] > largest_convexity_depth)
				largest_convexity_depth = convexity_defects[contour_number][convexity_index][3];
			if (convexity_defects[contour_number][convexity_index][3] > 256*2)
			{
				line( contours_image, contours[contour_number][convexity_defects[contour_number][convexity_index][0]], contours[contour_number][convexity_defects[contour_number][convexity_index][2]], Scalar(0,0, 255));
				line( contours_image, contours[contour_number][convexity_defects[contour_number][convexity_index][1]], contours[contour_number][convexity_defects[contour_number][convexity_index][2]], Scalar(0,0, 255));
			}
		}
		double hu_moments[7];
		HuMoments( contour_moments[contour_number], hu_moments );
		sprintf(output,"Perimeter=%d, Area=%.0f, BArea=%.0f, CArea=%.0f", contours[contour_number].size(),area,min_bounding_rectangle[contour_number].size.area(),contourArea(hulls[contour_number]));
		Point location( contours[contour_number][0].x, contours[contour_number][0].y-3 );
		putText( contours_image, output, location, FONT_HERSHEY_SIMPLEX, 0.4, colour );
		sprintf(output,"HuMoments = %.2f, %.2f, %.2f", hu_moments[0],hu_moments[1],hu_moments[2]);
		Point location2( contours[contour_number][0].x+100, contours[contour_number][0].y-3+15 );
		putText( contours_image, output, location2, FONT_HERSHEY_SIMPLEX, 0.4, colour );
		}
	}
	imshow("Shape Statistics", contours_image );
	char c = cvWaitKey();
	cvDestroyAllWindows();

	// Support Vector Machine
	imshow("Good - original",good_orings);
	imshow("Defective - original",bad_orings);
	imshow("Unknown - original",unknown_orings);
	SupportVectorMachineDemo(good_orings,"Good",bad_orings,"Defective",unknown_orings);
	c = cvWaitKey();
	cvDestroyAllWindows();

	// Template Matching
	Mat display_image, correlation_image;
	full_image.copyTo( display_image );
	double min_correlation, max_correlation;
	Mat matched_template_map;
	int result_columns =  full_image.cols - template1.cols + 1;
	int result_rows = full_image.rows - template1.rows + 1;
	correlation_image.create( result_columns, result_rows, CV_32FC1 );
	timer->reset();
	double before_tick_count = static_cast<double>(getTickCount());
	matchTemplate( full_image, template1, correlation_image, CV_TM_CCORR_NORMED );
	double after_tick_count = static_cast<double>(getTickCount());
	double duration_in_ms = 1000.0*(after_tick_count-before_tick_count)/getTickFrequency();
	minMaxLoc( correlation_image, &min_correlation, &max_correlation );
	FindLocalMaxima( correlation_image, matched_template_map, max_correlation*0.99 );
	timer->recordTime("Template Matching (1)");
	Mat matched_template_display1;
	cvtColor(matched_template_map, matched_template_display1, CV_GRAY2BGR);
	Mat correlation_window1 = convert_32bit_image_for_display( correlation_image, 0.0 );
	DrawMatchingTemplateRectangles( display_image, matched_template_map, template1, Scalar(0,0,255) );
	double precision, recall, accuracy, specificity, f1;
	Mat template1locations_gray;
	cvtColor(template1locations, template1locations_gray, CV_BGR2GRAY);
	CompareRecognitionResults( matched_template_map, template1locations_gray, precision, recall, accuracy, specificity, f1 );
	char results[400];
	Scalar colour( 255, 255, 255);
	sprintf( results, "precision=%.2f", precision);
	Point location( 7, 213 );
	putText( display_image, "Results (1)", location, FONT_HERSHEY_SIMPLEX, 0.4, colour );
	location.y += 13;
	putText( display_image, results, location, FONT_HERSHEY_SIMPLEX, 0.4, colour );
	sprintf( results, "recall=%.2f", recall);
	location.y += 13;
	putText( display_image, results, location, FONT_HERSHEY_SIMPLEX, 0.4, colour );
	sprintf( results, "accuracy=%.2f", accuracy);
	location.y += 13;
	putText( display_image, results, location, FONT_HERSHEY_SIMPLEX, 0.4, colour );
	sprintf( results, "specificity=%.2f", specificity);
	location.y += 13;
	putText( display_image, results, location, FONT_HERSHEY_SIMPLEX, 0.4, colour );
	sprintf( results, "f1=%.2f", f1);
	location.y += 13;
	putText( display_image, results, location, FONT_HERSHEY_SIMPLEX, 0.4, colour );
  
	result_columns =  full_image.cols - template2.cols + 1;
	result_rows = full_image.rows - template2.rows + 1;
	correlation_image.create( result_rows, result_columns, CV_32FC1 );
	timer->ignoreTimeSinceLastRecorded();
	matchTemplate( full_image, template2, correlation_image, CV_TM_CCORR_NORMED );
	minMaxLoc( correlation_image, &min_correlation, &max_correlation );
	FindLocalMaxima( correlation_image, matched_template_map, max_correlation*0.99 );
	timer->recordTime("Template Matching (2)");
	Mat matched_template_display2;
	cvtColor(matched_template_map, matched_template_display2, CV_GRAY2BGR);
	Mat correlation_window2 = convert_32bit_image_for_display( correlation_image, 0.0 );
	DrawMatchingTemplateRectangles( display_image, matched_template_map, template2, Scalar(0,0,255) );
	timer->putTimes(display_image);
	Mat template2locations_gray;
	cvtColor(template2locations, template2locations_gray, CV_BGR2GRAY);
	CompareRecognitionResults( matched_template_map, template2locations_gray, precision, recall, accuracy, specificity, f1 );
	sprintf( results, "precision=%.2f", precision);
	location.x = 123;
	location.y = 213;
	putText( display_image, "Results (2)", location, FONT_HERSHEY_SIMPLEX, 0.4, colour );
	location.y += 13;
	putText( display_image, results, location, FONT_HERSHEY_SIMPLEX, 0.4, colour );
	sprintf( results, "recall=%.2f", recall);
	location.y += 13;
	putText( display_image, results, location, FONT_HERSHEY_SIMPLEX, 0.4, colour );
	sprintf( results, "accuracy=%.2f", accuracy);
	location.y += 13;
	putText( display_image, results, location, FONT_HERSHEY_SIMPLEX, 0.4, colour );
	sprintf( results, "specificity=%.2f", specificity);
	location.y += 13;
	putText( display_image, results, location, FONT_HERSHEY_SIMPLEX, 0.4, colour );
	sprintf( results, "f1=%.2f", f1);
	location.y += 13;
	putText( display_image, results, location, FONT_HERSHEY_SIMPLEX, 0.4, colour );
	Mat correlation_display1, correlation_display2;
	cvtColor(correlation_window1, correlation_display1, CV_GRAY2BGR);
	cvtColor(correlation_window2, correlation_display2, CV_GRAY2BGR);

	Mat output1 = JoinImagesVertically(template1,"Template (1)",correlation_display1,"Correlation (1)",4);
	Mat output2 = JoinImagesVertically(output1,"",matched_template_display1,"Local maxima (1)",4);
	Mat output3 = JoinImagesVertically(template2,"Template (2)",correlation_display2,"Correlation (2)",4);
	Mat output4 = JoinImagesVertically(output3,"",matched_template_display2,"Local maxima (2)",4);
	Mat output5 = JoinImagesHorizontally( full_image, "Original Image", output2, "", 4 );
	Mat output6 = JoinImagesHorizontally( output5, "", output4, "", 4 );
	Mat output7 = JoinImagesHorizontally( output6, "", display_image, "", 4 );
	imshow( "Template matching result", output7 );
	c = cvWaitKey();
	cvDestroyAllWindows();

	// Chamfer Matching
    Mat model_gray,model_edges,model_edges2;
	cvtColor(bicycle_model, model_gray, CV_BGR2GRAY);
	threshold(model_gray,model_edges,127,255,THRESH_BINARY);
	Mat current_frame;
	bicycle_video.set(CV_CAP_PROP_POS_FRAMES,400);  // Just in case the video has already been used.
	bicycle_video >> current_frame;
	bicycle_background = current_frame.clone();
	bicycle_video.set(CV_CAP_PROP_POS_FRAMES,500); 
	timer->reset();
	int count = 0;
	while (!current_frame.empty() && (count < 8))
    {
		Mat result_image = current_frame.clone();
		count++;
		Mat difference_frame, difference_gray, current_edges;
		absdiff(current_frame,bicycle_background,difference_frame);
		cvtColor(difference_frame, difference_gray, CV_BGR2GRAY);
		Canny(difference_gray, current_edges, 100, 200, 3);  // Canny needs the single-channel difference image

		vector<vector<Point> > results;
		vector<float> costs;
		threshold(model_gray,model_edges,127,255,THRESH_BINARY);
		Mat matching_image, chamfer_image, local_minima;
		timer->ignoreTimeSinceLastRecorded();
		threshold(current_edges,current_edges,127,255,THRESH_BINARY_INV);
		distanceTransform( current_edges, chamfer_image, CV_DIST_L2 , 3);
		timer->recordTime("Chamfer Image");
		ChamferMatching( chamfer_image, model_edges, matching_image );
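		// ChamferMatching (helper used above) is assumed to slide the model edge
		// image over the distance-transformed frame and sum, for each offset, the
		// chamfer_image values under the model's edge pixels - a low score means
		// every model edge lies close to an image edge.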
		timer->recordTime("Matching");
		FindLocalMinima( matching_image, local_minima, 500.0 );
		timer->recordTime("Find Minima");
		DrawMatchingTemplateRectangles( result_image, local_minima, model_edges, Scalar( 255, 0, 0 ) );
		Mat chamfer_display_image = convert_32bit_image_for_display( chamfer_image );
		Mat matching_display_image = convert_32bit_image_for_display( matching_image );
		//timer->putTimes(result_image);
		Mat current_edges_display, local_minima_display, model_edges_display, colour_matching_display_image, colour_chamfer_display_image;
		cvtColor(current_edges, current_edges_display, CV_GRAY2BGR);
		cvtColor(local_minima, local_minima_display, CV_GRAY2BGR);
		cvtColor(model_edges, model_edges_display, CV_GRAY2BGR);
		cvtColor(matching_display_image, colour_matching_display_image, CV_GRAY2BGR);
		cvtColor(chamfer_display_image, colour_chamfer_display_image, CV_GRAY2BGR);

		Mat output1 = JoinImagesVertically(current_frame,"Video Input",current_edges_display,"Edges from difference", 4);
		Mat output2 = JoinImagesVertically(output1,"",model_edges_display,"Model", 4);
		Mat output3 = JoinImagesVertically(bicycle_background,"Static Background",colour_chamfer_display_image,"Chamfer image", 4);
		Mat output4 = JoinImagesVertically(output3,"",colour_matching_display_image,"Degree of fit", 4);
		Mat output5 = JoinImagesVertically(difference_frame,"Difference",result_image,"Result", 4);
		Mat output6 = JoinImagesVertically(output5,"",local_minima_display,"Local minima", 4);
		Mat output7 = JoinImagesHorizontally( output2, "", output4, "", 4 );
		Mat output8 = JoinImagesHorizontally( output7, "", output6, "", 4 );
		imshow("Chamfer matching", output8);
		c = waitKey(1000);  // This makes the image appear on screen
		bicycle_video >> current_frame;
	}
	c = cvWaitKey();
	cvDestroyAllWindows();

	// Cascade of Haar classifiers (most often shown for face detection).
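	// Note: 'cascade' is assumed to be a CascadeClassifier loaded earlier in the
	// program, e.g. via cascade.load("haarcascade_frontalface_default.xml").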
    VideoCapture camera;
	camera.open(1);
	camera.set(CV_CAP_PROP_FRAME_WIDTH, 320);
	camera.set(CV_CAP_PROP_FRAME_HEIGHT, 240);
    if( camera.isOpened() )
	{
		timer->reset();
		Mat current_frame;
		do {
			camera >> current_frame;
			if( current_frame.empty() )
				break;
			vector<Rect> faces;
			timer->ignoreTimeSinceLastRecorded();
			Mat gray;
			cvtColor( current_frame, gray, CV_BGR2GRAY );
			equalizeHist( gray, gray );
			cascade.detectMultiScale( gray, faces, 1.1, 2, CV_HAAR_SCALE_IMAGE, Size(30, 30) );
			timer->recordTime("Haar Classifier");
			for( int count = 0; count < (int)faces.size(); count++ )
				rectangle(current_frame, faces[count], cv::Scalar(255,0,0), 2);
			//timer->putTimes(current_frame);
			imshow( "Cascade of Haar Classifiers", current_frame );
			c = waitKey(10);  // This makes the image appear on screen
        } while (c == -1);
	}
Code example #24
File: visionUtils.cpp Project: towardthesea/wysiwyd
Mat visionUtils::cannySegmentation(Mat img0, int minPixelSize, bool displayFaces)
{
    // Segments items in gray image (img0)
    // minPixelSize:
    //   -1 : return the largest region only
    //   >0 : remove regions with fewer than minPixelSize pixels
    //    0 : return all detected segments


    // LB: Zero pad image to remove edge effects when getting regions....
    int padPixels=20;
    // Rect border added at start...
    Rect tempRect;
    tempRect.x=padPixels;
    tempRect.y=padPixels;
    tempRect.width=img0.cols;
    tempRect.height=img0.rows;

    Mat img1 = Mat::zeros(img0.rows+(padPixels*2), img0.cols+(padPixels*2), CV_8UC1);
    img0.copyTo(img1(tempRect));


    if (useGPU)// GPU path -> NOT verified to give a speed-up here!
    {
        GpuMat imgGPU;
        imgGPU.upload(img1);
#if CV_MAJOR_VERSION == 2
        gpu::Canny(imgGPU, imgGPU, 100, 200, 3); //100, 200, 3);
#elif CV_MAJOR_VERSION == 3
        cv::Ptr<cv::cuda::CannyEdgeDetector> canny = cv::cuda::createCannyEdgeDetector(100, 200, 3);
        canny->detect(imgGPU, imgGPU);
#endif
        imgGPU.download(img1);
    }
    else
    {
        Canny(img1, img1, 100, 200, 3); //100, 200, 3);
    }


    // find the contours
    vector< vector<Point> > contours;
    findContours(img1, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);

    // Mask for segmented regions
    Mat mask = Mat::zeros(img1.rows, img1.cols, CV_8UC1);

    vector<double> areas(contours.size());

    if (minPixelSize==-1)
    {   // Case of taking largest region
        for(int i = 0; i < (int)contours.size(); i++)
            areas[i] = contourArea(Mat(contours[i]));
        double max;
        Point maxPosition;
        cv::minMaxLoc(Mat(areas),0,&max,0,&maxPosition);
        drawContours(mask, contours, maxPosition.y, Scalar(1), CV_FILLED);
    }
    else
    {   // Case for using minimum pixel size
        for (int i = 0; i < (int)contours.size(); i++)
        {
            if (contourArea(Mat(contours[i]))>minPixelSize)
                drawContours(mask, contours, i, Scalar(1), CV_FILLED);
        }
    }
    // normalize so imwrite(...)/imshow(...) shows the mask correctly!
    cv::normalize(mask.clone(), mask, 0.0, 255.0, CV_MINMAX, CV_8UC1);

    Mat returnMask;
    returnMask=mask(tempRect);

    // show the images
    if (displayFaces)   imshow("Canny: Img in", img0);
    if (displayFaces)   imshow("Canny: Mask", returnMask);
    if (displayFaces)   imshow("Canny: Output", img1);

    return returnMask;
}
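
// Usage sketch for cannySegmentation above (hypothetical caller; the image file
// name and instance are assumptions, not part of the original project):
void cannySegmentationDemo(visionUtils &utils)
{
    Mat grey = imread("hand.png", CV_LOAD_IMAGE_GRAYSCALE);
    if (grey.empty()) return;                                      // nothing to segment
    Mat largestRegion = utils.cannySegmentation(grey, -1, false);  // largest region only
    Mat bigRegions    = utils.cannySegmentation(grey, 500, false); // regions > 500 px
    imshow("Largest segmented region", largestRegion);
    waitKey(0);
}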
Code example #25
File: Vision.cpp Project: mehdithreem/sharifcup
void RobotVision::update(Field & field,bool type) {
	vector<MovingObj> obstacles ;
	MovingObj agent ,rival ;
	Mat frame ;

	if(!camera->isOpened()){
		cout << "camera not ready !" ;
		return ;
	}

	(*camera) >> frame ;
	frame = frame(Rect(CROP_X,CROP_Y,CROP_WIDTH,CROP_HEIGHT));
	Mat paintingFrame;
	frame.copyTo(paintingFrame);

	//blur(frame, frame, Size(3,3));
	//resize(frame, frame, Size(0,0),0.5,0.5);
	this->currFrame = &frame ;

	if(type){
		for (ColorObject currColorObject : colorObjects) {
			vector<MovingObj> currObjects ;
			currObjects = currColorObject.findObjects(frame,paintingFrame);
			cout << "---" << params::getColorName(currColorObject.getColor()) << "--- " << currObjects.size() << endl;
			if(currColorObject.getColor() == params::black){
				if(currObjects.empty()){
					cout << "CAN'T RECOGNIZE ROBOT AT ALL !" << endl ;
					field.agent = MovingObj() ;
					field.agent.updated = false ;
				}else{
					field.agent = currObjects[0];
					if(field.agent.coords.size() == 3){
						field.agent.updated = true ;
					}else{
						field.agent.updated = false ;
					}
				}
			}else{
				for(MovingObj currObject : currObjects){
					obstacles.push_back(currObject);
				}
			}
			
		}
		field.obstacles = obstacles ;
		// imshow("frame", frame);
		// cout << "wait!" << endl;
		// cin.ignore();
	}else{
		vector<MovingObj> currObjects ;
		currObjects = colorObjects[0].findObjects(frame,*(this->currFrame));
		if(currObjects.empty()){
			cout << "CAN'T RECOGNIZE ROBOT AT ALL !" << endl ;
			field.agent = MovingObj() ;
			field.agent.updated = false ;
		}else{
			field.agent = currObjects[0];
			if(field.agent.coords.size() == 3){
				comPath.push_back(Point(field.agent.COM.x, field.agent.COM.y));
				field.agent.updated = true ;
			}else{
				field.agent.updated = false ;
			}
		}        
	}
	field.rival = rival ;

	drawPoints(paintingFrame, points);
	drawContours(paintingFrame, vector<vector<Point> >(1,comPath),-1,Scalar(255,255,255));
	// cvtColor(frame, frame, CV_HSV2BGR);
	// imshow("frame2", frame);
	imshow("Painting",paintingFrame);
}
Code example #26
File: visionUtils.cpp Project: towardthesea/wysiwyd
Mat visionUtils::segmentFace(Mat srcImage, Mat maskImage, bool displayFaces, Mat *skinSegMaskInv)
{

    // Check mask and original image are the same size
    Size srcS = srcImage.size();
    int heightS = srcS.height;
    int widthS = srcS.width;

    Size maskS = maskImage.size();
    int heightM = maskS.height;
    int widthM = maskS.width;

    if (heightS!=heightM || widthS!=widthM)
    {
        cout << "hS:" << heightS << " wS:" << widthS << " hM:" << heightM << " wM" << widthM << endl;
        cout << "Source and mask images are not the same size... aborting" << endl;
        Mat ttt;
        return (ttt);
    }

    /// Convert image to gray and blur it
    cv::cvtColor( maskImage, src_gray, CV_BGR2GRAY );
    cv::blur( src_gray, src_gray, Size(3,3) );
    vector<vector<Point> > contours;
    vector<Vec4i> hierarchy;

    /// Find contours (no separate edge detection is performed; the blurred
    /// grey mask is passed straight to findContours)
    findContours( src_gray, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0) );

    // ########## Remove contour indents (defects) by finding the convex hull
    /// Find the convex hull object for each contour
    vector<vector<Point> >hull( contours.size() );

    for( int i = 0; i < (int)contours.size(); i++ )
    {
        convexHull( Mat(contours[i]), hull[i], false );
    }

    /// Draw contours + hull results
    Mat drawingHull = Mat::zeros( src_gray.size(), CV_8UC3 );

    //Check minimum contour size and find largest....
    double largest_area=-1;  // contour areas are fractional, so avoid int truncation
    int largest_contour_index=0;
    for( int i = 0; i< (int)contours.size(); i++ )
    {
        if( (int)contours[i].size() > minContourSize )
        {
            double a=contourArea( contours[i],false);  //  Find the area of contour
            if(a>largest_area)
            {
                largest_area=a;
                largest_contour_index=i;
            }
        }
    }

    if (displayFaces)
    {
        RNG rng(12345); // for colour generation

        for( int i = 0; i< (int)contours.size(); i++ )
        {
            Scalar color = Scalar( rng.uniform(0, 255), rng.uniform(0,255), rng.uniform(0,255) );
            drawContours( drawingHull, contours, i, color, 1, 8, vector<Vec4i>(), 0, Point() );
            drawContours( drawingHull, hull, i, color, 1, 8, vector<Vec4i>(), 0, Point() );
        }
        imshow( "Contour Convex Hull", drawingHull );
    }

    //// ############### Select the hull contour to use -> ignoring ellipse etc
    // Check if hull found successfully... if not ABORT
    if (hull.empty() )
    {
        cout << "Hull region not found > returning...." << endl;
        Mat ttt;
        return (ttt);
    }

    // Check area of hull and abort if needed
    vector<Point> approx;
    approxPolyDP(hull[largest_contour_index], approx, 5, true);
    double area1 = contourArea(approx);
    if  (area1<4000)
    {
        cout << "Hull area too small > returning...." << endl;
        Mat ttt;
        return (ttt);
    }

    // Cut down rect around convex contour hull
    Rect boundRect;
    boundRect=boundingRect(Mat(hull[largest_contour_index]));
    // Check bounding box fits inside image.... resize if needed
    boundRect=checkRoiInImage(srcImage, boundRect);

    // Check bounding box has greater dimensions than 5x5pix
    if (boundRect.height<=5 || boundRect.width<=5)
    {
        cout << "Region selected too small... exiting" << endl;
        Mat ttt;
        return (ttt);
    }
    else
    {
        /// Repeat boxing but for masked skin data (Hull)
        // Make binary mask using hull largest contour
        Mat srcSegSkin = Mat::zeros( srcImage.size(), CV_8UC3 );
        Mat skinSegMask = Mat::zeros( srcImage.size(), CV_8UC1 );
        drawContours( skinSegMask, hull, largest_contour_index, Scalar(255), -1, 8, vector<Vec4i>(), 0, Point() );
        srcImage.copyTo(srcSegSkin,skinSegMask);  // Copy using mask from skinSegMask
        srcSegSkin=srcSegSkin(boundRect);

        // Make face blocking mask (face pix = 0)
        Mat skinSegMaskInvTemp = Mat::zeros( srcImage.size(), CV_8UC1 );
        cv::bitwise_not(skinSegMaskInvTemp,*skinSegMaskInv,skinSegMask);

        if (displayFaces)
        {   // Take boxed region of face from original image data
            // Copy initial image and section with bounding box
            Mat srcSegmented = srcImage.clone();
            srcSegmented=srcSegmented(boundRect);
            imshow("Rect region orig",srcSegmented);
            Mat maskSegmented = maskImage.clone();
            maskSegmented=maskSegmented(boundRect);
            imshow("Rect region, with SkinSeg",maskSegmented);
            imshow("Rect region, with hull region SkinSeg",srcSegSkin);
        }

        return(srcSegSkin);
    }
}
Code example #27
File: experiments.cpp Project: mike-live/Astrocytes
void my_threshold (Mat & img, Mat & img_thr, bool otsu = false)
{
	Mat img_thrd, img_thru;
	if (otsu) threshold (img, img_thrd, 0, 255, THRESH_BINARY | CV_THRESH_OTSU);
	else threshold (img, img_thrd, 100, 255, THRESH_BINARY);
	//threshold (img, img_thru, 130, 255, THRESH_BINARY_INV);

	vector<vector<Point> > contours;
	vector<double> area;
	vector<Vec4i> hierarchy;

	findContours (img_thrd, contours, hierarchy, CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE);

	// iterate through all the top-level contours,
	// draw each connected component with its own random color
	/*int idx = 0;
	for (; idx >= 0; idx = hierarchy[idx][0]) {
	Scalar color (255, 255, 255);
	drawContours (img_max_thr, contours, idx, color, 1, 8, hierarchy);
	}*/

	// Calculate area of contours
	double mxs = 0;
	for (int i = 0; i < (int)contours.size (); i++) {
		double ss = contourArea (contours[i]);
		//for (int j = 1; j < (int)contours[i].size (); j++) ss += contours[i][j].x * contours[i][j - 1].y - contours[i][j].y * contours[i][j - 1].x;
		area.push_back (ss);
		mxs = max (ss, mxs);
	}
	img_thr.create (img.rows, img.cols, CV_8UC1);
	img_thr.setTo (0);

	// Draw contours with area more than 2.5% of max area
	const double percent_area = 2.5;
	for (int i = 0; i < (int)contours.size (); i++) if (abs (area[i]) > percent_area / 100.0 * mxs) drawContours (img_thr, contours, i, Scalar (255), 1, 8);
}
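
// Usage sketch for my_threshold above (hypothetical caller; the image file name
// is an assumption): binarise with an Otsu-selected threshold and keep only
// contours whose area exceeds 2.5% of the largest contour's area.
void my_threshold_demo ()
{
	Mat img = imread ("astrocytes.png", CV_LOAD_IMAGE_GRAYSCALE);
	if (img.empty ()) return;
	Mat img_thr;
	my_threshold (img, img_thr, true);   // true -> Otsu threshold selection
	imshow ("Filtered contours", img_thr);
	waitKey (0);
}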
Code example #28
static cv::Mat signalDetect_inROI(const cv::Mat& roi,
                                  const cv::Mat&     src_img,
                                  const double       estimatedRadius,
                                  const cv::Point roi_topLeft
                                  )
{
  /* reduce noise */
  cv::Mat noiseReduced(roi.rows, roi.cols, CV_8UC3);
  GaussianBlur(roi, noiseReduced, cv::Size(3, 3), 0, 0);

  /* extract color information */
  cv::Mat red_mask(roi.rows, roi.cols, CV_8UC1);
  colorExtraction(noiseReduced       ,
                  &red_mask          ,
                  thSet.Red.Hue.lower, thSet.Red.Hue.upper,
                  thSet.Red.Sat.lower, thSet.Red.Sat.upper,
                  thSet.Red.Val.lower, thSet.Red.Val.upper);

  cv::Mat yellow_mask(roi.rows, roi.cols, CV_8UC1);
  colorExtraction(noiseReduced          ,
                  &yellow_mask          ,
                  thSet.Yellow.Hue.lower, thSet.Yellow.Hue.upper,
                  thSet.Yellow.Sat.lower, thSet.Yellow.Sat.upper,
                  thSet.Yellow.Val.lower, thSet.Yellow.Val.upper);

  cv::Mat green_mask(roi.rows, roi.cols, CV_8UC1);
  colorExtraction(noiseReduced         ,
                  &green_mask          ,
                  thSet.Green.Hue.lower, thSet.Green.Hue.upper,
                  thSet.Green.Sat.lower, thSet.Green.Sat.upper,
                  thSet.Green.Val.lower, thSet.Green.Val.upper);

  /* combine all color mask and create binarized image */
  cv::Mat binarized = cv::Mat::zeros(roi.rows, roi.cols, CV_8UC1);
  bitwise_or(red_mask, yellow_mask, binarized);
  bitwise_or(binarized, green_mask, binarized);
  threshold(binarized, binarized, 0, 255, CV_THRESH_BINARY | CV_THRESH_OTSU);

  /* filter by its shape and index each bright region */
  std::vector< std::vector<cv::Point> > bright_contours;
  std::vector<cv::Vec4i> bright_hierarchy;
  findContours(binarized,
               bright_contours,
               bright_hierarchy,
               CV_RETR_CCOMP,
               CV_CHAIN_APPROX_NONE);


  cv::Mat bright_mask = cv::Mat::zeros(roi.rows, roi.cols, CV_8UC1);

  int contours_idx = 0;
  std::vector<regionCandidate> candidates;
  for (unsigned int i=0; i<bright_contours.size(); i++)
    {
      cv::Rect bound = boundingRect(bright_contours.at(contours_idx));
      cv::Scalar rangeColor = BLACK;
      struct regionCandidate cnd;
      double area = contourArea(bright_contours.at(contours_idx));
      double perimeter = arcLength(bright_contours.at(contours_idx), true);
      double circleLevel = (IsNearlyZero(perimeter)) ? 0.0f : (4.0f * CV_PI * area / pow(perimeter, 2));
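      // Circularity 4*pi*area/perimeter^2 equals 1.0 for a perfect circle and
      // tends towards 0 for elongated shapes, so thresholding it keeps the
      // round, lamp-like regions.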

      if (std::max(bound.width, bound.height) < 2*std::min(bound.width, bound.height) && /* dimension ratio */
          CIRCLE_LEVEL_THRESHOLD <= circleLevel                                       &&
          CIRCLE_AREA_THRESHOLD  <= area)
        {
          // std::cerr << "circleLevel: " << circleLevel << std::endl;
          rangeColor    = WHITE;
          cnd.center.x  = bound.x + bound.width/2;
          cnd.center.y  = bound.y + bound.height/2;
          cnd.idx       = contours_idx;
          cnd.circleLevel = (IsNearlyZero(perimeter)) ? 0.0f : (4.0 * CV_PI * area / pow(perimeter, 2));
          cnd.isBlacked = false;
          candidates.push_back(cnd);
        }

      drawContours(bright_mask,
                   bright_contours,
                   contours_idx,
                   rangeColor,
                   CV_FILLED,
                   8,
                   bright_hierarchy,
                   0);

      /* only contours on the top level are considered */
      contours_idx = bright_hierarchy[contours_idx][0];
      if (contours_idx < 0)
        break;
    }

  // imshow("bright_mask", bright_mask);
  // waitKey(10);

  unsigned int candidates_num = candidates.size();

  // std::cerr << "before checkExtrinctionLight. candidates: " << candidates_num << std::endl;

  /* decrease candidates by checking existence of turned off light in their neighborhood */
  if (candidates_num > 1)    /* if there are multiple candidates */
    {
      for (unsigned int i=0; i<candidates.size(); i++)
        {
          /* check whether this candidate seems to be a green lamp */
          cv::Point check_roi_topLeft  = cv::Point(candidates.at(i).center.x - 2*estimatedRadius + roi_topLeft.x,
                                                   candidates.at(i).center.y - 2*estimatedRadius + roi_topLeft.y);
          cv::Point check_roi_botRight = cv::Point(candidates.at(i).center.x + 6*estimatedRadius + roi_topLeft.x,
                                                   candidates.at(i).center.y + 2*estimatedRadius + roi_topLeft.y);
          bool likeGreen = checkExtinctionLight(src_img, check_roi_topLeft, check_roi_botRight, candidates.at(i).center);

          /* check whether this candidate seems to be a yellow lamp */
          check_roi_topLeft  = cv::Point(candidates.at(i).center.x - 4*estimatedRadius + roi_topLeft.x,
                                     candidates.at(i).center.y - 2*estimatedRadius + roi_topLeft.y);
          check_roi_botRight = cv::Point(candidates.at(i).center.x + 4*estimatedRadius + roi_topLeft.x,
                                     candidates.at(i).center.y + 2*estimatedRadius + roi_topLeft.y);
          bool likeYellow = checkExtinctionLight(src_img, check_roi_topLeft, check_roi_botRight, candidates.at(i).center);

          /* check whether this candidate seems to be a red lamp */
          check_roi_topLeft  = cv::Point(candidates.at(i).center.x - 6*estimatedRadius + roi_topLeft.x,
                                     candidates.at(i).center.y - 2*estimatedRadius + roi_topLeft.y);
          check_roi_botRight = cv::Point(candidates.at(i).center.x + 2*estimatedRadius + roi_topLeft.x,
                                     candidates.at(i).center.y + 2*estimatedRadius + roi_topLeft.y);
          bool likeRed = checkExtinctionLight(src_img, check_roi_topLeft, check_roi_botRight, candidates.at(i).center);


          if (!likeGreen && !likeYellow && !likeRed) /* this region may not be traffic light */
            {
              candidates_num--;
              drawContours(bright_mask,
                           bright_contours,
                           candidates.at(i).idx,
                           BLACK,
                           CV_FILLED,
                           8,
                           bright_hierarchy,
                           0);
              candidates.at(i).isBlacked = true;
            }
        }
    }

  // std::cerr << "after checkExtrinctionLight. candidates: " << candidates_num << std::endl;

  /* choose one candidate by comparing degree of circularity */
  if (candidates_num > 1)       /* if there are still multiple candidates */
    {
      double min_diff = DBL_MAX;
      unsigned int min_idx = 0;

      /* search the region that has nearest degree of circularity to 1 */
      for (unsigned int i=0; i<candidates.size(); i++)
        {
          if(candidates.at(i).isBlacked)
            continue;

          double diff = fabs(1 - candidates.at(i).circleLevel);
          if (min_diff > diff)
            {
              min_diff = diff;
              min_idx = i;
            }
        }

      /* fill region of non-candidate */
      for (unsigned int i=0; i<candidates.size(); i++)
        {

          if(candidates.at(i).isBlacked)
            continue;

          cv::Scalar regionColor = BLACK;
          candidates.at(i).isBlacked = true;
          if (i == min_idx)
            {
              regionColor = WHITE;
              candidates.at(i).isBlacked = false;
            }

          drawContours(bright_mask,
                       bright_contours,
                       candidates.at(i).idx,
                       regionColor,
                       CV_FILLED,
                       8,
                       bright_hierarchy,
                       0);
        }
    }

  return bright_mask;

} /* static void signalDetect_inROI() */
Code example #29
void getContours(const char* filename)
{
	  cv::Mat img = cv::imread(filename, 0);
  
	 /*
	  //Apply blur to smooth edges and use adaptive thresholding
	  cv::Size size(3,3);
	  cv::GaussianBlur(img,img,size,0);
	  adaptiveThreshold(img, img,255,CV_ADAPTIVE_THRESH_MEAN_C, CV_THRESH_BINARY,75,10);
	  cv::bitwise_not(img, img);
	 */

	  cv::Mat tmp;
	  cv::GaussianBlur(img, tmp, cv::Size(25,25), 25);
	  cv::addWeighted(img, 1.5, tmp, -0.5, 0, img);
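	  // The two lines above implement unsharp masking: 1.5*img - 0.5*blurred
	  // boosts high-frequency detail (edges) before adaptive thresholding.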
	  adaptiveThreshold(img, img,255,CV_ADAPTIVE_THRESH_MEAN_C, CV_THRESH_BINARY,55,60);
	  cv::bitwise_not(img, img);




	  cv::Mat img2 = img.clone();

	  std::vector<cv::Point> points;
	  cv::Mat_<uchar>::iterator it = img.begin<uchar>();
	  cv::Mat_<uchar>::iterator end = img.end<uchar>();
	  for (; it != end; ++it)
		if (*it)
		  points.push_back(it.pos());
 
	  cv::RotatedRect box = cv::minAreaRect(cv::Mat(points));
 
	  double angle = box.angle;
	  if (angle < -45.)
		angle += 90.;
         
	  cv::Point2f vertices[4];
	  box.points(vertices);
	  for(int i = 0; i < 4; ++i)
		cv::line(img, vertices[i], vertices[(i + 1) % 4], cv::Scalar(255, 0, 0), 1, CV_AA);
 
	  cv::Mat rot_mat = cv::getRotationMatrix2D(box.center, angle, 1);
 
	  cv::Mat rotated;
	  cv::warpAffine(img2, rotated, rot_mat, img.size(), cv::INTER_CUBIC); 
 
	  cv::Size box_size = box.size;
	  if (box.angle < -45.)
		std::swap(box_size.width, box_size.height);
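	  // minAreaRect reports angles in (-90, 0]; the +90 correction above,
	  // together with this width/height swap, keeps the deskewed crop upright.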
	  cv::Mat cropped;
 
	  cv::getRectSubPix(rotated, box_size, box.center, cropped);
	  cv::imshow("Cropped", cropped);
	 // imwrite("example5.jpg",cropped);
       
	   Mat cropped2=cropped.clone();
		cvtColor(cropped2,cropped2,CV_GRAY2RGB);
 
	  Mat cropped3 = cropped.clone();
	  cvtColor(cropped3,cropped3,CV_GRAY2RGB);
 
	  vector<vector<Point> > contours;
	  vector<Vec4i> hierarchy;
 
	  /// Find contours
	  cv::findContours( cropped, contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_TC89_KCOS, Point(0, 0) );
 
 
 
	  /// Approximate contours to polygons + get bounding rects and circles
	  vector<vector<Point> > contours_poly( contours.size() );
	  vector<Rect> boundRect( contours.size() );
	  vector<Point2f>center( contours.size() );
	  vector<float>radius( contours.size() );
 
 
	  //Get poly contours
	   for( int i = 0; i < contours.size(); i++ )
	   {
				approxPolyDP( Mat(contours[i]), contours_poly[i], 3, true );
	   }
 
 
	  //Get only important contours, merge contours that are within another
	  vector<vector<Point> > validContours;
	  for (int i=0;i<contours_poly.size();i++){
		  Rect r = boundingRect(Mat(contours_poly[i]));
		  if(r.area()<100)continue;
		  bool inside = false;
		  for(int j=0;j<contours_poly.size();j++){
			  if(j==i)continue;
			  Rect r2 = boundingRect(Mat(contours_poly[j]));
			  if(r2.area()<100||r2.area()<r.area())continue;
			  if(r.x>r2.x&&r.x+r.width<r2.x+r2.width&&
				  r.y>r2.y&&r.y+r.height<r2.y+r2.height){
				  inside = true;
			  }
		  }
		  if(inside)continue;
		  validContours.push_back(contours_poly[i]);
	  }

	  //Get bounding rects
	  for(int i=0;i<validContours.size();i++){
		  boundRect[i] = boundingRect( Mat(validContours[i]) );
	  }
 
 
			//Display
	  Scalar color = Scalar(0,255,0);
	  for( int i = 0; i< validContours.size(); i++ )
		 {
			if(boundRect[i].area()<100)continue;
		  drawContours( cropped2, validContours, i, color, 1, 8, vector<Vec4i>(), 0, Point() );
		   rectangle( cropped2, boundRect[i].tl(), boundRect[i].br(),color, 2, 8, 0 );
		 }
 
	  //imwrite("example6.jpg",cropped2);
	  imshow("Contours",cropped2);
 
	  extractContours(cropped3,validContours);
 
	cv::waitKey(0);
 
}
Code example #30
File: characteranalysis.cpp Project: Witek-/openalpr
void CharacterAnalysis::analyze()
{


    thresholds = produceThresholds(img_gray, config);


    /*
      // Morph Close the gray image to make it easier to detect blobs
      int morph_elem  = 1;
      int morph_size = 1;
      Mat element = getStructuringElement( morph_elem, Size( 2*morph_size + 1, 2*morph_size+1 ), Point( morph_size, morph_size ) );

      for (int i = 0; i < thresholds.size(); i++)
      {
        //morphologyEx( mask, mask, MORPH_CLOSE, element );
        morphologyEx( thresholds[i], thresholds[i], MORPH_OPEN, element );
        //dilate( thresholds[i], thresholds[i],  element );

      }
    */


    timespec startTime;
    getTime(&startTime);


    for (int i = 0; i < thresholds.size(); i++)
    {
        vector<vector<Point> > contours;
        vector<Vec4i> hierarchy;

        Mat tempThreshold(thresholds[i].size(), CV_8U);
        thresholds[i].copyTo(tempThreshold);
        findContours(tempThreshold,
                     contours, // a vector of contours
                     hierarchy,
                     CV_RETR_TREE, // retrieve all contours, with full hierarchy
                     CV_CHAIN_APPROX_SIMPLE ); // compress straight runs to their end points

        allContours.push_back(contours);
        allHierarchy.push_back(hierarchy);
    }




    if (config->debugTiming)
    {
        timespec endTime;
        getTime(&endTime);
        cout << "  -- Character Analysis Find Contours Time: " << diffclock(startTime, endTime) << "ms." << endl;
    }
    //Mat img_equalized = equalizeBrightness(img_gray);


    getTime(&startTime);

    for (int i = 0; i < thresholds.size(); i++)
    {
        vector<bool> goodIndices = this->filter(thresholds[i], allContours[i], allHierarchy[i]);
        charSegments.push_back(goodIndices);

        if (config->debugCharAnalysis)
            cout << "Threshold " << i << " had " << getGoodIndicesCount(goodIndices) << " good indices." << endl;
    }

    if (config->debugTiming)
    {
        timespec endTime;
        getTime(&endTime);
        cout << "  -- Character Analysis Filter Time: " << diffclock(startTime, endTime) << "ms." << endl;
    }



    this->plateMask = findOuterBoxMask();

    if (hasPlateMask)
    {
        // Filter out bad contours now that we have an outer box mask...
        for (int i = 0; i < thresholds.size(); i++)
        {
            charSegments[i] = filterByOuterMask(allContours[i], allHierarchy[i], charSegments[i]);
        }
    }


    int bestFitScore = -1;
    int bestFitIndex = -1;
    for (int i = 0; i < thresholds.size(); i++)
    {

        //vector<bool> goodIndices = this->filter(thresholds[i], allContours[i], allHierarchy[i]);
        //charSegments.push_back(goodIndices);

        int segmentCount = getGoodIndicesCount(charSegments[i]);


        if (segmentCount > bestFitScore)
        {
            bestFitScore = segmentCount;
            bestFitIndex = i;
            bestCharSegments = charSegments[i];
            bestThreshold = thresholds[i];
            bestContours = allContours[i];
            bestHierarchy = allHierarchy[i];
            bestCharSegmentsCount = segmentCount;
        }
    }

    if (this->config->debugCharAnalysis)
        cout << "Best fit score: " << bestFitScore << " Index: " << bestFitIndex << endl;


    if (bestFitScore <= 1)
        return;


    //getColorMask(img, allContours, allHierarchy, charSegments);

    if (this->config->debugCharAnalysis)
    {

        Mat img_contours(bestThreshold.size(), CV_8U);
        bestThreshold.copyTo(img_contours);
        cvtColor(img_contours, img_contours, CV_GRAY2RGB);

        vector<vector<Point> > allowedContours;
        for (int i = 0; i < bestContours.size(); i++)
        {
            if (bestCharSegments[i])
                allowedContours.push_back(bestContours[i]);
        }

        drawContours(img_contours, bestContours,
                     -1, // draw all contours
                     cv::Scalar(255,0,0), // in blue
                     1); // with a thickness of 1

        drawContours(img_contours, allowedContours,
                     -1, // draw all contours
                     cv::Scalar(0,255,0), // in green
                     1); // with a thickness of 1


        displayImage(config, "Matching Contours", img_contours);
    }


    //charsegments = this->getPossibleCharRegions(img_threshold, allContours, allHierarchy, STARTING_MIN_HEIGHT + (bestFitIndex * HEIGHT_STEP), STARTING_MAX_HEIGHT + (bestFitIndex * HEIGHT_STEP));



    this->linePolygon =  getBestVotedLines(img_gray, bestContours, bestCharSegments);

    if (this->linePolygon.size() > 0)
    {
        this->topLine = LineSegment(this->linePolygon[0].x, this->linePolygon[0].y, this->linePolygon[1].x, this->linePolygon[1].y);
        this->bottomLine = LineSegment(this->linePolygon[3].x, this->linePolygon[3].y, this->linePolygon[2].x, this->linePolygon[2].y);
        //this->charArea = getCharSegmentsBetweenLines(bestThreshold, bestContours, this->linePolygon);
        filterBetweenLines(bestThreshold, bestContours, bestHierarchy, linePolygon, bestCharSegments);

        this->charArea = getCharArea();

        if (this->charArea.size() > 0)
        {
            this->charBoxTop = LineSegment(this->charArea[0].x, this->charArea[0].y, this->charArea[1].x, this->charArea[1].y);
            this->charBoxBottom = LineSegment(this->charArea[3].x, this->charArea[3].y, this->charArea[2].x, this->charArea[2].y);
            this->charBoxLeft = LineSegment(this->charArea[3].x, this->charArea[3].y, this->charArea[0].x, this->charArea[0].y);
            this->charBoxRight = LineSegment(this->charArea[2].x, this->charArea[2].y, this->charArea[1].x, this->charArea[1].y);


        }
    }

    this->thresholdsInverted = isPlateInverted();



}