Mat SegmentationUtility::computeBoundary(const Mat& inputImage) {
    const Mat kernel = getStructuringElement(cv::MORPH_CROSS, cv::Size(3, 3));
    
    Mat dest;
    morphologyEx(inputImage, dest, cv::MORPH_GRADIENT, kernel);
    dilate(dest, dest, kernel, cv::Point(-1,-1), 2);

    return dest;
}
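A minimal usage sketch (not part of the original example; the input file name, the Otsu binarization step, and calling computeBoundary as a static member are assumptions):

#include <opencv2/opencv.hpp>
using namespace cv;

int main() {
    Mat img = imread("input.png", IMREAD_GRAYSCALE);  // hypothetical input file
    if (img.empty()) return -1;

    Mat binMask;
    threshold(img, binMask, 0, 255, THRESH_BINARY | THRESH_OTSU); // binarize before extracting boundaries
    Mat boundary = SegmentationUtility::computeBoundary(binMask); // morphological gradient, then two dilations

    imshow("boundary", boundary);
    waitKey(0);
    return 0;
}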
Example #2
Mat EdgeHandle::MatIllumination(Mat img){

	Mat src, src_gray;
	Mat grad;
	int scale = 1;
	int delta = 0;
	int ddepth = CV_16S;


	/// Load an image
	src = img;

	GaussianBlur( src, src, Size(3,3), 0, 0, BORDER_DEFAULT );

	/// Convert it to gray
	cvtColor( src, src_gray, CV_RGB2GRAY );

	/// Generate grad_x and grad_y
	Mat grad_x, grad_y;
	Mat abs_grad_x, abs_grad_y;

	//sobel
	Sobel( src_gray, grad_x, ddepth, 1, 0, 3, scale, delta, BORDER_DEFAULT );
	convertScaleAbs( grad_x, abs_grad_x );
	Sobel( src_gray, grad_y, ddepth, 0, 1, 3, scale, delta, BORDER_DEFAULT );
	convertScaleAbs( grad_y, abs_grad_y );
	addWeighted( abs_grad_x, 0.5, abs_grad_y, 0.5, 0, grad );

	// binary image.
	//threshold(grad,grad,0,255,THRESH_BINARY);

	// Apply the specified morphology operation (three top-hat passes with a 3x3 cross element)
	int morph_size = 1;
	Mat element = getStructuringElement( MORPH_CROSS, Size( 2*morph_size + 1, 2*morph_size+1 ), Point( morph_size, morph_size ) );
	morphologyEx( grad, grad, MORPH_TOPHAT, element );
	morphologyEx( grad, grad, MORPH_TOPHAT, element );
	morphologyEx( grad, grad, MORPH_TOPHAT, element );

	return grad;

}
Example #3
Mat Normalized::Skeleton(Mat img)
{
	Mat skel(img.size(), CV_8UC1, cv::Scalar(0));
	int morph_operation = MORPH_CLOSE;
	int element_shape = MORPH_RECT;
	int morph_size = 1;
	Mat element = getStructuringElement(element_shape, cv::Size(2 * morph_size + 1, 2 * morph_size + 1), cv::Point(morph_size, morph_size));

	// Apply the specified morphology operation (note: a single closing, not an iterative skeleton)
	morphologyEx(img, skel, morph_operation, element);

	return skel;
}
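The function above applies a single closing; if an actual skeleton is wanted, a minimal sketch of the classical iterative erode/open/subtract skeleton (assuming a binary CV_8UC1 input) could look like this:

Mat iterativeSkeleton(const Mat& binary)
{
	Mat img = binary.clone();
	Mat skel = Mat::zeros(img.size(), CV_8UC1);
	Mat eroded, opened;
	Mat element = getStructuringElement(MORPH_CROSS, cv::Size(3, 3));

	bool done = false;
	while (!done)
	{
		erode(img, eroded, element);
		dilate(eroded, opened, element);   // open(img) = dilate(erode(img))
		subtract(img, opened, opened);     // the layer that the opening removed
		bitwise_or(skel, opened, skel);    // accumulate it into the skeleton
		eroded.copyTo(img);
		done = (countNonZero(img) == 0);   // stop once the image is fully eroded
	}
	return skel;
}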
Example #4
void MainWindow::on_AperturaBT_clicked()
{
    Mat BStructElement = getStructuringElement(CV_SHAPE_RECT,Size(2,2));

    morphologyEx(this->dstImageThresholdAdaptative, this->dstImageClose, CV_MOP_CLOSE, BStructElement,Point(-1,-1) ,2 );


    QImage filtradoImage = Mat2QImage(dstImageClose);
    ui->FiltradoMorfologicoLB->setPixmap(QPixmap::fromImage(filtradoImage));

    ui->SegmentacionBT->setEnabled(true);

}
Example #5
Mat equalizeBrightness(Mat img)
{
  // Divide the image by its morphologically closed counterpart
  Mat kernel = getStructuringElement(MORPH_ELLIPSE, Size(19,19));
  Mat closed;
  morphologyEx(img, closed, MORPH_CLOSE, kernel);

  img.convertTo(img, CV_32FC1); // divide requires floating-point
  divide(img, closed, img, 1, CV_32FC1);
  normalize(img, img, 0, 255, NORM_MINMAX);
  img.convertTo(img, CV_8U); // convert back to unsigned int

  return img;
}
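A minimal usage sketch (the file name and the Otsu thresholding step are assumptions): dividing by the morphological closing flattens slow illumination gradients, so a single global threshold behaves better afterwards.

Mat gray = imread("plate.png", IMREAD_GRAYSCALE); // hypothetical input file
Mat flat = equalizeBrightness(gray);              // divide by the morphological closing
Mat bw;
threshold(flat, bw, 0, 255, THRESH_BINARY | THRESH_OTSU);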
Example #6
Mat ShapeDetect::DetectCarPlateOrLight( Mat src, int choosePlateOrLight ){
	Mat hsv; 
	Mat dst; 
	Mat mask=Mat::zeros(src.rows,src.cols, CV_8U); // mask used to filter out the other colors
	Mat r,r2,w; // in-range results for each color

	cvtColor(src,hsv,CV_BGR2HSV); // convert to the HSV color space

	if ( choosePlateOrLight == 0 )
	{
		//used for the car tail lights
		inRange(hsv,Scalar(0,43,46) , Scalar(10,255,255), r);
		inRange(hsv,Scalar(156,43,46) , Scalar(180,255,255), r2);
		mask = r + r2; // red

		src.copyTo(dst, mask); // filter the source image through the mask to get dst
		// Apply the specified morphology operation (one dilation with a 3x3 cross element)
		int morph_size = 1;
		Mat element = getStructuringElement( MORPH_CROSS, Size( 2*morph_size + 1, 2*morph_size + 1 ), Point( morph_size, morph_size ) );
		morphologyEx( dst, dst, MORPH_DILATE, element );

	}else if ( choosePlateOrLight == 1 )
	{
		//used for the car plate
		inRange(hsv,Scalar(0,0,200) ,Scalar(180,30,255), w);

		mask = w; // white
		src.copyTo(dst, mask); // filter the source image through the mask to get dst
		// Apply the specified morphology operation (six dilation passes with a 3x3 cross element)
		int morph_size = 1;
		Mat element = getStructuringElement( MORPH_CROSS, Size( 2*morph_size + 1, 2*morph_size+1 ), Point( morph_size, morph_size ) );
		for (int k = 0; k < 6; k++)
			morphologyEx( dst, dst, MORPH_DILATE, element );


		/*imshow("",dst);
		moveWindow("",0,0);
		cvWaitKey(0);*/

	}else{
		src.copyTo(dst,mask ); // filter the source image through the mask (mask is all zeros here, so dst stays empty)
	}

	return dst;
}
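A hypothetical usage sketch (the image path is made up and ShapeDetect is assumed to be default-constructible): mode 0 keeps red regions (tail lights), mode 1 keeps white regions (plate background).

ShapeDetect detector;                                  // assumed default-constructible
Mat car = imread("car.jpg");                           // hypothetical input file
Mat lights = detector.DetectCarPlateOrLight(car, 0);   // red mask -> tail lights
Mat plate  = detector.DetectCarPlateOrLight(car, 1);   // white mask -> plate background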
Example #7
vector< vector< Point> > BlobDetection::detectContours(Mat frame, Ptr< BackgroundSubtractor>& pMOG2Pointer , Mat& fgMaskMOG2)
{
	vector< vector< Point> > result;

	cvNamedWindow("Original"	, CV_WINDOW_NORMAL);
	cvNamedWindow("Blurred"		, CV_WINDOW_NORMAL);
	//cvNamedWindow("fgMaskMOG2X"	, CV_WINDOW_NORMAL);
	cvNamedWindow("Background Subtracted", CV_WINDOW_NORMAL);
	cvNamedWindow("Shadow Removed"	, CV_WINDOW_NORMAL);

	Mat fgMaskMOG2X = fgMaskMOG2.clone(); 

	Mat ContourImg; 
	Ptr< BackgroundSubtractor> pMOG2 = pMOG2Pointer; 
	Mat element = getStructuringElement(MORPH_RECT, Size(7, 7), Point(3, 3));
	imshow("Original", frame);

	//PreProcess
	blur(frame, frame, Size(4, 4));
	imshow("Blurred", frame);

	//Background subtraction
	pMOG2->operator()(frame, fgMaskMOG2X, -1);
	//imshow("fgMaskMOG2X", frame);

	morphologyEx(fgMaskMOG2X, frame, CV_MOP_CLOSE, element);
	imshow("Background Subtracted", frame);

	threshold(frame, frame, 180, 255, CV_THRESH_BINARY);
	imshow("Shadow Removed", frame);

	cvWaitKey(1);
	ContourImg = frame.clone();
	findContours(ContourImg,
		result, // a vector of contours
		CV_RETR_EXTERNAL, // retrieve the external contours
		CV_CHAIN_APPROX_NONE); // all pixels of each contours


	fgMaskMOG2 = fgMaskMOG2X.clone();
	return result;
}
vector<vector<Point>> CTipDetection::contourDetect(Mat img)
{
	vector<vector<Point>> contours; // stack allocation (the original heap allocation was never freed)
	Mat img_gray, img_sobel, img_threshold, element;
	cvtColor(img, img_gray, CV_BGR2GRAY); // convert to grayscale


	// Sobel(gray image, result sobel image, output depth, x order, y order, kernel size, scale, delta)
	Sobel(img_gray, img_sobel, CV_8U, 1, 0, 3, 1, 0, BORDER_DEFAULT);	// CV_8U: 8 bits per pixel; CV_32F would hold float values in 0-1.0

	// threshold the gradient image
	// img_sobel: input gradient image
	// img_threshold: resulting binary image
	// threshold value 0 (ignored here because Otsu picks the value automatically), max value 255
	// type: 0 binary (>threshold -> max value, otherwise 0), 1 binary inverted, 2 truncate, 3 to zero, 4 to zero inverted
	threshold(img_sobel, img_threshold, 0, 255, CV_THRESH_OTSU + CV_THRESH_BINARY);

	// build a structuring element for the morphological operations (erode, dilate)
	// MORPH_RECT: rectangle
	// MORPH_ELLIPSE: ellipse
	// MORPH_CROSS: cross-shaped
	// Size(8, 2): width 8, height 2
	// optional Point(-1, -1): anchor position (defaults to the element center)
	element = getStructuringElement(MORPH_RECT, Size(8, 2));

	// img_threshold: input matrix
	// img_threshold: output matrix (processed in place)
	// CV_MOP_CLOSE: type of morphology operation (CLOSE or OPEN)
	// element: structuring element used for the erosion/dilation steps
	morphologyEx(img_threshold, img_threshold, CV_MOP_CLOSE, element);

	// find the contours
	// CV_RETR_EXTERNAL: retrieve only the extreme outer contours
	// CV_CHAIN_APPROX_NONE: store absolutely all contour points (CV_CHAIN_APPROX_SIMPLE would compress segments and keep only their end points)
	findContours(img_threshold, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);

	return contours;
}
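A hypothetical usage sketch (the image path is made up and CTipDetection is assumed to be default-constructible) that draws the detected contours over the frame:

CTipDetection tip;                                     // assumed default-constructible
Mat frame = imread("frame.png");                       // hypothetical input file
vector<vector<Point>> found = tip.contourDetect(frame);
drawContours(frame, found, -1, Scalar(0, 255, 0), 2);  // index -1 draws every contour
imshow("contours", frame);
waitKey(0);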
Example #9
Result VideoHandle::getDirectionPoints()
{
    Mat frame;
    Mat prev;
    vector<Point> List;

	for (int i = 0; i < 30; i++) getFrame(); // skip the first 30 frames
    prev = getFrame();

    while(true)
    {
        int prev_clock = clock();

        frame = getFrame();
        if(frame.empty()) break;

        Mat temp;
        subtract(prev, frame, temp);
        const Size size = Size(160, 120);
        resize(temp, temp, size, 0, 0, CV_INTER_LINEAR);
        cvtColor(temp, temp, CV_BGR2GRAY);
        threshold(temp, temp, 20, 255, CV_THRESH_BINARY);

        morphologyEx(temp.clone(), temp, MORPH_OPEN, Mat::ones(3, 3, CV_8U));

        Moments m = ::moments(temp);
        Point p = Point(m.m10/m.m00, m.m01/m.m00);

		// Mat_<Point2f> points(1,1), dst(1,1);
		// points(0) = Point2f(p.x,p.y);
		// undistortPoints(points, dst, distortmtx, distortdist);
		// p.x = - dst(0).y * size.width;
		// p.y = - dst(0).x * size.height;
        cout << "Point : " << p.x << " " << p.y << endl;

        List.push_back(p);
        if (p.x < 5 || p.y < 5 || p.x > size.width - 6 || p.y > size.height - 6) {
            cout << "455555555555555" << endl;
            List.clear();
        }

        const int TIMES = 1;
        Point sum = Point(0, 0);
        for(int i=1;i<=TIMES;i++)
        {
            if(List.size() < TIMES + 1) continue;
            Point a = List[List.size() - i];
            Point b = List[List.size() - i - 1];
            Point sub = Point(a.x-b.x, a.y-b.y);
            sum.x += sub.x;
            sum.y += sub.y;
        }

        sum.x /= TIMES;
        sum.y /= TIMES;

        cout << "vector : " << sum.x << " " << sum.y << endl;
        if(abs(sum.x) >= 2 || abs(sum.y) >= 2) {
            Result ret = generateOutput(p, Point(p.x+sum.x, p.y+sum.y));
            ret.angle *= -1;
            return ret;
        }

        int now_clock = clock();
        double speed = double(now_clock - prev_clock) / CLOCKS_PER_SEC;
        cout << "speed : " << speed << " " << (1.0/speed) << endl;
    }
}
Example #10
File: myRadar.cpp  Project: yingjh/IARC_13
// display function should be good enough
void OpenRadar::DrawRadarData()
{
	int usualColor[15] = {16777215,255,128,65280,32768,
		      16711680,16711935,8421376,65535,32896 }; /* usual colors */
	CvPoint pt1, pt2;

	cvZero(RadarImage);
	cvCircle(RadarImage, cvPoint(DisplayDx,DisplayDy),3, CV_RGB(0,255,255), -1, 8,0);
	int x,y;
	unsigned char * pPixel = 0;
	int colorIndex = 0, colorRGB;
	int R = 255, G = 0, B = 0;
    
	for (int i = 0; i < RadarDataCnt;i++)
	{  
		if (RadarRho[i] < 0)
		{
			
			//change color
			colorRGB = usualColor[colorIndex];
			R = colorRGB/65536;
			G = (colorRGB%65536)/256;
			B = colorRGB%256;
			colorIndex = (colorIndex + 1)%10;
			
		}
		else 
		{
			x = (int)(RadarRho[i]*cos(RadarTheta[i])/DisplayRatio) + DisplayDx;
			y = (int)(-RadarRho[i]*sin(RadarTheta[i])/DisplayRatio)+ DisplayDy;
	
			if (x >= 0 && x < RadarImageWdith && y >= 0 && y < RadarImageHeight)
			{
				pPixel = (unsigned char*)RadarImage->imageData + y*RadarImage->widthStep + 3*x;
				pPixel[0] = B;
				pPixel[1] = G;
				pPixel[2] = R;
			}
		}     
	}
	
	pt1.x = DisplayDx; pt1.y = DisplayDy;
	pt2.x = DisplayDx+line_length*v_scale*sin(v_angle + 0.5*M_PI); 
	pt2.y = DisplayDy+line_length*v_scale*cos(v_angle + 0.5*M_PI);
	cvLine(RadarImage, pt1, pt2, CV_RGB(255,255,255),2,8,0);

	pt2.x = DisplayDx+line_length*cos(-(-120 + skip_bin_idx * polarH_resolution)* M_PI/180 ); 	
	pt2.y = DisplayDy+line_length*sin(-(-120 + skip_bin_idx * polarH_resolution)* M_PI/180 ); 
	cvLine(RadarImage, pt1, pt2, CV_RGB(0,255,0),1,8,0);

	pt2.x = DisplayDx+line_length*cos(-(-120 + (polarH_length-skip_bin_idx) * polarH_resolution)* M_PI/180 ); 
	pt2.y = DisplayDy+line_length*sin(-(-120 + (polarH_length-skip_bin_idx) * polarH_resolution)* M_PI/180 ); 
	//pt2.x = DisplayDx+line_length*cos(0.25*M_PI); 
	//pt2.y = DisplayDy+line_length*sin(0.25*M_PI);
	//cout<< line_length <<endl; 
	//cout<< pt1.x <<" , " << pt1.y <<endl;
	//cout<< pt2.x <<" , " << pt2.y <<endl;
	cvLine(RadarImage, pt1, pt2, CV_RGB(0,255,0),1,8,0);

	float angle;
	int line_length2;
	for (int i=0; i<polarH_length;i++)
	{
		angle = (-30+i*polarH_resolution)*M_PI/180;
		line_length2 = H[i]/10;
		pt2.x = DisplayDx+line_length2*sin(angle); 
		pt2.y = DisplayDy+line_length2*cos(angle);
		cvCircle(RadarImage, pt2, 2, CV_RGB(255,255,255),1,8,0);
	}

	////////////////////////////////////////////////////////////////////////////////////
	// mine
	////////////////////////////////////////////////////////////////////////////////////
	Mat binImg = Mat::zeros(RadarImageHeight,RadarImageWdith,CV_8UC1);
	vector< Point> centerRaw;
	centerRaw.clear();
	for (int i = 0; i < RadarDataCnt;i++)
	{  
		if (RadarRho[i] > 200)
		{
			x = (int)(RadarRho[i]*cos(RadarTheta[i])/DisplayRatio) + DisplayDx;
			y = (int)(-RadarRho[i]*sin(RadarTheta[i])/DisplayRatio)+ DisplayDy;
			//centerRaw.push_back(Point(x,y));
			//cout<<"P:" <<centerRaw[i].x<<","<<centerRaw[i].y<<endl;
			if (x >= 0 && x < RadarImageWdith && y >= 0 && y < RadarImageHeight)
			{
				 circle( binImg,Point(x,y),1,Scalar(255),-1);
			}
		}     
	}
	imshow("binImg",binImg);
	Mat element = getStructuringElement(MORPH_RECT, Size(1,2));
	Mat element2 = getStructuringElement(MORPH_RECT, Size(10,10));
	erode(binImg, binImg, element);
	morphologyEx(binImg, binImg, MORPH_OPEN, element);
	dilate(binImg, binImg, element2);
	morphologyEx(binImg, binImg, MORPH_CLOSE, element2);
	imshow("dilate",binImg);

	vector< vector<Point> > contours;	
	vector< vector<Point> > filterContours;	
	vector< Vec4i > hierarchy;	
	vector< Point2f> center;
	vector< float > radius;
	vector<Point2f> realPoint;
	

	findContours(binImg, contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
	center.resize(contours.size());
	radius.resize(contours.size());
	//realPoint.resize(contours.size());
	for(int i = 0; i< contours.size(); i++)
	{
		minEnclosingCircle(Mat(contours[i]),center[i],radius[i]); // minimum enclosing circle of the contour
		circle(binImg,center[i],650/DisplayRatio,Scalar(255),1); 
		//cout<<"No."<<i<<" | P: "<< center[i].x<<","<<center[i].y<<endl;
		float realX = (center[i].x - DisplayDx) * DisplayRatio;
		float realY = (center[i].y - DisplayDy) * DisplayRatio;

		realPoint.push_back(Point2f(realX,realY));
		//cout<<"No."<<i<<" | P: "<< realPoint[i].x<<","<<realPoint[i].y<<endl;
	}
	imshow("findContours",binImg);
	// color map
	Mat mapImg = Mat::zeros(RadarImageHeight,RadarImageWdith,CV_8UC3);
	circle(mapImg, Point(DisplayDx,DisplayDy),3, CV_RGB(255,255,255),-1);
	line(mapImg, Point(DisplayDx,DisplayDy), Point(DisplayDx+40,DisplayDy), Scalar(0,0,255),1);
	line(mapImg, Point(DisplayDx,DisplayDy), Point(DisplayDx,DisplayDy+40), Scalar(0,255,0),1);
	for(int i = 0; i< center.size(); i++)
	{
		circle(mapImg,center[i],650/DisplayRatio,Scalar(255,255,0),1,CV_AA); 
		circle(mapImg,center[i],100/DisplayRatio,Scalar(0,255,255),-1); 
	}
	imshow("Map",mapImg);
	////////////////////////////////////
	float freq = 50.0f;
	if(timeInit)
	{
		time_old = ros::Time::now();
		timeInit = false;
	}
	else 
	{
		float dt = (ros::Time::now() -time_old).toSec();
		cout<<"Frequency: "<<1/dt<<" Hz"<<endl;
		if( (ros::Time::now() -time_old).toSec() > (1/ freq))
		{
			time_old = ros::Time::now();
			ukftest::laserPoint msg;
			vector <float> xvec;
			vector <float> yvec;
			for(int i = 0 ; i < realPoint.size(); i++)
			{
				// cm
				xvec.push_back(realPoint[i].x/10.0f);
				yvec.push_back(realPoint[i].y/10.0f);
			}

			// msg
			msg.header.stamp = ros::Time::now();
			msg.header.frame_id = "hokuyo_laser";
			msg.x =xvec;
			msg.y =yvec;
			if(realPoint.size() >0) msg.isBlocking = 1;
			else msg.isBlocking = 0;
			pub_xy.publish(msg);

			// msg
			ukftest::ukfData ukfmsg;
			ukfmsg.avoid.x = v_scale*sin(v_angle + 0.5*M_PI); 
			ukfmsg.avoid.y = v_scale*cos(v_angle + 0.5*M_PI); 
			ukfmsg.dt = dt;
			ukfmsg.isBlocking = 1;
			//cout<< "xyz: "<<  ukfmsg.avoid.x <<"|"<<ukfmsg.avoid.y <<"|"<<ukfmsg.avoid.z <<endl;
			//cout<< "isBlocking:" <<  ukfmsg.isBlocking<< endl;
			pub.publish(ukfmsg);
		}
	}	
	

}
Example #11
  vector<PlateRegion> DetectorMorph::detect(Mat frame, std::vector<cv::Rect> regionsOfInterest) {

    Mat frame_gray,frame_gray_cp;

    if (frame.channels() > 2)
    {
      cvtColor( frame, frame_gray, CV_BGR2GRAY );
    }
    else
    {
      frame.copyTo(frame_gray);
    }

    frame_gray.copyTo(frame_gray_cp);
    blur(frame_gray, frame_gray, Size(5, 5));

    vector<PlateRegion> detectedRegions;
    for (int i = 0; i < regionsOfInterest.size(); i++) {
      Mat img_open, img_result;
      Mat element = getStructuringElement(MORPH_RECT, Size(30, 4));
      morphologyEx(frame_gray, img_open, CV_MOP_OPEN, element, cv::Point(-1, -1));

      img_result = frame_gray - img_open;

      if (config->debugDetector && config->debugShowImages) {
        imshow("Opening", img_result);
      }

      //threshold image using otsu thresholding
      Mat img_threshold, img_open2;
      threshold(img_result, img_threshold, 0, 255, CV_THRESH_OTSU + CV_THRESH_BINARY);

      if (config->debugDetector && config->debugShowImages) {
        imshow("Threshold Detector", img_threshold);
      }

      Mat diamond(5, 5, CV_8U, cv::Scalar(1));

	diamond.at<uchar>(0, 0) = 0;
	diamond.at<uchar>(0, 1) = 0;
	diamond.at<uchar>(1, 0) = 0;
	diamond.at<uchar>(4, 4) = 0;
	diamond.at<uchar>(3, 4) = 0;
	diamond.at<uchar>(4, 3) = 0;
	diamond.at<uchar>(4, 0) = 0;
	diamond.at<uchar>(4, 1) = 0;
	diamond.at<uchar>(3, 0) = 0;
	diamond.at<uchar>(0, 4) = 0;
	diamond.at<uchar>(0, 3) = 0;
	diamond.at<uchar>(1, 4) = 0;
			
      morphologyEx(img_threshold, img_open2, CV_MOP_OPEN, diamond, cv::Point(-1, -1));
      Mat rectElement = getStructuringElement(cv::MORPH_RECT, Size(13, 4));
      morphologyEx(img_open2, img_threshold, CV_MOP_CLOSE, rectElement, cv::Point(-1, -1));

      if (config->debugDetector && config->debugShowImages) {
        imshow("Close", img_threshold);
        waitKey(0);
      }

      //Find contours of possibles plates
      vector< vector< Point> > contours;
      findContours(img_threshold,
              contours, // a vector of contours
              CV_RETR_EXTERNAL, // retrieve the external contours
              CV_CHAIN_APPROX_NONE); // all pixels of each contours

      //Iterate over each contour found
      vector<vector<Point> >::iterator itc = contours.begin();
      vector<RotatedRect> rects;

      //Remove patches that are not inside the limits of aspect ratio and area.
      while (itc != contours.end()) {
        //Create bounding rect of object
        RotatedRect mr = minAreaRect(Mat(*itc));
        
        if (mr.angle < -45.) {
					mr.angle += 90.0;
					swap(mr.size.width, mr.size.height);
				}  
        
        if (!CheckSizes(mr))
          itc = contours.erase(itc);
        else {
          ++itc;
					rects.push_back(mr);
        }
      }

     //Now prune based on checking all candidate plates for a min/max number of blobs
Mat img_crop, img_crop_b, img_crop_th, img_crop_th_inv;
vector< vector< Point> > plateBlobs;
vector< vector< Point> > plateBlobsInv;
double thresholds[] = { 10, 40, 80, 120, 160, 200, 240 };
const int num_thresholds = 7;
int numValidChars = 0;
Mat rotated;
for (int i = 0; i < rects.size(); i++) {
	numValidChars = 0;
	RotatedRect PlateRect = rects[i];
	Size rect_size = PlateRect.size;

	// get the rotation matrix
	Mat M = getRotationMatrix2D(PlateRect.center, PlateRect.angle, 1.0);
	// perform the affine transformation
	warpAffine(frame_gray_cp, rotated, M, frame_gray_cp.size(), INTER_CUBIC);
	//Crop area around candidate plate
	getRectSubPix(rotated, rect_size, PlateRect.center, img_crop);

	 if (config->debugDetector && config->debugShowImages) {
		imshow("Tilt Correction", img_crop);
		waitKey(0);
	}

	for (int z = 0; z < num_thresholds; z++) {

		cv::threshold(img_crop, img_crop_th, thresholds[z], 255, cv::THRESH_BINARY);
		cv::threshold(img_crop, img_crop_th_inv, thresholds[z], 255, cv::THRESH_BINARY_INV);

		findContours(img_crop_th,
			plateBlobs, // a vector of contours
			CV_RETR_LIST, // retrieve the contour list
			CV_CHAIN_APPROX_NONE); // all pixels of each contours

		findContours(img_crop_th_inv,
			plateBlobsInv, // a vector of contours
			CV_RETR_LIST, // retrieve the contour list
			CV_CHAIN_APPROX_NONE); // all pixels of each contours

		int numBlobs = plateBlobs.size();
		int numBlobsInv = plateBlobsInv.size();
	
		float idealAspect = config->avgCharWidthMM / config->avgCharHeightMM;
		for (int j = 0; j < numBlobs; j++) {
			cv::Rect r0 = cv::boundingRect(cv::Mat(plateBlobs[j]));
			
			if (ValidateCharAspect(r0, idealAspect))
				numValidChars++;
		}

		for (int j = 0; j < numBlobsInv; j++) {
			cv::Rect r0 = cv::boundingRect(cv::Mat(plateBlobsInv[j]));
			if (ValidateCharAspect(r0, idealAspect))
				numValidChars++;
		}

	}
	//If there are too many or too few, it might not be a true plate
	//if (numBlobs < 3 || numBlobs > 50) continue;
	if (numValidChars < 4  || numValidChars > 50) continue;

        PlateRegion PlateReg;

        // Ensure that the rectangle isn't < 0 or > maxWidth/Height
        Rect bounding_rect = PlateRect.boundingRect();
        PlateReg.rect = expandRect(bounding_rect, 0, 0, frame.cols, frame.rows);
        
        
        detectedRegions.push_back(PlateReg);

      }

    }
    
    return detectedRegions;
  }
Example #12
File: Source.cpp  Project: Nbeleski/tcc_rmk
int main()
{

	// Opening video and testing integrity ---------------------------/

	VideoCapture capture(FILENAME);

	if (!capture.isOpened())
	{
		cerr << "Could not open the video.\n";
		return -1;
	}

	//----------------------------------------------------------------/

	for (int i = 0; i < samples.size(); i++)
	{
		samples[i] = Mat(Size(WIDTH, HEIGHT), CV_8UC3);
	}

	while (cont_n < SAMPLES)
	{

		bool bSuccess = capture.read(src);

		if (!bSuccess)
		{
			cout << "ERROR: could not read frame from file..." << endl;
			break;
		}

		img_8u3c = resizeFixed(src);

		if (cont_t % INTERVAL == 0)
		{
			for (int j = 0; j < HEIGHT; j++)
			{
				for (int i = 0; i < WIDTH; i++)
				{
					samples[cont_n].at<Vec3b>(j, i) = img_8u3c.at<Vec3b>(j, i);
				}
			}

			cont_n++;
		}
		cont_t++;
	}


	for (int j = 0; j < HEIGHT; j++)
	{
		for (int i = 0; i < WIDTH; i++)
		{
			for (int c = 0; c < SAMPLES; c++)
			{
				pixel_list[c] = samples[c].at<Vec3b>(j, i);
			}
			sort(begin(pixel_list), end(pixel_list), Compare_Vec3f);
			bg_8u3c.at<Vec3b>(j, i) = pixel_list[SAMPLES / 2 + 1];
		}
	}

	// At this point we have an approximation of the background
	// It is used to initialize the samples in the Vibe algorithm
	// imshow("Bg gerado", bg_8u3c);

	cvtColor(bg_8u3c, bg_lab_8u3c, CV_BGR2Lab);

	// Background gradients will be used in the texture patch test
	bg_8u3c.convertTo(bg_32fc3, CV_32FC3, 1 / 255.0);
	calcGradients(bg_32fc3, bg_dx_32f, bg_dy_32f, bg_mag_32f, bg_ori_32f);

	initBackground(bg_32fc3);
	Mat vibe_mask(Size(WIDTH, HEIGHT), CV_8U);
	Mat vibe_filtered(Size(WIDTH, HEIGHT), CV_8U);

	// Start of the real-time analysis (after initial bg generation) --------------------------/
	while (true)
	{
		bool bSuccess = capture.read(src);

		if (!bSuccess)
		{
			cout << "ERROR: could not read frame from file..." << endl;
			return -1;
		}

		img_8u3c = resizeFixed(src);
		// At this point the Mat image holds
		// a smaller version of the actual frame.

		cvtColor(img_8u3c, img_lab_8u3c, CV_BGR2Lab);
		img_8u3c.convertTo(img_32fc3, CV_32FC3, 1 / 255.0);

		// Block that generates the mask - used instead of Vibe for debugging -----------------------/
		
		if (!VIBE)
		{

			Mat img_8u_gray, bg_8u_gray;
			cvtColor(img_8u3c, img_8u_gray, CV_BGR2GRAY);
			cvtColor(bg_8u3c, bg_8u_gray, CV_BGR2GRAY);
			absdiff(img_8u_gray, bg_8u_gray, diff);
			threshold(diff, mask_8u, 50, 10, CV_8U);
			morphologyEx(mask_8u, mask_8u, MORPH_CLOSE, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));
			//GaussianBlur(mask_8u, mask_8u, Size(3, 3), 0);

			filtered_mask_8u = media_binary(mask_8u, 3, 10);
		}
		else
		{
			// Vibe -------------------------------------------------------------------------------/
			vibe(img_32fc3, vibe_mask);
			morphologyEx(vibe_mask, vibe_mask, MORPH_CLOSE, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));
			filtered_mask_8u = media_binary(vibe_mask, 3, 10);
		}
		// ------------------------------------------------------------------------------------/

		//findConnectedComponents(filtered_mask_8u, components);
		//components.clear();

		findConnectedComponents(filtered_mask_8u, components);

		// For all connected components:
		for (int i = 0; i < components.size(); i++)
		{
			Rect roi = components[i];

			calcGradients(Mat(img_32fc3, roi), Mat(img_dx_32f, roi),
				Mat(img_dy_32f, roi), Mat(img_mag_32f, roi), Mat(img_ori_32f, roi));

			/*imshow("ori_bg", bg_ori_32f);
			imshow("ori_fg", img_ori_32f);
			Mat diff_ori_ffs;
			absdiff(img_ori_32f, bg_ori_32f, diff_ori_ffs);
			threshold(diff_ori_ffs, diff_ori_ffs, 0.75, 255, CV_8U);
			imshow("ori_diff", Mat(diff_ori_ffs, roi));
			waitKey(1000000);*/

			// Detect shadows
			detectShadows(Mat(img_8u3c, roi), Mat(img_lab_8u3c, roi), Mat(bg_lab_8u3c, roi), Mat(filtered_mask_8u, roi),
				Mat(img_dx_32f, roi), Mat(img_dy_32f, roi), Mat(img_mag_32f, roi), Mat(img_ori_32f, roi), 
				Mat(bg_dx_32f, roi), Mat(bg_dy_32f, roi), Mat(bg_mag_32f, roi), Mat(img_ori_32f, roi), roi);

		}
		components.clear();


		//imshow("mask", filtered_mask_8u);
		//imshow("vibe", vibe_filtered);

		//imshow("img", img_8u3c);

		imshow("final", img_8u3c);
		waitKey(10000000);

		switch (waitKey(1))	{
		case ESC_KEY:
			return 0;
		}
	}

	capture.release();
	return 0;

}
Example #13
  // Tries to find a rectangular area surrounding most of the characters.  Not required
  // but helpful when determining the plate edges
  void PlateMask::findOuterBoxMask( vector<TextContours > contours )
  {
    double min_parent_area = pipeline_data->config->templateHeightPx * pipeline_data->config->templateWidthPx * 0.10;	// Needs to be at least 10% of the plate area to be considered.

    int winningIndex = -1;
    int winningParentId = -1;
    int bestCharCount = 0;
    double lowestArea = 99999999999999;

    if (pipeline_data->config->debugCharAnalysis)
      cout << "CharacterAnalysis::findOuterBoxMask" << endl;

    for (unsigned int imgIndex = 0; imgIndex < contours.size(); imgIndex++)
    {
      //vector<bool> charContours = filter(thresholds[imgIndex], allContours[imgIndex], allHierarchy[imgIndex]);

      int charsRecognized = 0;
      int parentId = -1;
      bool hasParent = false;
      for (unsigned int i = 0; i < contours[imgIndex].goodIndices.size(); i++)
      {
        if (contours[imgIndex].goodIndices[i]) charsRecognized++;
        if (contours[imgIndex].goodIndices[i] && contours[imgIndex].hierarchy[i][3] != -1)
        {
          parentId = contours[imgIndex].hierarchy[i][3];
          hasParent = true;
        }
      }

      if (charsRecognized == 0)
        continue;

      if (hasParent)
      {
        double boxArea = contourArea(contours[imgIndex].contours[parentId]);
        if (boxArea < min_parent_area)
          continue;

        if ((charsRecognized > bestCharCount) ||
            (charsRecognized == bestCharCount && boxArea < lowestArea))
          //(boxArea < lowestArea)
        {
          bestCharCount = charsRecognized;
          winningIndex = imgIndex;
          winningParentId = parentId;
          lowestArea = boxArea;
        }
      }
    }

    if (pipeline_data->config->debugCharAnalysis)
      cout << "Winning image index (findOuterBoxMask) is: " << winningIndex << endl;

    if (winningIndex != -1 && bestCharCount >= 3)
    {

      Mat mask = Mat::zeros(pipeline_data->thresholds[winningIndex].size(), CV_8U);

      // draw the winning parent contour, filled in white
      drawContours(mask, contours[winningIndex].contours,
                   winningParentId, // draw this contour
                   cv::Scalar(255,255,255), // in
                   FILLED,
                   8,
                   contours[winningIndex].hierarchy,
                   0
                  );

      // Morph Open the mask to get rid of any little connectors to non-plate portions
      int morph_elem  = 2; // MORPH_ELLIPSE
      int morph_size = 3;
      Mat element = getStructuringElement( morph_elem, Size( 2*morph_size + 1, 2*morph_size+1 ), Point( morph_size, morph_size ) );

      //morphologyEx( mask, mask, MORPH_CLOSE, element );
      morphologyEx( mask, mask, MORPH_OPEN, element );

      //morph_size = 1;
      //element = getStructuringElement( morph_elem, Size( 2*morph_size + 1, 2*morph_size+1 ), Point( morph_size, morph_size ) );
      //dilate(mask, mask, element);

      // Drawing the edge black effectively erodes the image.  This may clip off some extra junk from the edges.
      // We'll want to find the contours again and keep the largest one so that the clipped portion is removed.

      vector<vector<Point> > contoursSecondRound;

      findContours(mask, contoursSecondRound, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE);
      int biggestContourIndex = -1;
      double largestArea = 0;
      for (unsigned int c = 0; c < contoursSecondRound.size(); c++)
      {
        double area = contourArea(contoursSecondRound[c]);
        if (area > largestArea)
        {
          biggestContourIndex = c;
          largestArea = area;
        }
      }

      if (biggestContourIndex != -1)
      {
        mask = Mat::zeros(pipeline_data->thresholds[winningIndex].size(), CV_8U);

        vector<Point> smoothedMaskPoints;
        approxPolyDP(contoursSecondRound[biggestContourIndex], smoothedMaskPoints, 2, true);

        vector<vector<Point> > tempvec;
        tempvec.push_back(smoothedMaskPoints);
        //fillPoly(mask, smoothedMaskPoints.data(), smoothedMaskPoints, Scalar(255,255,255));
        drawContours(mask, tempvec,
                     0, // draw this contour
                     cv::Scalar(255,255,255), // in
                     FILLED,
                     8,
                     contours[winningIndex].hierarchy,
                     0
                    );
      }

      if (pipeline_data->config->debugCharAnalysis)
      {
        vector<Mat> debugImgs;
        Mat debugImgMasked = Mat::zeros(pipeline_data->thresholds[winningIndex].size(), CV_8U);

        pipeline_data->thresholds[winningIndex].copyTo(debugImgMasked, mask);

        debugImgs.push_back(mask);
        debugImgs.push_back(pipeline_data->thresholds[winningIndex]);
        debugImgs.push_back(debugImgMasked);

        Mat dashboard = drawImageDashboard(debugImgs, CV_8U, 1);
        displayImage(pipeline_data->config, "Winning outer box", dashboard);
      }

      hasPlateMask = true;
      this->plateMask = mask;
	} else {
	  hasPlateMask = false;
	  Mat fullMask = Mat::zeros(pipeline_data->thresholds[0].size(), CV_8U);
	  bitwise_not(fullMask, fullMask);
	  this->plateMask = fullMask;
	}
  }
Example #14
percepunit::percepunit(Mat *src)
{
    Mat orig, meanshift, mask, flood, reconstruction, Matrice;
    vector<percepunit> percepunits;                 // dynamic vector to store instances of percepunit.

    image = *src;

    /// Copy the original image for in-place processing.
    image.copyTo(orig);
    image.copyTo(Matrice);

    // morphology (supports in place operation)
    Mat element = getStructuringElement(MORPH_ELLIPSE, Size(5,5), Point(2, 2) );
    morphologyEx(image, image, MORPH_CLOSE, element);
    morphologyEx(image, image, MORPH_OPEN, element);

    // Mean shift filtering
    pyrMeanShiftFiltering(image, meanshift, 10, 35, 3);

    RNG rng = theRNG();

    // place to store ffill masks
    mask = Mat( meanshift.rows+2, meanshift.cols+2, CV_8UC1, Scalar::all(0) ); // Make black single-channel image.
    meanshift.copyTo(flood); // copy image
    int area;
    Rect *boundingRect = new Rect(); // Stored bounding box for each flooded area.

    // Loop through all the pixels and flood fill.
    for( int y = 0; y < meanshift.rows; y++ )
    {
        for( int x = 0; x < meanshift.cols; x++ )
        {
            if( mask.at<uchar>(y+1, x+1) == 0 ) // mask is offset from original image.
            {
                Scalar newVal( rng(256), rng(256), rng(256) );

                area = floodFill( flood, mask, Point(x,y), newVal, boundingRect, Scalar::all(1), Scalar::all(1), 8|255<<8);
                //Extract a subimage for each flood, if the flood is large enough.
                if (boundingRect->width >35 && boundingRect->height >35) {

                    Mat ROI = orig(*boundingRect); // Make a cropped reference (not copy) of the image

                    // crop translated mask to register with original image.
                    boundingRect->y++;
                    boundingRect->height++;
                    boundingRect->x++;
                    boundingRect->width++;
                    Mat alpha = mask(*boundingRect);

                    // Append an instance to the vector.
                    percepunits.push_back(percepunit(ROI, alpha, boundingRect->x-1, boundingRect->y-1, boundingRect->width-1, boundingRect->height-1));
                }

            }
        }
    }

    // New Image for reconstruction
    reconstruction = Mat(mask.rows,mask.cols,CV_8UC3, Scalar(0,0,0)); // black background

    /// Loop through instances and print
    for(int i = 0; i <percepunits.size(); i++) {

        // Copy percept into reconstruction.
        copyPercept(percepunits[i], reconstruction);

    }

    for (int row = 0 ; row < Matrice.rows ; row++ )
    {
        for (int col = 0 ; col  < Matrice.cols ; col++ )
        {
            Matrice.at<uchar>(row,col) = reconstruction.at<uchar>(row,col);
        }
    }
    *src = Matrice;

    delete boundingRect; // free the heap-allocated bounding box

    /// Release temporaries (cv::Mat manages its own memory; these releases are just explicit cleanup)
    orig.release();
    meanshift.release();
    mask.release();
    flood.release();
    reconstruction.release();
    Matrice.release();

}
Example #15
//! Locate license plate regions in an image
//! src: the source image
//! resultVec: a vector of Mat that stores all captured plate images
//! Returns 0 on success, -1 otherwise
int CPlateLocate::plateLocate(Mat src, vector<Mat>& resultVec)
{
	Mat src_blur, src_gray;
	Mat grad;

	int scale = SOBEL_SCALE;
	int delta = SOBEL_DELTA;
	int ddepth = SOBEL_DDEPTH;

	if( !src.data )
	{ return -1; }

	//Gaussian blur. The value used in Size affects how well plates are located.
	GaussianBlur( src, src_blur, Size(m_GaussianBlurSize, m_GaussianBlurSize), 
		0, 0, BORDER_DEFAULT );

	/// Convert it to gray
	cvtColor( src_blur, src_gray, CV_RGB2GRAY );

	/// Generate grad_x and grad_y
	Mat grad_x, grad_y;
	Mat abs_grad_x, abs_grad_y;

	/// Gradient X
	//Scharr( src_gray, grad_x, ddepth, 1, 0, scale, delta, BORDER_DEFAULT );
	Sobel( src_gray, grad_x, ddepth, 1, 0, 3, scale, delta, BORDER_DEFAULT );
	convertScaleAbs( grad_x, abs_grad_x );

	/// Gradient Y
	//Scharr( src_gray, grad_y, ddepth, 0, 1, scale, delta, BORDER_DEFAULT );
	Sobel( src_gray, grad_y, ddepth, 0, 1, 3, scale, delta, BORDER_DEFAULT );
	convertScaleAbs( grad_y, abs_grad_y );

	/// Total Gradient (approximate)
	addWeighted( abs_grad_x, SOBEL_X_WEIGHT, abs_grad_y, SOBEL_Y_WEIGHT, 0, grad );

	Mat img_threshold;
	threshold(grad, img_threshold, 0, 255, CV_THRESH_OTSU+CV_THRESH_BINARY);
	//threshold(grad, img_threshold, 75, 255, CV_THRESH_BINARY);

	Mat element = getStructuringElement(MORPH_RECT, Size(m_MorphSizeWidth, m_MorphSizeHeight) );
	morphologyEx(img_threshold, img_threshold, CV_MOP_CLOSE, element);
	
	//Find contours of possible plates
	vector< vector< Point> > contours;
	findContours(img_threshold,
		contours, // a vector of contours
		CV_RETR_EXTERNAL, // retrieve the external contours
		CV_CHAIN_APPROX_NONE); // all pixels of each contours

	//Iterate over each contour found
	vector<vector<Point> >::iterator itc = contours.begin();
	
	vector<RotatedRect> rects;
	//Remove patches that are not inside the limits of aspect ratio and area.
	while (itc != contours.end())
	{
		//Create bounding rect of object
		RotatedRect mr = minAreaRect(Mat(*itc));

		//discard rects whose size does not look like a plate
		if( !verifySizes(mr))
		{
			itc = contours.erase(itc);
		}
		else
		{
			++itc;
			rects.push_back(mr);
		}
	}

	for(int i=0; i< rects.size(); i++)
	{
		RotatedRect minRect = rects[i];
		if(verifySizes(minRect))
		{	
			// rotated rectangle drawing
			// Get rotation matrix
			// This rotation code can straighten some tilted plates,
			// but it also mistakenly tilts even more plates that were already straight, so on balance it had not been used.
			// 2014-08-14: a new batch of images contained many tilted plates, so this code is being tried again.
			float r = (float)minRect.size.width / (float)minRect.size.height;
			float angle = minRect.angle;
			Size rect_size = minRect.size;
			if (r < 1)
			{
				angle = 90 + angle;
				swap(rect_size.width, rect_size.height);
			}
			//If the captured rect is rotated by more than m_angle degrees, it is not a plate; skip it
			if (angle - m_angle < 0 && angle + m_angle > 0)
			{
				//Create and rotate image
				Mat rotmat = getRotationMatrix2D(minRect.center, angle, 1);
				Mat img_rotated;
				warpAffine(src, img_rotated, rotmat, src.size(), CV_INTER_CUBIC);

				Mat resultMat;
				resultMat = showResultMat(img_rotated, rect_size, minRect.center);

				resultVec.push_back(resultMat);
			}
		}
	}
	return 0;
}
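A hypothetical usage sketch (the image path is made up and CPlateLocate is assumed to be default-constructible) that collects and displays the candidate plate crops:

CPlateLocate locator;                                  // assumed default-constructible
vector<Mat> candidates;
Mat car = imread("car.jpg");                           // hypothetical input file
if (locator.plateLocate(car, candidates) == 0)         // 0 means success
{
	for (size_t i = 0; i < candidates.size(); i++)
		imshow("candidate " + std::to_string(i), candidates[i]);
	waitKey(0);
}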
Example #16
  // Gets the hue/sat/val for areas that we believe are license plate characters
  // Then uses that to filter the whole image and provide a mask.
  void ColorFilter::findCharColors()
  {
    int MINIMUM_SATURATION = 45;

    if (this->debug)
      cout << "ColorFilter::findCharColors" << endl;

    //charMask.copyTo(this->colorMask);
    this->colorMask = Mat::zeros(charMask.size(), CV_8U);
    bitwise_not(this->colorMask, this->colorMask);

    Mat erodedCharMask(charMask.size(), CV_8U);
    Mat element = getStructuringElement( MORPH_CROSS,
                                         Size( 2 + 1, 2+1 ),
                                         Point( 1, 1 ) );
    erode(charMask, erodedCharMask, element);

    vector<vector<Point> > contours;
    vector<Vec4i> hierarchy;
    findContours(erodedCharMask, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE);

    vector<float> hMeans, sMeans, vMeans;
    vector<float> hStdDevs, sStdDevs, vStdDevs;

    for (unsigned int i = 0; i < contours.size(); i++)
    {
      if (hierarchy[i][3] != -1)
        continue;

      Mat singleCharMask = Mat::zeros(hsv.size(), CV_8U);

      drawContours(singleCharMask, contours,
                   i, // draw this contour
                   cv::Scalar(255,255,255), // in
                   CV_FILLED,
                   8,
                   hierarchy
                  );

      // get rid of the outline by drawing a 1 pixel width black line
      drawContours(singleCharMask, contours,
                   i, // draw this contour
                   cv::Scalar(0,0,0), // in
                   1,
                   8,
                   hierarchy
                  );

      //drawAndWait(&singleCharMask);

      Scalar mean;
      Scalar stddev;
      meanStdDev(hsv, mean, stddev, singleCharMask);

      if (this->debug)
      {
        cout << "ColorFilter " << setw(3) << i << ". Mean:  h: " << setw(7) << mean[0] << " s: " << setw(7) <<mean[1] << " v: " << setw(7) << mean[2]
             << " | Std: h: " << setw(7) <<stddev[0] << " s: " << setw(7) <<stddev[1] << " v: " << stddev[2] << endl;
      }

      if (mean[0] == 0 && mean[1] == 0 && mean[2] == 0)
        continue;

      hMeans.push_back(mean[0]);
      sMeans.push_back(mean[1]);
      vMeans.push_back(mean[2]);
      hStdDevs.push_back(stddev[0]);
      sStdDevs.push_back(stddev[1]);
      vStdDevs.push_back(stddev[2]);
    }

    if (hMeans.size() == 0)
      return;

    int bestHueIndex = this->getMajorityOpinion(hMeans, .65, 30);
    int bestSatIndex = this->getMajorityOpinion(sMeans, .65, 35);
    int bestValIndex = this->getMajorityOpinion(vMeans, .65, 30);

    if (sMeans[bestSatIndex] < MINIMUM_SATURATION)
      return;

    bool doHueFilter = false, doSatFilter = false, doValFilter = false;
    float hueMin, hueMax;
    float satMin, satMax;
    float valMin, valMax;

    if (this->debug)
      cout << "ColorFilter Winning indices:" << endl;
    if (bestHueIndex != -1)
    {
      doHueFilter = true;
      hueMin = hMeans[bestHueIndex] - (2 * hStdDevs[bestHueIndex]);
      hueMax = hMeans[bestHueIndex] + (2 * hStdDevs[bestHueIndex]);

      if (abs(hueMin - hueMax) < 20)
      {
        hueMin = hMeans[bestHueIndex] - 20;
        hueMax = hMeans[bestHueIndex] + 20;
      }

      if (hueMin < 0)
        hueMin = 0;
      if (hueMax > 180)
        hueMax = 180;

      if (this->debug)
        cout << "ColorFilter Hue: " << bestHueIndex << " : " << setw(7) << hMeans[bestHueIndex] << " -- " << hueMin << "-" << hueMax << endl;
    }
    if (bestSatIndex != -1)
    {
      doSatFilter = true;

      satMin = sMeans[bestSatIndex] - (2 * sStdDevs[bestSatIndex]);
      satMax = sMeans[bestSatIndex] + (2 * sStdDevs[bestSatIndex]);

      if (abs(satMin - satMax) < 20)
      {
        satMin = sMeans[bestSatIndex] - 20;
        satMax = sMeans[bestSatIndex] + 20;
      }

      if (satMin < 0)
        satMin = 0;
      if (satMax > 255)
        satMax = 255;

      if (this->debug)
        cout << "ColorFilter Sat: " << bestSatIndex << " : " << setw(7) << sMeans[bestSatIndex] << " -- " << satMin << "-" << satMax << endl;
    }
    if (bestValIndex != -1)
    {
      doValFilter = true;

      valMin = vMeans[bestValIndex] - (1.5 * vStdDevs[bestValIndex]);
      valMax = vMeans[bestValIndex] + (1.5 * vStdDevs[bestValIndex]);

      if (abs(valMin - valMax) < 20)
      {
        valMin = vMeans[bestValIndex] - 20;
        valMax = vMeans[bestValIndex] + 20;
      }

      if (valMin < 0)
        valMin = 0;
      if (valMax > 255)
        valMax = 255;

      if (this->debug)
        cout << "ColorFilter Val: " << bestValIndex << " : " << setw(7) << vMeans[bestValIndex] << " -- " << valMin << "-" << valMax  << endl;
    }

    Mat imgDebugHueOnly = Mat::zeros(hsv.size(), hsv.type());
    Mat imgDebug = Mat::zeros(hsv.size(), hsv.type());
    Mat imgDistanceFromCenter = Mat::zeros(hsv.size(), CV_8U);
    Mat debugMask = Mat::zeros(hsv.size(), CV_8U);
    bitwise_not(debugMask, debugMask);

    for (int row = 0; row < charMask.rows; row++)
    {
      for (int col = 0; col < charMask.cols; col++)
      {
        int h = (int) hsv.at<Vec3b>(row, col)[0];
        int s = (int) hsv.at<Vec3b>(row, col)[1];
        int v = (int) hsv.at<Vec3b>(row, col)[2];

        bool hPasses = true;
        bool sPasses = true;
        bool vPasses = true;

        int vDistance = abs(v - vMeans[bestValIndex]);

        imgDebugHueOnly.at<Vec3b>(row, col)[0] = h;
        imgDebugHueOnly.at<Vec3b>(row, col)[1] = 255;
        imgDebugHueOnly.at<Vec3b>(row, col)[2] = 255;

        imgDebug.at<Vec3b>(row, col)[0] = 255;
        imgDebug.at<Vec3b>(row, col)[1] = 255;
        imgDebug.at<Vec3b>(row, col)[2] = 255;

        if (doHueFilter && (h < hueMin || h > hueMax))
        {
          hPasses = false;
          imgDebug.at<Vec3b>(row, col)[0] = 0;
          debugMask.at<uchar>(row, col) = 0;
        }
        if (doSatFilter && (s < satMin || s > satMax))
        {
          sPasses = false;
          imgDebug.at<Vec3b>(row, col)[1] = 0;
        }
        if (doValFilter && (v < valMin || v > valMax))
        {
          vPasses = false;
          imgDebug.at<Vec3b>(row, col)[2] = 0;
        }

        //if (pixelPasses)
        //  colorMask.at<uchar>(row, col) = 255;
        //else
        //imgDebug.at<Vec3b>(row, col)[0] = hPasses & 255;
        //imgDebug.at<Vec3b>(row, col)[1] = sPasses & 255;
        //imgDebug.at<Vec3b>(row, col)[2] = vPasses & 255;

        if ((hPasses) ||  (hPasses && sPasses))//(hPasses && vPasses) || (sPasses && vPasses) ||
          this->colorMask.at<uchar>(row, col) = 255;
        else
          this->colorMask.at<uchar>(row, col) = 0;

        if ((hPasses && sPasses) || (hPasses && vPasses) || (sPasses && vPasses))
        {
          vDistance = pow(vDistance, 0.9);
        }
        else
        {
          vDistance = pow(vDistance, 1.1);
        }
        if (vDistance > 255)
          vDistance = 255;
        imgDistanceFromCenter.at<uchar>(row, col) = vDistance;
      }
    }

    vector<Mat> debugImagesSet;

    if (this->debug)
    {
      debugImagesSet.push_back(addLabel(charMask, "Character mask"));
      //debugImagesSet1.push_back(erodedCharMask);
      Mat maskCopy(colorMask.size(), colorMask.type());
      colorMask.copyTo(maskCopy);
      debugImagesSet.push_back(addLabel(maskCopy, "color Mask Before"));
    }

    Mat bigElement = getStructuringElement( MORPH_CROSS,
                                            Size( 3 + 1, 3+1 ),
                                            Point( 1, 1 ) );

    Mat smallElement = getStructuringElement( MORPH_CROSS,
                       Size( 1 + 1, 1+1 ),
                       Point( 1, 1 ) );

    morphologyEx(this->colorMask, this->colorMask, MORPH_CLOSE, bigElement);
    //dilate(this->colorMask, this->colorMask, bigElement);

    Mat combined(charMask.size(), charMask.type());
    bitwise_and(charMask, colorMask, combined);

    if (this->debug)
    {
      debugImagesSet.push_back(addLabel(colorMask, "Color Mask After"));

      debugImagesSet.push_back(addLabel(combined, "Combined"));

      //displayImage(config, "COLOR filter Mask", colorMask);
      debugImagesSet.push_back(addLabel(imgDebug, "Color filter Debug"));

      cvtColor(imgDebugHueOnly, imgDebugHueOnly, CV_HSV2BGR);
      debugImagesSet.push_back(addLabel(imgDebugHueOnly, "Color Filter Hue"));

      equalizeHist(imgDistanceFromCenter, imgDistanceFromCenter);
      debugImagesSet.push_back(addLabel(imgDistanceFromCenter, "COLOR filter Distance"));

      debugImagesSet.push_back(addLabel(debugMask, "COLOR Hues off"));

      Mat dashboard = drawImageDashboard(debugImagesSet, imgDebugHueOnly.type(), 3);
      displayImage(config, "Color Filter Images", dashboard);
    }
  }
Example #17
/**
 * @brief MainWindow::getNumber
 * @return the class predicted by the neural network for the image in imageFile
 */
int MainWindow::getNumber(){
    srcImage = imread(imageFile.toStdString().data(),0);
    line(srcImage,Point(0,0),Point(0,srcImage.rows),Scalar(255),20);
    line(srcImage,Point(0,0),Point(srcImage.cols,0),Scalar(255),20);
    line(srcImage,Point(srcImage.cols,srcImage.rows),Point(srcImage.cols,0),Scalar(255),20);
    line(srcImage,Point(srcImage.cols,srcImage.rows),Point(0,srcImage.rows),Scalar(255),20);

    equalizeHist(srcImage,srcImageEqualizada);
    //COMPUTE THE THRESHOLD
    this->dstImageThresholdAdaptative = ControlPreprocesamiento::umbralAutomaticoAdaptativo(srcImage);
    this->dstImageThreshold = ControlPreprocesamiento::umbralAutomatico(srcImageEqualizada);

    //FILTERING
    Mat BStructElement = getStructuringElement(CV_SHAPE_RECT,Size(2,2));
    morphologyEx(this->dstImageThresholdAdaptative, this->dstImageClose, CV_MOP_CLOSE, BStructElement,Point(-1,-1) ,2 );

    //SEGMENTATION
    Mat src = imread(imageFile.toStdString().data());
    ControlSegmentacion::encontrarSegmentos(src,dstImageClose,dstImageSegmentacion,dstRectanguloEnvolvente);

    //THINNING
    dstImageAdelgazada = Mat::zeros(dstImageClose.size(), CV_8UC1);
    dstImageClose.copyTo(dstImageAdelgazada);

    ControlPreprocesamiento::adelgazamiento(dstImageAdelgazada);

    dstImageAdelgazada.copyTo(dstImageRectanguloEnvolvente);
    //Five extra pixels are added on each side of the rectangle to give the
    //end-point search algorithm some room
    //if(dstRectanguloEnvolvente.x <= 5 || dstRectanguloEnvolvente.y <= 5 ) continue;
    dstRectanguloEnvolvente.height += 10;
    dstRectanguloEnvolvente.width += 10;
    dstRectanguloEnvolvente.x -= 5;
    dstRectanguloEnvolvente.y -= 5;

    rectangle(dstImageRectanguloEnvolvente,dstRectanguloEnvolvente,Scalar(255));

    //FEATURE COMPUTATION
    cout<<"width "<<dstRectanguloEnvolvente.width<<endl;
    cout<<"height "<<dstRectanguloEnvolvente.height<<endl;
    cout<<"x "<<dstRectanguloEnvolvente.x<<endl;
    cout<<"y "<<dstRectanguloEnvolvente.y<<endl;
    cout<<dstImageAdelgazada.rows<<endl;

    dstImageFinal = dstImageAdelgazada(dstRectanguloEnvolvente).clone();
    double relacionAnchoAlto = (double)dstImageFinal.cols/dstImageFinal.rows;

    vector<Point> endPoints;
    vector<Point> insersectPoints;
    ControlObtencionCaracteristicas::buscarPuntos(dstImageFinal,endPoints, insersectPoints);
    cout<<endPoints.size()<<endl;

    Mat dstImageMorph = ControlPreprocesamiento::morphImage(dstImageThreshold);
    vector<vector<Point> > contornos;

    contornos = ControlObtencionCaracteristicas::getContornos(dstImageMorph);
    vector<vector<double> > momentosHu = ControlObtencionCaracteristicas::getHuMoments(contornos);

// ENCLOSING POLYGON
   vector<Point > poligono = ControlObtencionCaracteristicas::getEnvolvingPolygon( contornos);
   Mat poligonoimagen = ControlObtencionCaracteristicas::getEnvolvingPolygonImage(srcImage, poligono);
   vector<vector<Point > > contornoPoligono = ControlObtencionCaracteristicas::getContornos(poligonoimagen);
       momentosHu = ControlObtencionCaracteristicas::getHuMoments(contornoPoligono);

 /*   cout<<momentosHu.at(0).at(0)<<","
        <<momentosHu.at(0).at(1)<<","
        <<momentosHu.at(0).at(2)<<","
        <<momentosHu.at(0).at(3)<<","
        <<momentosHu.at(0).at(4)<<","
        <<momentosHu.at(0).at(5)<<","
        <<momentosHu.at(0).at(6)<2<","
        <<endl;*/
    ///Determine which quadrant each point belongs to
       int cuadEndPoints0 = 0,cuadEndPoints1 = 0, cuadEndPoints2 = 0, cuadEndPoints3 = 0,
               cuadEndPoints4 = 0, cuadEndPoints5 = 0, cuadEndPoints6 = 0, cuadEndPoints7 = 0, cuadEndPoints8 = 0;
       int mitadX1 = int(dstImageFinal.cols/3);
       int mitadX2 = int(2*dstImageFinal.cols/3);
       int mitadY1 = int(dstImageFinal.rows/3);
       int mitadY2 = int(2*dstImageFinal.rows/3);
       Point p;

       //Count how many endPoints fall in each quadrant
       for(unsigned c = 0; c< endPoints.size();c++){
           p = endPoints.at(c);
           //0
           if( p.x <  mitadX1 && p.y <  mitadY1 ){ cuadEndPoints0++; continue;}
           //1
           if( p.x <  mitadX2 && p.y <  mitadY1 ){ cuadEndPoints1++; continue;}
           //2
           if( p.x >= mitadX2 && p.y <  mitadY1 ){ cuadEndPoints2++; continue;}
           //3
           if( p.x <  mitadX1 && p.y <  mitadY2 ){ cuadEndPoints3++; continue;}
           //4
           if( p.x <  mitadX2 && p.y <  mitadY2 ){ cuadEndPoints4++; continue;}
           //5
           if( p.x >= mitadX2 && p.y <  mitadY2 ){ cuadEndPoints5++; continue;}
           //6
           if( p.x <  mitadX1 && p.y >= mitadY2 ){ cuadEndPoints6++; continue;}
           //7
           if( p.x <  mitadX2 && p.y >= mitadY2 ){ cuadEndPoints7++; continue;}
           //8
           if( p.x >= mitadX2 && p.y >= mitadY2 ){ cuadEndPoints8++; continue;}
       }

       int cuadInterPoints0 = 0,cuadInterPoints1 = 0, cuadInterPoints2 = 0, cuadInterPoints3 = 0,
               cuadInterPoints4 = 0, cuadInterPoints5 = 0, cuadInterPoints6 = 0, cuadInterPoints7 = 0, cuadInterPoints8 = 0;


       for(unsigned z = 0; z< insersectPoints.size();z++)
       {
           p = insersectPoints.at(z);
           //0
           if( p.x <  mitadX1 && p.y <  mitadY1 ){ cuadInterPoints0=1; continue;}
           //1
           if( p.x <  mitadX2 && p.y <  mitadY1 ){ cuadInterPoints1=1; continue;}
           //2
           if( p.x >= mitadX2 && p.y <  mitadY1 ){ cuadInterPoints2=1; continue;}
           //3
           if( p.x <  mitadX1 && p.y <  mitadY2 ){ cuadInterPoints3=1; continue;}
           //4
           if( p.x <  mitadX2 && p.y <  mitadY2 ){ cuadInterPoints4=1; continue;}
           //5
           if( p.x >= mitadX2 && p.y <  mitadY2 ){ cuadInterPoints5=1; continue;}
           //6
           if( p.x <  mitadX1 && p.y >= mitadY2 ){ cuadInterPoints6=1; continue;}
           //7
           if( p.x <  mitadX2 && p.y >= mitadY2 ){ cuadInterPoints7=1; continue;}
           //8
           if( p.x >= mitadX2 && p.y >= mitadY2 ){ cuadInterPoints8=1; continue;}
       }


       ///CREATE THE NEURAL NETWORK AND FEED IT THE FEATURE VECTOR
       controlredneuronal red = controlredneuronal("./NeuralNetwork.xml");
       cv::Mat caracteristicas(1,20,CV_32F);


       caracteristicas.at<float>(0, 0) = momentosHu.at(0).at(0);
       caracteristicas.at<float>(0, 1) = momentosHu.at(0).at(1);
       caracteristicas.at<float>(0, 2) = momentosHu.at(0).at(2);
       caracteristicas.at<float>(0, 3) = momentosHu.at(0).at(3);
       caracteristicas.at<float>(0, 4) = momentosHu.at(0).at(4);
       caracteristicas.at<float>(0, 5) = momentosHu.at(0).at(5);
       caracteristicas.at<float>(0, 6) = momentosHu.at(0).at(6);
       caracteristicas.at<float>(0, 7) = relacionAnchoAlto;
       caracteristicas.at<float>(0, 8) = endPoints.size();
       caracteristicas.at<float>(0, 9) = cuadEndPoints0;
       caracteristicas.at<float>(0, 10) = cuadEndPoints1;
       caracteristicas.at<float>(0, 11) = cuadEndPoints2;
       caracteristicas.at<float>(0, 12) = cuadEndPoints3;
       caracteristicas.at<float>(0, 13) = cuadEndPoints4;
       caracteristicas.at<float>(0, 14) = cuadEndPoints5;
       caracteristicas.at<float>(0, 15) = cuadEndPoints6;
       caracteristicas.at<float>(0, 16) = cuadEndPoints7;
       caracteristicas.at<float>(0, 17) = cuadEndPoints8;
       caracteristicas.at<float>(0, 18) = contornos.size();
       caracteristicas.at<float>(0, 19) = poligono.size();

       return red.predict( caracteristicas );
}
Example #18
int vehicle_det::do_iteration()
{
    //cout<<__PRETTY_FUNCTION__<<endl;
    cv::Mat img_input, src;
    cap >> src;

    if(!src.data)
    {
        printf("Exiting\n");
        return -1;
    }

    Mat img_display = src.clone();
    draw_ROI_poly(img_display);
    src.copyTo(img_input, mask);
    img_input = Mat(img_input, main_roi);
    IplImage temp = img_input;
    IplImage * frame = &temp;
    //getting the polygon
    // bgs->process(...) internally process and show the foreground mask image
    cv::Mat img_mask;
    //bgs->process(img_input, img_mask);
    get_foreground(img_input, img_mask);
    blur(img_mask, img_mask, Size(4, 4));
    img_mask = img_mask > 10;
    /*morphologyEx(img_mask, img_mask, MORPH_CLOSE, Mat(25, 2, CV_8U));
    morphologyEx(img_mask, img_mask, MORPH_OPEN, Mat(10, 10, CV_8U));*/
    morphologyEx(img_mask, img_mask, MORPH_CLOSE, Mat(2, 2, CV_8U));
    //morphologyEx(img_mask, img_mask, MORPH_OPEN, Mat(10, 10, CV_8U));
    //morphologyEx(img_mask, img_mask, MORPH_GRADIENT , Mat(5,5, CV_8U));
    //bgs->operator()(img_input,img_mask,0.2);
    //erode(img_mask, img_mask, Mat());
    //dilate(img_mask, img_mask, Mat());
    //imshow("fore", img_mask);

    if(!img_mask.empty())
    {
        //vector<Rect> rois;// to be added all the ROIs
        IplImage copy = img_mask;
        IplImage * new_mask = &copy;
        IplImage * labelImg = cvCreateImage(cvGetSize(new_mask), IPL_DEPTH_LABEL, 1);
        CvBlobs blobs, filtered_blobs;
        unsigned int result = cvb::cvLabel(new_mask, labelImg, blobs);
        cvFilterByArea(blobs, 40, 2000);
        int count = 0;

        for(CvBlobs::const_iterator it = blobs.begin(); it != blobs.end(); ++it)
        {
            count++;
            //  cout << "Blob #" << it->second->label << ": Area=" << it->second->area << ", Centroid=(" << it->second->centroid.x << ", " << it->second->centroid.y << ")" << endl;
            int x, y;
            x = (int)it->second->centroid.x;
            y = (int)it->second->centroid.y;
            //cv::Point2f p(x,y );
            // circle(img_input, p, (int)10, cv::Scalar(255, 0 , 0), 2, 8, 0);
            int x_final = 0;
            int y_final = 0;

            if(x - (width_roi / 2) <= 0)
            {
                x_final = 1;
            }
            else if(x + (width_roi / 2) >= img_input.cols)
            {
                x_final = (x - (width_roi / 2)) - (x + (width_roi / 2) - (img_input.cols - 1));
            }
            else
            {
                x_final = x - (width_roi / 2);
            }

            if(y - (height_roi / 2) <= 0)
            {
                y_final = 1;
            }
            else if(y + (height_roi / 2) >= img_input.rows)
            {
                y_final = (y - (height_roi / 2)) - (y + (height_roi / 2) - (img_input.rows - 1));
            }
            else
            {
                y_final = y - (height_roi / 2);
            }

            //printf("resized x_final=%d y_final=%d  cols=%d,  rows=%d \n", x_final,y_final,img_input.cols,img_input.rows);
            Rect roi(x_final, y_final, width_roi, height_roi);
            //rois.push_back(roi);//adding ROIs using rectangles
            //		Mat image = imread("");
            Mat image_roi = Mat(img_input, roi);
            int vehicle_ct = detect(image_roi); //getting the vehicle count per ROI

            if(vehicle_ct > 0)
            {
                filtered_blobs[it->first] = it->second;
                int matched = 0;
                int c1 = 255, c2 = 0;

                if(matched)
                {
                    c1 = 0;
                    c2 = 255;
                }
                else
                {
                    //print something to debug
                }//changing the colour of the rectangle depending on whether it matched or not

                rectangle(img_display,
                          Point(min_x + x - 5, min_y + y - 5),
                          Point(min_x + x + 5, min_y + y + 5),
                          CV_RGB(c1, c2, 0), 2, 8, 0);
                /*rectangle(img_input,
                          Point(x - 5, y - 5),
                          Point(x + 5, y + 5),
                          CV_RGB(c1, c2, 0), 2, 8, 0);*/
            }
        }

        //cvUpdateTracks(filtered_blobs, tracks, 5., 10);
        cvUpdateTracks(filtered_blobs, tracks, 10., 5);
        cvRenderBlobs(labelImg, filtered_blobs, frame, frame, CV_BLOB_RENDER_CENTROID | CV_BLOB_RENDER_BOUNDING_BOX);
        //cvRenderTracks(tracks, frame, frame, CV_TRACK_RENDER_ID|CV_TRACK_RENDER_BOUNDING_BOX|CV_TRACK_RENDER_TO_LOG);
        cvRenderTracks(tracks, frame, frame, CV_TRACK_RENDER_ID);
        printf("num of active tracks %d\n", tracks.size());
        process_equation(tracks.size());//number of people given as input
	if(abstract_det::Total_Score<0){
		destroyAllWindows();	
	}
    }

    if(!img_display.empty())
    {
        cv::imshow("vehicle_view", img_display);
    }

    waitKey(30);
    
    return 1;
}
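// The x_final / y_final arithmetic above keeps a fixed-size ROI centred on the
// blob while pushing it back inside the frame. A minimal sketch of the same
// idea as a reusable helper (hypothetical name clampedRoi, not part of the
// original code; needs <algorithm> in addition to the OpenCV headers):
static cv::Rect clampedRoi(int cx, int cy, int roiW, int roiH, const cv::Size& frame)
{
    int x = cx - roiW / 2;
    int y = cy - roiH / 2;
    // shift the window back inside the frame instead of shrinking it
    x = std::max(0, std::min(x, frame.width  - roiW));
    y = std::max(0, std::min(y, frame.height - roiH));
    return cv::Rect(x, y, roiW, roiH);
}
// usage sketch: Mat image_roi = img_input(clampedRoi(x, y, width_roi, height_roi, img_input.size()));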
void TrackShirt::ImageCallback(const sensor_msgs::ImageConstPtr& msg)
{
	cv_bridge::CvImagePtr cv_ptr;
	try
	{
	cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::BGR8);
	}
	catch (cv_bridge::Exception& e)
	{
	ROS_ERROR("cv_bridge exception: %s", e.what());
	return;
	}

	frame = cv_ptr->image;

	char key = (char)cvWaitKey(10);
	if (key ==27 )	{
		ros::requestShutdown();
	} else if ( key =='z' )	{
		IMSHOW = true;
		//namedWindow(OPENCV_WINDOW);
	} else if (key == 'x')	{
		IMSHOW =  false;
		cvDestroyAllWindows() ;
		//namedWindow(OPENCV_WINDOW);
	}

	if (trackObject == -1)	{
		//Initial stage, before selecting object. Do nothing. Camera view shown as is.
	} else if (trackObject == 0)	{
		rectangle(frame, Point(selection.x,selection.y),Point(selection.x+selection.width,selection.y+selection.height),Scalar(0,0,255),1);
	} else if (PerFoRoMode == 3)	{
		Mat imgHSV, imgThresh, binFrame;
		int contSize;

		cvtColor(frame, imgHSV, CV_BGR2HSV); 

		//Get binary image using HSV threshold
		inRange(imgHSV, mLowerBound, mUpperBound, imgThresh); 

		//Morphological operations to get smoother blobs with reduced noise                
		dilate( imgThresh, imgThresh, elemDilate );
		erode( imgThresh, imgThresh, elemErode ); 
		dilate( imgThresh, imgThresh, elemDilate );                               
		erode( imgThresh, imgThresh, elemErode );                                                       
		morphologyEx(imgThresh, imgThresh, MORPH_OPEN, structure_elem);  
			  
		imgThresh.copyTo(binFrame);
		vector<vector<Point> > contours;
		vector<Vec4i> hierarchy;
			      
		/// Find contours
		findContours( binFrame, contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE, Point(0, 0) );

		contSize = contours.size();
		//cout<<"contours size "<<contSize<<endl;
				                
		//If no contours 
		if (contSize==0)	{       
			navX = 0;
			navY = 0;
			if (IMSHOW)	{             
				imshow(OPENCV_WINDOW, frame);
				//imshow("Binary Image with Detected Object", imgThresh); 
			}
			return;
		}
				                    
		/// Approximate contours to polygons + get bounding rects 
		vector<vector<Point> > contours_poly( contSize );
		vector<Rect> boundRect( contSize );
				  
		/// Get the moments
		vector<Moments> mu(contSize );
		cv::Mat contArea = Mat::zeros(contSize,1,CV_32FC1);
		for( int i = 0; i < contSize; i++ )
		{ 
			approxPolyDP( Mat(contours[i]), contours_poly[i], 3, true );
			boundRect[i] = boundingRect( Mat(contours_poly[i]) );
				     
			mu[i] = moments( contours[i], false );
				     
			contArea.at<float>(i) = contourArea(contours[i]);
		}

		///  Get the mass centers:
		vector<Point2f> mc( contSize );
		for( int i = 0; i < contSize; i++ )
		{ 
			mc[i] = Point2f( mu[i].m10/mu[i].m00 , mu[i].m01/mu[i].m00 );
		}

				
		///Nearest centroid to previous position
		cv::Mat dist = Mat::zeros(contSize,1,CV_32FC1); 
		cv::Mat normDist = Mat::zeros(contSize,1,CV_32FC1);
				
		for( int i = 0; i < contSize; i++ )
		{ 
			dist.at<float>(i) = abs(mc[i].x - selectCentroid.x) + abs(mc[i].y - selectCentroid.y);

			normDist.at<float>(i) = maxDistance - dist.at<float>(i);
		}
				                   
				   
		cv::Mat normSelect= Mat::zeros(contSize,1,CV_32FC1);

		normSelect =  contArea + normDist; //

		cv::Mat sortedSelect = Mat::zeros(contSize,1,CV_32FC1);

		cv::sortIdx(normSelect, sortedSelect, CV_SORT_EVERY_COLUMN+CV_SORT_DESCENDING);
		       
		Point selectPt = mc[sortedSelect.at<int>(0)];

		//If this is the first tracked frame, initialize the Kalman filter
		if (trackObject == 1)	{
			initTracker();
			trackObject = 2;  
		}

		
		//Kalman estimate based on previous state and measurement   
		kalmanEstimatePt = kalmanTracker(selectPt);
		  
		///Distance of object position estimate from previous position
		distPrevCurrent = abs(kalmanEstimatePt.x - selectCentroid.x) + abs(kalmanEstimatePt.y - selectCentroid.y);
		distPrevCurrent = distPrevCurrent / maxDistance;


		if (missCount > 5)	{
			distThresh*=1.5;
		} else	{
			distThresh = minDistThresh;
		}                                               
				        
		/// /////////////////////////////////////////////////////////////
		///Threshold the detected centroid's distance from prev///////////////
		if (distPrevCurrent < distThresh && contArea.at<float>(sortedSelect.at<int>(0)) >= 10)	{
			//Final object position estimate using kalman
			selectCentroid = kalmanEstimatePt;
			if (IMSHOW)	{ 
				rectangle( frame, boundRect[sortedSelect.at<int>(0)], Scalar(255,255,255), 2, 8, 0 );
			}  
			shirt_msg.x = selectCentroid.x;
			shirt_msg.y = selectCentroid.y;
			shirt_msg.area = boundRect[sortedSelect.at<int>(0)].width * boundRect[sortedSelect.at<int>(0)].height;
			track_shirt_pub_.publish(shirt_msg);
			//cout<<"X="<<navX<<"Y="<<navY<<endl; 
			missCount = 0;
			drawArrow(frame, cv::Point(frame.cols/2, frame.rows/2), selectCentroid, Scalar(255,0,0));
		} else	{
			missCount++;
			navX = 0.0;
			navY = 0.0;
		}
	}
	// Update GUI Window
	//if (IMSHOW)	{
	//	imshow(OPENCV_WINDOW, frame);
		///imshow("Binary Image with Detected Object", imgThresh);
	//}
	//cv::waitKey(3);

	// Output modified video stream
	image_shirt_pub_.publish(cv_ptr->toImageMsg());
}
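// initTracker() and kalmanTracker() are called above but are not part of this
// snippet. The following is a minimal constant-velocity sketch of what they
// commonly look like with cv::KalmanFilter (state = [x, y, vx, vy],
// measurement = [x, y]); the names, noise values and the global filter object
// are assumptions, not the original implementation.
cv::KalmanFilter kfSketch(4, 2, 0, CV_32F);

void initTrackerSketch(const cv::Point2f& start)
{
	kfSketch.transitionMatrix = (cv::Mat_<float>(4, 4) <<
		1, 0, 1, 0,
		0, 1, 0, 1,
		0, 0, 1, 0,
		0, 0, 0, 1);
	cv::setIdentity(kfSketch.measurementMatrix);
	cv::setIdentity(kfSketch.processNoiseCov, cv::Scalar::all(1e-3));
	cv::setIdentity(kfSketch.measurementNoiseCov, cv::Scalar::all(1e-1));
	cv::setIdentity(kfSketch.errorCovPost, cv::Scalar::all(1));
	kfSketch.statePost = (cv::Mat_<float>(4, 1) << start.x, start.y, 0, 0);
}

cv::Point kalmanTrackerSketch(const cv::Point& measured)
{
	kfSketch.predict();                                   // time update
	cv::Mat meas = (cv::Mat_<float>(2, 1) << (float)measured.x, (float)measured.y);
	cv::Mat est = kfSketch.correct(meas);                 // measurement update
	return cv::Point((int)est.at<float>(0), (int)est.at<float>(1));
}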
void apritags(const Mat src,
              const int current_direction,
              double &direction,
              bool &state,
              Mat &result)
{
        Histogram hc;
        MatND colorhist;
        Mat thresholded;
        Mat imageBinary;

        src.copyTo(result);

        Mat imageFilter;
        for (int i=1; i<9; i=i+2) GaussianBlur(src, imageFilter, Size(i, i), 0, 0);

        //imshow("img", src);
        //imshow("img_Gaussian", imageFilter);

        //createTrackbar("alpha", "camera", &alpha, 3, on_track);
        //on_track(alpha, 0);

        Mat imageL = imageFilter - Scalar(20,20,20);

        hc.getHueHistogram(imageL);
        equalizeHist(hc.v[2], hc.v[2]);

        //imshow("v[2]",hc.v[2]);

        threshold(hc.v[2], imageBinary, COLOR_BLACK_TH, 255, 1);
        //imshow("img_binary", imageBinary);

        Mat imageClosed;
        Mat element = getStructuringElement(MORPH_CROSS, Size(7,7), Point(0,0));
        morphologyEx(imageBinary, imageClosed, MORPH_CLOSE,  element);
        dilate(imageClosed, imageClosed, element);
        //imshow("img_open", imageClosed);

        vector<vector<Point> > contours;
        findContours(imageClosed, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE, Point(0, 0));
        Mat imageContours(imageClosed.size(), CV_8U, Scalar(255));
        drawContours(imageContours, contours, -1, Scalar(0), 2);

        /* calculate the moments */
        vector<Moments> mu(contours.size());
        for (int i=0; i<contours.size(); i++) mu[i] = moments(contours[i], false);
        //imshow("contours", imageContours);

        //imageContours.copyTo(result);

        vector<vector<Point> > apcontours; //AprilTags' contours
        vector<RotatedRect> rotatedRects;

        /* number of apriltags */
        int countAp = 0;
        float angle[20]; //the rotation angle of each AprilTag
        double area, length, p;
        double d = src.cols * src.rows;
        Point center(src.cols/2, src.rows/2);
        state = false;
        double maxp = PROPERTY;
        cout << "X: " << center.x << " Y: " << center.y << endl;
        for ( int i=0; i<contours.size(); i++)
        {
            area = abs(contourArea( contours[i] ));
            length = abs(arcLength( contours[i], true ));
            p = 1.0*area/length;
            if (p>PROPERTY)
            {
                cout << "Area: " << area << "  Length: " << length << "  Property: " << int(p) << endl;
                countAp++;
                apcontours.push_back(contours.at(i));
                vector<Point> p = contours.at(i);
                rotatedRects.push_back(minAreaRect(Mat(p)));
                angle[countAp-1] = rotatedRects[countAp-1].angle;

                Point2f mc;
                mc = Point2f(mu[i].m10/mu[i].m00, mu[i].m01/mu[i].m00);
                circle(result, mc, 2, Scalar::all(255));
                cout << "X: " << mc.x << " Y: " << mc.y << endl;

                if ((((current_direction == UP) && (mc.y<=center.y)) ||
                        ((current_direction == DOWN) && (mc.y>center.y)) ||
                        ((current_direction == LEFT) && (mc.x<=center.x)) ||
                        ((current_direction == RIGHT) && (mc.x>center.x))))
                {
                    if (sqrt(pow(mc.x-center.x, 2.0) + pow(mc.y-center.y, 2.0)) < d)
                    {
                        d = sqrt(pow(mc.x-center.x, 2.0) + pow(mc.y-center.y, 2.0));
                        //direction = atan2(mc.x-center.x, mc.y-center.y);
                        //direction = direction * 180 / PI;
                        //maxp = p;
                        if (center.y > mc.y)
                        {
                            if ( mc.x>center.x )    //first quadrant
                            {
                               direction=180*atan(float ((mc.x-center.x)/(center.y-mc.y))) / PI;
                            }
                            else            //fourth quadrant
                            {
                                direction=360 + 180*atan(float ((mc.x-center.x)/(center.y-mc.y))) / PI;
                            }
                        }
                        else if (center.y < mc.y)
                        {
                             if (mc.x>center.x )            //second quadrant
                            {
                               direction=180 - 180*atan(float ((mc.x-center.x)/(mc.y-center.y))) / PI;
                            }
                            else                 //third quadrant
                            {
                                direction=180 + 180*atan(float ((center.x-mc.x)/(mc.y-center.y))) / PI;
                            }
                        }
                        state = true;
                    }
                }
                // apriltag is near
                if ( d<20 )
                {
                    state = false;
                    return;
                }
            }
            //cout << "A:" << area << "  L:" << length << "  P:" << p << endl;
        }

        drawContours(result, apcontours, -1, Scalar(255), 5);

        //waitKey(0);
        /*
        vector<Mat> imageAp;
        Rect rects;
        for (int i=0; i<countAp; i++)
        {
            Point2f rect_points[4];
            rotatedRects[i].points(rect_points);
            cout << rect_points[0].x << "  " << rect_points[0].y << "  " <<  rect_points[1].x << "  " <<  rect_points[1].y << "  " <<  rect_points[2].x << "  " << rect_points[2].y << "  " <<  rect_points[3].x << "  " <<  rect_points[3].y << endl;
            int x = min(min(min(rect_points[0].x, rect_points[1].x), rect_points[2].x), rect_points[3].x);
            int y = min(min(min(rect_points[0].y, rect_points[1].y), rect_points[2].y), rect_points[3].y);
            int rows = max(max(max(rect_points[0].y, rect_points[1].y), rect_points[2].y), rect_points[3].y)-y;
            int cols = max(max(max(rect_points[0].x, rect_points[1].x), rect_points[2].x), rect_points[3].x)-x;
            if (x + cols>image.cols) cols = image.cols-x;
            if (y + rows>image.rows) rows = image.rows-y;
            rects = Rect(max(x, 0), max(y, 0), cols, rows);
            setROI(image, imageAp, rects);
        }

        for (int i=0; i<imageAp.size(); i++)
        {
            Mat imageGray;
            cvtColor(imageAp[i], imageGray, CV_RGB2GRAY);
            Mat imageAdaptive;
            adaptiveThreshold(imageGray, imageAdaptive, 255, ADAPTIVE_THRESH_MEAN_C, THRESH_BINARY , 3, 5);
            //imshow("6", imageAdaptive);
            waitKey(0);
        }*/
}
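// The nested quadrant branches above compute the heading of the tag centroid
// relative to the image centre, measured clockwise from "up" in [0, 360).
// A minimal equivalent sketch using atan2 (same convention; a hypothetical
// helper, not part of the original code; needs <cmath> and the PI constant
// already used above):
static double headingDegSketch(const cv::Point2f& mc, const cv::Point& center)
{
    double dx = mc.x - center.x;              // +x points right
    double dy = center.y - mc.y;              // flip so +y points "up"
    double deg = std::atan2(dx, dy) * 180.0 / PI;
    if (deg < 0) deg += 360.0;                // wrap into [0, 360)
    return deg;
}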
예제 #21
0
vector<Plate> DetectRegions::segment(Mat input){
    vector<Plate> output;

    //convert image to gray
    Mat img_gray; //= *new Mat(input.size().width,input.size().height, CV_8UC1);
    cvtColor(input, img_gray, CV_BGR2GRAY);
    blur(img_gray, img_gray, Size(5,5));

    //Find vertical lines. Car plates have a high density of vertical lines
    Mat img_sobel;
    Sobel(img_gray, img_sobel, CV_8U, 1, 0, 3, 1, 0, BORDER_DEFAULT);
    if(showSteps)
        imshow("Sobel", img_sobel);

    //threshold image
    Mat img_threshold;
    threshold(img_sobel, img_threshold, 0, 255, CV_THRESH_OTSU+CV_THRESH_BINARY);
    if(showSteps)
        imshow("Threshold", img_threshold);

    //Morphological close operation
    Mat element = getStructuringElement(MORPH_RECT, Size(17, 3) );
    morphologyEx(img_threshold, img_threshold, CV_MOP_CLOSE, element);
    if(showSteps)
        imshow("Close", img_threshold);

    //Find contours of possibles plates
    vector< vector< Point> > contours;
    findContours(img_threshold,
            contours, // a vector of contours
            CV_RETR_EXTERNAL, // retrieve the external contours
            CV_CHAIN_APPROX_NONE); // all pixels of each contours

    //Iterate over each contour found
    vector<vector<Point> >::iterator itc= contours.begin();
    vector<RotatedRect> rects;

    //Remove patches that are not inside the aspect ratio and area limits.
    while (itc!=contours.end()) {
        //Create bounding rect of object
        RotatedRect mr= minAreaRect(Mat(*itc));
        if( !verifySizes(mr)){
            itc= contours.erase(itc);
        }else{
            ++itc;
            rects.push_back(mr);
        }
    }

    // Draw blue contours on a copy of the input image
    cv::Mat result;
    input.copyTo(result);
    cv::drawContours(result,contours,
            -1, // draw all contours
            cv::Scalar(255,0,0), // in blue
            1); // with a thickness of 1

    for(int i=0; i< rects.size(); i++){

        //For better rect cropping of each possible box,
        //run the floodfill algorithm (the plate has a white background)
        //so that the contour box can be recovered more cleanly
        circle(result, rects[i].center, 3, Scalar(0,255,0), -1);
        //get the min size between width and height
        float minSize=(rects[i].size.width < rects[i].size.height)?rects[i].size.width:rects[i].size.height;
        minSize=minSize-minSize*0.5;
        //initialize rand and get NumSeeds points around the center for the floodfill algorithm
        srand ( time(NULL) );
        //Initialize floodfill parameters and variables
        Mat mask;
        mask.create(input.rows + 2, input.cols + 2, CV_8UC1);
        mask= Scalar::all(0);
        int loDiff = 30;
        int upDiff = 30;
        int connectivity = 4;
        int newMaskVal = 255;
        int NumSeeds = 10;
        Rect ccomp;
        int flags = connectivity + (newMaskVal << 8 ) + CV_FLOODFILL_FIXED_RANGE + CV_FLOODFILL_MASK_ONLY;
        for(int j=0; j<NumSeeds; j++){
            Point seed;
            seed.x=rects[i].center.x+rand()%(int)minSize-(minSize/2);
            seed.y=rects[i].center.y+rand()%(int)minSize-(minSize/2);
            circle(result, seed, 1, Scalar(0,255,255), -1);
            int area = floodFill(input, mask, seed, Scalar(255,0,0), &ccomp, Scalar(loDiff, loDiff, loDiff), Scalar(upDiff, upDiff, upDiff), flags);
        }
        if(showSteps)
            imshow("MASK", mask);
        //cvWaitKey(0);

        //Check whether the new floodfill mask matches a correct patch.
        //Collect all detected mask points to get the minimal rotated rect
        vector<Point> pointsInterest;
        Mat_<uchar>::iterator itMask= mask.begin<uchar>();
        Mat_<uchar>::iterator end= mask.end<uchar>();
        for( ; itMask!=end; ++itMask)
            if(*itMask==255)
                pointsInterest.push_back(itMask.pos());

        RotatedRect minRect = minAreaRect(pointsInterest);

        if(verifySizes(minRect)){
            // rotated rectangle drawing 
            Point2f rect_points[4]; minRect.points( rect_points );
            for( int j = 0; j < 4; j++ )
                line( result, rect_points[j], rect_points[(j+1)%4], Scalar(0,0,255), 1, 8 );    

            //Get rotation matrix
            float r= (float)minRect.size.width / (float)minRect.size.height;
            float angle=minRect.angle;    
            if(r<1)
                angle=90+angle;
            Mat rotmat= getRotationMatrix2D(minRect.center, angle,1);

            //Create and rotate image
            Mat img_rotated;
            warpAffine(input, img_rotated, rotmat, input.size(), CV_INTER_CUBIC);

            //Crop image
            Size rect_size=minRect.size;
            if(r < 1)
                swap(rect_size.width, rect_size.height);
            Mat img_crop;
            getRectSubPix(img_rotated, rect_size, minRect.center, img_crop);
            
            Mat resultResized;
            resultResized.create(33,144, CV_8UC3);
            resize(img_crop, resultResized, resultResized.size(), 0, 0, INTER_CUBIC);
            //Equalize the cropped image
            Mat grayResult;
            cvtColor(resultResized, grayResult, CV_BGR2GRAY); 
            blur(grayResult, grayResult, Size(3,3));
            grayResult=histeq(grayResult);
            if(saveRegions){ 
                stringstream ss(stringstream::in | stringstream::out);
                ss << "tmp/" << filename << "_" << i << ".jpg";
                imwrite(ss.str(), grayResult);
            }
            output.push_back(Plate(grayResult,minRect.boundingRect()));
        }
    }       
    if(showSteps) 
        imshow("Contours", result);

    return output;
}
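// verifySizes() is called above but its body is not included in this snippet.
// A sketch of the usual aspect-ratio / area filter applied to plate candidates
// follows; the expected ratio and the size bounds are placeholder assumptions,
// not the original values.
bool verifySizesSketch(const RotatedRect& candidate){
    const float expectedAspect = 520.0f / 110.0f;      // e.g. a European plate, ~4.7
    const float aspectError    = 0.4f;
    const float minArea = 15.0f * 15.0f * expectedAspect;
    const float maxArea = 125.0f * 125.0f * expectedAspect;

    float w = candidate.size.width, h = candidate.size.height;
    if(w <= 0 || h <= 0) return false;
    float area   = w * h;
    float aspect = (w > h) ? w / h : h / w;             // orientation-independent

    return area >= minArea && area <= maxArea &&
           aspect >= expectedAspect * (1 - aspectError) &&
           aspect <= expectedAspect * (1 + aspectError);
}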
예제 #22
0
Mat skinDetector::detect(Mat captureframe, bool verboseSelect, Mat *skinMask)
{
	verboseOutput=verboseSelect;
	//if (argc>=1) frame = argv[1]);
	//if (argc>=2) singleRegionChoice = int(argv[2]);

	int step = 0;
	Mat3b frame;
	// Force resize to 640x480 -> all thresholds / pixel filters are configured for this size
	// Note: the image is returned to its original size at the end
    Size s = captureframe.size();
	resize(captureframe,captureframe,Size(640,480));

	
	// CHANGED HERE TO BGR
	//cvtColor(captureframe, captureframe, CV_RGB2BGR);
	if (verboseOutput)	imshow("Raw Image (A)",captureframe);
	/* THRESHOLD ON HSV*/
	// HSV data -> used to find skin
	cvtColor(captureframe, frame, CV_BGR2HSV);
	//cvtColor(captureframe, frame, CV_BGR2HLS);
	GaussianBlur(frame, frame, Size(imgBlurPixels,imgBlurPixels), 1, 1);
	//medianBlur(frame, frame, 15);
	for(int r=0; r<frame.rows; ++r){
		for(int c=0; c<frame.cols; ++c) 
			// 0<H<0.25  -   0.15<S<0.9    -    0.2<V<0.95   
			if( (frame(r,c)[0]>5) && (frame(r,c)[0] < 17) && (frame(r,c)[1]>38) && (frame(r,c)[1]<250) && (frame(r,c)[2]>51) && (frame(r,c)[2]<242) ); // do nothing
			else for(int i=0; i<3; ++i)	frame(r,c)[i] = 0;
	}

	if (verboseOutput)	imshow("Skin HSV (B)",frame);
	/* BGR CONVERSION AND THRESHOLD */
	Mat1b frame_gray;
	cvtColor(frame, frame, CV_HSV2BGR);
	cvtColor(frame, frame_gray, CV_BGR2GRAY);
				
				
	// Adaptive thresholding technique
	// 1. Threshold data to find main areas of skin
	adaptiveThreshold(frame_gray,frame_gray,255,ADAPTIVE_THRESH_GAUSSIAN_C,THRESH_BINARY_INV,9,1);
	if (verboseOutput)	imshow("Adaptive_threshold (D1)",frame_gray);
	// 2. Fill in thresholded areas
	morphologyEx(frame_gray, frame_gray, CV_MOP_CLOSE, Mat1b(imgMorphPixels,imgMorphPixels,1), Point(-1, -1), 2);
	
	
	//GaussianBlur(frame_gray, frame_gray, Size((imgBlurPixels*2)+1,(imgBlurPixels*2)+1), 1, 1);
	GaussianBlur(frame_gray, frame_gray, Size(imgBlurPixels,imgBlurPixels), 1, 1);
	// Select single largest region from image, if singleRegionChoice is selected (1)
	
	if (singleRegionChoice)
	{
		*skinMask = cannySegmentation(frame_gray, -1);
	}
	else // Detect each separate block and remove blobs smaller than a few pixels
	{
		*skinMask = cannySegmentation(frame_gray, minPixelSize);
	}


	// Just return skin
	Mat frame_skin;
	captureframe.copyTo(frame_skin,*skinMask);  // Copy captureframe data to frame_skin, using mask from frame_ttt
	// Resize image to original before return
	resize(frame_skin,frame_skin,s);
	if (verboseOutput)	imshow("Skin segmented",frame_skin);
	waitKey(1); // let the imshow windows refresh before returning
	return frame_skin;	
}
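// The per-pixel loop above keeps only pixels whose HSV values fall inside a
// skin range. A minimal sketch of the same selection done with cv::inRange
// (inclusive bounds equivalent to the strict comparisons in the loop; the
// helper name is hypothetical and the input is assumed to already be HSV):
cv::Mat skinRangeMaskSketch(const cv::Mat& hsvFrame)
{
	cv::Mat mask;
	cv::inRange(hsvFrame, cv::Scalar(6, 39, 52), cv::Scalar(16, 249, 241), mask);
	return mask; // 255 where the pixel is inside the range, 0 elsewhere
}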
void motion_segmentation::scene_segmentation(){
	double timestamp = (double)cv::getTickCount()/cv::getTickFrequency();

	cv::Mat frame_diff, temp1, temp2, temp3;
	absdiff(frame_grayscale, prev_frame, frame_diff);



	threshold( frame_diff, frame_diff, 20, 1, CV_THRESH_BINARY );
	frame_diff.copyTo(temp3);
	temp3=temp3*255;
	imshow("frame difference",temp3);
	cv::Mat element = cv::getStructuringElement( cv::MORPH_RECT , cv::Size(5,5), cv::Point( 2, 2 ) );
	morphologyEx( frame_diff, frame_diff, cv::MORPH_CLOSE  , element);


	//	updateMotionHistory(frame_diff, MHI, timestamp, MHI_DURATION);
	frame_diff.copyTo(temp2);
	temp2=temp2*255;
	imshow("frame difference morph",temp2);

	cv::cvtColor(frame_diff,frame_diff,CV_GRAY2RGB);
	cv::Mat test;
	cv::bitwise_and(frame_left,frame_diff,test);
	imshow("test",test);


	std::vector<std::vector<cv::Point> > contours;
	std::vector<cv::Point> joined_contours;
	std::vector<cv::Vec4i> hierarchy;
	cv::findContours( temp2, contours, hierarchy,CV_RETR_EXTERNAL , CV_CHAIN_APPROX_NONE );
	//temp2=cv::Mat::zeros(temp2.size(),CV_8UC3);
	temp2=cv::Mat::ones(temp2.size(),CV_8UC1);
	temp2=temp2*127;
	//cv::cvtColor(temp2,temp2,CV_GRAY2RGB);
	for( int idx=0; idx < (int)hierarchy.size(); idx ++)
	{
		if(contourArea(contours[idx], false)>5){
			joined_contours.insert(joined_contours.end(),contours[idx].begin(),contours[idx].end());
			cv::Scalar color( rand()&255, rand()&255, rand()&255 );
			//drawContours( temp2, contours, idx, color, CV_FILLED, 8, hierarchy );
			drawContours( temp2, contours, idx, cv::Scalar(255), CV_FILLED, 8, hierarchy );
			drawContours( temp2, contours, idx, cv::Scalar(0), 2, 8, hierarchy );
		}
	}

//	cv::Mat frame_extract;
//	frame_left.copyTo(frame_extract);
//
//	cv::Moments moments;
//	std::vector<cv::Point> blobs_center;
//	for( int i = 0; i < (int)contours.size(); i ++)
//	{
//		if(contourArea(contours[i], false)>5){
//			moments = cv::moments(contours[i]);
//			cv::circle(frame_extract,cv::Point(moments.m10/moments.m00,moments.m01/moments.m00),2,cv::Scalar(255,255,255));
//			blobs_center.push_back(cv::Point(moments.m10/moments.m00,moments.m01/moments.m00));
//			//std::cout<<cv::Point(moments.m10/moments.m00,moments.m01/moments.m00)<<std::endl;
//		}
//	}
//	for( int i = 0; i < (int)blobs_center.size(); i ++)
//	{
//
//		floodFill(frame_extract, blobs_center[i], cv::Scalar(255,0,0),0,cv::Scalar(5,5,5),cv::Scalar(5,5,5));
//	}
	cv::bitwise_and(temp2,prev_mask,temp2);
	imshow("contours",temp2);
	//cv::rectangle(temp2,bound_rect,cv::Scalar(0,255,255),1);
	//std::vector<std::vector<cv::Point> > hull (1);
	//if(!joined_contours.empty()) approxPolyDP(joined_contours, hull[0], 100, true);//convexHull( joined_contours, hull[0], false );
	//drawContours( temp2, hull, -1, cv::Scalar(0,0,255), 1, 8 );

	//	cv::Rect bound_rect;
	//	if(!joined_contours.empty()) bound_rect = boundingRect(joined_contours);
	//	imshow("contours",temp2);
	//	cv::Mat grab_mask,bgdModel,fgdModel;
	//	grabCut(frame_left, grab_mask, bound_rect,  bgdModel,  fgdModel, 1, cv::GC_INIT_WITH_RECT  );
	//	grab_mask=grab_mask*255/3;
	//	imshow("result",grab_mask);

	//	cv::Moments moments;
	//	std::vector<cv::Point> blobs_center;
	//	for( int i = 0; i < (int)contours.size(); i ++)
	//	{
	//		moments = cv::moments(contours[i]);
	//		//cv::circle(temp2,cv::Point(moments.m10/moments.m00,moments.m01/moments.m00),2,cv::Scalar(255,255,255));
	//		blobs_center.push_back(cv::Point(moments.m10/moments.m00,moments.m01/moments.m00));
	//	}
	//	imshow("contours",temp2);
	//
	//		cv::Mat markers(frame_left.size(),CV_32S);
	//		for( int i = 0; i < (int)blobs_center.size(); i ++){
	//			markers.at<double>(blobs_center[i])=i;
	//
	//		}
	//		watershed(frame_left,  markers);
	//		markers.convertTo(markers,CV_8UC1);
	//		//markers=markers*255;
	//		std::cout << markers << std::endl;
	//		imshow("watershed",markers);


	//	//MHI.convertTo(temp2,CV_8UC1,255/(*max));
	//	imshow("MHI",MHI);
	//	//MHI.convertTo(MHI,CV_8UC1,255./MHI_DURATION,(MHI_DURATION - timestamp)*255./MHI_DURATION);
	//	cv::Mat segmask;
	//	std::vector <cv::Rect> boundingRects;
	//	cv::segmentMotion(MHI, segmask, boundingRects, timestamp, MAX_TIME_DELTA);
	//	std::cout<< MHI << std::endl;
	//	int rect_size = boundingRects.size();
	//	for(int i = 0; i<rect_size;i++){
	//		cv::rectangle(frame_left,boundingRects[i],cv::Scalar((6*i*255/rect_size)%255,8*(255-i*255/rect_size)%255,i*255*10/rect_size%255));
	//	}


	//imshow("Segmentation mask",segmask);

	temp2.copyTo(prev_mask);
	frame_grayscale.copyTo(prev_frame);
}
예제 #24
0
void CharacterSegmenter::cleanCharRegions(vector<Mat> thresholds, vector<Rect> charRegions)
{
  const float MIN_SPECKLE_HEIGHT_PERCENT = 0.13;
  const float MIN_SPECKLE_WIDTH_PX = 3;
  const float MIN_CONTOUR_AREA_PERCENT = 0.1;
  const float MIN_CONTOUR_HEIGHT_PERCENT = 0.60;
  
  Mat mask = getCharBoxMask(thresholds[0], charRegions);
  
  
  for (int i = 0; i < thresholds.size(); i++)
  {
    bitwise_and(thresholds[i], mask, thresholds[i]);
    vector<vector<Point> > contours;
    
    Mat tempImg(thresholds[i].size(), thresholds[i].type());
    thresholds[i].copyTo(tempImg);
    
    //Mat element = getStructuringElement( 1,
//				    Size( 2 + 1, 2+1 ),
//				    Point( 1, 1 ) );
    //dilate(thresholds[i], tempImg, element);
    //morphologyEx(thresholds[i], tempImg, MORPH_CLOSE, element);
    //drawAndWait(&tempImg);

    findContours(tempImg, contours, RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
      
    for (int j = 0; j < charRegions.size(); j++)
    {
      const float MIN_SPECKLE_HEIGHT = ((float)charRegions[j].height) * MIN_SPECKLE_HEIGHT_PERCENT;
      const float MIN_CONTOUR_AREA = ((float)charRegions[j].area()) * MIN_CONTOUR_AREA_PERCENT;
      
      
      int tallestContourHeight = 0;
      float totalArea = 0;
      for (int c = 0; c < contours.size(); c++)
      {
	if (contours[c].size() == 0)
	  continue;
	if (charRegions[j].contains(contours[c][0]) == false)
	  continue;
	
	
	
	Rect r = boundingRect(contours[c]);
	
	if (r.height <= MIN_SPECKLE_HEIGHT || r.width <= MIN_SPECKLE_WIDTH_PX)
	{
	  // Erase this speckle
	  drawContours(thresholds[i], contours, c, Scalar(0,0,0), CV_FILLED);  
	  
	  if (this->config->debugCharSegmenter)
	  {
	      drawContours(imgDbgCleanStages[i], contours, c, COLOR_DEBUG_SPECKLES, CV_FILLED);
	  }
	}
	else
	{
	  if (r.height > tallestContourHeight)
	    tallestContourHeight = r.height;
	  
	  totalArea += contourArea(contours[c]);
	  

	}
	//else if (r.height > tallestContourHeight)
	//{
	//  tallestContourIndex = c;
	//  tallestContourHeight = h;
	//}

      }


      
      if (totalArea < MIN_CONTOUR_AREA)
      {
	// Character is not voluminous enough.  Erase it.
	if (this->config->debugCharSegmenter)
	{
	  cout << "Character CLEAN: (area) removing box " << j << " in threshold " << i << " -- Area " << totalArea << " < " << MIN_CONTOUR_AREA << endl;
	  
	  Rect boxTop(charRegions[j].x, charRegions[j].y - 10, charRegions[j].width, 10);
	  rectangle(imgDbgCleanStages[i], boxTop, COLOR_DEBUG_MIN_AREA, -1);
	}
	
	
	rectangle(thresholds[i], charRegions[j], Scalar(0, 0, 0), -1);
      }
      else if (tallestContourHeight < ((float) charRegions[j].height * MIN_CONTOUR_HEIGHT_PERCENT))
      {
	// This character is too short.  Black the whole thing out
	if (this->config->debugCharSegmenter)
	{
	  cout << "Character CLEAN: (height) removing box " << j << " in threshold " << i << " -- Height " << tallestContourHeight << " < " << ((float) charRegions[j].height * MIN_CONTOUR_HEIGHT_PERCENT) << endl;
	  
	  Rect boxBottom(charRegions[j].x, charRegions[j].y + charRegions[j].height, charRegions[j].width, 10);
	  rectangle(imgDbgCleanStages[i], boxBottom, COLOR_DEBUG_MIN_HEIGHT, -1);
	}
	rectangle(thresholds[i], charRegions[j], Scalar(0, 0, 0), -1);
      }
      
    }
    
    
    Mat closureElement = getStructuringElement( 1,
				    Size( 2 + 1, 2+1 ),
				    Point( 1, 1 ) );
    
    //morphologyEx(thresholds[i], thresholds[i], MORPH_OPEN, element);
    
    //dilate(thresholds[i], thresholds[i], element);
    //erode(thresholds[i], thresholds[i], element);
    
    morphologyEx(thresholds[i], thresholds[i], MORPH_CLOSE, closureElement);
    
    // Lastly, draw a clipping line between each character boxes
    for (int j = 0; j < charRegions.size(); j++)
    {
	line(thresholds[i], Point(charRegions[j].x - 1, charRegions[j].y), Point(charRegions[j].x - 1, charRegions[j].y + charRegions[j].height), Scalar(0, 0, 0));
	line(thresholds[i], Point(charRegions[j].x + charRegions[j].width + 1, charRegions[j].y), Point(charRegions[j].x + charRegions[j].width + 1, charRegions[j].y + charRegions[j].height), Scalar(0, 0, 0));
    }
  }
  
}
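// getCharBoxMask() is used at the top of cleanCharRegions() but is not part of
// this snippet. A sketch of the behaviour its usage implies -- a single-channel
// mask that is white inside every character box -- follows; this is an
// assumption about the helper, not the original code.
Mat getCharBoxMaskSketch(const Mat& reference, const vector<Rect>& charRegions)
{
  Mat mask = Mat::zeros(reference.size(), CV_8UC1);
  for (int j = 0; j < charRegions.size(); j++)
    rectangle(mask, charRegions[j], Scalar(255), CV_FILLED);   // fill each character box
  return mask;
}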
예제 #25
0
char detectBlueBlock(Mat image)
{
	int T=15; //threshold on the ratio of area to perimeter
	ColorHistogram hc;	
	MatND colorhist = hc.getHueHistogram(image);
	//iterate over the histogram data
	//hc.getHistogramStat(colorhist);
	/*
	Mat histImg = hc.getHistogramImage(colorhist);
	namedWindow("BlueBlockHistogram");
	imshow("BlueBlockHistogram", histImg);*/

	Mat thresholded, thresholded1, thresholded2, thresholded3;
	threshold(hc.v[0], thresholded1, 100, 255, 1);
	threshold(hc.v[0], thresholded2, 124, 255, 0); 
	threshold(hc.v[1], thresholded3, 125, 255, 1); //turns black
	thresholded = thresholded1+thresholded2+thresholded3;
	//imshow("1", thresholded1);
	//imshow("2", thresholded2);
	//imshow("3", thresholded3);

	//namedWindow("BlueBlockBinary");
	//imshow("BlueBlockBinary", thresholded);
	int top = (int) (0.05*thresholded.rows); 
	int bottom = (int) (0.05*thresholded.rows);
    int left = (int) (0.05*thresholded.cols); 
	int right = (int) (0.05*thresholded.cols);
	Scalar value = Scalar( 255 );
    copyMakeBorder( thresholded, thresholded, top, bottom, left, right, 0, value );
	
	/*
	Mat eroded;
	erode(thresholded, eroded, Mat());
	namedWindow("ErodedImage");
	imshow("ErodedImage", eroded);

	Mat dilated;
	erode(thresholded, dilated, Mat());
	namedWindow("DilatedImage");
	imshow("DilatedImage", dilated);*/

	//closing operation
	Mat closed;
	morphologyEx(thresholded, closed, MORPH_CLOSE,  Mat());
	//namedWindow("ClosedImage");
	//imshow("ClosedImage", closed);

	vector<vector<Point>>contours;
	findContours(closed, contours, CV_RETR_LIST, CV_CHAIN_APPROX_NONE);
	//filter out contours that do not qualify
	int cmin = 100; //minimum contour length
	vector<vector<Point>>::const_iterator itc = contours.begin();
	while (itc != contours.end())	
	{
		if (itc->size()<cmin)
			itc = contours.erase(itc);
		else
			itc++;
	}

	Mat result(closed.size(), CV_8U, Scalar(255));
	double area, length, p;
	double a[2] = {0,0}; 
	cout << "Size=" << contours.size() << endl;
	for ( int i=0; i<contours.size(); i++)
	{
		area = abs(contourArea( contours[i] ));
		length = abs(arcLength( contours[i], true ));
		p = area/length;
		if (p > a[0]) 
		{
			a[1] = a[0];
			a[0] = p;
		}
		else if (p > a[1]) a[1] = p; 
 		cout << "Area=" << area << "   " << "Length=" << length << "  " << "Property=" << p << endl;
	}
	drawContours(result, contours, -1, Scalar(0), 1);
	//namedWindow("DrawContours");
	//imshow("DrawContours", result);
	cout << "Property=" << a[1] << endl;
	//waitKey();
	
	if (a[1] > T) return BLUEBLOCK;
			  else return NOTHING;
}
예제 #26
0
/*
 * 1. CALCULATE RANGE FROM MEAN AND STANDARD DEVIATION
 * 2. CREATE A MASK FROM THE RANGE
 * 3. SMOOTH STUFF USING MORPHOLOGY
 * 4. DETECT THE CIRCLES
 */
vector<ILAC_Sphere>
ILAC_SphereFinder::findSpheres ( ILAC_Square &square, Mat &img,
                                 const size_t pixSphDiam )
{
  /* 1. CALCULATE RANGE FROM MEAN AND STANDARD DEVIATION */
  Mat mean, stddev;
  {/* Isolate the Hue */
    Mat tmpImg;
    vector<Mat> tmp_dim;
    cvtColor ( square.getImg(), tmpImg, CV_BGR2HSV_FULL );
    split( tmpImg, tmp_dim );
    tmpImg = tmp_dim[0];
    meanStdDev ( tmpImg, mean, stddev );
  }

  /*
   * Range will be -+ 1 standard deviation. This has aprox 68% of the data
   * (http://en.wikipedia.org/wiki/Standard_deviation)
   */
  Mat lowerb = mean - stddev;
  Mat upperb = mean + stddev;

  /* 2. CREATE A MASK FROM THE RANGE */
  Mat himg;
  {
    Mat tmpImg;
    vector<Mat> tmp_dim;
    cvtColor ( img, tmpImg, CV_BGR2HSV_FULL );
    split ( tmpImg, tmp_dim );
    himg = tmp_dim[0];
  }

  Mat mask = Mat::ones(img.rows, img.cols, CV_8UC1);
  inRange(himg, lowerb, upperb, mask);

  /* 3. SMOOTH STUFF USING MORPHOLOGY */
  {
    /*
     * Morphological open is 1.Erode and 2.Dilate. We use 1/4 of the sphere
     * diameter in the hope that its big enough to clean the noise, but not big
     * enough to remove the big sphere blob.
     */
    int openSize = pixSphDiam/4;
    Mat se = getStructuringElement ( MORPH_ELLIPSE, Size(openSize,openSize) );
    morphologyEx ( mask, mask, MORPH_OPEN, se );


    /*
     * We dilate with half of the sphere diameter and hope for a blob
     * that is approx double the radius of the original blob. The edges are more
     * roundy this way.
     */
    int dilateSize = pixSphDiam/2;
    se = getStructuringElement ( MORPH_ELLIPSE,
                                 Size(dilateSize,dilateSize) );
    dilate ( mask, mask, se );
  }

  /* 4. DETECT THE CIRCLES */
  /* Play with the arguments for HoughCircles. */
  vector<Vec3f> circles;
  vector<ILAC_Sphere> spheres;
  int minCircDist = 3*pixSphDiam/2;

  GaussianBlur ( mask, mask, Size(15, 15), 2, 2 );
  HoughCircles ( mask, circles, CV_HOUGH_GRADIENT, 2, minCircDist, 100, 40);

  for( size_t i = 0; i < circles.size(); i++ )
  {
    Point center(cvRound(circles[i][0]), cvRound(circles[i][1]));
    int radius = cvRound(circles[i][2]);
    ILAC_Sphere temp ( &img, center, radius );
    spheres.push_back(temp);

    for ( int j = i ;
         j > 0 && spheres[j].getRadius() < spheres[j-1].getRadius() ; j-- )
      std::swap( spheres[j], spheres[j-1] );
  }

  if ( spheres.size() < 3 )
    throw ILACExLessThanThreeSpheres ();

  return spheres;
}
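/*
 * The insertion-swap loop above keeps `spheres` ordered by ascending radius as
 * each circle is converted. An equivalent, arguably clearer sketch is to push
 * everything first and sort once at the end (the comparator takes copies so it
 * works whether or not getRadius() is const; this is an alternative, not the
 * original code):
 */
static bool sphereRadiusLess ( ILAC_Sphere a, ILAC_Sphere b )
{ return a.getRadius() < b.getRadius(); }
/* usage sketch: std::sort ( spheres.begin(), spheres.end(), sphereRadiusLess ); */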
예제 #27
0
void MainWindow::generarDataSet(char* fileName,vector<pair<string,int> > imagesList){

    FILE* archivoCaracteristicas = fopen(fileName,"w");

    for(unsigned i = 0; i<imagesList.size();i++)
    {

        cout<<"Archivo numero:"<<i<<" --- " <<imagesList.at(i).first.data()<<endl;
        //OPEN IMAGE
        srcImage = imread(imagesList.at(i).first.data(),0);
        line(srcImage,Point(0,0),Point(0,srcImage.rows),Scalar(255),20);
        line(srcImage,Point(0,0),Point(srcImage.cols,0),Scalar(255),20);
        line(srcImage,Point(srcImage.cols,srcImage.rows),Point(srcImage.cols,0),Scalar(255),20);
        line(srcImage,Point(srcImage.cols,srcImage.rows),Point(0,srcImage.rows),Scalar(255),20);

        equalizeHist(srcImage,srcImageEqualizada);
        //COMPUTE THRESHOLD
        this->dstImageThresholdAdaptative = ControlPreprocesamiento::umbralAutomaticoAdaptativo(srcImage);
        this->dstImageThreshold = ControlPreprocesamiento::umbralAutomatico(srcImageEqualizada);

        //FILTERING
        Mat BStructElement = getStructuringElement(CV_SHAPE_RECT,Size(2,2));
        morphologyEx(this->dstImageThresholdAdaptative, this->dstImageClose, CV_MOP_CLOSE, BStructElement,Point(-1,-1) ,2 );

        //SEGMENTATION
        Mat src = imread(imagesList.at(i).first);
        ControlSegmentacion::encontrarSegmentos(src,dstImageClose,dstImageSegmentacion,dstRectanguloEnvolvente);

        //THINNING
        dstImageAdelgazada = Mat::zeros(dstImageClose.size(), CV_8UC1);
        dstImageClose.copyTo(dstImageAdelgazada);

        ControlPreprocesamiento::adelgazamiento(dstImageAdelgazada);

        dstImageAdelgazada.copyTo(dstImageRectanguloEnvolvente);
        //Add 5 pixels of margin to the rectangle dimensions to give room
        //to the end-point search algorithm
        if(dstRectanguloEnvolvente.x <= 5 || dstRectanguloEnvolvente.y <= 5 ) continue;
        dstRectanguloEnvolvente.height += 10;
        dstRectanguloEnvolvente.width += 10;
        dstRectanguloEnvolvente.x -= 5;
        dstRectanguloEnvolvente.y -= 5;

        rectangle(dstImageRectanguloEnvolvente,dstRectanguloEnvolvente,Scalar(255));

        //FEATURE COMPUTATION
        cout<<"ancho "<<dstRectanguloEnvolvente.width<<endl;
        cout<<"alto "<<dstRectanguloEnvolvente.height<<endl;
        cout<<"x "<<dstRectanguloEnvolvente.x<<endl;
        cout<<"y "<<dstRectanguloEnvolvente.y<<endl;
        cout<<dstImageAdelgazada.rows<<endl;

        dstImageFinal = dstImageAdelgazada(dstRectanguloEnvolvente).clone();
        double relacionAnchoAlto = (double)dstImageFinal.cols/dstImageFinal.rows;

        vector<Point> endPoints;
        vector<Point> insersectPoints;
        ControlObtencionCaracteristicas::buscarPuntos(dstImageFinal,endPoints, insersectPoints);
        cout<<endPoints.size()<<endl;

        Mat dstImageMorph = ControlPreprocesamiento::morphImage(dstImageThreshold);
        vector<vector<Point> > contornos;

        contornos = ControlObtencionCaracteristicas::getContornos(dstImageMorph);
        vector<vector<double> > momentosHu = ControlObtencionCaracteristicas::getHuMoments(contornos);

    // ENVELOPING POLYGON
       vector<Point > poligono = ControlObtencionCaracteristicas::getEnvolvingPolygon( contornos);
       Mat poligonoimagen = ControlObtencionCaracteristicas::getEnvolvingPolygonImage(srcImage, poligono);
       vector<vector<Point > > contornoPoligono = ControlObtencionCaracteristicas::getContornos(poligonoimagen);
           momentosHu = ControlObtencionCaracteristicas::getHuMoments(contornoPoligono);

     /*   cout<<momentosHu.at(0).at(0)<<","
            <<momentosHu.at(0).at(1)<<","
            <<momentosHu.at(0).at(2)<<","
            <<momentosHu.at(0).at(3)<<","
            <<momentosHu.at(0).at(4)<<","
            <<momentosHu.at(0).at(5)<<","
            <<momentosHu.at(0).at(6)<2<","
            <<endl;*/
        ///Determine which cell of the 3x3 grid each point belongs to
           int cuadEndPoints0 = 0,cuadEndPoints1 = 0, cuadEndPoints2 = 0, cuadEndPoints3 = 0,
                   cuadEndPoints4 = 0, cuadEndPoints5 = 0, cuadEndPoints6 = 0, cuadEndPoints7 = 0, cuadEndPoints8 = 0;
           int mitadX1 = int(dstImageFinal.cols/3);
           int mitadX2 = int(2*dstImageFinal.cols/3);
           int mitadY1 = int(dstImageFinal.rows/3);
           int mitadY2 = int(2*dstImageFinal.rows/3);
           Point p;

           //Count how many endPoints fall in each cell
           for(unsigned c = 0; c< endPoints.size();c++){
               p = endPoints.at(c);
               //0
               if( p.x <  mitadX1 && p.y <  mitadY1 ){ cuadEndPoints0++; continue;}
               //1
               if( p.x <  mitadX2 && p.y <  mitadY1 ){ cuadEndPoints1++; continue;}
               //2
               if( p.x >= mitadX2 && p.y <  mitadY1 ){ cuadEndPoints2++; continue;}
               //3
               if( p.x <  mitadX1 && p.y <  mitadY2 ){ cuadEndPoints3++; continue;}
               //4
               if( p.x <  mitadX2 && p.y <  mitadY2 ){ cuadEndPoints4++; continue;}
               //5
               if( p.x >= mitadX2 && p.y <  mitadY2 ){ cuadEndPoints5++; continue;}
               //6
               if( p.x <  mitadX1 && p.y >= mitadY2 ){ cuadEndPoints6++; continue;}
               //7
               if( p.x <  mitadX2 && p.y >= mitadY2 ){ cuadEndPoints7++; continue;}
               //8
               if( p.x >= mitadX2 && p.y >= mitadY2 ){ cuadEndPoints8++; continue;}
           }

           int cuadInterPoints0 = 0,cuadInterPoints1 = 0, cuadInterPoints2 = 0, cuadInterPoints3 = 0,
                   cuadInterPoints4 = 0, cuadInterPoints5 = 0, cuadInterPoints6 = 0, cuadInterPoints7 = 0, cuadInterPoints8 = 0;


           for(unsigned z = 0; z< insersectPoints.size();z++)
           {
               p = insersectPoints.at(z);
               //0
               if( p.x <  mitadX1 && p.y <  mitadY1 ){ cuadInterPoints0=1; continue;}
               //1
               if( p.x <  mitadX2 && p.y <  mitadY1 ){ cuadInterPoints1=1; continue;}
               //2
               if( p.x >= mitadX2 && p.y <  mitadY1 ){ cuadInterPoints2=1; continue;}
               //3
               if( p.x <  mitadX1 && p.y <  mitadY2 ){ cuadInterPoints3=1; continue;}
               //4
               if( p.x <  mitadX2 && p.y <  mitadY2 ){ cuadInterPoints4=1; continue;}
               //5
               if( p.x >= mitadX2 && p.y <  mitadY2 ){ cuadInterPoints5=1; continue;}
               //6
               if( p.x <  mitadX1 && p.y >= mitadY2 ){ cuadInterPoints6=1; continue;}
               //7
               if( p.x <  mitadX2 && p.y >= mitadY2 ){ cuadInterPoints7=1; continue;}
               //8
               if( p.x >= mitadX2 && p.y >= mitadY2 ){ cuadInterPoints8=1; continue;}
           }


        fprintf(archivoCaracteristicas,"%f;%f;%f;%f;%f;%f;%f;%f;%d;%d;%d;%d;%d;%d;%d;%d;%d;%d;%d;%d;%d\n",
              momentosHu.at(0).at(0),
              momentosHu.at(0).at(1),
              momentosHu.at(0).at(2),
              momentosHu.at(0).at(3),
              momentosHu.at(0).at(4),
              momentosHu.at(0).at(5),
              momentosHu.at(0).at(6),
              relacionAnchoAlto,
              endPoints.size(),//
              cuadEndPoints0,
              cuadEndPoints1,
              cuadEndPoints2,
              cuadEndPoints3,
              cuadEndPoints4,
              cuadEndPoints5,
              cuadEndPoints6,
              cuadEndPoints7,
              cuadEndPoints8,
//              cuadInterPoints0,
//              cuadInterPoints1,
//              cuadInterPoints2,
//              cuadInterPoints3,
//              cuadInterPoints4,
//              cuadInterPoints5,
//              cuadInterPoints6,
//              cuadInterPoints7,
//              cuadInterPoints8,
              contornos.size(),
              poligono.size(),
              imagesList.at(i).second);

    }

    fclose(archivoCaracteristicas);
}
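// The two if/continue chains above classify each point into one cell of a 3x3
// grid over the thinned character image. A minimal sketch of the same mapping
// as a helper (hypothetical name; returns 0..8 in row-major order, matching
// cuadEndPoints0..cuadEndPoints8 up to integer-rounding differences at the
// cell borders; needs <algorithm>):
static int cellIndexSketch(const Point& p, int cols, int rows)
{
    int col = std::min(2, p.x / std::max(1, cols / 3));   // 0,1,2 left to right
    int row = std::min(2, p.y / std::max(1, rows / 3));   // 0,1,2 top to bottom
    return row * 3 + col;
}
// usage sketch (if the nine counters were stored in an array):
//   cellCount[cellIndexSketch(p, dstImageFinal.cols, dstImageFinal.rows)]++;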
vector<Placa> RegionPlaca::segmento(Mat input){
	vector<Placa> output;// create a vector of Placa objects

	//Convert the image to grayscale
	Mat img_gray;
	cvtColor(input, img_gray, CV_BGR2GRAY);
	blur(img_gray, img_gray, Size(5, 5));

	//To find the vertical lines of the plate, those lines must first be emphasized
	Mat img_sobel;
	Sobel(img_gray, img_sobel, CV_8U, 1, 0, 3, 1, 0, BORDER_DEFAULT);
	if (showSteps)
		imshow("Sobel", img_sobel);

	//Binarize the image
	Mat img_threshold;
	threshold(img_sobel, img_threshold, 0, 255, CV_THRESH_OTSU + CV_THRESH_BINARY);
	if (showSteps)
		imshow("Threshold", img_threshold);

	//Connect the vertical and horizontal lines with a morphological close
	Mat element = getStructuringElement(MORPH_RECT, Size(20, 5));
	morphologyEx(img_threshold, img_threshold, CV_MOP_CLOSE, element);
	if (showSteps)
		imshow("Close", img_threshold);

	//Find all possible license plate regions
	vector< vector< Point> > regiones;
	findContours(img_threshold,
		regiones, // vector of regions
		CV_RETR_EXTERNAL, // retrieve the external contours
		CV_CHAIN_APPROX_NONE); // all pixels of each contours

	//Analyze the regions one by one
	vector<vector<Point> >::iterator itc = regiones.begin();
	vector<RotatedRect> rects; // rectangles within the allowed limits


	//Remove the regions that are not within the allowed limits.
	while (itc != regiones.end()) {
		//Create bounding rect of object
		RotatedRect angRect = minAreaRect(Mat(*itc));
		if (!verificarTamaño(angRect, false)){
			itc = regiones.erase(itc);
		}
		else{
			++itc;
			rects.push_back(angRect);
		}
	}

	// Draw in blue the regions that lie within the limits
	cv::Mat resultado;
	input.copyTo(resultado); // copy the input image into 'resultado'
	cv::drawContours(resultado, regiones,
		-1, // draw all contours
		cv::Scalar(255, 0, 0), // in blue
		1, // line thickness
		16);// line type (antialiased)

	if (showSteps)
		imshow("ContornosAzul", resultado);

	
	for (int i = 0; i< rects.size(); i++){
		std::string cadena = "";

		cadena = static_cast<std::ostringstream*>(&(std::ostringstream() << i))->str();
		//For better rect cropping of each possible box,
		//run the floodfill algorithm (the plate has a white background)
		//so that the contour box can be recovered more cleanly
		circle(resultado, rects[i].center, 3, Scalar(0, 255, 0), -1);
		//get the min size between width and height
		float minSize = (rects[i].size.width < rects[i].size.height) ? rects[i].size.width : rects[i].size.height;
		minSize = minSize - minSize*0.5;
		//initialize rand and get NumSeeds points around the center for the floodfill algorithm
		srand(time(NULL));
		//Initialize floodfill parameters and variables
		Mat mask;
		mask.create(input.rows + 2, input.cols + 2, CV_8UC1);
		mask = Scalar::all(0);
		int loDiff = 30;
		int upDiff = 30;
		int connectivity = 4;
		int newMaskVal = 255;
		int NumSeeds = 10;
		Rect ccomp;
		int flags = connectivity + (newMaskVal << 8) + CV_FLOODFILL_FIXED_RANGE + CV_FLOODFILL_MASK_ONLY;
		for (int j = 0; j<NumSeeds; j++){
			Point seed;
			seed.x = rects[i].center.x + rand() % (int)minSize - (minSize / 2);
			seed.y = rects[i].center.y + rand() % (int)minSize - (minSize / 2);
			circle(resultado, seed, 1, Scalar(0, 255, 255), -1);
			int area = floodFill(input, mask, seed, Scalar(255, 0, 0), &ccomp, Scalar(loDiff, loDiff, loDiff), Scalar(upDiff, upDiff, upDiff), flags);
		}
		if (showSteps)
			imshow("MASK" + cadena, mask);
		//cvWaitKey(0);

		//Check whether the new floodfill mask matches a correct patch.
		//Collect all detected mask points to get the minimal rotated rect
		vector<Point> pointsInterest;
		Mat_<uchar>::iterator itMask = mask.begin<uchar>();
		Mat_<uchar>::iterator end = mask.end<uchar>();
		for (; itMask != end; ++itMask)
			if (*itMask == 255)
				pointsInterest.push_back(itMask.pos());

		RotatedRect minRect = minAreaRect(pointsInterest);

		//imshow("Rotated minRECT" + cadena, result);

		if (verificarTamaño (minRect, true)){
			// rotated rectangle drawing 
			Point2f rect_points[4]; minRect.points(rect_points);
			for (int j = 0; j < 4; j++)
				line(resultado, rect_points[j], rect_points[(j + 1) % 4], Scalar(0, 0, 255), 1, 8);

			//imshow("Rotated mminRECT SEGUNDO" + cadena, result);
			//Get rotation matrix
			float r = (float)minRect.size.width / (float)minRect.size.height;
			float angle = minRect.angle;
			if (r<1)
				angle = 90 + angle;
			Mat rotmat = getRotationMatrix2D(minRect.center, angle, 1);

			//Create and rotate image
			Mat img_rotated;
			warpAffine(input, img_rotated, rotmat, input.size(), CV_INTER_CUBIC);

			//Crop the image of the identified plate
			Size rect_size = minRect.size;
			if (r < 1)
				swap(rect_size.width, rect_size.height);
			Mat img_crop;
			getRectSubPix(img_rotated, rect_size, minRect.center, img_crop);
			if (showSteps)
				imshow("imgCrop" + cadena, img_crop);

			Mat resultResized;
			resultResized.create(33, 144, CV_8UC3);
			resize(img_crop, resultResized, resultResized.size(), 0, 0, INTER_CUBIC);
			//Convert the cropped image to grayscale
			Mat grayResult;
			cvtColor(resultResized, grayResult, CV_BGR2GRAY);
			blur(grayResult, grayResult, Size(3, 3));
			grayResult = c1Bgr(grayResult);
			if (showSteps){
				stringstream ss(stringstream::in | stringstream::out);
				ss << "tmp/" << nombreArchivo << "_" << i << ".jpg";

				vector<int> compression_params;
				compression_params.push_back(CV_IMWRITE_PNG_COMPRESSION);
				compression_params.push_back(9);

				bool success = imwrite("C:/Users/gian/Documents/Visual Studio 2013/Projects/PlacaRNA/CaractSVM/" + ss.str(), grayResult, compression_params);
				if (success)
					cout << ss.str() << endl;
			}
			output.push_back(Placa(grayResult, minRect.boundingRect()));
		}
	}
	if (showSteps)
		imshow("Contours", resultado);

	return output;
}
int Pretreatment::Reconstruction(const Mat &binaryMat, Mat &morphologyMat)
{
	Mat element = getStructuringElement( MORPH_RECT, Size(9,9) );
	morphologyEx( binaryMat, morphologyMat,	MORPH_OPEN, element, Point(-1,-1), 1 );
	return 0;
}
예제 #30
0
int main(int argc,char**argv)
{
	int scale = 1;
	int delta = 0;
	int ddepth = CV_16S;
//	check the number of parameters
	if(argc !=2)
	{	
		printf("please follow like this\n");
		printf("exe[] img_name\n");
		return -1;
	}
//	reads image
	img_src = imread(argv[1]);
//	check whether read operation is ok or not 
	if(img_src.data == NULL)
	{	
		printf("could not open or find the image!\n");
		return -1;
	}
//	use Gaussian blur to reduce the noise
	GaussianBlur(img_src,img_src,Size(3,3),0,0,BORDER_DEFAULT);

//	convert source image to gray image
	cvtColor(img_src,img_gray,CV_BGR2GRAY);
//	sobel in x direction
	Sobel(img_gray,grad_x,ddepth,1,0,3,scale,delta,BORDER_DEFAULT);
	convertScaleAbs(grad_x,abs_grad_x);

//	use sobel in y direction
	Sobel(img_gray,grad_y,ddepth,0,1,3,scale,delta,BORDER_DEFAULT);
	convertScaleAbs(grad_y,abs_grad_y);
//	combine the x and y gradients with equal weights
	addWeighted(abs_grad_x,0.5,abs_grad_y,0.5,0,grad);

//	binarize with a threshold selected by the OTSU method
	threshold(grad,img_bin_thre,0,255,THRESH_BINARY|THRESH_OTSU);
//	clean the binary image with morphological open then close (3 iterations)
	Mat element = getStructuringElement(MORPH_RECT,Size(2*1+1,2*1+1),Point(-1,-1));
	for(int i = 0;i < 3; i++)
	{
		morphologyEx(img_bin_thre,img_bin_thre,MORPH_OPEN,element);
		morphologyEx(img_bin_thre,img_bin_thre,MORPH_CLOSE,element);
	}
//	original method; this works worse than morphologyEx
	
//	dilate(img_bin_thre,img_bin_thre,element);
//	namedWindow("dilated",CV_WINDOW_NORMAL);
//	imshow("dilated",img_bin_thre);
//	erode(img_bin_thre,img_bin_thre,element);
//	namedWindow("erode",CV_WINDOW_NORMAL);
//	imshow("erode",img_bin_thre);

//	find contours; the binarized image must be used here
//	define 
	vector<Vec4i> hierarchy;
	vector< vector<Point> >contours;
//	use function
	findContours(img_bin_thre,contours,hierarchy,CV_RETR_CCOMP,CV_CHAIN_APPROX_SIMPLE,Point(0,0));
//	adjust the min and max area values to the actual scene
	int min_area = 100000;
	int max_area = 300000;
	Rect mRect;
	int tempArea;
//	define the color used to draw contours
	Scalar color = Scalar(255,255,0);
	Mat drawing = Mat::zeros(img_bin_thre.size(),CV_8UC1);
	for(int i = 0;i < contours.size();i++)
	{
//	get the bounding rectangle of the contour
		mRect = boundingRect(contours[i]);
//	compute the area of mRect
		tempArea = mRect.height * mRect.width;
//	for debug
//		printf("tempArea.height:%d\ttempArea.width:%d\ttempArea.area=%d\n",mRect.height,mRect.width,tempArea);
//	keep only regions whose area and aspect ratio meet the requirements
		if(((double)mRect.width/(double)mRect.height) > 2.0 && (tempArea > min_area) && ((double)mRect.width/(double)mRect.height < 4) && (tempArea < max_area))
//	draw contours
		{
			drawContours(drawing,contours,i,color,2,8,hierarchy);
//	two images are used here: one comes from the thresholded image, the other from the original gray image;
//	using only the first one may not be enough
			getRectSubPix(img_bin_thre,Size(mRect.width,mRect.height),Point(mRect.x+mRect.width/2,mRect.y\
					 +mRect.height/2),img_get_rect);	
			getRectSubPix(img_gray,Size(mRect.width,mRect.height),Point(mRect.x+mRect.width/2,mRect.y\
					 +mRect.height/2),img_get_rect_new);
		}
	}
	if(img_get_rect.data == NULL)
	{
		printf("img_get rect is null\n");
		return -1;
	}
	if(img_get_rect_new.data == NULL)
	{
		printf("img_get_rect_new is null!\n");
		return -1;
	}

//	use the HoughLinesP

//	define lines
	vector<Vec4i> lines;
//	Mat color_dst;
//	img_lines = img_get_rect.clone();
	cvtColor(img_get_rect,img_lines,CV_GRAY2BGR);
//	detect lines in the image img_get_rect
	HoughLinesP(img_get_rect,lines,1,CV_PI/180,200,200,10);
	printf("lines.size()=%d\n",lines.size());
	
	int distance = 0;
//	int theta;
	double temp_slope = 0,slope;
	int res_x1,res_y1,res_x2,res_y2;
//	group lines by slope and record, for each slope, how many lines share it and the longest one
//	vector <int,int> ivect;//first is the number of lines with this slope, next is the longest distance 
//	map <double,ivect> imap;
	int delta_x,delta_y;

	
	std::vector <dou_int> ivec;
	std::vector <dou_int>::iterator iter;

	for(size_t i = 0;i < lines.size();i++)
	{
		Vec4i l = lines[i];
		line(img_lines,Point(l[0],l[1]),Point(l[2],l[3]),Scalar(0,0,255),3);
//	find tilt angle
		if(l[2]-l[0] == 0)
			;
		else
		{
//	compute this line's slope
//	delta_y / delta_x
			delta_y = (l[3]-l[1]);
			delta_x = (l[2]-l[0]);
			
			distance = delta_y*delta_y+delta_x*delta_x;
			temp_slope = ((double)delta_y)/((double)(delta_x));
			printf("in i=%d,delta_y=%d,delta_x=%d\n",i,delta_y,delta_x);

			for(iter = ivec.begin();iter != ivec.end();iter++)
			{
//	if the slopes match, increment num and update the max length
				if(abs(iter->slope - temp_slope) < (double)0.01)
				{
					iter->num++;
					if(iter->maxlength < distance)
					{
						iter->maxlength = distance;
						iter->v0 = Point(l[0],l[1]);
						iter->v1 = Point(l[2],l[3]);
					}
					break;
				}
			}
//	slope not seen before, so add a new entry
			if(iter == ivec.end())
			{
				ivec.push_back(dou_int(temp_slope,distance,1,Point(l[0],l[1]),Point(l[2],l[3])));	
			}
		}
	}
	int max = 0;
	int j = 0;
	int index = 0;
	dou_int res;

	for(j=0,iter = ivec.begin();iter != ivec.end();j++,iter++)
	{
		if(iter->num > max)
		{
			max = iter->num;
			index = j;
		}
	}
	printf("index is %d\n",index);
	for(j=0,iter = ivec.begin();iter != ivec.end() && j <= index;j++,iter++)
	{
		if(j == index)
		{
			res = dou_int(iter->slope,iter->maxlength,iter->num,iter->v0,iter->v1);
			printf("slope is %f\n",iter->slope);
			break;
		}
	}
//	draw the dominant tilt line
	line(img_lines,res.v0,res.v1,Scalar(255,255,0),1);


	Mat img_lines_out;
	Point center = Point(img_lines.cols/2,img_lines.rows/2);
	double angle =(double)(180/CV_PI)*(double)atan(res.slope);
	printf("angle is :%f\n",angle);
	Mat rot_mat = getRotationMatrix2D(center,angle,1.0);
	warpAffine(img_lines,img_lines_out,rot_mat,img_lines.size());
	Mat img_rect;
	warpAffine(img_get_rect_new,img_rect,rot_mat,img_get_rect_new.size());

	cvtColor(img_lines_out,img_lines_out,CV_BGR2GRAY);
	printf("img_clip's channel is:%d\n",img_lines_out.channels());
	threshold(img_lines_out,img_lines_out,10,255,THRESH_BINARY | THRESH_OTSU);

	Mat img_clip;
	int up,down;

	if(-1 != remove_Border_Vertical(img_lines_out,up,down))
	{
		printf("up=%d,down=%d\n",up,down);
		getRectSubPix(img_lines_out,Size(img_lines_out.cols,down-up),Point(img_lines_out.cols/2,up+(down-up)/2),img_clip);
		namedWindow("line_clip",CV_WINDOW_NORMAL);
		imshow("line_clip",img_clip);
		getRectSubPix(img_rect,Size(img_rect.cols,down-up),Point(img_rect.cols/2,up+(down-up)/2),img_clip);
		namedWindow("new_clip",CV_WINDOW_NORMAL);
		imshow("new_clip",img_clip);
	}
//	binarization with OTSU
	threshold(img_clip,img_clip,10,255,THRESH_BINARY | THRESH_OTSU);
	namedWindow("newrect",CV_WINDOW_NORMAL);
	imshow("newrect",img_clip);

	parting_char(img_clip);
	
	waitKey(0);
	return 0;
}
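//	dou_int is used throughout main() (fields slope, maxlength, num, v0, v1)
//	but its declaration is not part of this snippet. A sketch of a definition
//	consistent with that usage (an assumption, not the original declaration;
//	it would have to appear before main()):
struct dou_int
{
	double slope;    // slope (delta_y / delta_x) shared by this group of lines
	int maxlength;   // squared length of the longest line in the group
	int num;         // how many detected lines fall into this group
	Point v0, v1;    // endpoints of the longest line
	dou_int() : slope(0), maxlength(0), num(0) {}
	dou_int(double s, int len, int n, Point p0, Point p1)
		: slope(s), maxlength(len), num(n), v0(p0), v1(p1) {}
};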