Code Example #1
void CharacterSegmenter::filterEdgeBoxes(vector<Mat> thresholds, const vector<Rect> charRegions, float avgCharWidth, float avgCharHeight)
{
  const float MIN_ANGLE_FOR_ROTATION = 0.4;
  int MIN_CONNECTED_EDGE_PIXELS = (avgCharHeight * 1.5);
  
  // Sometimes the rectangle won't be very tall, making it impossible to detect an edge
  // Adjust for this here.
  int alternate = thresholds[0].rows * 0.92;
  if (alternate < MIN_CONNECTED_EDGE_PIXELS && alternate > avgCharHeight)
    MIN_CONNECTED_EDGE_PIXELS = alternate;
  
  // 
  // Pay special attention to the edge boxes.  If it's a skinny box, and the vertical height extends above our bounds... remove it.
  //while (charBoxes.size() > 0 && charBoxes[charBoxes.size() - 1].width < MIN_SEGMENT_WIDTH_EDGES)
  //  charBoxes.erase(charBoxes.begin() + charBoxes.size() - 1);
  // Now filter the "edge" boxes.  We don't want to include skinny boxes on the edges, since these could be plate boundaries
  //while (charBoxes.size() > 0 && charBoxes[0].width < MIN_SEGMENT_WIDTH_EDGES)
  //  charBoxes.erase(charBoxes.begin() + 0);  
  
    
  // TECHNIQUE #1
  // Check for long vertical lines.  Once the line is too long, mask the whole region
  
  if (charRegions.size() <= 1)
    return;
  
  // Check both sides to see where the edges are
  // The first starts at the right edge of the leftmost char region and works its way left
  // The second starts at the left edge of the rightmost char region and works its way right.
  // We start by rotating the threshold image to the correct angle
  // then check each column 1 by 1.
    
  vector<int> leftEdges;
  vector<int> rightEdges;
  
  for (int i = 0; i < thresholds.size(); i++)
  {
    Mat rotated;
    
    if (top.angle > MIN_ANGLE_FOR_ROTATION)
    {
      // Rotate image:
      rotated = Mat(thresholds[i].size(), thresholds[i].type());
      Mat rot_mat( 2, 3, CV_32FC1 );
      Point center = Point( thresholds[i].cols/2, thresholds[i].rows/2 );
      
      rot_mat = getRotationMatrix2D( center, top.angle, 1.0 );
      warpAffine( thresholds[i], rotated, rot_mat, thresholds[i].size() );
    }
    else
    {
      rotated = thresholds[i];
    }

    int leftEdgeX = 0;
    int rightEdgeX = rotated.cols;
    // Do the left side
    int col = charRegions[0].x + charRegions[0].width;
    while (col >= 0)
    {
      
      int rowLength = getLongestBlobLengthBetweenLines(rotated, col);
      
      if (rowLength > MIN_CONNECTED_EDGE_PIXELS)
      {
        leftEdgeX = col;
        break;
      }
      
      col--;
    }
    
    col = charRegions[charRegions.size() - 1].x;
    while (col < rotated.cols)
    {
      
      int rowLength = getLongestBlobLengthBetweenLines(rotated, col);
      
      if (rowLength > MIN_CONNECTED_EDGE_PIXELS)
      {
        rightEdgeX = col;
        break;
      }
      col++;
    }
    
    
    if (leftEdgeX != 0)
      leftEdges.push_back(leftEdgeX);
    if (rightEdgeX != thresholds[i].cols)
      rightEdges.push_back(rightEdgeX);
    
  }
  
  int leftEdge = 0;
  int rightEdge = thresholds[0].cols;
  
  // Assign the edge values to the SECOND closest value
  if (leftEdges.size() > 1)
  {
    sort(leftEdges.begin(), leftEdges.end());
    leftEdge = leftEdges[leftEdges.size() - 2] + 1;
  }
  if (rightEdges.size() > 1)
  {
    sort(rightEdges.begin(), rightEdges.end());
    rightEdge = rightEdges[1] - 1;
  }
  
  if (leftEdge != 0 || rightEdge != thresholds[0].cols)
  {
    Mat mask = Mat::zeros(thresholds[0].size(), CV_8U);
    rectangle(mask, Point(leftEdge, 0), Point(rightEdge, thresholds[0].rows), Scalar(255,255,255), -1);
    
    if (top.angle > MIN_ANGLE_FOR_ROTATION)
    {
      // Rotate mask:
      Mat rot_mat( 2, 3, CV_32FC1 );
      Point center = Point( mask.cols/2, mask.rows/2 );
      
      rot_mat = getRotationMatrix2D( center, top.angle * -1, 1.0 );
      warpAffine( mask, mask, rot_mat, mask.size() );
    }

    
    // If our edge mask covers more than x% of the char region, mask the whole thing...
    const float MAX_COVERAGE_PERCENT = 0.6;
    int leftCoveragePx = leftEdge - charRegions[0].x;
    float leftCoveragePercent = ((float) leftCoveragePx) / ((float) charRegions[0].width);
    int rightCoveragePx = (charRegions[charRegions.size() -1].x + charRegions[charRegions.size() -1].width) - rightEdge;
    float rightCoveragePercent = ((float) rightCoveragePx) / ((float) charRegions[charRegions.size() -1].width);
    if ((leftCoveragePercent > MAX_COVERAGE_PERCENT) ||
      (charRegions[0].width - leftCoveragePx < config->segmentationMinBoxWidthPx))
    {
      rectangle(mask, charRegions[0], Scalar(0,0,0), -1);  // Mask the whole region
      if (this->config->debugCharSegmenter)
        cout << "Edge Filter: Entire left region is erased" << endl;
    }
    if ((rightCoveragePercent > MAX_COVERAGE_PERCENT) ||
      (charRegions[charRegions.size() -1].width - rightCoveragePx < config->segmentationMinBoxWidthPx))
    {
      rectangle(mask, charRegions[charRegions.size() -1], Scalar(0,0,0), -1);
      if (this->config->debugCharSegmenter)
        cout << "Edge Filter: Entire right region is erased" << endl;
    }
    
    for (int i = 0; i < thresholds.size(); i++)
    {
      bitwise_and(thresholds[i], mask, thresholds[i]);
    }
    
    
    if (this->config->debugCharSegmenter)
    {
      cout << "Edge Filter: left=" << leftEdge << " right=" << rightEdge << endl;  
      Mat bordered = addLabel(mask, "Edge Filter #1");
      imgDbgGeneral.push_back(bordered);
      
      Mat invertedMask(mask.size(), mask.type());
      bitwise_not(mask, invertedMask);
      for (int z = 0; z < imgDbgCleanStages.size(); z++)
        fillMask(imgDbgCleanStages[z], invertedMask, Scalar(0,0,255));
    }
  }
  
  // TECHNIQUE #2
  // Check for tall skinny blobs on the edge boxes.  If they're too long and skinny, mask the whole char region
  /*
   * 
  float MIN_EDGE_CONTOUR_HEIGHT = avgCharHeight * 0.7;
  float MIN_EDGE_CONTOUR_AREA_PCT = avgCharHeight * 0.1;
   
  for (int i = 0; i < thresholds.size(); i++)
  {
      // Just check the first and last char box.  If the contour extends too far above/below the line.  Drop it.
      
      for (int boxidx = 0; boxidx < charRegions.size(); boxidx++)
      {
	if (boxidx != 0 && boxidx != charRegions.size() - 1)
	{
	  // This is a middle box.  we never want to filter these here.
	  continue;
	}
	
	vector<vector<Point> > contours;
	Mat mask = Mat::zeros(thresholds[i].size(),CV_8U);
	rectangle(mask, charRegions[boxidx], Scalar(255,255,255), CV_FILLED);
	
	bitwise_and(thresholds[i], mask, mask);
	findContours(mask, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
	//int tallContourIndex = isSkinnyLineInsideBox(thresholds[i], charRegions[boxidx], allContours[i], hierarchy[i], avgCharWidth, avgCharHeight);
	float tallestContourHeight = 0;
	float fattestContourWidth = 0;
	float biggestContourArea = 0;
	for (int c = 0; c < contours.size(); c++)
	{
	    Rect r = boundingRect(contours[c]);
	    if (r.height > tallestContourHeight)
	      tallestContourHeight = r.height;
	    if (r.width > fattestContourWidth)
	      fattestContourWidth = r.width;
	    float a = r.area();
	    if (a > biggestContourArea)
	      biggestContourArea = a;
	}
	
	float minArea = charRegions[boxidx].area() * MIN_EDGE_CONTOUR_AREA_PCT;
	if ((fattestContourWidth < MIN_BOX_WIDTH_PX) || 
	  (tallestContourHeight < MIN_EDGE_CONTOUR_HEIGHT) ||
	  (biggestContourArea < minArea)
	)
	{
	  // Find a good place to MASK this contour.
	  // for now, just mask the whole thing
	  if (this->debug)
	  {
	    
	    rectangle(imgDbgCleanStages[i], charRegions[boxidx], COLOR_DEBUG_EDGE, 2);
	    cout << "Edge Filter: threshold " << i << " box " << boxidx << endl;
	  }
	  rectangle(thresholds[i], charRegions[boxidx], Scalar(0,0,0), -1);
	}
	else
	{
	  filteredCharRegions.push_back(charRegions[boxidx]);  
	}
      }
  }
  */
  
}
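The column scanner getLongestBlobLengthBetweenLines() is called above but not shown in this excerpt. A minimal sketch of the idea it implements (the longest run of connected white pixels in one column of the rotated threshold image), assuming CV_8U threshold images; treat this as an illustration, not the actual openalpr helper:

static int longestRunInColumn(const Mat& threshold, int col)
{
  // scan the column top to bottom, tracking the longest run of nonzero pixels
  int longest = 0;
  int current = 0;
  for (int row = 0; row < threshold.rows; row++)
  {
    if (threshold.at<uchar>(row, col) != 0)
    {
      current++;
      if (current > longest)
        longest = current;
    }
    else
      current = 0;
  }
  return longest;
}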
Code Example #2
File: detectormorph.cpp  Project: Joelone/openalpr
  vector<PlateRegion> DetectorMorph::detect(Mat frame, std::vector<cv::Rect> regionsOfInterest) {

    Mat frame_gray,frame_gray_cp;

    if (frame.channels() > 2)
    {
      cvtColor( frame, frame_gray, CV_BGR2GRAY );
    }
    else
    {
      frame.copyTo(frame_gray);
    }

    frame_gray.copyTo(frame_gray_cp);
    blur(frame_gray, frame_gray, Size(5, 5));

    vector<PlateRegion> detectedRegions;
    for (int i = 0; i < regionsOfInterest.size(); i++) {
      Mat img_open, img_result;
      Mat element = getStructuringElement(MORPH_RECT, Size(30, 4));
      morphologyEx(frame_gray, img_open, CV_MOP_OPEN, element, cv::Point(-1, -1));

      img_result = frame_gray - img_open;

      if (config->debugDetector && config->debugShowImages) {
        imshow("Opening", img_result);
      }

      //threshold image using otsu thresholding
      Mat img_threshold, img_open2;
      threshold(img_result, img_threshold, 0, 255, CV_THRESH_OTSU + CV_THRESH_BINARY);

      if (config->debugDetector && config->debugShowImages) {
        imshow("Threshold Detector", img_threshold);
      }

      Mat diamond(5, 5, CV_8U, cv::Scalar(1));

      diamond.at<uchar>(0, 0) = 0;
      diamond.at<uchar>(0, 1) = 0;
      diamond.at<uchar>(1, 0) = 0;
      diamond.at<uchar>(4, 4) = 0;
      diamond.at<uchar>(3, 4) = 0;
      diamond.at<uchar>(4, 3) = 0;
      diamond.at<uchar>(4, 0) = 0;
      diamond.at<uchar>(4, 1) = 0;
      diamond.at<uchar>(3, 0) = 0;
      diamond.at<uchar>(0, 4) = 0;
      diamond.at<uchar>(0, 3) = 0;
      diamond.at<uchar>(1, 4) = 0;

      morphologyEx(img_threshold, img_open2, CV_MOP_OPEN, diamond, cv::Point(-1, -1));
      Mat rectElement = getStructuringElement(cv::MORPH_RECT, Size(13, 4));
      morphologyEx(img_open2, img_threshold, CV_MOP_CLOSE, rectElement, cv::Point(-1, -1));

      if (config->debugDetector && config->debugShowImages) {
        imshow("Close", img_threshold);
        waitKey(0);
      }

      //Find contours of possibles plates
      vector< vector< Point> > contours;
      findContours(img_threshold,
              contours, // a vector of contours
              CV_RETR_EXTERNAL, // retrieve the external contours
              CV_CHAIN_APPROX_NONE); // all pixels of each contour

      //Start iterating over each contour found
      vector<vector<Point> >::iterator itc = contours.begin();
      vector<RotatedRect> rects;

      //Remove patches that are not inside the limits of aspect ratio and area.
      while (itc != contours.end()) {
        //Create bounding rect of object
        RotatedRect mr = minAreaRect(Mat(*itc));
        
        if (mr.angle < -45.) {
          mr.angle += 90.0;
          swap(mr.size.width, mr.size.height);
        }
        
        if (!CheckSizes(mr))
          itc = contours.erase(itc);
        else {
          ++itc;
          rects.push_back(mr);
        }
      }

      //Now prune by checking all candidate plates for a min/max number of blobs
      Mat img_crop, img_crop_b, img_crop_th, img_crop_th_inv;
      vector< vector< Point> > plateBlobs;
      vector< vector< Point> > plateBlobsInv;
      double thresholds[] = { 10, 40, 80, 120, 160, 200, 240 };
      const int num_thresholds = 7;
      int numValidChars = 0;
      Mat rotated;
      for (int i = 0; i < rects.size(); i++) {
        numValidChars = 0;
        RotatedRect PlateRect = rects[i];
        Size rect_size = PlateRect.size;

        // get the rotation matrix
        Mat M = getRotationMatrix2D(PlateRect.center, PlateRect.angle, 1.0);
        // perform the affine transformation
        warpAffine(frame_gray_cp, rotated, M, frame_gray_cp.size(), INTER_CUBIC);
        //Crop area around candidate plate
        getRectSubPix(rotated, rect_size, PlateRect.center, img_crop);

        if (config->debugDetector && config->debugShowImages) {
          imshow("Tilt Correction", img_crop);
          waitKey(0);
        }

        for (int z = 0; z < num_thresholds; z++) {

          cv::threshold(img_crop, img_crop_th, thresholds[z], 255, cv::THRESH_BINARY);
          cv::threshold(img_crop, img_crop_th_inv, thresholds[z], 255, cv::THRESH_BINARY_INV);

          findContours(img_crop_th,
                  plateBlobs, // a vector of contours
                  CV_RETR_LIST, // retrieve the contour list
                  CV_CHAIN_APPROX_NONE); // all pixels of each contour

          findContours(img_crop_th_inv,
                  plateBlobsInv, // a vector of contours
                  CV_RETR_LIST, // retrieve the contour list
                  CV_CHAIN_APPROX_NONE); // all pixels of each contour

          int numBlobs = plateBlobs.size();
          int numBlobsInv = plateBlobsInv.size();

          float idealAspect = config->avgCharWidthMM / config->avgCharHeightMM;
          for (int j = 0; j < numBlobs; j++) {
            cv::Rect r0 = cv::boundingRect(cv::Mat(plateBlobs[j]));

            if (ValidateCharAspect(r0, idealAspect))
              numValidChars++;
          }

          for (int j = 0; j < numBlobsInv; j++) {
            cv::Rect r0 = cv::boundingRect(cv::Mat(plateBlobsInv[j]));
            if (ValidateCharAspect(r0, idealAspect))
              numValidChars++;
          }

        }
        //If there are too many or too few, it might not be a true plate
        //if (numBlobs < 3 || numBlobs > 50) continue;
        if (numValidChars < 4 || numValidChars > 50) continue;

        PlateRegion PlateReg;

        // Ensure that the rectangle isn't < 0 or > maxWidth/Height
        Rect bounding_rect = PlateRect.boundingRect();
        PlateReg.rect = expandRect(bounding_rect, 0, 0, frame.cols, frame.rows);


        detectedRegions.push_back(PlateReg);

      }

    }
    
    return detectedRegions;
  }
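ValidateCharAspect() is not included in this excerpt. A hedged sketch of the kind of check it performs, comparing a blob's width/height ratio against the ideal character aspect derived from the config; the thresholds below are assumptions, not the project's real values:

static bool validateCharAspectSketch(const cv::Rect& r, float idealAspect)
{
  if (r.height == 0)
    return false;
  float aspect = (float) r.width / (float) r.height;
  // relative deviation from the ideal width/height ratio
  float error = fabs(aspect - idealAspect) / idealAspect;
  // accept blobs close to the ideal aspect that are at least a few pixels in size
  return error < 0.6f && r.width >= 2 && r.height >= 10;
}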
Code Example #3
bool WallFollowing::Iterate()
{
	float angle = 0.0, coefficient_affichage = 45.0/*8.*/;
	float distance = 0.0, taille_pointeur;
	m_iterations++;

	m_map = Mat::zeros(LARGEUR_MAPPING, HAUTEUR_MAPPING, CV_8UC3);
	m_map = Scalar(255, 255, 255);
	
	std::vector<Point2f> points_obstacles;
	
	for(list<pair<float, float> >::const_iterator it = m_obstacles.begin() ; it != m_obstacles.end() ; it ++)
	{
		angle = it->first; 		// key
		distance = it->second; 	// value
		
		//if(distance < 5.)
		if(distance < 0.25 || distance > 2.)
			continue;
		
		float x_obstacle = 0;
		float y_obstacle = 0;

		y_obstacle -= distance * cos(angle * M_PI / 180.0);
		x_obstacle += distance * sin(angle * M_PI / 180.0);
		
		// Angle filtering
		double angle_degre = MOOSRad2Deg(MOOS_ANGLE_WRAP(MOOSDeg2Rad(angle)));
		if(angle_degre > -160. && angle_degre < -70.)
		{
			points_obstacles.push_back(Point2f(x_obstacle, y_obstacle));
			
			x_obstacle *= -coefficient_affichage;
			y_obstacle *= coefficient_affichage;
			
			x_obstacle += LARGEUR_MAPPING / 2.0;
			y_obstacle += HAUTEUR_MAPPING / 2.0;
			
			// Point markers
			taille_pointeur = 3;
			line(m_map, Point(x_obstacle, y_obstacle - taille_pointeur), Point(x_obstacle, y_obstacle + taille_pointeur), Scalar(161, 149, 104), 1, 8, 0);
			line(m_map, Point(x_obstacle - taille_pointeur, y_obstacle), Point(x_obstacle + taille_pointeur, y_obstacle), Scalar(161, 149, 104), 1, 8, 0);
		}
	}
	
	int echelle_ligne = 150;
	Mat m(points_obstacles);
	
	if(!points_obstacles.empty())
	{
		Vec4f resultat_regression;
		
		try
		{
			// Method 1
			fitLine(m, resultat_regression, CV_DIST_L2, 0, 0.01, 0.01);
			float x0 = resultat_regression[2];
			float y0 = resultat_regression[3];
			float vx = resultat_regression[0];
			float vy = resultat_regression[1];
			// Draw the approximation
			line(m_map, 
					Point((LARGEUR_MAPPING / 2.0) - (x0 * coefficient_affichage) + (vx * echelle_ligne), 
							(HAUTEUR_MAPPING / 2.0) + (y0 * coefficient_affichage) - (vy * echelle_ligne)),
					Point((LARGEUR_MAPPING / 2.0) - (x0 * coefficient_affichage) - (vx * echelle_ligne), 
							(HAUTEUR_MAPPING / 2.0) + (y0 * coefficient_affichage) + (vy * echelle_ligne)),
					Scalar(29, 133, 217), 1, 8, 0); // Orange
					
			// Method 2
			fitLine(m, resultat_regression, CV_DIST_L12, 0, 0.01, 0.01);
			x0 = resultat_regression[2];
			y0 = resultat_regression[3];
			vx = resultat_regression[0];
			vy = resultat_regression[1];
			// Draw the approximation
			line(m_map, 
					Point((LARGEUR_MAPPING / 2.0) - (x0 * coefficient_affichage) + (vx * echelle_ligne), 
							(HAUTEUR_MAPPING / 2.0) + (y0 * coefficient_affichage) - (vy * echelle_ligne)),
					Point((LARGEUR_MAPPING / 2.0) - (x0 * coefficient_affichage) - (vx * echelle_ligne), 
							(HAUTEUR_MAPPING / 2.0) + (y0 * coefficient_affichage) + (vy * echelle_ligne)),
					Scalar(77, 130, 27), 1, 8, 0); // Green
					
			// Method 3
			fitLine(m, resultat_regression, CV_DIST_L1, 0, 0.01, 0.01);
			x0 = resultat_regression[2];
			y0 = resultat_regression[3];
			vx = resultat_regression[0];
			vy = resultat_regression[1];
			// Draw the approximation
			line(m_map, 
					Point((LARGEUR_MAPPING / 2.0) - (x0 * coefficient_affichage) + (vx * echelle_ligne), 
							(HAUTEUR_MAPPING / 2.0) + (y0 * coefficient_affichage) - (vy * echelle_ligne)),
					Point((LARGEUR_MAPPING / 2.0) - (x0 * coefficient_affichage) - (vx * echelle_ligne), 
							(HAUTEUR_MAPPING / 2.0) + (y0 * coefficient_affichage) + (vy * echelle_ligne)),
					Scalar(13, 13, 188), 1, 8, 0); // Red
			// Draw the origin
			taille_pointeur = 6;
			line(m_map, 
					Point((LARGEUR_MAPPING / 2.0) - (x0 * coefficient_affichage), 
							(HAUTEUR_MAPPING / 2.0) + (y0 * coefficient_affichage) - taille_pointeur),
					Point((LARGEUR_MAPPING / 2.0) - (x0 * coefficient_affichage), 
							(HAUTEUR_MAPPING / 2.0) + (y0 * coefficient_affichage) + taille_pointeur),
					Scalar(9, 0, 130), 2, 8, 0);
			line(m_map, 
					Point((LARGEUR_MAPPING / 2.0) - (x0 * coefficient_affichage) - taille_pointeur, 
							(HAUTEUR_MAPPING / 2.0) + (y0 * coefficient_affichage)),
					Point((LARGEUR_MAPPING / 2.0) - (x0 * coefficient_affichage) + taille_pointeur, 
							(HAUTEUR_MAPPING / 2.0) + (y0 * coefficient_affichage)),
					Scalar(9, 0, 130), 2, 8, 0);
			
			angle = atan2(vy, vx);
			cout << "X0 : " << x0 << "\t\tY0 : " << y0 << endl;
			distance = fabs(-vy*x0 + vx*y0);
			cout << "Angle : " << angle * 180.0 / M_PI << "\t\tDist : " << distance << endl;
			m_Comms.Notify("DIST_MUR", distance);

			if(m_regulate)
				computeAndSendCommands(angle, distance);
		}
		
		catch(const Exception& e) { }	// ignore OpenCV errors for this frame
		
		// Rotation
		Point2f src_center(m_map.cols/2.0F, m_map.rows/2.0F);
		Mat rot_mat = getRotationMatrix2D(src_center, 180.0, 1.0);
		warpAffine(m_map, m_map, rot_mat, m_map.size());
	}
		
	// Draw the circular range scales
	char texte[50];
	float taille_texte = 0.4;
	Scalar couleur_echelles(220, 220, 220);
	for(float j = 1.0 ; j < 30.0 ; j ++)
	{
		float rayon = coefficient_affichage * j;
		circle(m_map, Point(LARGEUR_MAPPING / 2, HAUTEUR_MAPPING / 2), rayon, couleur_echelles, 1);
		sprintf(texte, "%dm", (int)j);
		rayon *= cos(M_PI / 4.0);
		putText(m_map, string(texte), Point((LARGEUR_MAPPING / 2) + rayon, (HAUTEUR_MAPPING / 2) - rayon), FONT_HERSHEY_SIMPLEX, taille_texte, couleur_echelles);
	}
	
	// Draw the origin
	taille_pointeur = 20;
	line(m_map, Point(LARGEUR_MAPPING / 2, HAUTEUR_MAPPING / 2 - taille_pointeur * 1.5), Point(LARGEUR_MAPPING / 2, HAUTEUR_MAPPING / 2 + taille_pointeur), Scalar(150, 150, 150), 1, 8, 0);
	line(m_map, Point(LARGEUR_MAPPING / 2 - taille_pointeur, HAUTEUR_MAPPING / 2), Point(LARGEUR_MAPPING / 2 + taille_pointeur, HAUTEUR_MAPPING / 2), Scalar(150, 150, 150), 1, 8, 0);
	
	// Location of the data points
	line(m_map, Point(0, (HAUTEUR_MAPPING / 2) + HAUTEUR_MAPPING * sin(MOOSDeg2Rad(-70.))), Point(LARGEUR_MAPPING / 2, HAUTEUR_MAPPING / 2), Scalar(150, 150, 150), 1, 8, 0);
	line(m_map, Point(0, (HAUTEUR_MAPPING / 2) - HAUTEUR_MAPPING * sin(MOOSDeg2Rad(-160.))), Point(LARGEUR_MAPPING / 2, HAUTEUR_MAPPING / 2), Scalar(150, 150, 150), 1, 8, 0);
	
	// Display information
	if(!points_obstacles.empty())
	{
		sprintf(texte, "Dist = %.2fm   Angle = %.2f", distance, angle);
		putText(m_map, string(texte), Point(10, HAUTEUR_MAPPING - 10), FONT_HERSHEY_SIMPLEX, taille_texte, Scalar(50, 50, 50));
	}
	
	imshow("Mapping", m_map);
	waitKey(1);
	
	return(true);
}
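The wall distance above relies on fitLine() returning a unit direction (vx, vy) and a point (x0, y0) on the line, so fabs(-vy*x0 + vx*y0) is the perpendicular distance from the origin to that line. The same formula for an arbitrary point, as a self-contained sketch (not part of the original class):

float distancePointToLine(Point2f p, float x0, float y0, float vx, float vy)
{
	// 2D cross product of the unit direction with the vector from the line point to p
	return fabs(vx * (p.y - y0) - vy * (p.x - x0));
}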
Code Example #4
void Calibration::applyRotation(){
	Mat aux;
	warpAffine(inputImage, aux, getRotationMatrix2D(Point2f(inputImage.cols/2, inputImage.rows/2), rotation, 1.0), inputImage.size());
	inputImage = aux;
}
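For reference, the same rotate-about-center operation as a standalone helper that leaves its input untouched; a minimal sketch assuming the usual OpenCV imgproc API:

Mat rotateAboutCenter(const Mat& src, double degrees)
{
	// positive angles rotate counter-clockwise, matching getRotationMatrix2D
	Point2f center(src.cols / 2.0f, src.rows / 2.0f);
	Mat dst;
	warpAffine(src, dst, getRotationMatrix2D(center, degrees, 1.0), src.size());
	return dst;
}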
Code Example #5
File: lpr_v4.cpp  Project: jefby/LPR_Project
int main(int argc,char**argv)
{
	int scale = 1;
	int delta = 0;
	int ddepth = CV_16S;
//	check the number of parameters
	if(argc !=2)
	{	
		printf("please follow like this\n");
		printf("exe[] img_name\n");
		return -1;
	}
//	read the image
	img_src = imread(argv[1]);
//	check whether read operation is ok or not 
	if(img_src.data == NULL)
	{	
		printf("could not open or find the image!\n");
		return -1;
	}
//	use Gaussian blur to reduce the noise
	GaussianBlur(img_src,img_src,Size(3,3),0,0,BORDER_DEFAULT);

//	convert source image to gray image
	cvtColor(img_src,img_gray,CV_BGR2GRAY);
//	sobel in x direction
	Sobel(img_gray,grad_x,ddepth,1,0,3,scale,delta,BORDER_DEFAULT);
	convertScaleAbs(grad_x,abs_grad_x);

//	use sobel in y direction
	Sobel(img_gray,grad_y,ddepth,0,1,3,scale,delta,BORDER_DEFAULT);
	convertScaleAbs(grad_y,abs_grad_y);
//	combine the weighted x and y gradients
	addWeighted(abs_grad_x,0.5,abs_grad_y,0.5,0,grad);

//	binarize; the threshold value is selected by the OTSU method
	threshold(grad,img_bin_thre,0,255,THRESH_BINARY|THRESH_OTSU);
//	smooth the binary image with repeated morphological open and close
	Mat element = getStructuringElement(MORPH_RECT,Size(2*1+1,2*1+1),Point(-1,-1));
	for(int i = 0;i < 3; i++)
	{
		morphologyEx(img_bin_thre,img_bin_thre,MORPH_OPEN,element);
		morphologyEx(img_bin_thre,img_bin_thre,MORPH_CLOSE,element);
	}
//	original method; this works worse than morphologyEx
	
//	dilate(img_bin_thre,img_bin_thre,element);
//	namedWindow("dilated",CV_WINDOW_NORMAL);
//	imshow("dilated",img_bin_thre);
//	erode(img_bin_thre,img_bin_thre,element);
//	namedWindow("erode",CV_WINDOW_NORMAL);
//	imshow("erode",img_bin_thre);

//	find contours; this must be run on the binarized image
//	define 
	vector<Vec4i> hierarchy;
	vector< vector<Point> >contours;
//	use function
	findContours(img_bin_thre,contours,hierarchy,CV_RETR_CCOMP,CV_CHAIN_APPROX_SIMPLE,Point(0,0));
//	adjust the min and max area values to the actual input
	int min_area = 100000;
	int max_area = 300000;
	Rect mRect;
	int tempArea;
//	define the color drawing contour
	Scalar color = Scalar(255,255,0);
	Mat drawing = Mat::zeros(img_bin_thre.size(),CV_8UC1);
	for(int i = 0;i < contours.size();i++)
	{
//	get the bounding rectangle of the contour
		mRect = boundingRect(contours[i]);
//	compute the area of mRect
		tempArea = mRect.height * mRect.width;
//	for debug
//		printf("tempArea.height:%d\ttempArea.width:%d\ttempArea.area=%d\n",mRect.height,mRect.width,tempArea);
//	keep regions whose aspect ratio and area meet the requirements
		if(((double)mRect.width/(double)mRect.height) > 2.0 && (tempArea > min_area) && ((double)mRect.width/(double)mRect.height < 4) && (tempArea < max_area))
//	draw contours
		{
			drawContours(drawing,contours,i,color,2,8,hierarchy);
//	two crops are taken here: one from the thresholded image and one from the
//	original gray image; the thresholded crop alone is not enough for the later steps
			getRectSubPix(img_bin_thre,Size(mRect.width,mRect.height),Point(mRect.x+mRect.width/2,mRect.y\
					 +mRect.height/2),img_get_rect);	
			getRectSubPix(img_gray,Size(mRect.width,mRect.height),Point(mRect.x+mRect.width/2,mRect.y\
					 +mRect.height/2),img_get_rect_new);
		}
	}
	if(img_get_rect.data == NULL)
	{
		printf("img_get rect is null\n");
		return -1;
	}
	if(img_get_rect_new.data == NULL)
	{
		printf("img_get_rect_new is null!\n");
		return -1;
	}

//	use the HoughLinesP

//	define lines
	vector<Vec4i> lines;
//	Mat color_dst;
//	img_lines = img_get_rect.clone();
	cvtColor(img_get_rect,img_lines,CV_GRAY2BGR);
//	detect line segments in img_get_rect
	HoughLinesP(img_get_rect,lines,1,CV_PI/180,200,200,10);
	printf("lines.size()=%d\n",lines.size());
	
	int distance = 0;
//	int theta;
	double temp_slope = 0,slope;
	int res_x1,res_y1,res_x2,res_y2;
//	define a container that counts how often each slope occurs
//	vector <int,int> ivect;//first is the count for this slope, next is the longest distance
//	map <double,ivect> imap;
	int delta_x,delta_y;

	
	std::vector <dou_int> ivec;
	std::vector <dou_int>::iterator iter;

	for(size_t i = 0;i < lines.size();i++)
	{
		Vec4i l = lines[i];
		line(img_lines,Point(l[0],l[1]),Point(l[2],l[3]),Scalar(0,0,255),3);
//	find tilt angle
		if(l[2]-l[0] == 0)
			;	// vertical line: slope undefined, skip it
		else
		{
//	compute this line's slope
//	delta_y / delta_x
			delta_y = (l[3]-l[1]);
			delta_x = (l[2]-l[0]);
			
			distance = delta_y*delta_y+delta_x*delta_x;
			temp_slope = ((double)delta_y)/((double)(delta_x));
			printf("in i=%d,delta_y=%d,delta_x=%d\n",i,delta_y,delta_x);

			for(iter = ivec.begin();iter != ivec.end();iter++)
			{
//	same slope bucket: increment the count and update the max length
				if(abs(iter->slope - temp_slope) < (double)0.01)
				{
					iter->num++;
					if(iter->maxlength < distance)
					{
						iter->maxlength = distance;
						iter->v0 = Point(l[0],l[1]);
						iter->v1 = Point(l[2],l[3]);
					}
					break;
				}
			}
//	slope not seen before: add a new bucket
			if(iter == ivec.end())
			{
				ivec.push_back(dou_int(temp_slope,distance,1,Point(l[0],l[1]),Point(l[2],l[3])));	
			}
		}
	}
	int max = 0;
	int j = 0;
	int index = 0;
	dou_int res;

	for(j=0,iter = ivec.begin();iter != ivec.end();j++,iter++)
	{
		if(iter->num > max)
		{
			max = iter->num;
			index = j;
		}
	}
	printf("index is %d\n",index);
	for(j=0,iter = ivec.begin();iter != ivec.end() && j <= index;j++,iter++)
	{
		if(j == index)
		{
			res = dou_int(iter->slope,iter->maxlength,iter->num,iter->v0,iter->v1);
			printf("slope is %f\n",iter->slope);
			break;
		}
	}
//	draw the dominant tilt line
	line(img_lines,res.v0,res.v1,Scalar(255,255,0),1);


	Mat img_lines_out;
	Point center = Point(img_lines.cols/2,img_lines.rows/2);
	double angle =(double)(180/CV_PI)*(double)atan(res.slope);
	printf("angle is :%f\n",angle);
	Mat rot_mat = getRotationMatrix2D(center,angle,1.0);
	warpAffine(img_lines,img_lines_out,rot_mat,img_lines.size());
	Mat img_rect;
	warpAffine(img_get_rect_new,img_rect,rot_mat,img_get_rect_new.size());

	cvtColor(img_lines_out,img_lines_out,CV_BGR2GRAY);
	printf("img_clip's channel is:%d\n",img_lines_out.channels());
	threshold(img_lines_out,img_lines_out,10,255,THRESH_BINARY | THRESH_OTSU);

	Mat img_clip;
	int up,down;

	if(-1 != remove_Border_Vertical(img_lines_out,up,down))
	{
		printf("up=%d,down=%d\n",up,down);
		getRectSubPix(img_lines_out,Size(img_lines_out.cols,down-up),Point(img_lines_out.cols/2,up+(down-up)/2),img_clip);
		namedWindow("line_clip",CV_WINDOW_NORMAL);
		imshow("line_clip",img_clip);
		getRectSubPix(img_rect,Size(img_rect.cols,down-up),Point(img_rect.cols/2,up+(down-up)/2),img_clip);
		namedWindow("new_clip",CV_WINDOW_NORMAL);
		imshow("new_clip",img_clip);
	}
	else
	{
		printf("remove_Border_Vertical failed, img_clip is empty\n");
		return -1;
	}
//	binarize with OTSU
	threshold(img_clip,img_clip,10,255,THRESH_BINARY | THRESH_OTSU);
	namedWindow("newrect",CV_WINDOW_NORMAL);
	imshow("newrect",img_clip);

	parting_char(img_clip);
	
	waitKey(0);
	return 0;
}
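The dou_int record used above is not defined in this listing. A definition consistent with how it is constructed and read (a slope bucket with the squared length of its longest segment, an occurrence count, and that segment's endpoints) would plausibly be:

//	assumption: reconstructed from usage, not the original declaration
struct dou_int
{
	double slope;	// slope shared by the lines in this bucket
	int maxlength;	// squared length of the longest such line
	int num;	// how many Hough segments fell into this bucket
	Point v0, v1;	// endpoints of the longest segment
	dou_int() : slope(0), maxlength(0), num(0) {}
	dou_int(double s, int len, int n, Point a, Point b)
		: slope(s), maxlength(len), num(n), v0(a), v1(b) {}
};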
Code Example #6
File: FRID.cpp  Project: Nicatio/Nicatio
FRID::FRID(
		InputArray						src,
		int								type)
{

	uint _order=0;

	Mat _src = src.getMat();

	int w = _src.cols;
	int h = _src.rows;
	int sz;

	if (type == FRID_16)
		_order = 17;
	else if (type == FRID_24)
		_order = 25;
	else if (type == FRID_16F)
		_order = 17;
	else if (type == FRID_16F2)
		_order = 33;

	_type = type;

	//orderMap = new Mat[_order];

	if (type == FRID_16) {
		w4=w-4;
		h4=h-4;
		sz=w4*h4;
		for(uint i=0; i<_order; i++)
			Mat(_src, Rect(order_FRID16 [i][0], order_FRID16 [i][1], w4, h4)).copyTo(orderMap[i]);

	} else if (type == FRID_24) {
		w4=w-8;
		h4=h-8;
		sz=w4*h4;
		for(uint i=0; i<_order; i++)
			Mat(_src, Rect(order_FRID24 [i][0], order_FRID24 [i][1], w4, h4)).copyTo(orderMap[i]);
	} else if (type == FRID_16F) {
		//w4=w-4;
		//h4=h-4;
		w4=w;
		h4=h;
		sz=w4*h4;
		for(uint i=0; i<_order; i++) {

	    	Mat resampled;
			Mat affinematrix = (Mat_<float>(2,3) << 1, 0, order_FRID16F_cos[i]*7, 0, 1, order_FRID16F_sin[i]*7);// Mat(2,3,CV_32F,affinematrix_);
			//warpAffine(_src, resampled, affinematrix, _src.size(), INTER_LINEAR);
			warpAffine(_src, orderMap[i], affinematrix, _src.size(), INTER_LINEAR);
			//Mat(resampled, Rect(2, 2, w4, h4)).copyTo(orderMap[i]);

			stringstream d;
			d<<"br_"<<i<<".bmp";
			imwrite(d.str().c_str(),orderMap[i]);
		}
	} else if (type == FRID_16F2) {
		w4=w-4;
		h4=h-4;
		sz=w4*h4;
		for(uint i=0; i<16; i++) {

	    	Mat resampled;
			Mat affinematrix = (Mat_<float>(2,3) << 1, 0, order_FRID16F_cos[i]*3, 0, 1, order_FRID16F_sin[i]*3);// Mat(2,3,CV_32F,affinematrix_);
			warpAffine(_src, resampled, affinematrix, _src.size(), INTER_LINEAR);
			Mat(resampled, Rect(2, 2, w4, h4)).copyTo(orderMap[i]);
			//stringstream d;
			//d<<"br_"<<i<<".bmp";
			//imwrite(d.str().c_str(),orderMap[i]);
		}
		for(uint i=16; i<33; i++) {

			Mat resampled;
			Mat affinematrix = (Mat_<float>(2,3) << 1, 0, order_FRID16F_cos[i]*11, 0, 1, order_FRID16F_sin[i]*11);// Mat(2,3,CV_32F,affinematrix_);
			warpAffine(_src, resampled, affinematrix, _src.size(), INTER_LINEAR);
			Mat(resampled, Rect(2, 2, w4, h4)).copyTo(orderMap[i]);
			//stringstream d;
			//d<<"br_"<<i<<".bmp";
			//imwrite(d.str().c_str(),orderMap[i]);
		}
	}
}
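The order_FRID16F_cos/order_FRID16F_sin tables are defined elsewhere in the project; since they are scaled by a ring radius (7, 3 or 11) and used as a pure translation, they presumably hold unit-circle samples. A hedged guess at how such tables could be filled for 16 directions (illustrative, not the project's actual data):

static void fillRingTables(double* cosTab, double* sinTab, int n)
{
	// n evenly spaced directions around the unit circle
	for (int i = 0; i < n; i++) {
		cosTab[i] = cos(2.0 * CV_PI * i / n);
		sinTab[i] = sin(2.0 * CV_PI * i / n);
	}
}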
Code Example #7
File: aff_trans.cpp  Project: 007Indian/opencv
void AffineTransformerImpl::warpImage(InputArray transformingImage, OutputArray output,
                                      int flags, int borderMode, const Scalar& borderValue) const
{
    CV_Assert(!affineMat.empty());
    warpAffine(transformingImage, output, affineMat, transformingImage.getMat().size(), flags, borderMode, borderValue);
}
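warpImage() only applies a transform estimated earlier, hence the assertion that affineMat is non-empty. Typical usage goes through the public shape-transformer interface; a sketch based on the OpenCV shape module API, where shape1/shape2 are 1xN CV_32FC2 point sets and matches pairs their indices:

void warpWithAffineTransformer(const Mat& img, const Mat& shape1, const Mat& shape2,
                               std::vector<DMatch>& matches, Mat& warped)
{
    Ptr<AffineTransformer> transformer = createAffineTransformer(true);
    transformer->estimateTransformation(shape1, shape2, matches); // fills affineMat
    transformer->warpImage(img, warped); // invokes the method shown above
}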
Code Example #8
File: mapstitch.cpp  Project: Hoopsel/aau_multi_robot
StitchedMap::StitchedMap(Mat &img1, Mat &img2, int max_trans, int max_rotation, float max_pairwise_distance, cv::Mat oldTransform)
{
  // load images, TODO: check that they're grayscale
  image1 = img1.clone();
  image2 = img2.clone();
  if(image1.size != image2.size)
      cv::resize(image2,image2,image1.size());
  works = true;
  // create feature detector set.
  OrbFeatureDetector detector;
  OrbDescriptorExtractor dexc;
  BFMatcher dematc(NORM_HAMMING, false);

  // 1. extract keypoints
  detector.detect(image1, kpv1);
  detector.detect(image2, kpv2);

  // 2. extract descriptors
  dexc.compute(image1, kpv1, dscv1);
  dexc.compute(image2, kpv2, dscv2);

  // 3. match keypoints
  if(kpv1.size() == 0|| kpv2.size() == 0)
  {
      ROS_WARN("No KPV");
      works = false;
      return;
  }
//  ROS_INFO("Kpv1:%i entries\t Kpv2:%i entries",kpv1.size(),kpv2.size());
  dematc.match(dscv1, dscv2, matches);

  // 4. find matching point pairs with same distance in both images
  for (size_t i=0; i<matches.size(); i++) {
    KeyPoint a1 = kpv1[matches[i].queryIdx],
             b1 = kpv2[matches[i].trainIdx];

    if (matches[i].distance > 30)
      continue;

    for (size_t j=0; j<matches.size(); j++) {
      KeyPoint a2 = kpv1[matches[j].queryIdx],
               b2 = kpv2[matches[j].trainIdx];

      if (matches[j].distance > 30)
        continue;

      if ( fabs(norm(a1.pt-a2.pt) - norm(b1.pt-b2.pt)) > max_pairwise_distance ||
           fabs(norm(a1.pt-a2.pt) - norm(b1.pt-b2.pt)) == 0)
        continue;


      coord1.push_back(a1.pt);
      coord1.push_back(a2.pt);
      coord2.push_back(b1.pt);
      coord2.push_back(b2.pt);


      fil1.push_back(a1);
      fil1.push_back(a2);
      fil2.push_back(b1);
      fil2.push_back(b2);
    }
  }

   // cv::imwrite("img1.pgm",image1);
   // cv::imwrite("img2.pgm",image2);
  // 5. find homography
 // ROS_INFO("Found %i matches",matches.size());
  if(coord1.size() < 1)
  {
      ROS_WARN("Problem by transforming map,this migth just an start up problem \n Coord1:%lu",coord1.size());
      works = false;
      return;
  }

  ROS_DEBUG("Compute estimateRigid");
  H = estimateRigidTransform(coord2, coord1,false);
  if(H.empty())
  {
      ROS_WARN("H contain no data, cannot find valid transformation");
      works = false;
      return;
  }
  //ROS_DEBUG("H: size:%lu|empty:%i",H.size,H.empty());

  rotation = 180./M_PI*atan2(H.at<double>(0,1),H.at<double>(1,1));
  transx   = H.at<double>(0,2);
  transy   = H.at<double>(1,2);
  scalex   = sqrt(pow(H.at<double>(0,0),2)+pow(H.at<double>(0,1),2));
  scaley   = sqrt(pow(H.at<double>(1,0),2)+pow(H.at<double>(1,1),2));
  ROS_DEBUG("H: transx:%f|transy%f|scalex:%f,scaley:%f|rotation:%f",transx,transy,scalex,scaley,rotation);
  //first_x_trans = transx;
  //first_y_trans = transy;
  float scale_change = 0.05;

  if(scalex > 1 + scale_change || scaley > 1 + scale_change)
  {
      ROS_WARN("Map should not scale change is to lagre");
      works = false;
      return;
  }
  if(scalex < 1 - scale_change|| scaley < 1 - scale_change)
  {
      ROS_WARN("Map should not scale change is to small");
      works = false;
      return;
  }
  if(max_trans != -1)
  {
      if(transx > max_trans || transy > max_trans)
      {
          ROS_WARN("Map should not trans so strong");
          works = false;
          return;
      }
  }
  if(max_rotation != -1)
  {
      if(rotation > max_rotation || rotation < -1 * max_rotation)
      {
          ROS_WARN("Map should not rotate so strong");
          works = false;
          return;
      }
  }
  cur_trans = H;
  ROS_DEBUG("Finished estimateRigid");
  //evaluate the transformation
  if(works)
  {
      Mat tmp (image2.size(),image2.type());
      Mat thr;

      Mat image(image2.size(), image2.type());
      warpAffine(image2,image,H,image.size());
      addWeighted(image,.5,image1,.5,0.0,image);

      warpAffine(image2,tmp,H,image2.size());
      addWeighted(tmp,.5,image1,.5,0.0,image);
      //cvtColor(image1,tmp,CV_BGR2GRAY);
      threshold(tmp,thr,0,255,THRESH_BINARY);
      Mat K=(Mat_<uchar>(5,5)<<   0,  0,  1,  0,  0,\
                                     0,  0,  1,  0,  0,\
                                     1,  1,  1,  1,  1,\
                                     0,  0,  1,  0,  0,\
                                     0,  0,  1,  0,  0);

      erode(thr,thr,K,Point(-1,-1),1,BORDER_CONSTANT);
         vector< vector <Point> > contours; // Vector for storing contour
         vector< Vec4i > hierarchy;
         findContours( thr, contours, hierarchy,CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE );
         for( int i = 0; i< contours.size(); i++ )
         {
            Rect r= boundingRect(contours[i]);
            rects2.push_back(r);
            rectangle(tmp,Point(r.x,r.y), Point(r.x+r.width,r.y+r.height), Scalar(0,0,255),2,8,0);
          }//Opened contour

         Mat thr1;
         //cvtColor(image1,tmp,CV_BGR2GRAY);
         threshold(image1,thr1,0,255,THRESH_BINARY);
         erode(thr1,thr1,K,Point(-1,-1),1,BORDER_CONSTANT);
            vector< vector <Point> > contours1; // Vector for storing contour
            vector< Vec4i > hierarchy1;
            findContours( thr1, contours1, hierarchy1,CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE );

            for( int i = 0; i< contours1.size(); i++ ){
             Rect r= boundingRect(contours1[i]);
             rects1.push_back(r);
             //rectangle(image1,Point(r.x,r.y), Point(r.x+r.width,r.y+r.height), Scalar(0,0,255),2,8,0);
             }//Opened contour
         vector<Rect> near1,near2;
         int offset = 5;
         for(int i = 0; i < rects1.size(); i++)
         {
             Rect ri = rects1.at(i);
             if(ri.x == 1 && ri.y == 1)
                 continue;
             for(int j = 0; j < rects2.size();j++)
             {
                 Rect rj = rects2.at(j);
                 if(ri.x < rj.x + offset && ri.x > rj.x -offset)
                 {
                     if(ri.y < rj.y + offset && ri.y > rj.y -offset)
                     {
                         near1.push_back(ri);
                         near2.push_back(rj);
                     }
                 }
             }
         }
         double eudis = 0;
         double disX,disY;
         for(int i = 0; i < near1.size(); i++)
         {
             Rect ri = near1.at(i);
             Rect rj = near2.at(i);
             disX = ri.x - rj.x;
             disY = ri.y - rj.y;
             if(disX < 0)
                 disX = disX * (-1);
             if(disY < 0)
                 disY = disY * (-1);
             eudis += sqrt((disX*disX)+(disY*disY));
         }
         if(near1.size() < 2)
             eudis = -1;
         else
             eudis = eudis / near1.size();
         ROS_DEBUG("EudisNew:%f\t near1Size:%lu:\toldTran:%i",eudis,near1.size(),oldTransform.empty());
         //calc EudisOld

         Mat thr3,tmp1;
         //cvtColor(image1,tmp,CV_BGR2GRAY);
         if(oldTransform.empty())
             return;
         warpAffine(image2,tmp1,oldTransform,image2.size());
         threshold(tmp1,thr3,0,255,THRESH_BINARY);

         erode(thr3,thr3,K,Point(-1,-1),1,BORDER_CONSTANT);
            vector< vector <Point> > contours3; // Vector for storing contour
            vector< Vec4i > hierarchy3;
            findContours( thr3, contours3, hierarchy3,CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE );

            for( int i = 0; i< contours3.size(); i++ ){
             Rect r= boundingRect(contours3[i]);
             rects3.push_back(r);
            }//Opened contour
            near1.clear();
            near2.clear();
            for(int i = 0; i < rects1.size(); i++)
            {
                Rect ri = rects1.at(i);
                if(ri.x == 1 && ri.y == 1)
                    continue;
                for(int j = 0; j < rects3.size();j++)
                {
                    Rect rj = rects3.at(j);
                    if(ri.x < rj.x + offset && ri.x > rj.x -offset)
                    {
                        if(ri.y < rj.y + offset && ri.y > rj.y -offset)
                        {
                            near1.push_back(ri);
                            near2.push_back(rj);
                        }
                    }
                }
            }
            double eudisOLD = 0;
            for(int i = 0; i < near1.size(); i++)
            {
                Rect ri = near1.at(i);
                Rect rj = near2.at(i);
                disX = ri.x - rj.x;
                disY = ri.y - rj.y;
                if(disX < 0)
                    disX = disX * (-1);
                if(disY < 0)
                    disY = disY * (-1);
                eudisOLD += sqrt((disX*disX)+(disY*disY));
            }
            if(near1.size() < 2)
                eudisOLD = -1;
            else
                eudisOLD = eudisOLD / near1.size();
            //if(eudisOLD < eudis)
               // works = false;
            ROS_WARN("EudisOLD:%f\t near1Size:%lu:|works:%i",eudis,near1.size(),works);
            //calc EudisOld
         /*  for(int i = 0; i < rects1.size(); i++)
           {
               Rect r = rects1.at(i);
               rectangle(image1,Point(r.x,r.y), Point(r.x+r.width,r.y+r.height), Scalar(0,0,255),2,8,0);
            }*/

  }
  return;
}
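The decomposition above reads the rotation from atan2(H(0,1), H(1,1)), the translation from the last column, and the scales from the column norms. As a sanity-check sketch (an addition, not part of the original file), the parameters can be recombined into a 2x3 matrix that should closely match H whenever the estimated transform is a true similarity:

Mat rebuildRigid(double rotationDeg, double tx, double ty, double scale)
{
  // rotationDeg follows the same sign convention as the code above
  double a = scale * cos(rotationDeg * M_PI / 180.0);
  double b = scale * sin(rotationDeg * M_PI / 180.0);
  return (Mat_<double>(2, 3) <<  a,  b, tx,
                                -b,  a, ty);
}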
Code Example #9
File: DetectRegions.cpp  Project: heliang219/Demos
vector<Plate> DetectRegions::segment(Mat input){
    vector<Plate> output;

    //convert image to gray
    Mat img_gray; //= *new Mat(input.size().width,input.size().height, CV_8UC1);
    cvtColor(input, img_gray, CV_BGR2GRAY);
    blur(img_gray, img_gray, Size(5,5));

    //Find vertical lines. Car plates have a high density of vertical lines
    Mat img_sobel;
    Sobel(img_gray, img_sobel, CV_8U, 1, 0, 3, 1, 0, BORDER_DEFAULT);
    if(showSteps)
        imshow("Sobel", img_sobel);

    //threshold image
    Mat img_threshold;
    threshold(img_sobel, img_threshold, 0, 255, CV_THRESH_OTSU+CV_THRESH_BINARY);
    if(showSteps)
        imshow("Threshold", img_threshold);

    //Morphologic operation: close
    Mat element = getStructuringElement(MORPH_RECT, Size(17, 3) );
    morphologyEx(img_threshold, img_threshold, CV_MOP_CLOSE, element);
    if(showSteps)
        imshow("Close", img_threshold);

    //Find contours of possibles plates
    vector< vector< Point> > contours;
    findContours(img_threshold,
            contours, // a vector of contours
            CV_RETR_EXTERNAL, // retrieve the external contours
            CV_CHAIN_APPROX_NONE); // all pixels of each contour

    //Start iterating over each contour found
    vector<vector<Point> >::iterator itc= contours.begin();
    vector<RotatedRect> rects;

    //Remove patches that are not inside the limits of aspect ratio and area.
    while (itc!=contours.end()) {
        //Create bounding rect of object
        RotatedRect mr= minAreaRect(Mat(*itc));
        if( !verifySizes(mr)){
            itc= contours.erase(itc);
        }else{
            ++itc;
            rects.push_back(mr);
        }
    }

    // Draw blue contours on a copy of the input image
    cv::Mat result;
    input.copyTo(result);
    cv::drawContours(result,contours,
            -1, // draw all contours
            cv::Scalar(255,0,0), // in blue
            1); // with a thickness of 1

    for(int i=0; i< rects.size(); i++){

        //For better rect cropping for each possible box
        //Make floodfill algorithm because the plate has white background
        //And then we can retrieve more clearly the contour box
        circle(result, rects[i].center, 3, Scalar(0,255,0), -1);
        //get the min size between width and height
        float minSize=(rects[i].size.width < rects[i].size.height)?rects[i].size.width:rects[i].size.height;
        minSize=minSize-minSize*0.5;
        //initialize rand and get NumSeeds points around the center for the floodfill algorithm
        srand ( time(NULL) );
        //Initialize floodfill parameters and variables
        Mat mask;
        mask.create(input.rows + 2, input.cols + 2, CV_8UC1);
        mask= Scalar::all(0);
        int loDiff = 30;
        int upDiff = 30;
        int connectivity = 4;
        int newMaskVal = 255;
        int NumSeeds = 10;
        Rect ccomp;
        int flags = connectivity + (newMaskVal << 8 ) + CV_FLOODFILL_FIXED_RANGE + CV_FLOODFILL_MASK_ONLY;
        for(int j=0; j<NumSeeds; j++){
            Point seed;
            seed.x=rects[i].center.x+rand()%(int)minSize-(minSize/2);
            seed.y=rects[i].center.y+rand()%(int)minSize-(minSize/2);
            circle(result, seed, 1, Scalar(0,255,255), -1);
            int area = floodFill(input, mask, seed, Scalar(255,0,0), &ccomp, Scalar(loDiff, loDiff, loDiff), Scalar(upDiff, upDiff, upDiff), flags);
        }
        if(showSteps)
            imshow("MASK", mask);
        //cvWaitKey(0);

        //Check new floodfill mask match for a correct patch.
        //Get all points detected for get Minimal rotated Rect
        vector<Point> pointsInterest;
        Mat_<uchar>::iterator itMask= mask.begin<uchar>();
        Mat_<uchar>::iterator end= mask.end<uchar>();
        for( ; itMask!=end; ++itMask)
            if(*itMask==255)
                pointsInterest.push_back(itMask.pos());

        RotatedRect minRect = minAreaRect(pointsInterest);

        if(verifySizes(minRect)){
            // rotated rectangle drawing 
            Point2f rect_points[4]; minRect.points( rect_points );
            for( int j = 0; j < 4; j++ )
                line( result, rect_points[j], rect_points[(j+1)%4], Scalar(0,0,255), 1, 8 );    

            //Get rotation matrix
            float r= (float)minRect.size.width / (float)minRect.size.height;
            float angle=minRect.angle;    
            if(r<1)
                angle=90+angle;
            Mat rotmat= getRotationMatrix2D(minRect.center, angle,1);

            //Create and rotate image
            Mat img_rotated;
            warpAffine(input, img_rotated, rotmat, input.size(), CV_INTER_CUBIC);

            //Crop image
            Size rect_size=minRect.size;
            if(r < 1)
                swap(rect_size.width, rect_size.height);
            Mat img_crop;
            getRectSubPix(img_rotated, rect_size, minRect.center, img_crop);
            
            Mat resultResized;
            resultResized.create(33,144, CV_8UC3);
            resize(img_crop, resultResized, resultResized.size(), 0, 0, INTER_CUBIC);
            //Equalize cropped image
            Mat grayResult;
            cvtColor(resultResized, grayResult, CV_BGR2GRAY); 
            blur(grayResult, grayResult, Size(3,3));
            grayResult=histeq(grayResult);
            if(saveRegions){ 
                stringstream ss(stringstream::in | stringstream::out);
                ss << "tmp/" << filename << "_" << i << ".jpg";
                imwrite(ss.str(), grayResult);
            }
            output.push_back(Plate(grayResult,minRect.boundingRect()));
        }
    }       
    if(showSteps) 
        imshow("Contours", result);

    return output;
}
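histeq() is referenced but not shown in this excerpt; in this pipeline it is essentially a histogram-equalization wrapper. A minimal grayscale stand-in (an assumption, not the original helper):

Mat histeqGray(const Mat& in)
{
    // spread the intensity histogram to normalize contrast
    Mat out;
    equalizeHist(in, out);
    return out;
}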
Code Example #10
File: preprocessFace.cpp  Project: izzoh/Fasby
// Create a grayscale face image that has a standard size and contrast & brightness.
// "srcImg" should be a copy of the whole color camera frame, so that it can draw the eye positions onto.
// If 'doLeftAndRightSeparately' is true, it will process left & right sides separately,
// so that if there is a strong light on one side but not the other, it will still look OK.
// Performs Face Preprocessing as a combination of:
//  - geometrical scaling, rotation and translation using Eye Detection,
//  - smoothing away image noise using a Bilateral Filter,
//  - standardize the brightness on both left and right sides of the face independently using separated Histogram Equalization,
//  - removal of background and hair using an Elliptical Mask.
// Returns either a preprocessed face square image or NULL (ie: couldn't detect the face and 2 eyes).
// If a face is found, it can store the rect coordinates into 'storeFaceRect' and 'storeLeftEye' & 'storeRightEye' if given,
// and eye search regions into 'searchedLeftEye' & 'searchedRightEye' if given.
Mat getPreprocessedFace(Mat &srcImg, int desiredFaceWidth, CascadeClassifier &faceCascade, CascadeClassifier &eyeCascade1, CascadeClassifier &eyeCascade2, bool doLeftAndRightSeparately, Rect *storeFaceRect, Point *storeLeftEye, Point *storeRightEye, Rect *searchedLeftEye, Rect *searchedRightEye)
{
    // Use square faces.
    int desiredFaceHeight = desiredFaceWidth;

    // Mark the detected face region and eye search regions as invalid, in case they aren't detected.
    if (storeFaceRect)
        storeFaceRect->width = -1;
    if (storeLeftEye)
        storeLeftEye->x = -1;
    if (storeRightEye)
        storeRightEye->x= -1;
    if (searchedLeftEye)
        searchedLeftEye->width = -1;
    if (searchedRightEye)
        searchedRightEye->width = -1;

    // Find the largest face.
    Rect faceRect;
    detectLargestObject(srcImg, faceCascade, faceRect);

    // Check if a face was detected.
    if (faceRect.width > 0) {

        // Give the face rect to the caller if desired.
        if (storeFaceRect)
            *storeFaceRect = faceRect;

        Mat faceImg = srcImg(faceRect);    // Get the detected face image.

        // If the input image is not grayscale, then convert the BGR or BGRA color image to grayscale.
        Mat gray;
        if (faceImg.channels() == 3) {
            cvtColor(faceImg, gray, CV_BGR2GRAY);
        }
        else if (faceImg.channels() == 4) {
            cvtColor(faceImg, gray, CV_BGRA2GRAY);
        }
        else {
            // Access the input image directly, since it is already grayscale.
            gray = faceImg;
        }

        // Search for the 2 eyes at the full resolution, since eye detection needs max resolution possible!
        Point leftEye, rightEye;
        detectBothEyes(gray, eyeCascade1, eyeCascade2, leftEye, rightEye, searchedLeftEye, searchedRightEye);

        // Give the eye results to the caller if desired.
        if (storeLeftEye)
            *storeLeftEye = leftEye;
        if (storeRightEye)
            *storeRightEye = rightEye;

        // Check if both eyes were detected.
        if (leftEye.x >= 0 && rightEye.x >= 0) {

            // Make the face image the same size as the training images.

            // Since we found both eyes, lets rotate & scale & translate the face so that the 2 eyes
            // line up perfectly with ideal eye positions. This makes sure that eyes will be horizontal,
            // and not too far left or right of the face, etc.

            // Get the center between the 2 eyes.
            Point2f eyesCenter = Point2f( (leftEye.x + rightEye.x) * 0.5f, (leftEye.y + rightEye.y) * 0.5f );
            // Get the angle between the 2 eyes.
            double dy = (rightEye.y - leftEye.y);
            double dx = (rightEye.x - leftEye.x);
            double len = sqrt(dx*dx + dy*dy);
            double angle = atan2(dy, dx) * 180.0/CV_PI; // Convert from radians to degrees.

            // Hand measurements showed that the left eye center should ideally be at roughly (0.19, 0.14) of a scaled face image.
            const double DESIRED_RIGHT_EYE_X = (1.0f - DESIRED_LEFT_EYE_X);
            // Get the amount we need to scale the image to be the desired fixed size we want.
            double desiredLen = (DESIRED_RIGHT_EYE_X - DESIRED_LEFT_EYE_X) * desiredFaceWidth;
            double scale = desiredLen / len;
            // Get the transformation matrix for rotating and scaling the face to the desired angle & size.
            Mat rot_mat = getRotationMatrix2D(eyesCenter, angle, scale);
            // Shift the center of the eyes to be the desired center between the eyes.
            rot_mat.at<double>(0, 2) += desiredFaceWidth * 0.5f - eyesCenter.x;
            rot_mat.at<double>(1, 2) += desiredFaceHeight * DESIRED_LEFT_EYE_Y - eyesCenter.y;

            // Rotate and scale and translate the image to the desired angle & size & position!
            // Note that we use 'w' for the height instead of 'h', because the input face has 1:1 aspect ratio.
            Mat warped = Mat(desiredFaceHeight, desiredFaceWidth, CV_8U, Scalar(128)); // Clear the output image to a default grey.
            warpAffine(gray, warped, rot_mat, warped.size());
            //imshow("warped", warped);

            // Give the image a standard brightness and contrast, in case it was too dark or had low contrast.
            if (!doLeftAndRightSeparately) {
                // Do it on the whole face.
                equalizeHist(warped, warped);
            }
            else {
                // Do it seperately for the left and right sides of the face.
                equalizeLeftAndRightHalves(warped);
            }
            //imshow("equalized", warped);

            // Use the "Bilateral Filter" to reduce pixel noise by smoothing the image, but keeping the sharp edges in the face.
            Mat filtered = Mat(warped.size(), CV_8U);
            bilateralFilter(warped, filtered, 0, 20.0, 2.0);
            //imshow("filtered", filtered);

            // Filter out the corners of the face, since we mainly just care about the middle parts.
            // Draw a filled ellipse in the middle of the face-sized image.
            Mat mask = Mat(warped.size(), CV_8U, Scalar(0)); // Start with an empty mask.
            Point faceCenter = Point( desiredFaceWidth/2, cvRound(desiredFaceHeight * FACE_ELLIPSE_CY) );
            Size size = Size( cvRound(desiredFaceWidth * FACE_ELLIPSE_W), cvRound(desiredFaceHeight * FACE_ELLIPSE_H) );
            ellipse(mask, faceCenter, size, 0, 0, 360, Scalar(255), CV_FILLED);
            //imshow("mask", mask);

            // Use the mask, to remove outside pixels.
            Mat dstImg = Mat(warped.size(), CV_8U, Scalar(128)); // Clear the output image to a default gray.
            /*
            namedWindow("filtered");
            imshow("filtered", filtered);
            namedWindow("dstImg");
            imshow("dstImg", dstImg);
            namedWindow("mask");
            imshow("mask", mask);
            */
            // Apply the elliptical mask on the face.
            filtered.copyTo(dstImg, mask);  // Copies non-masked pixels from filtered to dstImg.
            //imshow("dstImg", dstImg);

            return dstImg;
        }
        /*
        else {
            // Since no eyes were found, just do a generic image resize.
            resize(gray, tmpImg, Size(w,h));
        }
        */
    }
    return Mat();
}
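equalizeLeftAndRightHalves() is used above but not included here. A simplified sketch of the idea, equalizing each half of the face independently; the full version also blends the halves with a whole-face equalization so the seam down the middle is not visible:

// sketch only: operates in place on a grayscale face image
void equalizeLeftAndRightHalvesSimple(Mat &faceImg)
{
    int w = faceImg.cols;
    Mat leftSide = faceImg(Rect(0, 0, w/2, faceImg.rows));
    Mat rightSide = faceImg(Rect(w/2, 0, w - w/2, faceImg.rows));
    equalizeHist(leftSide, leftSide);
    equalizeHist(rightSide, rightSide);
}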
Code Example #11
Mat strechImg(Mat &img, double scalex, double scaley){
    Mat trans_mat = (Mat_<double>(2,3) << scalex, 0, 0, 0, scaley, 0);
    warpAffine(img,img,trans_mat,img.size());
    return trans_mat;
}
Code Example #12
Mat translateImg(Mat &img, int offsetx, int offsety){
    Mat trans_mat = (Mat_<double>(2,3) << 1, 0, offsetx, 0, 1, offsety);
    warpAffine(img,img,trans_mat,img.size());
    return trans_mat;
}
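Both helpers above warp the image in place and return the 2x3 matrix they applied; because the destination size stays img.size(), anything moved outside the original bounds is cropped. A short usage sketch ("input.png" is a placeholder path):

Mat img = imread("input.png");
Mat s = strechImg(img, 1.2, 0.9);    // scale x by 1.2, y by 0.9, in place
Mat t = translateImg(img, 40, -10);  // then shift right 40 px and up 10 px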
Code Example #13
//multi
bool FaceAlignment::alignStasm4(cv::Mat& inputImg, std::vector<cv::Mat>& alignedImg, std::vector<std::vector<cv::Point2d> >& landmarkPoints, std::vector<cv::Point2d>& centersOfLandmarks) {

    //cv::Mat im_gray;
    //cv::cvtColor(inputImg,im_gray,CV_RGB2GRAY);

    //commonTool.log("Performing Active Shape Models with Stasm 4.0 to find features in face.");

    //cv::namedWindow( "test", CV_WINDOW_NORMAL );
    //cv::imshow( "test", inputImg );

    std::vector<std::vector<cv::Point2d> > landmarkList;
    bool success = detectLandmarks(inputImg, landmarkList, centersOfLandmarks);
    if (!success) {
        return false;
    }

    int numberOfFaces = landmarkList.size();
    //commonTool.log(QString("Found number of faces in a single frame --> %1").arg(numberOfFaces));

    for (int i=0;i<numberOfFaces;i++) {
        std::vector<cv::Point2d> points = landmarkList.at(i);

        //commonTool.log(QString("Extracted %1 landmarks.").arg(points.size()));

        //DEBUG
        //commonTool.showImageAndLandMarks(QString("Before Alignment"), inputImg, points, CV_RGB(255, 0, 0), *mutex);
        //DEBUG

        cv::Mat resizedImg;
        cv::Mat rotatedImg = cv::Mat( 250, 200, inputImg.type() );
        double ratio = 50/sqrt(pow(points.at(LEFT_EYE_INDEXV4).x - points.at(RIGHT_EYE_INDEXV4).x, 2) + pow(points.at(LEFT_EYE_INDEXV4).y - points.at(RIGHT_EYE_INDEXV4).y, 2));
        cv::resize(inputImg, resizedImg, cv::Size(), ratio, ratio, CV_INTER_LINEAR );
        //resize all the point
        for (int i = 0; i < (int)points.size(); i++){
            points.at(i) = points.at(i) * ratio;
        }

        //DEBUG
        //commonTool.showImageAndLandMarks(QString("Scaling: %1").arg(ratio), inputImg, points);
        //DEBUG

        /* for rotation */
        double degree = (atan((points.at(LEFT_EYE_INDEXV4).y - points.at(RIGHT_EYE_INDEXV4).y)/(points.at(LEFT_EYE_INDEXV4).x - points.at(RIGHT_EYE_INDEXV4).x)) * 180)/PI;
        cv::Mat rotationMat = cv::getRotationMatrix2D(points.at(NOSE_INDEXV4), degree, 1.0);
        warpAffine( resizedImg, rotatedImg, rotationMat, cv::Size(resizedImg.cols, resizedImg.rows) , cv::INTER_LINEAR, cv::BORDER_REPLICATE);

        //rotation all the point
        for (int i = 0; i < (int)points.size(); i++){
            points.at(i) = rotatePoint(points.at(i), rotationMat);
        }

        //DEBUG
        //commonTool.showImageAndLandMarks(QString("after rotation"), inputImg, points);
        //DEBUG

        //make sure the small image will not fall outside the border
        cv::copyMakeBorder(rotatedImg, rotatedImg, IMAGE_ROW, IMAGE_ROW, IMAGE_COL, IMAGE_COL, cv::BORDER_REPLICATE);
        for (int i = 0; i < (int)points.size(); i++){
            points.at(i).x = points.at(i).x + IMAGE_COL;
            points.at(i).y = points.at(i).y + IMAGE_ROW;
        }

    // Mo did this: bad way, not checking all points!
    //    cv::circle(rotatedImg, points.at(LEFT_EYE_INDEX), 5, cv::Scalar(255, 0, 0));
    //    cv::circle(rotatedImg, points.at(RIGHT_EYE_INDEX), 5, cv::Scalar(0, 0, 255));
    //    cv::circle(rotatedImg, points.at(NOSE_INDEX), 5, cv::Scalar(0, 255, 0));
    //
    //    cv::imshow("rotatedImg", rotatedImg);
    //    cv::waitKey(0);
    //

        //DEBUG
        //commonTool.showImageAndLandMarks(QString("after copymakeborder"), rotatedImg, points);
        //DEBUG
        cv::Rect ROI = cv::Rect(points.at(LEFT_EYE_INDEXV4).x - 75.0, points.at(LEFT_EYE_INDEXV4).y - 125.0, IMAGE_COL, IMAGE_ROW);
        //There was a bug here previously
        cv::Point2d leftEye(points.at(LEFT_EYE_INDEXV4));
        for (int i = 0; i < (int)points.size(); i++){
            points.at(i).x = points.at(i).x - (leftEye.x - 75.0);
            points.at(i).y = points.at(i).y - (leftEye.y - 125.0);
        }
        alignedImg.push_back(rotatedImg(ROI));
        landmarkPoints.push_back(points);
        //DEBUG
        //commonTool.showImageAndLandMarks(QString("After Alignment"), alignedImg, points);
        //DEBUG
    }

    return true;

}
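rotatePoint() is called above but not defined in this excerpt; applying the 2x3 CV_64F matrix returned by getRotationMatrix2D to a single point would plausibly look like this (an assumption, not the original helper):

cv::Point2d rotatePoint(const cv::Point2d& p, const cv::Mat& m)
{
    // m is the 2x3 double-precision affine matrix from getRotationMatrix2D
    double x = m.at<double>(0,0) * p.x + m.at<double>(0,1) * p.y + m.at<double>(0,2);
    double y = m.at<double>(1,0) * p.x + m.at<double>(1,1) * p.y + m.at<double>(1,2);
    return cv::Point2d(x, y);
}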
Code Example #14
// Create a grayscale face image that has a standard size and contrast & brightness.
// "srcImg" should be a copy of the whole color camera frame, so that it can draw the eye positions onto.
// If 'doLeftAndRightSeparately' is true, it will process left & right sides separately,
// so that if there is a strong light on one side but not the other, it will still look OK.
// Performs Face Preprocessing as a combination of:
//  - geometrical scaling, rotation and translation using Eye Detection,
//  - smoothing away image noise using a Bilateral Filter,
//  - standardize the brightness on both left and right sides of the face independently using separated Histogram Equalization,
//  - removal of background and hair using an Elliptical Mask.
// Returns either a preprocessed face square image or NULL (ie: couldn't detect the face and 2 eyes).
// If a face is found, it can store the rect coordinates into 'storeFaceRect' and 'storeLeftEye' & 'storeRightEye' if given,
// and eye search regions into 'searchedLeftEye' & 'searchedRightEye' if given.
Mat getPreprocessedFace(Mat &srcImg, int desiredFaceWidth, CascadeClassifier &faceCascade, CascadeClassifier &eyeCascade1, CascadeClassifier &eyeCascade2, bool doLeftAndRightSeparately, Rect *storeFaceRect, Point *storeLeftEye, Point *storeRightEye, Rect *searchedLeftEye, Rect *searchedRightEye)
{
    // Use square faces.
    int desiredFaceHeight = desiredFaceWidth;

    // Mark the detected face region and eye search regions as invalid, in case they aren't detected.
    if (storeFaceRect)
        storeFaceRect->width = -1;
    if (storeLeftEye)
        storeLeftEye->x = -1;
    if (storeRightEye)
        storeRightEye->x= -1;
    if (searchedLeftEye)
        searchedLeftEye->width = -1;
    if (searchedRightEye)
        searchedRightEye->width = -1;

    // Find the largest face.
    Rect faceRect;
    detectLargestObject(srcImg, faceCascade, faceRect);    // Detect the face.
    //if (faceRect.x > 0)
    //    cout << "face detected" << endl;
    // Check if a face was detected.
    if (faceRect.width > 0) {

        // Give the face rect to the caller if desired.
        if (storeFaceRect)
            *storeFaceRect = faceRect;

        Mat faceImg = srcImg(faceRect);    // Get the detected face image.

        // If the input image is not grayscale, then convert the BGR or BGRA color image to grayscale.
        Mat gray;
        if (faceImg.channels() == 3) {
            cvtColor(faceImg, gray, CV_BGR2GRAY);
        }
        else if (faceImg.channels() == 4) {
            cvtColor(faceImg, gray, CV_BGRA2GRAY);
        }
        else {
            // Access the input image directly, since it is already grayscale.
            gray = faceImg;
        }

        // Search for the 2 eyes at the full resolution, since eye detection needs max resolution possible!
        Point leftEye, rightEye;
        detectBothEyes(gray, eyeCascade1, eyeCascade2, leftEye, rightEye, searchedLeftEye, searchedRightEye);

        // Give the eye results to the caller if desired.
        if (storeLeftEye)
            *storeLeftEye = leftEye;
        if (storeRightEye)
            *storeRightEye = rightEye;

        // Check if both eyes were detected.
        if (leftEye.x >= 0 && rightEye.x >= 0) {

            // Make the face image the same size as the training images.

            // Since we found both eyes, let's rotate & scale & translate the face so that the 2 eyes
            // line up perfectly with ideal eye positions. This makes sure that eyes will be horizontal,
            // and not too far left or right of the face, etc.

            // Get the center between the 2 eyes.
            Point2f eyesCenter = Point2f( (leftEye.x + rightEye.x) * 0.5f, (leftEye.y + rightEye.y) * 0.5f );
            // Get the angle between the 2 eyes.
            double dy = (rightEye.y - leftEye.y);
            double dx = (rightEye.x - leftEye.x);
            double len = sqrt(dx*dx + dy*dy);
            double angle = atan2(dy, dx) * 180.0/CV_PI; // Convert from radians to degrees.

            // Hand measurements showed that the left eye center should ideally be at roughly (0.19, 0.14) of a scaled face image.
            const double DESIRED_RIGHT_EYE_X = (1.0f - DESIRED_LEFT_EYE_X);
            // Get the amount we need to scale the image to be the desired fixed size we want.
            double desiredLen = (DESIRED_RIGHT_EYE_X - DESIRED_LEFT_EYE_X) * desiredFaceWidth;
            double scale = desiredLen / len;
            // Get the transformation matrix for rotating and scaling the face to the desired angle & size.
            Mat rot_mat = getRotationMatrix2D(eyesCenter, angle, scale);
            // Shift the center of the eyes to be the desired center between the eyes.
            rot_mat.at<double>(0, 2) += desiredFaceWidth * 0.5f - eyesCenter.x;
            rot_mat.at<double>(1, 2) += desiredFaceHeight * DESIRED_LEFT_EYE_Y - eyesCenter.y;

            // Rotate, scale, and translate the image to the desired angle, size, and position!
            // The width is also used for the height, since the output face has a 1:1 aspect ratio.
            Mat warped = Mat(desiredFaceHeight, desiredFaceWidth, CV_8U, Scalar(128)); // Clear the output image to a default grey.
            warpAffine(gray, warped, rot_mat, warped.size());
          
            return warped;
        }
        /*
        else {
            // Since no eyes were found, just do a generic image resize.
            resize(gray, tmpImg, Size(w,h));
        }
        */
    }
    return Mat();
}
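The excerpt above returns right after the geometric alignment, although the header comment also promises bilateral-filter smoothing, separated histogram equalization, and an elliptical mask. A minimal sketch of those remaining stages, assuming they operate on the aligned 'warped' image (the function name, filter parameters, and ellipse proportions here are illustrative assumptions, not taken from the original):

// Hypothetical continuation of the preprocessing pipeline (not part of
// the original excerpt): smoothing, per-side equalization, elliptical mask.
Mat postprocessFace(const Mat &warped)
{
    // Smooth away image noise while keeping edges sharp.
    Mat filtered;
    bilateralFilter(warped, filtered, 0, 20.0, 2.0);

    // Standardize the brightness on each half of the face independently,
    // so a strong light on one side does not wash out the other.
    int w = filtered.cols, h = filtered.rows;
    Mat leftHalf  = filtered(Rect(0, 0, w/2, h));
    Mat rightHalf = filtered(Rect(w/2, 0, w - w/2, h));
    equalizeHist(leftHalf, leftHalf);
    equalizeHist(rightHalf, rightHalf);

    // Remove background and hair with a filled elliptical mask centered on
    // the face; the 0.5 / 0.8 axis proportions are illustrative guesses.
    Mat mask = Mat::zeros(filtered.size(), CV_8U);
    ellipse(mask, Point(w/2, h/2), Size(cvRound(w*0.5), cvRound(h*0.8)),
            0, 0, 360, Scalar(255), -1 /* filled */);
    filtered.setTo(Scalar(128), mask == 0);   // grey outside the ellipse
    return filtered;
}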
Code example #15
void *affine_loop(void *number)
{

/*
	if(set_single_core_affinity()!=EXIT_SUCCESS)
	{
		perror("Core Affinity");
	}
*/

	int block_size;
	int max_files;
	int i=0;
	int section=0;
	float pos[8]={0,0,1,0,0,1,1,1};

	Point2f srcTri[4];
	Point2f dstTri[4];

	max_files=3601;
	block_size=max_files/4;

	Mat rot_mat( 2, 3, CV_32FC1 );
	Mat warp_mat( 2, 3, CV_32FC1 );


	// Image buffers
	Mat src, warp_dst, warp_rotate_dst;

	struct thread_limits *ptr_obj = (struct thread_limits *) number;
	int start_loop = ptr_obj->start_no;
	int stop_loop  = ptr_obj->stop_no;

	/*------------------------- Starting the loop --------------------------*/

	for (i=start_loop; i<=stop_loop; i++)
	{

		/*------------------------- Loading the Image --------------------------*/

		if(option==1)
		{
			// Select the right frame
			sprintf(frame_name2,"Sobel_frame_no_%05u.ppm",i);
			// Load the Image
			src = imread( frame_name2, 1 );
		}

		else
		{
			sprintf(frame_name,"Frame_no_%05u.ppm",i);
			src = imread( frame_name, 1 );
		}

		/*---------------------- Affine Transform : Warp -----------------------*/

		// Setting up the output image parameters

		warp_dst = Mat::zeros( src.rows, src.cols, src.type() );


		/*---------------------- Change the parameter values ----------------------*/

		// Each quarter of the frame sequence nudges a different set of
		// destination corners by 0.001 per frame.

		switch(section)
		{
			case 0:
			{
				pos[1]=pos[1]+0.001;
				pos[2]=pos[2]-0.001;
				pos[4]=pos[4]+0.001;
				pos[7]=pos[7]-0.001;
				break;
			}

			case 1:
			{
				pos[0]=pos[0]+0.001;
				pos[3]=pos[3]+0.001;
				pos[5]=pos[5]-0.001;
				pos[6]=pos[6]-0.001;
				break;
			}

			case 2:
			{
				pos[1]=pos[1]-0.001;
				pos[2]=pos[2]+0.001;
				pos[4]=pos[4]-0.001;
				pos[7]=pos[7]+0.001;
				break;
			}

			case 3:
			{
				pos[0]=pos[0]-0.001;
				pos[3]=pos[3]-0.001;
				pos[5]=pos[5]+0.001;
				pos[6]=pos[6]+0.001;
				break;
			}

			default:
			{
				//printf("Value: %d\n",section);
				break;
			}
		}

		// Setting parameters for matrix computation (common to every case)

		srcTri[0] = Point2f( 0, 0 );
		srcTri[1] = Point2f( src.cols - 1, 0 );
		srcTri[2] = Point2f( 0, src.rows - 1 );
		srcTri[3] = Point2f( src.cols - 1, src.rows - 1 );

		dstTri[0] = Point2f( src.cols*pos[0], src.rows*pos[1] );
		dstTri[1] = Point2f( src.cols*pos[2], src.rows*pos[3] );
		dstTri[2] = Point2f( src.cols*pos[4], src.rows*pos[5] );
		dstTri[3] = Point2f( src.cols*pos[6], src.rows*pos[7] );

		// Advance to the next section after each quarter of the frames
		section=i/block_size;

		//printf("Section %d: %u\t %f %f %f %f %f %f %f %f\n",section,i,pos[0],pos[1],pos[2],pos[3],pos[4],pos[5],pos[6],pos[7]);

		// Calculate the affine transform matrix. An affine map is fully
		// determined by three point pairs, so getAffineTransform() only reads
		// the first three entries of srcTri/dstTri; the fourth corner has no
		// effect (see the sketch after this example).

		warp_mat = getAffineTransform( srcTri, dstTri );


		// Applying the Affine Transform to the src image

		warpAffine( src, warp_dst, warp_mat, warp_dst.size() );



		/*-------------------- Affine Transform : Rotate -----------------------*/

		// Compute the Rotation Matrix Parameters

		Point center = Point( warp_dst.cols/2, warp_dst.rows/2 );
		double angle = ROTATION_ANGLE;
		double scale = ISOTROPIC_SCALE_FACTOR;

		// Generate the Rotation Matrix

		rot_mat = getRotationMatrix2D( center, angle, scale );

		// Rotate the Image

		warpAffine( warp_dst, warp_rotate_dst, rot_mat, warp_dst.size() );


		/*------------------------- Storing the Image ---------------------------*/

		sprintf(frame_name3,"Affine_frame_no_%05u.ppm",i);

		// Storing the warped image. Note that the rotated result in
		// warp_rotate_dst is computed above but never written out.

		imwrite(frame_name3, warp_dst);

	}
	// End of 'for' loop

	return NULL;
}
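As noted above, getAffineTransform() reads only the first three of the four corner pairs, so the fourth entry of dstTri never influences the warp. If all four destination corners must be mapped exactly, a perspective (homography) warp is required instead; a minimal sketch of that substitution (hypothetical, not part of the original program):

// Hypothetical variant: map all four corners exactly using a homography
// instead of an affine transform.
void warpQuad(const Mat &src, Mat &dst,
              const Point2f srcQuad[4], const Point2f dstQuad[4])
{
    // getPerspectiveTransform() consumes all four point pairs, unlike
    // getAffineTransform(), which only reads the first three.
    Mat persp_mat = getPerspectiveTransform(srcQuad, dstQuad);
    warpPerspective(src, dst, persp_mat, src.size());
}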