Mat RoadWatcher::Draw_RoadLines_On_Matrix(Mat originalFrame, Mat linesMatrix, int frameLocation) {
    // Edge-detect the lines matrix, extract line segments with the
    // probabilistic Hough transform, and paint them in red onto the original
    // frame, shifted down by frameLocation.
    Mat edges;
    Canny(linesMatrix, edges, 50, 200, 3);

    vector<Vec4i> segments;
    HoughLinesP(edges, segments, 1, CV_PI/180, Threshold, 50, 10);

    for (size_t idx = 0; idx < segments.size(); idx++) {
        Vec4i seg = segments[idx];
        // linesMatrix is a cropped sub-frame; offset the y coordinates so the
        // segments land at the right place on the full frame.
        seg[1] += frameLocation;
        seg[3] += frameLocation;
        line(originalFrame, Point(seg[0], seg[1]), Point(seg[2], seg[3]), Scalar(0, 0, 255), 3, CV_AA);
    }
    return originalFrame;
}
/*
 * Runs the probabilistic Hough transform on the given frame and converts the
 * raw segments into Line objects, keeping only those whose angle lies
 * strictly between HOUGH_PROB_MIN_ANGLE and HOUGH_PROB_MAX_ANGLE (so
 * near-horizontal lines are dropped automatically).
 */
vector<Line> RoadDetection::getHoughProbLines(Mat frame) {
    vector<Vec4i> rawSegments;
    HoughLinesP(frame, rawSegments, 1, CV_PI / 180, HOUGH_PROB_THRESH, HOUGH_PROB_MIN_LINE_LENGTH, HOUGH_PROB_MAX_LINE_GAP);

    vector<Line> keptLines;
    for (size_t idx = 0; idx < rawSegments.size(); idx++) {
        Point start(rawSegments[idx][0], rawSegments[idx][1]);
        Point end(rawSegments[idx][2], rawSegments[idx][3]);
        Line candidate = Line(start, end);

        // Keep only sufficiently steep lines.
        bool angleInRange = candidate.angle > HOUGH_PROB_MIN_ANGLE && candidate.angle < HOUGH_PROB_MAX_ANGLE;
        if (angleInRange) {
            keptLines.push_back(candidate);
        }
    }
    return keptLines;
}
Mat RoadWatcher::Calculate_And_Draw_RoadLines(Mat frame) {
    // Detects road lane lines on the lower half of the frame with both the
    // standard and the probabilistic Hough transform, keeps only the pixels
    // where the two agree (bitwise AND), and draws the result onto a copy of
    // the input frame, which is returned.
    Mat half_frame, dst, cdst, editedFrame, blackHueg, blackProb;
    int lowerFrameloc;

    editedFrame = frame.clone();
    half_frame = frame( Range( frame.rows/2 +50, frame.rows - 1 ), Range( 0, frame.cols - 1 ) );

    // Copy the half frame into these 2 frames, afterwards make them
    // completely black, so that we can use these frames to bitwise AND.
    blackHueg = half_frame.clone();
    blackProb = half_frame.clone();
    blackHueg.setTo(Scalar(0,0,0));
    blackProb.setTo(Scalar(0,0,0));

    // Define where the lanes should be located on the main frame.
    lowerFrameloc = frame.rows/2 +50;

    // BUG FIX: the original code called cvtColor(dst, cdst, ...) BEFORE dst
    // was ever written, which aborts at runtime on an empty matrix. Run the
    // Canny edge detector first so dst is valid.
    Canny(half_frame, dst, 50, 200, 3);
    cvtColor(dst, cdst, CV_GRAY2BGR);
    threshold(dst, cdst, 128, 255, THRESH_BINARY_INV);

    vector<Vec2f> houghlines;
    vector<Vec4i> probLines;

    // Detect lines with both Hough variants on the edge image.
    HoughLines(dst, houghlines, 1, CV_PI/180, Threshold, 0, 0 );
    HoughLinesP(dst, probLines, 1, CV_PI/180, Threshold, 50, 10 );

    blackHueg = Draw_HoughLines(blackHueg, houghlines);
    blackProb = Draw_Probablistic_HoughLines(blackProb, probLines);

    // Keep only the pixels both detectors agree on, then render them onto the
    // full frame.
    Mat linesToDraw = bitwise_And(blackHueg, blackProb);
    Mat returner = Draw_RoadLines_On_Matrix(editedFrame, linesToDraw, lowerFrameloc);
    return returner;
}
bool LineDetector::GetLines(Mat &rawHSV, Mat & fieldMask, Mat &guiImg, bool SHOWGUI, const Mat &lineBinary, vector<LineSegment> &resLines) { int MIN_LINE_DOUBLE_VOTE = params.line.lineVoteDouble->get(); int MIN_LINE_VOTE = params.line.lineVote->get(); int MIN_LINE_COLOR_VOTE = params.line.lineVoteColor->get(); vector<Vec4i> linesP; HoughLinesP(lineBinary, linesP, 1, M_PI / 45, 20, params.line.MinLineLength->get(), 20); for (size_t i = 0; i < linesP.size(); i++) { Vec4i lP = linesP[i]; LineSegment tmpLine(Point2d(lP[0], lP[1]), Point2d(lP[2], lP[3])); vector<cv::Point2d> midds = tmpLine.GetMidPoints(3); //2^3+1 = 16 int lineVoter = 0; int vote_for_double = 0; int vote_for_color = 0; uchar *dataImg = fieldMask.data; // printf("size= %d\n", midds.size()); for (size_t j = 0; j < midds.size(); j++) { int jumpMin = params.line.jumpMin->get(); int jumpMax = params.line.jumpMin->get(); double distanceToZero = GetDistance( cv::Point2d((params.camera.width->get() / 2), (params.camera.height->get())), midds[j]); LineInterpolation interP( LineSegment(cv::Point2d(0, jumpMax), cv::Point2d(params.camera.height->get(), jumpMin))); double jump; if (!interP.GetValue(distanceToZero, jump)) { //printh(CRed, "Error In Programming!"); continue; } LineSegment tocheck = tmpLine.PerpendicularLineSegment(jump, midds[j]); cv::LineIterator it(lineBinary, tocheck.P1, tocheck.P2, 8); vector<uchar> buf(it.count); int currentCounter = 0; for (int k = 0; k < it.count; k++, ++it) { uchar val=*(*it); if ( val > 10) { vote_for_double++; currentCounter++; } if (currentCounter >= 2) break; } cv::LineIterator itHSV(rawHSV, tocheck.P1, tocheck.P2, 8); vector<uchar> bufHSV(itHSV.count); for (int k = 0; k < itHSV.count; k++, ++itHSV) { cv::Vec3b hsvC = (cv::Vec3b) *itHSV; if (hsvC[0] >= params.line.h0->get() && hsvC[0] <= params.line.h1->get() && hsvC[1] >= params.line.s0->get() && hsvC[1] <= params.line.s1->get() && hsvC[2] >= params.line.v0->get() && hsvC[2] <= params.line.v1->get()) { vote_for_color++; 
break; } } int safeToShow = 0; if (tocheck.P1.x >= 0 && tocheck.P1.y >= 0 && tocheck.P1.x < params.camera.width->get() && tocheck.P1.y < params.camera.height->get()) { safeToShow++; uchar* pixelP = dataImg + ((((int) tocheck.P1.y * params.camera.width->get()) + (int) tocheck.P1.x) * 1); if (*pixelP > 50) lineVoter++; } if (tocheck.P2.x >= 0 && tocheck.P2.y >= 0 && tocheck.P2.x < params.camera.width->get() && tocheck.P2.y < params.camera.height->get()) { safeToShow++; uchar* pixelP = dataImg + ((((int) tocheck.P2.y * params.camera.width->get()) + (int) tocheck.P2.x) * 1); if (*pixelP > 50) lineVoter++; } if (safeToShow >= 2) { if (SHOWGUI && params.debug.showLineD->get()) { cv::line(guiImg, tocheck.P1, tocheck.P2, yellowColor(), 1); } } } if (lineVoter > MIN_LINE_VOTE && vote_for_double > MIN_LINE_DOUBLE_VOTE && vote_for_color > MIN_LINE_COLOR_VOTE) resLines.push_back(tmpLine); } return resLines.size() > 0; }
// Searches for a line inside a region of interest (ROI) of img and appends the
// detected average line (endpoints + angle) to linePoints / lineAngles.
// roi_k selects which ROI in the chain is processed; it is set to 4 to signal
// "stop seeking" when no usable line is found.
void lineROI(int &roi_k, cv::Mat img, cv::Mat img_draw, std::vector<std::vector<int> > &linePoints, std::vector<float> &lineAngles)
{
    cv::Rect roi; // roi where line will be detected
    int dh, dw;   // roi size
    dw = 120;
    dh = 120;
    std::cout<<"roi_k"<<roi_k<<std::endl;
    // create roi
    if (roi_k == 0)
    { // the first roi is a fixed window
        dw = 120;
        dh = 320;
        roi = cv::Rect(0, 0.5*img.rows-0.5*dh, dw, dh);
        cv::rectangle(img_draw, roi, cv::Scalar( 0, 55, 255 ), +1, 4 ); // draw roi
        std::cout<<"roi0draw"<<std::endl;
    }
    else
    { // control roi overflow for other rois
        // Subsequent ROIs are anchored at the end point (indices 2,3) of the
        // previously detected line, clamped so they stay inside the image.
        if(linePoints[roi_k-1][3] + 0.5*dh > img.rows)
        {
            std::cout<<"if1"<<std::endl;
            roi = cv::Rect(linePoints[roi_k-1][2], linePoints[roi_k-1][3], dw, img.rows - linePoints[roi_k-1][3]);
        }
        else if(linePoints[roi_k-1][3] - 0.5*dh < 0)
        {
            std::cout<<"if2"<<std::endl;
            roi = cv::Rect(linePoints[roi_k-1][2], 0, dw, dh);
        }
        else
        {
            std::cout<<"if3"<<std::endl;
            roi = cv::Rect(linePoints[roi_k-1][2], linePoints[roi_k-1][3] - 0.5*dh, dw, dh);
        }
        std::cout<<"roidraw"<<std::endl;
        // NOTE(review): this draws on mapDraw (defined outside this function)
        // while the first ROI draws on img_draw — confirm which target is
        // intended.
        cv::rectangle(mapDraw, roi, cv::Scalar( 0, 255, 0 ), +1, 4 );
        std::cout<<"roidraw"<<std::endl;
    }
    // line detection by Hough algorithm
    std::vector<cv::Vec4i> lines; // vector for line points storage
    std::cout<<"hough"<<std::endl;
    HoughLinesP(img(roi), lines, 1, CV_PI/180, 50, 50, 20 );
    std::cout<<"hough"<<std::endl;
    std::cout<<"hough # lines: "<<lines.size()<<std::endl;
    if (lines.size() > 0)
    { // calculate best line (average line for now...)
        int p1x, p1y, p2x, p2y; // points of the averaged line
        if (lineParam(roi, lines, p1x, p1y, p2x, p2y) > 0) // if a useful line was found...
        {
            float alpha = 999; // if no line is detected, alpha=999
            std::vector<int> linerow;
            linerow.push_back(p1x);
            linerow.push_back(p1y);
            linerow.push_back(p2x);
            linerow.push_back(p2y);
            std::cout<<"linerow"<<" "<<linerow[0]<<" "<<linerow[1]<<" "<<linerow[2]<<" "<<linerow[3]<<" "<<std::endl;
            linePoints.push_back(linerow); // save average line points in vector linePoints
            // NOTE(review): atan(dy/dx) is the angle with the horizontal axis,
            // although the original comment said "with vertical" — confirm.
            alpha = atan((float)(p2y-p1y)/(p2x-p1x));
            lineAngles.push_back(alpha); // save angle in vector lineAngles
            std::cout<<"Angle"<<roi_k<<": "<< alpha*(180/3.1416) <<std::endl; // print angle in degrees
            cv::line(mapDraw, cv::Point (p1x,p1y), cv::Point(p2x,p2y), cv::Scalar(0, 0, 255), 2, 8); // draw mean line
            std::cout<<"Printing average line in roi "<<roi_k<<std::endl;
            for(int i = roi_k; i < roi_k+1; i++)
            {
                std::cout<<"Line"<<i<<": "<<linePoints[i][0]<<" "<<linePoints[i][1]<<" "<<linePoints[i][2]<<" "<<linePoints[i][3]<<" "<<std::endl;
            }
        }
        else{roi_k = 4;} // if no useful line was found, stop seeking for line...
    }
    else{roi_k = 4;} // if no line was found by Hough, stop seeking for line...
}
// Detects the four corners of a quadrilateral (a sheet of paper) in src:
// blur + Canny + dilate, probabilistic Hough, merge similar segments into up
// to four averaged border lines, intersect them to get corner candidates,
// then smooth the result across frames via the corners_old / refresh_corners
// variables defined outside this function. Returns the four tracked corners,
// or an empty vector when no valid set exists yet.
vector<Point2f> find_corners(Mat &src, unsigned int rows, unsigned int cols)
{
    vector<cv::Vec4i> slines;
    vector<par_line> par_lines;
    vector<par_line> borders;
    bool new_corners = false;
    vector<Point2f> corners;
    vector<Point2f> quad_pts;
    Mat temp;
    blur(src, temp, Size(5,5));
    Canny(temp, temp, 100, 100, 3);
    int erosion_type = 1;
    int erosion_size = 1;
    Mat element = getStructuringElement( erosion_type, Size( 2*erosion_size + 1, 2*erosion_size+1 ), Point( erosion_size, erosion_size ) );
    dilate(temp, temp, element);
    HoughLinesP(temp, slines, 1, CV_PI/360, 120,100, 10);
    if ( slines.size() < 4 )
    {
        ///cout << "Hough: found fewer than 4 lines.";
        new_corners = false;
    }
    else
    {
        // Convert each segment to a parametric form: atana = angle,
        // b = y-intercept, len = segment length.
        for( unsigned int i = 0; i < slines.size(); i++ )
        {
            Vec4i l = slines[i];
            par_line tmp_line;
            /// Vertical line - push b to a huge value
            if( abs(l[2]-l[0]) == 0 ){ // sign is (probably) OK
                tmp_line.b = -(l[3]-l[1])/abs(l[3]-l[1])*1.e15;
            } else {
                tmp_line.b = l[1] - (double)(l[3]-l[1])/((double)(l[2]-l[0]))*l[0];
            }
            tmp_line.atana = atan2((double)(l[3]-l[1]),((double)(l[2]-l[0])));
            tmp_line.len = sqrt(pow( (double)(l[0]-l[2]), 2.0 ) + pow( (double)(l[1]-l[3]), 2.0 ));
            par_lines.push_back(tmp_line);
        }
        /// Averaged lines that form the edges of the sheet.
        /// For each line, look for an existing border within +/- 10 degrees.
        /// The first line is accepted immediately.
        borders.push_back(par_lines[0]);
        for( unsigned int i = 1; i < par_lines.size(); i++ )
        {
            bool found_similiar = false;
            for ( unsigned int j = 0; j < borders.size(); j++ )
            {
                /// New segment is similar to one of the existing borders
                if ( abs(abs(par_lines[i].atana) - abs(borders[j].atana)) < 10.0*3.14159/180.0 && abs(par_lines[i].b - borders[j].b) < 150.0 )
                {
                    /// New value is a length-weighted average
                    borders[j].atana = (borders[j].atana*borders[j].len + par_lines[i].atana*par_lines[i].len) / (borders[j].len + par_lines[i].len);
                    borders[j].b = (borders[j].b*borders[j].len + par_lines[i].b*par_lines[i].len) / (borders[j].len + par_lines[i].len);
                    /// Store the new (accumulated) length of the averaged line
                    borders[j].len = borders[j].len + par_lines[i].len;
                    found_similiar = true;
                }
            }
            /// If no border was similar, add a new edge
            if ( !found_similiar )
            {
                borders.push_back(par_lines[i]);
            }
        }
        if ( borders.size() < 4 )
        {
            ///cout << "Built fewer than 4 sides of the outline.";
            new_corners = false;
        }
        else
        {
            // Drop the shortest borders until exactly four remain.
            while(borders.size() > 4)
            {
                unsigned char i = 0;
                double minlen = 1.e30; // "infinity"
                for(unsigned char j = 0; j < borders.size(); j++)
                {
                    if(borders[i].len > borders[j].len)
                    {
                        i = j;
                        minlen = borders[j].len;
                    }
                }
                borders.erase(borders.begin()+i);
            }
            /// Find the corners
            for (unsigned int i = 0; i < borders.size(); i++){
                for (unsigned int j = i+1; j < borders.size(); j++){
                    /// Intersect only borders that are not near-parallel to each other
                    if( abs(abs(borders[i].atana)-abs(borders[j].atana)) > 45.0*3.14159/180.0 )
                    {
                        Point2f p;
                        p.x = (borders[i].b - borders[j].b) / ((tan(borders[j].atana) - tan(borders[i].atana)));
                        p.y = p.x * tan(borders[i].atana) + borders[i].b;
                        corners.push_back(p);
                    }
                }
            }
            if ( corners.size() < 4 )
            {
                new_corners = false;
            }
            else
            {
                /// Compute the center of mass of the corner candidates
                Point2f center(0,0);
                for ( unsigned int i = 0; i < corners.size(); i++ )
                {
                    center += corners[i];
                }
                center *= (1. / corners.size());
                /// Sort the corners (around the center)
                sortCorners(corners, center);
                new_corners = true;
            }
        }
    }
    /// If this is the first pass, just copy the corners
    if( corners_old.size() == 0 )
    {
        corners_old = corners;
    }
    /// If a new set of corners was found
    if( new_corners )
    {
        /// Every 20th frame refresh the corners unconditionally, to avoid
        /// staying skewed after the sheet is rotated and rotated back.
        if( refresh_corners > 20 ){
            corners_old = corners;
            refresh_corners = 0;
        }
        else
        {
            refresh_corners += 1;
            // Otherwise accept the new corners only when every old corner has
            // a new corner within a 100 px box around it.
            bool close_corner_found [4];
            for( int i = 0; i < 4; i++ )
            {
                close_corner_found[i] = false;
                Point2f c = corners_old[i];
                for( int j = 0; j < 4; j++ )
                {
                    Point2f k = corners[j];
                    if( abs(k.y - c.y) < 100 && abs(k.x - c.x) < 100 )
                    {
                        close_corner_found[i] = true;
                    }
                }
            }
            if ( close_corner_found[0] && close_corner_found[1] && close_corner_found[2] && close_corner_found[3] )
            {
                corners_old = corners;
            }
        }
    }
    if( corners_old.size() == 4 )
    {
        return corners_old;
    }
    else
    {
        vector<Point2f> empty;
        empty.clear();
        return empty;
    }
}
void HoughDetectEdge::houghLines(cv::Mat &gray, std::vector<std::vector<cv::Vec4i>> &lines) { double const THETA = 30.0 / 180.0; // lines[0] top // lines[1] bottom // lines[2] left // lines[3] right lines.clear(); std::vector<cv::Vec4i> tmplines; HoughLinesP(gray, tmplines, 1, CV_PI / 180, 50, 20, 10); std::vector<cv::Vec4i> ups; std::vector<cv::Vec4i> downs; std::vector<cv::Vec4i> lefts; std::vector<cv::Vec4i> rights; for (size_t i = 0; i < tmplines.size(); ++i) { cv::Vec4i &line = tmplines[i]; //cv::line(gray, cv::Point(line[0], line[1]), cv::Point(line[2], line[3]), cv::Scalar(255 * (i <= 1), 0, 255 * (i>1)), 1, CV_AA); int detaX = abs(line[0] - line[2]); int detaY = abs(line[1] - line[3]); if (detaX > detaY && atan(1.0 * detaY / detaX) < THETA) //the direction of horizon { if (std::max(line[1], line[3]) < gray.rows / 3) { ups.emplace_back(line); continue; } if (std::max(line[1], line[3]) > gray.rows * 2 / 3) { downs.emplace_back(line); continue; } } if (detaX < detaY && atan(1.0 * detaX / detaY) < THETA) { if (std::max(line[0], line[2]) < gray.cols / 3) { lefts.emplace_back(line); continue; } if (std::max(line[0], line[2]) > gray.cols * 2 / 3) { rights.emplace_back(line); continue; } } } lines.emplace_back(ups); lines.emplace_back(downs); lines.emplace_back(lefts); lines.emplace_back(rights); //return lines.size() == 4; #ifdef _DEBUG cv::Mat cdst; cv::cvtColor(gray, cdst, CV_GRAY2BGR); for (size_t i = 0; i < lines.size(); ++i) { for (size_t j = 0; j < lines[i].size(); ++j) { cv::Vec4i& l = lines[i][j]; cv::line(cdst, cv::Point(l[0], l[1]), cv::Point(l[2], l[3]), cv::Scalar(255 * (i<=1), 0, 255*(i>1)), 1, CV_AA); } } cv::imshow("lines", cdst); cv::waitKey(); #endif //end _DEBUG }
vector<Vec4i> BookSegmenter::extractLines(Mat input){
    // Detect straight segments in the input image with the probabilistic
    // Hough transform: 1 px rho step, 1 degree theta step, 50 votes minimum,
    // minimum length 50 px, maximum gap 10 px.
    vector<Vec4i> detected;
    HoughLinesP(input, detected, 1, CV_PI / 180, 50, 50, 10);
    return detected;
}
void AndarPelaParedeAteLinha::execute(Robotino *robotino) {
    // Wall-following behaviour: drive forward keeping a reference distance to
    // the target wall (left or right sensor group, depending on paredeAlvo),
    // re-anchor the odometry from the measured wall distance when a floor
    // line is visible, and stop / return to the previous state once enough
    // lines are detected by the camera.
    //
    // BUG FIX: Vy and w were read uninitialized by setVelocity when
    // paredeAlvo matched neither wall group; they now default to 0
    // (no lateral / rotational motion).
    float Vx = 200, Vy = 0, w = 0, distParede;
    float erroDist = 0;
    int paredeAlvo = robotino->paredeAlvo();
    // Geometry constants for the angled IR sensors.
    static float a = std::sin(60*PI/180)/std::sin(80*PI/180);
    static float cos20 = std::cos(20*PI/180);
    static float K = R*(a-1);
    static float erro_int = 0;   // integral term of the heading controller

    // IR readings: *_1 front sensor, *_2 rear sensor; ref_*1 is the value the
    // front sensor should read when the robot is parallel to the wall.
    float e1 = robotino->irDistance(Robotino::IR_ESQUERDO_1);
    float e2 = robotino->irDistance(Robotino::IR_ESQUERDO_2);
    float ref_e1 = e2*a+K;
    float d1 = robotino->irDistance(Robotino::IR_DIREITO_1);
    float d2 = robotino->irDistance(Robotino::IR_DIREITO_2);
    float ref_d1 = 1.15*(d2*a+K);
    float distancia_da_esquerda, distancia_da_direita;
    float erro;

    vector<Vec4i> lines;
    Mat img, cdst;
    int num_linha = 0;
    int min_Hough = 70, dist_Hough = 50;
    int min_canny = 150, max_canny = 3*min_canny;

    distParede = robotino->getRefDistParede();
    distParede += R;

    // Detect floor lines in the camera image:
    // gray -> Canny -> binary -> probabilistic Hough.
    img = robotino->getImage();
    cvtColor( img, cdst, CV_BGR2GRAY );
    Canny( cdst, cdst, (double)min_canny, (double)max_canny, 3 );
    convertScaleAbs(cdst, cdst);
    //cv::imshow("Canny",cdst);
    //cv::waitKey(1);
    threshold(cdst, cdst, (double)5, (double)255, CV_THRESH_BINARY);
    HoughLinesP(cdst, lines, 1, CV_PI/180, min_Hough, min_Hough, dist_Hough );
    cvtColor( cdst, cdst, CV_GRAY2BGR );

    if (paredeAlvo == Robotino::NORTEN90 || paredeAlvo == Robotino::OESTE0
            || paredeAlvo == Robotino::SUL90 || paredeAlvo == Robotino::LESTE180){
        // Following the wall on the LEFT side: PI control on heading,
        // P control on lateral distance.
        erro = (e1-ref_e1);
        erro_int += erro*dt;
        w = Kp*erro+Ki*erro_int;
        distancia_da_esquerda = ((e1+ref_e1+2*R)*cos20)/2;
        erroDist = (distancia_da_esquerda) - distParede;
        Vy = Kpy*erroDist;
        std::cout << "erro dist: " << erroDist << "\n";
        std::cout<< "Esquerda 1: " << e1 << std::endl;
        std::cout<< "RefEsquerda 1: " << ref_e1 << std::endl;
        std::cout<< "Esquerda 2: " << e2 << std::endl;
        std::cout << "Distância da esquerda: " << distancia_da_esquerda << "\n";
        if (lines.size() > numeroLinhasMin){
            // A floor line is visible: re-anchor the odometry axis that the
            // wall constrains (cm -> mm via *10, +15 offset).
            if (paredeAlvo == Robotino::OESTE0) {
                robotino->setOdometry(robotino->odometryX(),-(distancia_da_esquerda*10+15),0);
            }
            if (paredeAlvo == Robotino::NORTEN90) {
                robotino->setOdometry((robotino->getAlturaMapa())*10 -(distancia_da_esquerda*10+15),robotino->odometryY(),-90);
            }
            if (paredeAlvo == Robotino::SUL90) {
                robotino->setOdometry((distancia_da_esquerda*10+15),robotino->odometryY(),90);
            }
            if (paredeAlvo == Robotino::LESTE180) {
                robotino->setOdometry(robotino->odometryX(),-((robotino->getLarguraMapa())*10 -(distancia_da_esquerda*10+15)),180);
            }
        }
    }else if (paredeAlvo == Robotino::SULN90 || paredeAlvo == Robotino::LESTE0
            || paredeAlvo == Robotino::NORTE90 || paredeAlvo == Robotino::OESTE180) {
        // Following the wall on the RIGHT side (mirrored control signs).
        erro = (d1-ref_d1);
        erro_int += erro*dt;
        w = -Kp*erro-Ki*erro_int;
        distancia_da_direita = ((d1+ref_d1+2*R)*cos20)/2;
        erroDist = distParede - ( distancia_da_direita );
        Vy = Kpy*erroDist;
        std::cout<< "Direita 1: " << d1 << std::endl;
        std::cout<< "RefDireita 1: " << ref_d1 << std::endl;
        std::cout<< "Direita 2: " << d2 << std::endl;
        std::cout << "Distância da direita: " << distancia_da_direita << "\n";
        if (lines.size() > numeroLinhasMin){
            if (paredeAlvo == Robotino::SULN90) {
                robotino->setOdometry((distancia_da_direita*10+15),robotino->odometryY(),-90);
            }
            if (paredeAlvo == Robotino::LESTE0) {
                robotino->setOdometry(robotino->odometryX(),-((robotino->getLarguraMapa()) * 10-(distancia_da_direita*10+15)),0);
            }
            if (paredeAlvo == Robotino::NORTE90) {
                robotino->setOdometry((robotino->getAlturaMapa()*10 - (distancia_da_direita*10+15)),robotino->odometryY(),90);
            }
            if (paredeAlvo == Robotino::OESTE180) {
                robotino->setOdometry(robotino->odometryX(),-((distancia_da_direita*10+15)),180);
            }
        }
    }

    if(distParede > 99){
        // No valid wall reference: just drive straight ahead.
        robotino->setVelocity(Vx,0,0);
    }else{
        robotino->setVelocity(Vx,Vy,w);
    }

    // Count detected lines (the original per-line filter is kept below,
    // commented out; as written every Hough segment counts).
    for( size_t i = 0; i < lines.size(); i++ ){
        //if (lines[i][3] > 100 || lines[i][1] > 100){
        num_linha++;
        //}
    }
    if (num_linha > numeroLinhasMin){
        // Line reached: stop and hand control back to the previous state.
        robotino->setVelocity(0,0,0);
        robotino->change_state(robotino->previous_state());
    }
}
// License-plate localisation pipeline:
//   1. blur + gray + Sobel gradients + Otsu threshold + morphology
//   2. find contours and keep the bounding rectangle whose aspect ratio and
//      area look like a plate
//   3. Hough lines on the cropped plate to estimate its tilt, deskew it,
//      clip the top/bottom borders and pass the result to parting_char().
// Relies on Mats declared at file scope (img_src, img_gray, grad_x, grad_y,
// abs_grad_x, abs_grad_y, grad, img_bin_thre, img_get_rect, img_get_rect_new,
// img_lines) — TODO confirm, declarations are outside this view.
int main(int argc,char**argv)
{
    int scale = 1;
    int delta = 0;
    int ddepth = CV_16S;
    // check the number of parameters
    if(argc !=2)
    {
        printf("please follow like this\n");
        printf("exe[] img_name\n");
        return -1;
    }
    // read the image
    img_src = imread(argv[1]);
    // check whether the read operation succeeded
    if(img_src.data == NULL)
    {
        printf("could not open or find the image!\n");
        return -1;
    }
    // use Gaussian blur to reduce the noise
    GaussianBlur(img_src,img_src,Size(3,3),0,0,BORDER_DEFAULT);
    // convert source image to gray image
    cvtColor(img_src,img_gray,CV_BGR2GRAY);
    // Sobel in x direction
    Sobel(img_gray,grad_x,ddepth,1,0,3,scale,delta,BORDER_DEFAULT);
    convertScaleAbs(grad_x,abs_grad_x);
    // Sobel in y direction
    Sobel(img_gray,grad_y,ddepth,0,1,3,scale,delta,BORDER_DEFAULT);
    convertScaleAbs(grad_y,abs_grad_y);
    // combine the two gradients with equal weights
    addWeighted(abs_grad_x,0.5,abs_grad_y,0.5,0,grad);
    // binarise; the threshold is selected by the OTSU method
    threshold(grad,img_bin_thre,0,255,THRESH_BINARY|THRESH_OTSU);
    // morphological open + close (x3) to clean up the binary image
    Mat element = getStructuringElement(MORPH_RECT,Size(2*1+1,2*1+1),Point(-1,-1));
    for(int i = 0;i < 3; i++)
    {
        morphologyEx(img_bin_thre,img_bin_thre,MORPH_OPEN,element);
        morphologyEx(img_bin_thre,img_bin_thre,MORPH_CLOSE,element);
    }
    // original method; this is worse than morphologyEx:
    // dilate(img_bin_thre,img_bin_thre,element);
    // namedWindow("dilated",CV_WINDOW_NORMAL);
    // imshow("dilated",img_bin_thre);
    // erode(img_bin_thre,img_bin_thre,element);
    // namedWindow("erode",CV_WINDOW_NORMAL);
    // imshow("erode",img_bin_thre);
    // find contours; this must use the binarised image
    vector<Vec4i> hierarchy;
    vector< vector<Point> >contours;
    findContours(img_bin_thre,contours,hierarchy,CV_RETR_CCOMP,CV_CHAIN_APPROX_SIMPLE,Point(0,0));
    // please change min and max area values based on reality
    int min_area = 100000;
    int max_area = 300000;
    Rect mRect;
    int tempArea;
    // color used for drawing the contours
    Scalar color = Scalar(255,255,0);
    Mat drawing = Mat::zeros(img_bin_thre.size(),CV_8UC1);
    for(int i = 0;i < contours.size();i++)
    {
        // bounding rectangle of the contour
        mRect = boundingRect(contours[i]);
        // area of mRect
        tempArea = mRect.height * mRect.width;
        // for debug
        // printf("tempArea.height:%d\ttempArea.width:%d\ttempArea.area=%d\n",mRect.height,mRect.width,tempArea);
        // keep rectangles with plate-like aspect ratio (2..4) and area
        if(((double)mRect.width/(double)mRect.height) > 2.0 && (tempArea > min_area) && ((double)mRect.width/(double)mRect.height < 4) && (tempArea < max_area))
        // draw contours
        {
            drawContours(drawing,contours,i,color,2,8,hierarchy);
            // Crop the candidate from both the thresholded image and the
            // original gray image; the gray crop is used after deskewing.
            // NOTE(review): if several contours match, only the last crop
            // survives — confirm that is intended.
            getRectSubPix(img_bin_thre,Size(mRect.width,mRect.height),Point(mRect.x+mRect.width/2,mRect.y+mRect.height/2),img_get_rect);
            getRectSubPix(img_gray,Size(mRect.width,mRect.height),Point(mRect.x+mRect.width/2,mRect.y+mRect.height/2),img_get_rect_new);
        }
    }
    if(img_get_rect.data == NULL)
    {
        printf("img_get rect is null\n");
        return -1;
    }
    if(img_get_rect_new.data == NULL)
    {
        printf("img_get_rect_new is null!\n");
        return -1;
    }
    // use HoughLinesP to find the dominant tilt of the plate crop
    vector<Vec4i> lines;
    // Mat color_dst;
    // img_lines = img_get_rect.clone();
    cvtColor(img_get_rect,img_lines,CV_GRAY2BGR);
    // detect lines in image img_get_rect
    HoughLinesP(img_get_rect,lines,1,CV_PI/180,200,200,10);
    // NOTE(review): lines.size() is size_t but is printed with %d.
    printf("lines.size()=%d\n",lines.size());
    int distance = 0;
    // int theta;
    double temp_slope = 0,slope;
    int res_x1,res_y1,res_x2,res_y2;
    // map-like vector for counting how often each slope occurs
    // vector <int,int> ivect;//first is the number of this line , next is the longest distance
    // map <double,ivect> imap;
    int delta_x,delta_y;
    std::vector <dou_int> ivec;
    std::vector <dou_int>::iterator iter;
    for(size_t i = 0;i < lines.size();i++)
    {
        Vec4i l = lines[i];
        line(img_lines,Point(l[0],l[1]),Point(l[2],l[3]),Scalar(0,0,255),3);
        // skip exactly vertical segments (undefined slope)
        if(l[2]-l[0] == 0)
            ;
        else
        {
            // slope of this segment: delta_y / delta_x
            delta_y = (l[3]-l[1]);
            delta_x = (l[2]-l[0]);
            distance = delta_y*delta_y+delta_x*delta_x; // squared length
            temp_slope = ((double)delta_y)/((double)(delta_x));
            printf("in i=%d,delta_y=%d,delta_x=%d\n",i,delta_y,delta_x);
            // bucket segments by slope (tolerance 0.01); each bucket keeps the
            // count and the longest representative segment
            for(iter = ivec.begin();iter != ivec.end();iter++)
            {
                // same slope bucket: count it and update the longest segment
                if(abs(iter->slope - temp_slope) < (double)0.01)
                {
                    iter->num++;
                    if(iter->maxlength < distance)
                    {
                        iter->maxlength = distance;
                        iter->v0 = Point(l[0],l[1]);
                        iter->v1 = Point(l[2],l[3]);
                    }
                    break;
                }
            }
            // slope not seen before: start a new bucket
            if(iter == ivec.end())
            {
                ivec.push_back(dou_int(temp_slope,distance,1,Point(l[0],l[1]),Point(l[2],l[3])));
            }
        }
    }
    // pick the bucket with the most segments: the dominant direction
    int max = 0;
    int j = 0;
    int index = 0;
    dou_int res;
    for(j=0,iter = ivec.begin();iter != ivec.end();j++,iter++)
    {
        if(iter->num > max)
        {
            max = iter->num;
            index = j;
        }
    }
    printf("index is %d\n",index);
    for(j=0,iter = ivec.begin();iter != ivec.end() && j <= index;j++,iter++)
    {
        if(j == index)
        {
            res = dou_int(iter->slope,iter->maxlength,iter->num,iter->v0,iter->v1);
            printf("slope is %f\n",iter->slope);
            break;
        }
    }
    // draw the dominant (tilt) line
    line(img_lines,res.v0,res.v1,Scalar(255,255,0),1);
    // rotate both crops so the dominant line becomes horizontal
    Mat img_lines_out;
    Point center = Point(img_lines.cols/2,img_lines.rows/2);
    double angle =(double)(180/CV_PI)*(double)atan(res.slope);
    printf("angle is :%f\n",angle);
    Mat rot_mat = getRotationMatrix2D(center,angle,1.0);
    warpAffine(img_lines,img_lines_out,rot_mat,img_lines.size());
    Mat img_rect;
    warpAffine(img_get_rect_new,img_rect,rot_mat,img_get_rect_new.size());
    cvtColor(img_lines_out,img_lines_out,CV_BGR2GRAY);
    printf("img_clip's channel is:%d\n",img_lines_out.channels());
    threshold(img_lines_out,img_lines_out,10,255,THRESH_BINARY | THRESH_OTSU);
    // clip the top/bottom borders of the deskewed plate
    Mat img_clip;
    int up,down;
    if(-1 != remove_Border_Vertical(img_lines_out,up,down))
    {
        printf("up=%d,down=%d\n",up,down);
        getRectSubPix(img_lines_out,Size(img_lines_out.cols,down-up),Point(img_lines_out.cols/2,up+(down-up)/2),img_clip);
        namedWindow("line_clip",CV_WINDOW_NORMAL);
        imshow("line_clip",img_clip);
        getRectSubPix(img_rect,Size(img_rect.cols,down-up),Point(img_rect.cols/2,up+(down-up)/2),img_clip);
        namedWindow("new_clip",CV_WINDOW_NORMAL);
        imshow("new_clip",img_clip);
    }
    // binarise (OTSU) and hand off to character segmentation
    threshold(img_clip,img_clip,10,255,THRESH_BINARY | THRESH_OTSU);
    namedWindow("newrect",CV_WINDOW_NORMAL);
    imshow("newrect",img_clip);
    parting_char(img_clip);
    waitKey(0);
    return 0;
}
void FieldLineDetector::findTransformation(cv::Mat& src, cv::Mat& imgDst, std::vector<cv::Point2f>& modelBots, cv::Mat& H) { this->botPosField = modelBots; Mat imgBw; blur(src, imgBw, Size(5, 5)); cvtColor(imgBw, imgBw, CV_BGR2GRAY); Mat imgEdges; Canny(imgBw, imgEdges, 50, 100, 3); // imshow("bw", imgBw); // imshow("edges", imgEdges); std::vector<cv::Vec4i> lines; HoughLinesP(imgEdges, lines, 1, CV_PI / 180, min_threshold + p_trackbar, minLineLength, maxLineGap); // Expand the lines little bit (by scaleFactor) for (int i = 0; i < lines.size(); i++) { cv::Vec4i v = lines[i]; cv::Point2f p1 = Point2f(v[0], v[1]); cv::Point2f p2 = Point2f(v[2], v[3]); cv::Point2f p1p2 = p2 - p1; float length = norm(p1p2); cv::Point2f scaleP2 = p2 + p1p2 * (scaleFactor / 10.0f); cv::Point2f scaleP1 = p1 - p1p2 * (scaleFactor / 10.0f); lines[i][0] = scaleP1.x; lines[i][1] = scaleP1.y; lines[i][2] = scaleP2.x; lines[i][3] = scaleP2.y; } createThresholdedImg(src); // do line detection! detectCorners(lines); filterCorners(); findNeighbors(lines); findCornerMapping(mappedEdges); for (int i = 0; i < mappedEdges.size(); i++) { cout << (*mappedEdges[i]) << endl; } findFieldMatch(mappedEdges, H); if (imgDst.cols > 0) { // Draw lines for (int i = 0; i < lines.size(); i++) { cv::Vec4i v = lines[i]; cv::line(imgDst, cv::Point(v[0], v[1]), cv::Point(v[2], v[3]), cv::Scalar(0, 255, 0), 2); } // draw corners for (int i = 0; i < cornerBuffer.size(); i++) { cv::circle(imgDst, cornerBuffer.at(i), 1, cv::Scalar(255, 0, 0), 2); } // draw filtered corners for (int i = 0; i < detectedCorners.size(); i++) { circle(imgDst, detectedCorners[i]->point, (int) 20, Scalar(0, 255, 255), 1); } // draw detected corner coordinates for (int i = 0; i < detectedCorners.size(); i++) { stringstream ss; ss << detectedCorners[i]->point; putText(imgDst, ss.str(), detectedCorners[i]->point + Point2f(0, 10), FONT_HERSHEY_PLAIN, 1, Scalar(250, 0, 0)); } } }
// Demo routine: despite its name it currently runs *probabilistic Hough line*
// detection on the blurred grayscale input and draws the segments in red on
// the input image; the standard-Hough and HoughCircles variants are kept
// below, commented out. Shows the result and the Canny output in two windows
// and blocks on waitKey(0).
void HoughTransformClass::findCircles(cv::Mat inputSource)
{
    cv::Mat inputSource_gray;// = inputSource.clone();
    cv::Mat cannyOutput;
    if( !inputSource.data )
    {
        // Nothing to do on an empty image.
        return;
    }
    std::cout << inputSource.rows << "::" << inputSource.cols << std::endl;
    // NOTE(review): inputSource_gray is still empty here, so this prints
    // "0::0"; it was probably meant to run after the cvtColor below.
    std::cout << inputSource_gray.rows << "::" << inputSource_gray.cols << std::endl;
    /// Convert it to gray
    cv::cvtColor(inputSource, inputSource_gray, CV_BGR2GRAY );
    cv::Canny(inputSource, cannyOutput, 100, 200, 3);
    //cv::blur(inputFrame, cannyOutput, cv::Size(3,3));
    /// Reduce the noise so we avoid false circle detection
    cv::GaussianBlur( inputSource_gray, inputSource_gray, cv::Size(9, 9), 2,2 );
    cv::GaussianBlur( cannyOutput, cannyOutput, cv::Size(9, 9), 5,5 );
    // Probabilistic Hough on the blurred gray image; draw each segment.
    std::vector<cv::Vec4i> lines;
    HoughLinesP(inputSource_gray, lines, 1, CV_PI/180, 100, 50, 10 );
    for( size_t i = 0; i < lines.size(); i++ )
    {
        cv::Vec4i l = lines[i];
        line( inputSource, cv::Point(l[0], l[1]), cv::Point(l[2], l[3]), cv::Scalar(0,0,255), 3, CV_AA);
    }
    // Alternative: standard Hough transform (kept for experimentation).
    // std::vector<cv::Vec2f> lines;
    // HoughLines(inputSource_gray, lines, 1, CV_PI/180, 100, 0, 0 );
    // for( size_t i = 0; i < lines.size(); i++ )
    // {
    //     float rho = lines[i][0], theta = lines[i][1];
    //     cv::Point pt1, pt2;
    //     double a = cos(theta), b = sin(theta);
    //     double x0 = a*rho, y0 = b*rho;
    //     pt1.x = cvRound(x0 + 1000*(-b));
    //     pt1.y = cvRound(y0 + 1000*(a));
    //     pt2.x = cvRound(x0 - 1000*(-b));
    //     pt2.y = cvRound(y0 - 1000*(a));
    //     line( inputSource, pt1, pt2, cv::Scalar(0,0,255), 3, CV_AA);
    // }
    // Alternative: circle detection (kept for experimentation).
    // std::vector<cv::Vec3f> circles;
    // /// Apply the Hough Transform to find the circles
    // HoughCircles( cannyOutput, circles, CV_HOUGH_GRADIENT, 1, inputSource_gray.rows/8, 30,50, 0, 0 );
    // /// Draw the circles detected
    // for( size_t i = 0; i < circles.size(); i++ )
    // {
    //     cv::Point center(cvRound(circles[i][0]), cvRound(circles[i][1]));
    //     int radius = cvRound(circles[i][2]);
    //     // circle center
    //     circle( inputSource, center, 3, cv::Scalar(0,255,0), -1, 8, 0 );
    //     // circle outline
    //     circle( inputSource, center, radius, cv::Scalar(0,0,255), 3, 8, 0 );
    // }
    /// Show your results
    cv::namedWindow( "Hough Circle Transform Demo", CV_WINDOW_AUTOSIZE );
    cv::imshow( "Hough Circle Transform Demo", inputSource );
    cv::namedWindow( "canny Circle Transform Demo", CV_WINDOW_AUTOSIZE );
    cv::imshow( "canny Circle Transform Demo", cannyOutput );
    cv::waitKey(0);
    return ;
}
void StairDetection::Probabilistic_Hough(cv::InputArray src, cv::OutputArray output) {
    // Thin wrapper around OpenCV's probabilistic Hough transform using the
    // detector's configured parameters: 2 px rho resolution, 0.25 degree
    // theta resolution, and the tunable threshold / min-length / max-gap
    // members.
    const double rhoResolution = 2;
    const double thetaResolution = CV_PI / 720.0 * 1.0;
    const int voteThreshold = min_Houghthreshold + houghThreshold;
    HoughLinesP(src, output, rhoResolution, thetaResolution, voteThreshold, min_HoughLinelength, min_HoughLinegap);
}
// Detects straight lines in src and draws them (red, 3 px wide) onto a copy
// of the frame, which is returned.
//   mode == 0: standard Hough transform; each detected line is reduced to a
//              y = Mx + B equation and recorded in the linesEq member, with a
//              similarity check so near-duplicate lines are stored and drawn
//              only once.
//   mode != 0: probabilistic Hough transform; all segments are drawn and
//              nothing is recorded in linesEq.
cv::Mat LineFilter::filter(cv::Mat src, int mode){
    cv::Mat dst;
    //cv::Mat cdst = cv::Mat(src.clone());
    cv::Mat cdst = src.clone();
    Canny(src, dst, 50, 200, 3);
    cvtColor(dst, cdst, CV_GRAY2BGR);
    if (mode == 0){
        std::vector<cv::Vec2f> lines;
        //detects lines
        HoughLines(dst, lines, 1, CV_PI/180, 100, 0, 0 );
        linesEq.clear();
        float x1 = 0, x2 = 0, y1 = 0, y2 = 0;
        //draws the lines detected
        //println("-----------\n");
        for( size_t i = 0; i < lines.size(); i++ ){
            // Convert (rho, theta) into two far-apart points on the line.
            float rho = lines[i][0], theta = lines[i][1];
            cv::Point pt1, pt2;
            double a = cos(theta), b = sin(theta);
            double x0 = a*rho, y0 = b*rho;
            pt1.x = cvRound(x0 + 1000*(-b));
            pt1.y = cvRound(y0 + 1000*(a));
            pt2.x = cvRound(x0 - 1000*(-b));
            pt2.y = cvRound(y0 - 1000*(a));
            x1 = pt1.x;
            y1 = pt1.y;
            x2 = pt2.x;
            y2 = pt2.y;
            //equation of line
            std::vector<float> eq;
            //y = mx+b
            //B MIGHT BE USELESS, NEED FURTHER TESTING
            bool safeMath = true;
            float M = 0, B = 0;
            // NOTE(review): x2-x1 is not abs()'d here — negative differences
            // also take the "vertical" branch; confirm that is intended.
            if (x2-x1 < 5){ //straight (vertical) line
                safeMath = false;
                M = INFINITY;
                B = INFINITY;
            }
            if (safeMath){ //avoid div by 0 error
                M = (y2-y1) / (x2-x1);
                B = y2 - M*x2;
            }
            bool repeat = false;
            //check if there is a similar line already
            // (note: the loop variable shadows the outer `lines` vector; each
            // stored eq is [M, B] plus optionally a representative x or y)
            for (std::vector<float> lines: linesEq){
                //vert line situations
                if (M == INFINITY && lines[0] == INFINITY){
                    //check their x values
                    if (std::abs(lines[2] - ((x1+x2)/2)) < maxDiff){
                        repeat = true;
                        break;
                    }
                }
                //check if m is almost vertical
                else if (std::abs(M) > maxSlope && lines[0] == INFINITY){
                    //std::cout<<"almost vert ";
                    //std::cout<<std::abs(lines[2] - ((x1+x2)/2))<<std::endl;
                    if (std::abs(lines[2] - ((x1+x2)/2) ) < maxDiff){
                        repeat = true;
                        break;
                    }
                }
                else if (M == INFINITY && std::abs(lines[0])> maxSlope){
                    //std::cout<<"almost vert II ";
                    //std::cout<<std::abs(lines[2] - ((x1+x2)/2))<<std::endl;
                    if (std::abs(lines[2] - ((x1+x2)/2) ) < maxDiff){
                        repeat = true;
                        break;
                    }
                }
                //check if m is too similar or not, b is too different to check
                else if (std::abs(lines[0] - M) < maxDiff){
                    if (M > 15){ //vertical lines
                        //check if the intersection point is near the average x
                        if (std::abs((B-lines[1])/(lines[0]-M))-(x1+x2)/2 < maxDiff){
                            repeat = true;
                            break;
                        }
                    }else{ //horziontal lines
                        //print("horz ");
                        //println((y1+y2)/2);
                        // Similar slopes: treat as duplicate when the
                        // intersection point lies inside the frame.
                        float x = (B-lines[1])/(lines[0]-M);
                        float y = x * M + B;
                        if (x < cdst.size().width && y < cdst.size().height){
                            repeat = true;
                            break;
                        }
                    }
                }
            }
            if (!repeat){
                // Record [M, B] (plus a representative coordinate for
                // near-horizontal / near-vertical lines) and draw the line.
                eq.push_back(M);
                eq.push_back(B);
                //print(M); //print(" "); //print(B); //print(" "); //print((x1+x2)/2);
                if (std::abs(M) < 0.5){ //aprox horizontal line
                    eq.push_back(y2); //give it the y value
                    print(y2);
                    print(" horz line");
                }
                if (std::abs(M) > maxSlope){ //vertical line
                    eq.push_back(x2); //x value
                    print(x2);
                    print(" vertal line");
                }
                //println("");
                linesEq.push_back(eq);
                line(cdst, pt1, pt2, cv::Scalar(0,0,255), 3, CV_AA); //drawing the line
            }
        }
    }
    else{
        // Probabilistic Hough: draw every detected segment, no bookkeeping.
        std::vector<cv::Vec4i> lines;
        HoughLinesP(dst, lines, 1, CV_PI/180, 50, 50, 10 );
        for( size_t i = 0; i < lines.size(); i++ ){
            cv::Vec4i l = lines[i];
            line(cdst, cv::Point(l[0], l[1]), cv::Point(l[2], l[3]), cv::Scalar(0,0,255), 3, CV_AA);
        }
    }
    return cdst;
}
int main (int argc, const char * argv[]) { clock_t start = clock(); const char* filename = argc >= 2 ? argv[1] : "images/road3.png"; cout << "running opencv with " << filename << endl; // create image matrix // loading image in non-grayscale causes an error Mat src = imread(filename, IMREAD_GRAYSCALE); if (src.empty()) { //help(); cout << "cannot open " << filename << endl; return -1; } // beginning time: clock_t canny_start = clock(); // create destination matrix Mat dst, cdst; // use Canny for edge-detection // source, destinaton, threshold1, threshold2, aperturesize=3, L2gradient=false blur(dst, dst, Size(3,3)); Canny(src, dst, CANNY_T1, CANNY_T2, CANNY_APERTURE); cvtColor(dst, cdst, COLOR_GRAY2RGB); // time of canny clock_t canny_end = clock(); double canny_time = (double)(canny_end-canny_start)/CLOCKS_PER_SEC; // ================ PROBABILISTIC HOUGH LINE TRANSFORM ================== // creates line segments // dst: edge-detector output (should be grayscale) // lines: vector to store lines found; // rho: resolution of parameter r in pixels (using 1) // theta: resolution of parameter theta in radians (using 1 degree) // threshold: The minimum number of intersections to “detect” a line // minLinLength: The minimum number of points that can form a line. Lines with less than this number of points are disregarded. // maxLineGap: The maximum gap between two points to be considered in the same line. 
clock_t hough_start = clock(); vector<Vec4i> lines; HoughLinesP(dst, lines, 1, CV_PI/180, HLINES_THRESH, HLINES_MINLINE, HLINES_MINGAP); // filter out horizontal lines remove_horizontal(&lines); remove_skylines(&lines, dst.rows); // time of HoughLinesP() clock_t hough_end = clock(); double hough_time = (double)(hough_end-hough_start)/CLOCKS_PER_SEC; // -------------------------- clock_t lines_start = clock(); vector<Vec4i> lane_lines = combine_lines(lines); lane_lines = extend_lines(lane_lines, dst.cols, dst.rows); // time of lane_lines, extend_lines() clock_t lines_end = clock(); double lines_time = (double)(lines_end-lines_start)/CLOCKS_PER_SEC; // -=-=-=-=-=-=-=-=-=-=-=-=- DEBUGGING -=-=-=-=-=-=-=-=-=-=-=-=- //cout << "size of lines: " << lines.size() << endl; //cout << "size of lane_lines: " << lane_lines.size() << endl; //cout << "width: " << dst.cols << " height: " << dst.rows << endl; //line(cdst, Point(0,0), Point(100,100), Scalar(255,255,255), 2, CV_AA); // -=-=-=-=-=-=-=-=-=-=-=-=- DEBUGGING -=-=-=-=-=-=-=-=-=-=-=-=- clock_t draw_start = clock(); // display result: for( size_t i = 0; i < lines.size(); i++ ) { Vec4i l = lines[i]; //line( cdst, Point(l[X1], l[Y1]), Point(l[X2], l[Y2]), Scalar(255,0,0), 1, CV_AA); // -=-=-=-=-=-=-=-=-=-=-=-=- DEBUGGING -=-=-=-=-=-=-=-=-=-=-=-=- //cout << i << " (" << l[X1] << "," << l[Y1] << ") \t(" << l[X2] << "," << l[Y2] << ")" << endl; // -=-=-=-=-=-=-=-=-=-=-=-=- DEBUGGING -=-=-=-=-=-=-=-=-=-=-=-=- } //cout << "------" << endl; // display "lane lines" for( size_t i = 0; i < lane_lines.size(); i++ ) { Vec4i l = lane_lines[i]; line( cdst, Point(l[X1], l[Y1]), Point(l[X2], l[Y2]), Scalar(0,255,255), 2, LINE_AA); // -=-=-=-=-=-=-=-=-=-=-=-=- DEBUGGING -=-=-=-=-=-=-=-=-=-=-=-=- //cout << i << " (" << l[X1] << "," << l[Y1] << ") \t(" << l[X2] << "," << l[Y2] << ")" << endl; // -=-=-=-=-=-=-=-=-=-=-=-=- DEBUGGING -=-=-=-=-=-=-=-=-=-=-=-=- } cout << endl; // depending on # of lines, draw either one or two lanes if 
(lane_lines.size() > 2) cdst = draw_2lanes(cdst, lane_lines); else cdst = draw_1lane(cdst, lane_lines); // time for drawing lines clock_t draw_end = clock(); double draw_time = (double)(draw_end-draw_start)/CLOCKS_PER_SEC; // -------------------------- clock_t image_start = clock(); // create output image: .png file vector<int> compression_params; compression_params.push_back(IMWRITE_PNG_COMPRESSION); compression_params.push_back(9); // 0-9 for png quality imwrite("images/output.png", cdst, compression_params); // time for generating the image and total time clock_t end = clock(); double image_time = (double)(end-image_start)/CLOCKS_PER_SEC; double total_time = (double)(end-start)/CLOCKS_PER_SEC; // -------------------------- // display time results: cout << "canny time: " << canny_time << " s" << endl; cout << "hough time: " << hough_time << " s" << endl; cout << "lines time: " << lines_time << " s" << endl; cout << "draw time: " << draw_time << " s" << endl; cout << "img time: " << image_time << " s" << endl; cout << "TOTAL TIME: " << total_time << " s" << endl; cout << "\ndone" << endl; return 0; }
/**
 * Detects the flattest line passing (approximately) through the image centre.
 *
 * Runs a probabilistic Hough transform on `img` (Canny is currently disabled;
 * the input is assumed to be edge-like already), keeps lines whose gradient is
 * below MAX_ANGLE, pairs up lines with matching gradients, and accepts a pair
 * when a segment joining their endpoints both matches the gradient and passes
 * within CIRC_RADIUS pixels of the image centre.
 *
 * @param img single-channel edge image.
 * @return the gradient with the smallest magnitude among accepted lines;
 *         -infinity if lines were found but none passed the centre test;
 *         +infinity if no suitable lines were found at all.
 */
static double doLineDetection(cv::Mat img)
{
    const float MAX_ANGLE = 1.5;
    const float GRADIENT_MATCH_ERROR = 0.3; // radians of tolerated gradient mismatch
    const float CIRC_RADIUS = 1;            // px: max distance from the centre
    const int CANNY_KERNEL_SIZE = 3;

    // apply a Canny filter so we get edges of lines (currently disabled)
    cv::Mat cannyImg = img;
    //cv::Canny(img, cannyImg, 50, 200, CANNY_KERNEL_SIZE);
#ifdef DEBUG
    cv::imshow("canny", cannyImg);
#endif

    std::vector<cv::Vec4i> lines;
    HoughLinesP(cannyImg, lines, RESOLUTION_PX, RESOLUTION_DEG, MIN_THRESHOLD,
                MIN_LINE_LENGTH, MAX_LINE_GAP);

#ifdef DEBUG
    cv::Mat debugImg;
    cv::cvtColor(cannyImg, debugImg, CV_GRAY2RGB);
    // draw all the lines and display
    for (size_t i = 0; i < lines.size(); i++) {
        cv::Vec4i l = lines[i];
        cv::line(debugImg, cv::Point(l[0], l[1]), cv::Point(l[2], l[3]),
                 cv::Scalar(0,0,255), 3, CV_AA);
    }
    unsigned char lineColour = 0;
#endif

    // keep lines whose gradient (y1-y2)/(x1-x2) is flat enough
    std::vector<double> angles;
    std::vector<cv::Vec4i> goodLines;
    for (size_t i = 0; i < lines.size(); i++) {
        const cv::Vec4i l = lines[i];
        double angle = gradient(l);
#ifdef DEBUG
        ROS_INFO("%f gradient", angle);
#endif
        if (fabs(angle) < MAX_ANGLE) {
            angles.push_back(angle);
            goodLines.push_back(l);
#ifdef DEBUG
            lineColour += 5;
            cv::line(debugImg, cv::Point(l[0], l[1]), cv::Point(l[2], l[3]),
                     cv::Scalar(lineColour,255,0), 3, CV_AA);
#endif
        }
    }
#ifdef DEBUG
    cv::imshow("lines", debugImg);
    ROS_INFO("Next img");
#endif

    if (angles.size() > 0) {
        std::vector<double> chosenAngles;
        for (int i = 0; i < angles.size(); ++i) {
            for (int j = 0; j < angles.size(); ++j) {
                if (fabs(angles[i] - angles[j]) < GRADIENT_MATCH_ERROR) {
                    bool found = false;
                    cv::Vec4i goodLine;
                    // Four possible endpoint pairings; take the one whose
                    // gradient matches (if it exists).
                    // BUGFIX(review): the outer k loop previously kept running
                    // after a match; stop as soon as one is found.
                    for (int k = 0; k < 2 && !found; ++k) {
                        for (int m = 0; m < 2; ++m) {
                            cv::Vec4i l1(goodLines[i][k*2], goodLines[i][(k*2)+1],
                                         goodLines[j][m*2], goodLines[j][(m*2)+1]);
                            if (fabs(angles[i] - gradient(l1)) < GRADIENT_MATCH_ERROR) {
                                found = true;
                                goodLine = l1;
                                break;
                            }
                        }
                    }
                    if (!found) {
                        ROS_INFO("Line not valid");
                    } else {
                        // check if the line passes through the centre (ish)
                        // using the shortest point-to-line distance formula:
                        // http://math.stackexchange.com/questions/275529/check-if-line-intersects-with-circles-perimeter
                        const int centerY = cannyImg.size().height/2;
                        // BUGFIX(review): centerX previously used height/2 —
                        // a copy-paste slip that was wrong for non-square images.
                        const int centerX = cannyImg.size().width/2;
                        const double numerator = fabs(
                            (goodLine[2] - goodLine[0])*centerX +
                            (goodLine[1] - goodLine[3])*centerY +
                            (goodLine[0] - goodLine[2])*goodLine[1] +
                            (goodLine[3] - goodLine[1])*goodLine[0]);
                        const double denominator = sqrt(
                            pow(goodLine[2] - goodLine[0], 2) +
                            pow(goodLine[1] - goodLine[3], 2));
                        // BUGFIX(review): was integer division (truncating the
                        // distance, loosening the radius test) with no guard
                        // against zero-length degenerate lines.
                        if (denominator > 0 && (numerator/denominator) <= CIRC_RADIUS) {
                            chosenAngles.push_back(gradient(goodLine));
                            ROS_INFO("Good line found m: %f, x1: %d, y1 %d, x2 %d, y2 %d",
                                     angles[i], goodLine[0], goodLine[1], goodLine[2], goodLine[3]);
#ifdef DEBUG
                            cv::line(debugImg, cv::Point(goodLine[0], goodLine[1]),
                                     cv::Point(goodLine[2], goodLine[3]),
                                     cv::Scalar(255,0,0), 3, CV_AA);
#endif
                        }
                    }
                }
#ifdef DEBUG
                cv::imshow("good lines", debugImg);
#endif
            }
        }
#ifdef DEBUG_WAIT
        cv::waitKey();
#endif
        if (chosenAngles.size() > 0) {
            // return the flattest (smallest-magnitude) accepted gradient
            double minGradient = chosenAngles[0];
            for (int i = 0; i < chosenAngles.size(); ++i) {
                if (fabs(minGradient) > fabs(chosenAngles[i])) {
                    minGradient = chosenAngles[i];
                }
            }
            return minGradient;
        } else {
            ROS_ERROR("No Good lines found");
            return std::numeric_limits<double>::infinity() * -1;
        }
    } else {
#ifdef DEBUG_WAIT
        cv::waitKey();
#endif
        ROS_ERROR("No lines found");
        return std::numeric_limits<double>::infinity();
    }
}
/**
 * Detects problems with the pull ring (TAB) of the derailment automatic
 * braking device across the four camera views in m_srcImageArray.
 *
 * For each view, a template-matching search locates the pull ring. When
 * `statues == 1`, a Canny + probabilistic-Hough check on the matched region
 * looks at the inclination of the widest segment: above 30 degrees the TAB is
 * reported as fallen off (TAB_FALLOFF). When `statues == 0` and fewer than two
 * views matched, the missing views are reported as TAB_LOST.
 *
 * @param errorInfoArray  output: one SErrorInfoArray appended per error found.
 * @param resultImageArray images that get error rectangles drawn on them.
 * @param statues         0 = presence check (TAB_LOST), 1 = angle check (TAB_FALLOFF).
 */
void CAutomaticBreakingDeviceTABFallOffDetector::DetectError(vector<SErrorInfoArray>& errorInfoArray, vector<Mat>& resultImageArray, /*vector<PositionOfComponents>& positionOfComponents, */int statues /*= 0*/)
{
    // Unify the carriage orientation: mirror views 2 and 3 so all four views
    // face the same way (flipped back at the end of this function).
    flip(m_srcImageArray[2], m_srcImageArray[2], 1);
    flip(m_srcImageArray[3], m_srcImageArray[3], 1);
    flip(resultImageArray[m_interstedIndex[2]], resultImageArray[m_interstedIndex[2]], 1);
    flip(resultImageArray[m_interstedIndex[3]], resultImageArray[m_interstedIndex[3]], 1);
    int count = 0;            // number of derailment-device pull rings found
    int flag[4] = {0,0,0,0};  // per view: 1 if a pull ring was matched
    for (int i = 0; i <4; ++i)
    {
        // Search ROI: left quarter (+60px) of the middle three fifths of the view.
        Rect roiRect;
        roiRect.x = 0;
        roiRect.y = m_srcImageArray[i].rows/5;
        roiRect.width = m_srcImageArray[i].cols/4+60;
        roiRect.height = m_srcImageArray[i].rows/5*3;
        float maxMatchVal = -1.0;
        Point matchPoint;
        int templateIndex = 0;
        // Try every template and keep the best normalized-correlation match.
        for (int j = 0; j != m_templateImageArray.size(); ++j)
        {
            Mat compareResult; // template-match result matrix
            // ROI in which to search for the live lever
            Mat searchTruckLiveLever_ROI;
            if (!IsValidRectInMat(m_srcImageArray[i], roiRect))
            {
                continue;
            }
            searchTruckLiveLever_ROI = m_srcImageArray[i](roiRect);
            compareResult.create(searchTruckLiveLever_ROI.rows - m_templateImageArray[j].rows + 1, searchTruckLiveLever_ROI.cols - m_templateImageArray[j].cols + 1, CV_32FC1);
            matchTemplate(searchTruckLiveLever_ROI, m_templateImageArray[j], compareResult, CV_TM_CCOEFF_NORMED); // template matching
            double minVal,maxVal; // min and max values in the result matrix
            Point minLoc;         // location of the minimum
            Point matchLoc;       // location of the best match
            minMaxLoc(compareResult, &minVal, &maxVal, &minLoc, &matchLoc, cv::Mat()); // find the best-matching point
            if (maxVal > maxMatchVal)
            {
                // remember best score and its position in full-image coordinates
                maxMatchVal = maxVal;
                matchPoint.x = matchLoc.x + roiRect.x;
                matchPoint.y =roiRect.y+matchLoc.y;
                templateIndex = j;
            }
        }
        // Treat as matched if the best score clears the threshold.
        // NOTE(review): the original comment said "greater than 0.6" but the
        // code uses 0.45 — confirm which threshold is intended.
        if (maxMatchVal >= 0.45)
        {
            ++count;
            flag[i] = 1;
            if (statues == 1)
            {
                // Crop the approximate location of the clip bolt.
                Mat ROI= m_srcImageArray[i](cv::Rect(matchPoint.x, matchPoint.y, m_templateImageArray[templateIndex].cols, m_templateImageArray[templateIndex].rows));
                Mat ROICanny= m_srcImageArray[i](cv::Rect(matchPoint.x, matchPoint.y, m_templateImageArray[templateIndex].cols, m_templateImageArray[templateIndex].rows)).clone();
                Canny(ROICanny, ROICanny,20, 120);
                vector<Vec4i> lines;
                HoughLinesP(ROICanny,lines,1,CV_PI/180,10,30, 150);
                std::vector<cv::Vec4i>::const_iterator it= lines.begin();
                if (lines.size()==0)
                {
                }
                else
                {
                    // Find the segment with the widest horizontal extent and
                    // compute its inclination angle in degrees.
                    double tanb,tanc,tanResult,tanResult2,tanResult3;
                    tanc=0;
                    tanb=0;
                    while (it!=lines.end())
                    {
                        cv::Point pt1((*it)[0]+matchPoint.x,(*it)[1]+matchPoint.y);
                        cv::Point pt2((*it)[2]+matchPoint.x,(*it)[3]+matchPoint.y);
                        double m_i=(*it)[0]-(*it)[2];  // horizontal extent (signed)
                        double m_i2=abs(m_i);
                        if(m_i2>tanc){
                            tanc=m_i2;                     // widest |dx| so far
                            tanb=abs((*it)[1]-(*it)[3]);   // its |dy|
                        }
                        ++it;
                    }
                    tanResult=tanb/tanc;
                    tanResult2 = atan(tanResult);
                    tanResult3=tanResult2*180.0/3.141592654; // radians -> degrees
                    if(tanResult3<=30){
                        // shallow angle: TAB considered in place, no error
                    }
                    else
                    {
                        // Angle above 30 degrees: report the TAB as fallen off.
                        SErrorInfoArray sei;
                        sei.errorMask |= DEMO_ERROR_DERAILMENT_AUTOMATIC_BREAKING_DEVICE_TAB_FALLOFF;
                        sei.realErrorMask |= DEMO_ERROR_DERAILMENT_AUTOMATIC_BREAKING_DEVICE_TAB_FALLOFF;
                        sei.errorImageFile = m_srcImageFileArray[i];
                        // NOTE(review): every case below draws its rectangle on
                        // resultImageArray[m_interstedIndex[0]], even for views
                        // 1-3 — this looks like a copy-paste slip; confirm
                        // whether m_interstedIndex[i] was intended.
                        switch (i)
                        {
                        case 0:
                            sei.leftPos = 0;
                            sei.rightPos = 300;
                            sei.topPos = 500;
                            sei.bottomPos = 800;
                            sei.confidence = 61;
                            sei.errorImageIndex = m_interstedIndex[0];
                            rectangle(resultImageArray[m_interstedIndex[0]], Point(0,500),Point(300,800),Scalar(0,0,255),2);
                            break;
                        case 1:
                            sei.leftPos = 1100;
                            sei.rightPos = 1399;
                            sei.topPos = 500;
                            sei.bottomPos = 900;
                            sei.confidence = 67;
                            sei.errorImageIndex = m_interstedIndex[1];
                            rectangle(resultImageArray[m_interstedIndex[0]], Point(1100,500),Point(1399,900),Scalar(0,0,255),2);
                            break;
                        case 2:
                            sei.leftPos = 1100;
                            sei.rightPos = 1399;
                            sei.topPos = 500;
                            sei.bottomPos = 900;
                            sei.confidence = 62;
                            sei.errorImageIndex = m_interstedIndex[2];
                            rectangle(resultImageArray[m_interstedIndex[0]], Point(0,500),Point(300,800),Scalar(0,0,255),2);
                            break;
                        case 3:
                            sei.leftPos = 0;
                            sei.rightPos = 300;
                            sei.topPos = 500;
                            sei.bottomPos = 800;
                            sei.confidence = 63;
                            sei.errorImageIndex = m_interstedIndex[3];
                            rectangle(resultImageArray[m_interstedIndex[0]], Point(1100,500),Point(1399,900),Scalar(0,0,255),2);
                            break;
                        }
                        errorInfoArray.push_back(sei);
                        continue;
                    }
                }
            }
        }
    }
    // Presence check: fewer than two rings found means one is missing.
    if (count < 2 && statues == 0)
    {
        if (flag[0] == 1 || flag[2] == 1)
        {
            if (flag[0] == 0)
            {
                SErrorInfoArray sei;
                sei.errorMask |= DEMO_ERROR_DERAILMENT_AUTOMATIC_BREAKING_DEVICE_TAB_LOST;
                sei.realErrorMask |= DEMO_ERROR_DERAILMENT_AUTOMATIC_BREAKING_DEVICE_TAB_LOST;
                sei.errorImageFile = m_srcImageFileArray[0];
                sei.leftPos = 0;
                sei.rightPos = 300;
                sei.topPos = 500;
                sei.bottomPos = 800;
                sei.confidence = 61;
                sei.errorImageIndex = m_interstedIndex[0];
                rectangle(resultImageArray[m_interstedIndex[0]], Point(0,500),Point(300,800),Scalar(0,0,255),2);
                sei.errorImageIndex = m_interstedIndex[0];
                errorInfoArray.push_back(sei);
            }
            if (flag[2] == 0)
            {
                SErrorInfoArray sei;
                sei.errorMask |= DEMO_ERROR_DERAILMENT_AUTOMATIC_BREAKING_DEVICE_TAB_LOST;
                sei.realErrorMask |= DEMO_ERROR_DERAILMENT_AUTOMATIC_BREAKING_DEVICE_TAB_LOST;
                sei.errorImageFile = m_srcImageFileArray[1];
                sei.leftPos = 1100;
                sei.rightPos = 1399;
                sei.topPos = 500;
                sei.bottomPos = 900;
                // NOTE(review): errorImageIndex is assigned twice with
                // different values below — the second assignment wins; confirm
                // which index is intended.
                sei.errorImageIndex = m_interstedIndex[2];
                rectangle(resultImageArray[m_interstedIndex[0]], Point(0,500),Point(300,800),Scalar(0,0,255),2);
                sei.confidence = 67;
                sei.errorImageIndex = m_interstedIndex[1];
                errorInfoArray.push_back(sei);
            }
        }
        else
        {
            if (flag[1] == 0)
            {
                SErrorInfoArray sei;
                sei.errorMask |= DEMO_ERROR_DERAILMENT_AUTOMATIC_BREAKING_DEVICE_TAB_LOST;
                sei.realErrorMask |= DEMO_ERROR_DERAILMENT_AUTOMATIC_BREAKING_DEVICE_TAB_LOST;
                sei.errorImageFile = m_srcImageFileArray[2];
                sei.leftPos = 1100;
                sei.rightPos = 1399;
                sei.topPos = 500;
                sei.bottomPos = 900;
                // NOTE(review): double assignment of errorImageIndex here too
                // (the second one wins) — confirm intent.
                sei.errorImageIndex = m_interstedIndex[1];
                rectangle(resultImageArray[m_interstedIndex[0]], Point(1100,500),Point(1399,900),Scalar(0,0,255),2);
                sei.confidence = 62;
                sei.errorImageIndex = m_interstedIndex[2];
                errorInfoArray.push_back(sei);
            }
            if (flag[3] == 0)
            {
                SErrorInfoArray sei;
                sei.errorMask |= DEMO_ERROR_DERAILMENT_AUTOMATIC_BREAKING_DEVICE_TAB_LOST;
                sei.realErrorMask |= DEMO_ERROR_DERAILMENT_AUTOMATIC_BREAKING_DEVICE_TAB_LOST;
                sei.errorImageFile = m_srcImageFileArray[3];
                sei.leftPos = 0;
                sei.rightPos = 300;
                sei.topPos = 500;
                sei.bottomPos = 800;
                sei.errorImageIndex = m_interstedIndex[3];
                rectangle(resultImageArray[m_interstedIndex[0]], Point(1100,500),Point(1399,900),Scalar(0,0,255),2);
                sei.confidence = 63;
                sei.errorImageIndex = m_interstedIndex[3];
                errorInfoArray.push_back(sei);
            }
        }
    }
    // Flip views 2 and 3 back to their original orientation.
    flip(m_srcImageArray[2], m_srcImageArray[2], 1);
    flip(m_srcImageArray[3], m_srcImageArray[3], 1);
    flip(resultImageArray[m_interstedIndex[2]], resultImageArray[m_interstedIndex[2]], 1);
    flip(resultImageArray[m_interstedIndex[3]], resultImageArray[m_interstedIndex[3]], 1);
}
int main(int argc, char** argv) { int height ,width ,step ,channels; int same, lighter; float thresh; uchar *dataB, *dataG, *dataR, *dataGray, *dataD; uchar b1, g1, r1, b2, g2, r2; int w = 3; int th = 50; int idx1, idx2; cv::Mat img = cv::imread(argv[1]); height = img.rows; width = img.cols; cv::namedWindow("Image0", cv::WINDOW_NORMAL); cv::Mat textImg(1000, 1200, CV_8UC1, cv::Scalar(255)); cv::putText(textImg, "Original Image:", cv::Point(400, 500), cv::FONT_HERSHEY_SIMPLEX, 2, cv::Scalar(0, 0, 0)); //cv::imshow("Image0", textImg); //cv::waitKey(); //cv::imshow("Image0", img); //cv::waitKey(); textImg.setTo(cv::Scalar(255, 255, 255)); cv::putText(textImg, "Next: Apply SUSAN algorithm to detect edge and cross.", cv::Point(200, 500), cv::FONT_HERSHEY_SIMPLEX, 1, cv::Scalar(0, 255, 255)); cv::putText(textImg, "Press any key to continue...", cv::Point(400, 600), cv::FONT_HERSHEY_SIMPLEX, 1, cv::Scalar(0, 255, 255)); //cv::imshow("Image0", textImg); //cv::waitKey(); std::vector<cv::Mat> imgChannels; cv::split(img, imgChannels); cv::Mat dstSusan(height, width, CV_8UC1, cv::Scalar(0)); cv::Mat grayImg(height, width, CV_8UC1, cv::Scalar(0)); step = imgChannels[0].step[0]; dataB = imgChannels[0].data; dataG = imgChannels[1].data; dataR = imgChannels[2].data; dataGray = grayImg.data; dataD= dstSusan.data; for (int x = w; x < width-w; x++) { for (int y = w; y < height-w; y++) { same = 0; idx1 = x + y * step; b1 = dataB[idx1]; g1 = dataG[idx1]; r1 = dataR[idx1]; for (int u = 0; u < w+1; u++) { for (int v = 0; v < w+1; v++) { if (u + v == 0) { continue; } idx2 = (x+u) + (y+v) * step; b2 = dataB[idx2]; g2 = dataG[idx2]; r2 = dataR[idx2]; if (calc_dist(b1, g1, r1, b1, g2, r2) < th) { same += 1; } idx2 = (x-u) + (y+v) * step; b2 = dataB[idx2]; g2 = dataG[idx2]; r2 = dataR[idx2]; if (u != 0 && calc_dist(b1, g1, r1, b1, g2, r2) < th) { same += 1; } idx2 = (x+u) + (y-v) * step; b2 = dataB[idx2]; g2 = dataG[idx2]; r2 = dataR[idx2]; if (v != 0 && calc_dist(b1, g1, r1, b1, g2, r2) < 
th) { same += 1; } idx2 = (x-u) + (y-v) * step; b2 = dataB[idx2]; g2 = dataG[idx2]; r2 = dataR[idx2]; if (u != 0 && v != 0 && calc_dist(b1, g1, r1, b1, g2, r2) < th) { same += 1; } } } dataD[idx1] = uchar(255.0 * float(same) / ((2*w+1) * (2*w+1) - 1)); if (dataD[idx1] < 128) { dataD[idx1] = 255; } else { dataD[idx1] = 0; } } } //cv::imshow("Image0", dstSusan); cv::imwrite("outimg_1.jpg", dstSusan); textImg.setTo(cv::Scalar(255, 255, 255)); cv::putText(textImg, "Next: Apply Hough algorithm to detect lines.", cv::Point(300, 500), cv::FONT_HERSHEY_SIMPLEX, 1, cv::Scalar(0, 255, 255)); cv::putText(textImg, "Press any key to continue...", cv::Point(400, 600), cv::FONT_HERSHEY_SIMPLEX, 1, cv::Scalar(0, 255, 255)); //cv::waitKey(); //cv::imshow("Image0", textImg); //cv::waitKey(); //Hough line detection std::vector<cv::Vec4i> lines; HoughLinesP(dstSusan, lines, 1, CV_PI/180, 80, 500, 20); double thetaSum = 0.0; int thetaNum = 0; double theta; for(size_t i = 0; i < lines.size(); i++) { cv::Vec4i l = lines[i]; cv::line(img, cv::Point(l[0], l[1]), cv::Point(l[2], l[3]), cv::Scalar(186,88,255), 1, CV_AA); if (l[0] == l[2]) { theta = CV_PI / 2; } else { theta = std::atan(-double(l[3]-l[1]) / (l[2] - l[0])); } if (theta >= -CV_PI / 4 && theta <= CV_PI / 4) { thetaSum += theta; thetaNum += 1; } } theta = -thetaSum / thetaNum * 180 / CV_PI; //cv::imshow("Image0", img); cv::imwrite("outimg_2.jpg", img); //cv::waitKey(); textImg.setTo(cv::Scalar(255, 255, 255)); std::ostringstream textStr; textStr << "Find " << lines.size() << " lines."; cv::putText(textImg, textStr.str(), cv::Point(500, 400), cv::FONT_HERSHEY_SIMPLEX, 1, cv::Scalar(0, 255, 255)); textStr.str(std::string()); textStr.clear(); textStr << "Rotating angle is " << theta << " degree."; cv::putText(textImg, textStr.str(), cv::Point(350, 500), cv::FONT_HERSHEY_SIMPLEX, 1, cv::Scalar(0, 255, 255)); cv::putText(textImg, "Next: Rotating the image.", cv::Point(400, 600), cv::FONT_HERSHEY_SIMPLEX, 1, cv::Scalar(0, 255, 255)); 
cv::putText(textImg, "Press any key to continue...", cv::Point(400, 700), cv::FONT_HERSHEY_SIMPLEX, 1, cv::Scalar(0, 255, 255)); //cv::imshow("Image0", textImg); //cv::waitKey(); img.release(); img = cv::imread(argv[1]); imgChannels[0].release(); imgChannels[1].release(); imgChannels[2].release(); imgChannels.clear(); cv::Mat rotateImg(height, width, CV_8UC3); cv::Point2f center; center.x = float(width / 2.0 + 0.5); center.y = float(height / 2.0 + 0.5); cv::Mat affineMat = getRotationMatrix2D(center, theta, 1); cv::warpAffine(img,rotateImg, affineMat, cv::Size(width, height), CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS); //cv::imshow("Image0", rotateImg); cv::imwrite("outimg_3.jpg", rotateImg); //cv::waitKey(); textImg.setTo(cv::Scalar(255, 255, 255)); cv::putText(textImg, "Next: Transform the image to gray scale.", cv::Point(300, 500), cv::FONT_HERSHEY_SIMPLEX, 1, cv::Scalar(0, 255, 255)); cv::putText(textImg, "Press any key to continue...", cv::Point(400, 600), cv::FONT_HERSHEY_SIMPLEX, 1, cv::Scalar(0, 255, 255)); //cv::imshow("Image0", textImg); //cv::waitKey(); cv::split(rotateImg, imgChannels); dataB = imgChannels[0].data; dataG = imgChannels[1].data; dataR = imgChannels[2].data; step = imgChannels[0].step[0]; //imgChannels[2].setTo(cv::Scalar(0)); for (int x = 0; x < rotateImg.cols; x++) { for (int y = 0; y < rotateImg.rows; y++) { int idx = x + y * step; if (dataB[idx] < dataG[idx] && dataB[idx] < dataR[idx]) { dataG[idx] = dataB[idx]; dataR[idx] = dataB[idx]; } if (dataG[idx] < dataB[idx] && dataG[idx] < dataR[idx]) { dataB[idx] = dataG[idx]; dataR[idx] = dataG[idx]; } if (dataR[idx] < dataB[idx] && dataR[idx] < dataG[idx]) { dataB[idx] = dataR[idx]; dataG[idx] = dataR[idx]; } } } cv::Mat filterRedImg(rotateImg.rows, rotateImg.cols, CV_8UC3, cv::Scalar::all(255)); cv::merge(imgChannels, filterRedImg); cv::cvtColor(filterRedImg, grayImg, CV_BGR2GRAY); //cv::imshow("Image0", grayImg); cv::imwrite("outimg_4.jpg", grayImg); //cv::waitKey(); 
textImg.setTo(cv::Scalar(255, 255, 255)); cv::putText(textImg, "Next: Clean the noise.", cv::Point(450, 500), cv::FONT_HERSHEY_SIMPLEX, 1, cv::Scalar(0, 255, 255)); cv::putText(textImg, "Press any key to continue...", cv::Point(400, 600), cv::FONT_HERSHEY_SIMPLEX, 1, cv::Scalar(0, 255, 255)); //cv::imshow("Image0", textImg); //cv::waitKey(); step = grayImg.step[0]; for (int x = 0; x < width; x++) { for (int y = 0; y < height; y++) { int idx = x + y * step; if (grayImg.data[idx] > 100) //if(!is_gray(dataB[idx], dataG[idx], dataR[idx])) { grayImg.data[idx] = 255; } } } //cv::imshow("Image0", grayImg); cv::imwrite("outimg_5.jpg", grayImg); //cv::waitKey(); textImg.setTo(cv::Scalar(255, 255, 255)); cv::putText(textImg, "Next: Digitizing the curves.", cv::Point(400, 500), cv::FONT_HERSHEY_SIMPLEX, 1, cv::Scalar(0, 255, 255)); cv::putText(textImg, "Press any key to continue...", cv::Point(400, 600), cv::FONT_HERSHEY_SIMPLEX, 1, cv::Scalar(0, 255, 255)); //cv::imshow("Image0", textImg); //cv::waitKey(); cv::Mat newImg(height, width, CV_8UC3, cv::Scalar::all(255)); SegmentFactory segFactory = SegmentFactory(0); std::vector<Segment *> segments; segFactory.makeSegments(grayImg, segments); std::vector<Segment *>::iterator itr; for (itr = segments.begin(); itr != segments.end(); itr++) { Segment *seg = *itr; std::vector<SegmentLine *>::iterator itr_l; for (itr_l = seg->m_lines.begin(); itr_l != seg->m_lines.end(); itr_l++) { SegmentLine *line = *itr_l; cv::line(newImg, cv::Point(line->m_x1, line->m_y1), cv::Point(line->m_x2, line->m_y2), cv::Scalar(186,88,255), 1, CV_AA); std::cout << line->m_x1 << ", " << line->m_y1 << ", " << line->m_x2 << ", " << line->m_y2 << std::endl; } } //cv::imshow("Image0", newImg); cv::imwrite("outimg_6.jpg", newImg); //cv::waitKey(); textImg.setTo(cv::Scalar(255, 255, 255)); cv::putText(textImg, "Done.", cv::Point(550, 500), cv::FONT_HERSHEY_SIMPLEX, 1, cv::Scalar(0, 255, 255)); //cv::imshow("Image0", textImg); //cv::waitKey(); return 0; }