/** Main function*/ int main(int argc, char** argv) { Vec2f Fu, Fv; // Images cv::Mat inputImg, imgGRAY; cv::Mat outputImg; // Other variables cv::VideoCapture video; cv::Size procSize; char *videoFileName = 0; char *imageFileName = 0; int procWidth = -1; int procHeight = -1; int numVps = 2; int numFramesCalib = 40; int houghThreshold = 140; bool useCamera = true; bool playMode = true; bool stillImage = false; bool stillVideo = false; bool manual = false; //variable to print a trajectory //vector<Vec2f> trajectories; //readPoitsFile("trajectories.txt", &trajectories); // Parse arguments for(int i=1; i<argc; i++){ const char* s = argv[i]; if(strcmp(s, "-video" ) == 0){ // Input video is a video file videoFileName = argv[++i]; useCamera = false; } else if(strcmp(s,"-image") == 0){ // Input is a image file imageFileName = argv[++i]; stillImage = true; useCamera = false; } else if(strcmp(s, "-resizedWidth") == 0){ procWidth = atoi(argv[++i]); } else if(strcmp(s, "-still" ) == 0){ const char* ss = argv[++i]; if(strcmp(ss, "ON") == 0 || strcmp(ss, "on") == 0 || strcmp(ss, "TRUE") == 0 || strcmp(ss, "true") == 0 || strcmp(ss, "YES") == 0 || strcmp(ss, "yes") == 0 ) stillVideo = true; } else if(strcmp(s, "-manual" ) == 0){ const char* ss = argv[++i]; if(strcmp(ss, "ON") == 0 || strcmp(ss, "on") == 0 || strcmp(ss, "TRUE") == 0 || strcmp(ss, "true") == 0 || strcmp(ss, "YES") == 0 || strcmp(ss, "yes") == 0 ) manual = true; } else if(strcmp(s, "-play" ) == 0){ const char* ss = argv[++i]; if(strcmp(ss, "OFF") == 0 || strcmp(ss, "off") == 0 || strcmp(ss, "FALSE") == 0 || strcmp(ss, "false") == 0 || strcmp(ss, "NO") == 0 || strcmp(ss, "no") == 0 || strcmp(ss, "STEP") == 0 || strcmp(ss, "step") == 0) playMode = false; } else if(strcmp(s, "-houghThreshold") == 0){ houghThreshold = atoi(argv[++i]); } else if(strcmp(s, "-help" ) == 0){ help(); } } // Open video input if(useCamera) video.open(0); else{ if(!stillImage) video.open(videoFileName); } // Check video input int width = 0, 
height = 0, fps = 0, fourcc = 0; if(!stillImage){ if( !video.isOpened() ){ printf("ERROR: can not open camera or video file\n"); return -1; } else{ // Show video information width = (int) video.get(CV_CAP_PROP_FRAME_WIDTH); height = (int) video.get(CV_CAP_PROP_FRAME_HEIGHT); fps = (int) video.get(CV_CAP_PROP_FPS); fourcc = (int) video.get(CV_CAP_PROP_FOURCC); if(!useCamera) printf("Input video: (%d x %d) at %d fps, fourcc = %d\n", width, height, fps, fourcc); else printf("Input camera: (%d x %d) at %d fps\n", width, height, fps); } } else{ inputImg = cv::imread(imageFileName); if(inputImg.empty()) return -1; width = inputImg.cols; height = inputImg.rows; printf("Input image: (%d x %d)\n", width, height); playMode = false; } // Resize if(procWidth != -1){ procHeight = height*((double)procWidth/width); procSize = cv::Size(procWidth, procHeight); printf("Resize to: (%d x %d)\n", procWidth, procHeight); } else procSize = cv::Size(width, height); // Create and init MSAC MSAC msac; msac.init(procSize); //create mouse structs mouseDataVP mdVP; mdVP.uDone = false; mdVP.clicked = false; mouseDataCrop mdCrop; Vec4f previousVP; Vec4f averageVP; Vec4f vp; vector<Vec4f> vpVector; vector<Vec4f> stillVPS; //Mat top(1000,1000,CV_8UC3); int frameNum=0; for(;;) { if(!stillImage) { frameNum++; // Get current image video >> inputImg; } if(inputImg.empty()) break; // Resize to processing size cv::resize(inputImg, inputImg, procSize); // Color Conversion if(inputImg.channels() == 3) { cv::cvtColor(inputImg, imgGRAY, CV_BGR2GRAY); inputImg.copyTo(outputImg); } else { inputImg.copyTo(imgGRAY); cv::cvtColor(inputImg, outputImg, CV_GRAY2BGR); } //////////////////////////// // Process //////////////////////////// //get third frame for manual calibration if(manual && frameNum == 3){ mdVP.image = inputImg.clone(); vp = manualCalibration(&mdVP); } //add frame to average vps if(!manual && stillVideo && frameNum < numFramesCalib){ vp = automaticCalibration(msac, numVps, imgGRAY, outputImg, 
houghThreshold); if (validVPS(vp)) stillVPS.push_back(vp); } //average vps if (!manual && stillVideo && frameNum == numFramesCalib) { for (int i = 0; i < stillVPS.size(); i++) { vp += stillVPS[i]; } vp /= (int)stillVPS.size(); if (!useCamera) { video.open(videoFileName); continue; } } //automatic calibration if (!manual && !stillVideo) vp = automaticCalibration(msac, numVps, imgGRAY, outputImg, houghThreshold); if (!stillVideo && vpVector.size() < 30) vpVector.push_back(vp); else if(!stillVideo){ vpVector.erase(vpVector.begin()); vpVector.push_back(vp); averageVP = Vec4f(0,0,0,0); for (int i = 0; i < 30; i++) { averageVP += vpVector[i]; } averageVP /= 30; vp = averageVP; } //avoid vp swap position if (frameNum != 0 && pointDistance(Vec2f(previousVP[0], previousVP[1]), Vec2f(vp[0],vp[1])) > pointDistance(Vec2f(previousVP[0], previousVP[1]), Vec2f(vp[2],vp[3])) && pointDistance(Vec2f(previousVP[2], previousVP[3]), Vec2f(vp[2],vp[3])) > pointDistance(Vec2f(previousVP[2], previousVP[3]), Vec2f(vp[0],vp[1]))){ Vec4f temp(vp); vp[0] = vp[2]; vp[1] = vp[3]; vp[2] = temp[0]; vp[3] = temp[1]; } previousVP = Vec4f(vp); //top view calculation if (validVPS(vp)){ Fu = Vec2f(vp[0], vp[1]); Fv = Vec2f(vp[2], vp[3]); TopView tv(inputImg, Fu, Fv, &mdCrop); tv.drawAxis(outputImg, Point(0,0)); tv.generateTopImage(); //allows to crop top view tv.cropTopView(); tv.setOrigin(Vec2f(444,325)); tv.setScaleFactor(Vec2f(444,325), Vec2f(505, 149), 5.0); //Vec2f point = tv.toGroundPlaneCoord(Vec2f(464, 268)); /*vector<Vec2f> b; b = tv.toTopViewCoordinates(trajectories); for (int k = 0; k < b.size(); k++) { circle(tv.topImage, Point(b[k][0], b[k][1]), 2, Scalar(255,0,0)); } for (int k = 0; k < trajectories.size(); k++) { Vec2f a = tv.toGroundPlaneCoord(trajectories[k]); circle(top, Point(a[0] * 100 + 100, a[1] * 100), 2, Scalar(255,0,0)); }*/ //imshow("Plane Coord", top); imshow( "Top View", tv.topImage); //top = Scalar(0,0,0); } imshow("Original", outputImg); if(playMode) cv::waitKey(1); else 
cv::waitKey(0); char q = (char)waitKey(1); if( q == 27 ){ printf("\nStopped by user request\n"); break; } if(stillImage) break; }
/** Main function*/ int main(int argc, char** argv) { // Images cv::Mat inputImg, imgGRAY; cv::Mat outputImg; // Other variables char *videoFileName = 0; char *imageFileName = 0; cv::VideoCapture video; bool useCamera = true; int mode = MODE_NIETO; int numVps = 1; bool playMode = true; bool stillImage = false; bool verbose = false; int procWidth = -1; int procHeight = -1; cv::Size procSize; // Start showing help help(); // Parse arguments if(argc < 2) return -1; for(int i=1; i<argc; i++) { const char* s = argv[i]; if(strcmp(s, "-video" ) == 0) { // Input video is a video file videoFileName = argv[++i]; useCamera = false; } else if(strcmp(s,"-image") == 0) { // Input is a image file imageFileName = argv[++i]; stillImage = true; useCamera = false; } else if(strcmp(s, "-resizedWidth") == 0) { procWidth = atoi(argv[++i]); } else if(strcmp(s, "-verbose" ) == 0) { const char* ss = argv[++i]; if(strcmp(ss, "ON") == 0 || strcmp(ss, "on") == 0 || strcmp(ss, "TRUE") == 0 || strcmp(ss, "true") == 0 || strcmp(ss, "YES") == 0 || strcmp(ss, "yes") == 0 ) verbose = true; } else if(strcmp(s, "-play" ) == 0) { const char* ss = argv[++i]; if(strcmp(ss, "OFF") == 0 || strcmp(ss, "off") == 0 || strcmp(ss, "FALSE") == 0 || strcmp(ss, "false") == 0 || strcmp(ss, "NO") == 0 || strcmp(ss, "no") == 0 || strcmp(ss, "STEP") == 0 || strcmp(ss, "step") == 0) playMode = false; } else if(strcmp(s, "-mode" ) == 0) { const char* ss = argv[++i]; if(strcmp(ss, "LS") == 0) mode = MODE_LS; else if(strcmp(ss, "NIETO") == 0) mode = MODE_NIETO; else { perror("ERROR: Only LS or NIETO modes are supported\n"); } } else if(strcmp(s,"-numVps") == 0) { numVps = atoi(argv[++i]); } } // Open video input if( useCamera ) video.open(0); else { if(!stillImage) video.open(videoFileName); } // Check video input int width = 0, height = 0, fps = 0, fourcc = 0; if(!stillImage) { if( !video.isOpened() ) { printf("ERROR: can not open camera or video file\n"); return -1; } else { // Show video information width = (int) 
video.get(CV_CAP_PROP_FRAME_WIDTH); height = (int) video.get(CV_CAP_PROP_FRAME_HEIGHT); fps = (int) video.get(CV_CAP_PROP_FPS); fourcc = (int) video.get(CV_CAP_PROP_FOURCC); if(!useCamera) printf("Input video: (%d x %d) at %d fps, fourcc = %d\n", width, height, fps, fourcc); else printf("Input camera: (%d x %d) at %d fps\n", width, height, fps); } } else { inputImg = cv::imread(imageFileName); if(inputImg.empty()) return -1; width = inputImg.cols; height = inputImg.rows; printf("Input image: (%d x %d)\n", width, height); playMode = false; } // Resize if(procWidth != -1) { procHeight = height*((double)procWidth/width); procSize = cv::Size(procWidth, procHeight); printf("Resize to: (%d x %d)\n", procWidth, procHeight); } else procSize = cv::Size(width, height); // Create and init MSAC MSAC msac; msac.init(mode, procSize, verbose); int frameNum=0; for( ;; ) { if(!stillImage) { printf("\n-------------------------\nFRAME #%6d\n", frameNum); frameNum++; // Get current image video >> inputImg; } if( inputImg.empty() ) break; // Resize to processing size cv::resize(inputImg, inputImg, procSize); // Color Conversion if(inputImg.channels() == 3) { cv::cvtColor(inputImg, imgGRAY, CV_BGR2GRAY); inputImg.copyTo(outputImg); } else { inputImg.copyTo(imgGRAY); cv::cvtColor(inputImg, outputImg, CV_GRAY2BGR); } // ++++++++++++++++++++++++++++++++++++++++ // Process // ++++++++++++++++++++++++++++++++++++++++ processImage(msac, numVps, imgGRAY, outputImg); // View imshow("Output", outputImg); if(playMode) cv::waitKey(1); else cv::waitKey(0); char q = (char)waitKey(1); if( q == 27 ) { printf("\nStopped by user request\n"); break; } if(stillImage) break; }
/** This function contains the actions performed for each image*/ void processImage(MSAC &msac, int numVps, cv::Mat &imgGRAY, cv::Mat &outputImg) { cv::Mat imgCanny; // Canny cv::Canny(imgGRAY, imgCanny, 180, 120, 3); // Hough vector<vector<cv::Point> > lineSegments; vector<cv::Point> aux; #ifndef USE_PPHT vector<Vec2f> lines; cv::HoughLines( imgCanny, lines, 1, CV_PI/180, 200); for(size_t i=0; i< lines.size(); i++) { float rho = lines[i][0]; float theta = lines[i][1]; double a = cos(theta), b = sin(theta); double x0 = a*rho, y0 = b*rho; Point pt1, pt2; pt1.x = cvRound(x0 + 1000*(-b)); pt1.y = cvRound(y0 + 1000*(a)); pt2.x = cvRound(x0 - 1000*(-b)); pt2.y = cvRound(y0 - 1000*(a)); aux.clear(); aux.push_back(pt1); aux.push_back(pt2); lineSegments.push_back(aux); line(outputImg, pt1, pt2, CV_RGB(0, 0, 0), 1, 8); } #else vector<Vec4i> lines; int houghThreshold = 70; if(imgGRAY.cols*imgGRAY.rows < 400*400) houghThreshold = 100; cv::HoughLinesP(imgCanny, lines, 1, CV_PI/180, houghThreshold, 10,10); while(lines.size() > MAX_NUM_LINES) { lines.clear(); houghThreshold += 10; cv::HoughLinesP(imgCanny, lines, 1, CV_PI/180, houghThreshold, 10, 10); } for(size_t i=0; i<lines.size(); i++) { Point pt1, pt2; pt1.x = lines[i][0]; pt1.y = lines[i][1]; pt2.x = lines[i][2]; pt2.y = lines[i][3]; line(outputImg, pt1, pt2, CV_RGB(0,0,0), 2); /*circle(outputImg, pt1, 2, CV_RGB(255,255,255), CV_FILLED); circle(outputImg, pt1, 3, CV_RGB(0,0,0),1); circle(outputImg, pt2, 2, CV_RGB(255,255,255), CV_FILLED); circle(outputImg, pt2, 3, CV_RGB(0,0,0),1);*/ // Store into vector of pairs of Points for msac aux.clear(); aux.push_back(pt1); aux.push_back(pt2); lineSegments.push_back(aux); } #endif // Multiple vanishing points std::vector<cv::Mat> vps; // vector of vps: vps[vpNum], with vpNum=0...numDetectedVps std::vector<std::vector<int> > CS; // index of Consensus Set for all vps: CS[vpNum] is a vector containing indexes of lineSegments belonging to Consensus Set of vp numVp std::vector<int> 
numInliers; std::vector<std::vector<std::vector<cv::Point> > > lineSegmentsClusters; // Call msac function for multiple vanishing point estimation msac.multipleVPEstimation(lineSegments, lineSegmentsClusters, numInliers, vps, numVps); for(int v=0; v<vps.size(); v++) { printf("VP %d (%.3f, %.3f, %.3f)", v, vps[v].at<float>(0,0), vps[v].at<float>(1,0), vps[v].at<float>(2,0)); fflush(stdout); double vpNorm = cv::norm(vps[v]); if(fabs(vpNorm - 1) < 0.001) { printf("(INFINITE)"); fflush(stdout); } printf("\n"); } // Draw line segments according to their cluster msac.drawCS(outputImg, lineSegmentsClusters, vps); }
/**
 * Estimates the vanishing point of a filtered binary frame.
 *
 * Masks the frame to the given ROI, extracts line segments with a
 * probabilistic Hough transform (raising the threshold until at most
 * maxNumLines segments remain), then runs MSAC (NIETO mode, one vp) on
 * those segments. The clustered segments and the estimated vp are stored
 * in the members houghLines / vanishingPoint; execution time is printed
 * when the `verbose` member is set and intermediate images are shown when
 * the `display` member is set.
 *
 * @param inBinaryFrameFiltrado   filtered binary input frame
 * @param inVanishingPointImage   BGR visualization image (written when display is set)
 * @param roi                     region of interest the detection is restricted to
 * @param maxNumLines             upper bound on the number of Hough segments kept
 * @param houghThresholdInicial   initial Hough vote threshold
 * @param houghStep               increment applied to the threshold while too many lines are found
 * @param houghMinLineLength      HoughLinesP minimum segment length
 * @param houghMaxLineGap         HoughLinesP maximum gap to merge collinear segments
 * @return the raw line segments fed to MSAC, wrapped in an outer vector
 */
vector<vector<vector<Point> > > Nieto::vp(const Mat1b &inBinaryFrameFiltrado, Mat3b &inVanishingPointImage, const Rect &roi, const int maxNumLines, int houghThresholdInicial, int houghStep, double houghMinLineLength, double houghMaxLineGap) {
	double tempoInicio = static_cast<double>(getTickCount());  // start timestamp for the timing report below
	// take only the region of interest
	Mat1b mascaraRoi = Mat1b(inBinaryFrameFiltrado.size(), uchar(0));
	mascaraRoi(Rect(roi.x, roi.y, roi.width, roi.height)).setTo(255);
	Mat1b binaryFrameFiltradoMasked;
	cv::bitwise_and(inBinaryFrameFiltrado, mascaraRoi, binaryFrameFiltradoMasked);
	if (display) cvtColor(inBinaryFrameFiltrado, inVanishingPointImage, CV_GRAY2BGR);
	// Hough
	vector<vector<Point> > lineSegments;
	vector<Point> aux;
	vector<Vec4i> lines;
	int houghThreshold = houghThresholdInicial;
	cv::HoughLinesP(binaryFrameFiltradoMasked, lines, 1, CV_PI / 180, houghThreshold, houghMinLineLength, houghMaxLineGap);
	// raise the vote threshold until the segment count is manageable
	while (lines.size() > maxNumLines) {
		lines.clear();
		houghThreshold += houghStep;
		cv::HoughLinesP(binaryFrameFiltradoMasked, lines, 1, CV_PI / 180, houghThreshold, houghMinLineLength, houghMaxLineGap);
	}
	for (size_t i = 0; i<lines.size(); i++) {
		Point pt1, pt2;
		pt1.x = lines[i][0];
		pt1.y = lines[i][1];
		pt2.x = lines[i][2];
		pt2.y = lines[i][3];
		// modified: classify near-horizontal segments (dx/3 > dy) and draw
		// them in blue, the rest in green; NOTE(review): the `continue` that
		// would have discarded the near-horizontal ones is commented out, so
		// ALL segments are still passed to MSAC — confirm this is intended
		int dx = abs(pt2.x - pt1.x);
		int dy = abs(pt2.y - pt1.y);
		if (1.0*dx / 3.0 > dy) {
			if (display) line(inVanishingPointImage, pt1, pt2, CV_RGB(0, 0, 255), 1);
			// continue;
		} else{
			if (display) line(inVanishingPointImage, pt1, pt2, CV_RGB(0, 255, 0), 1);
		}
		// Store into vector of pairs of Points for msac
		aux.clear();
		aux.push_back(pt1);
		aux.push_back(pt2);
		lineSegments.push_back(aux);
	}
	// Multiple vanishing points
	vector<Mat> vps;  // vector of vps: vps[vpNum], with vpNum=0...numDetectedVps
	vector<vector<int> > CS;  // index of Consensus Set for all vps: CS[vpNum] is a vector containing indexes of lineSegments belonging to Consensus Set of vp numVp
	vector<int> numInliers;
	vector<vector<vector<Point> > > lineSegmentsClusters;
	MSAC msac;
	bool msac_verbose = false;
	int numVps = 1;  // this variant estimates a single vanishing point
	msac.init(MODE_NIETO, inBinaryFrameFiltrado.size(), msac_verbose);
	msac.multipleVPEstimation(lineSegments, lineSegmentsClusters, numInliers, vps, numVps);  // Call msac function for multiple vanishing point estimation
	msac.drawCS(inVanishingPointImage, lineSegmentsClusters, vps);  // Draw line segments according to their cluster
	// outputs
	this->houghLines = lineSegmentsClusters;
	this->vanishingPoint = vps;
	// compute the execution time (ms)
	double tempoFim = static_cast<double>(getTickCount());
	double tempoExecutando = ((tempoFim - tempoInicio) / getTickFrequency()) * 1000;
	// show the configured outputs (text and/or image)
	if (verbose) cout << "- nieto.vp: " << tempoExecutando << " ms" << endl;
	if (display) imshow("Vanishing Point", inVanishingPointImage);
	// NOTE(review): returns the raw segments, not lineSegmentsClusters —
	// clusters are only exposed via this->houghLines; confirm callers expect this
	return { lineSegments };
}