// Example #1
// Main execute function---------------------------------------------------------------------------------
// Detects QR finder-pattern candidates in the member `image`:
//  1. grayscale conversion + Canny edge map,
//  2. custom contour tracing (findContours_) over a raw uchar** view of the edges,
//  3. quad filtering via approximate()/isQuad()/area()/inOtherContour(),
//  4. horizontal pattern checks on an Otsu-thresholded sub-image around each quad.
// NOTE(review): this function is cut off in this chunk — its closing brace and
// final return statement are not visible here.
vector<FP> QrDetectorMod::find() {
	Mat gray = Mat(image.rows, image.cols, CV_8UC1);
	Mat edges(image.size(), CV_MAKETYPE(image.depth(), 1));
	cvtColor(image, gray, CV_BGR2GRAY);
	Canny(gray, edges, 100, 200, 3);
	//imshow("edges", edges);
	vector<vector<Point>> contours;
	vector<Point> approx;

	//findContours(edges, contours, RETR_LIST, CHAIN_APPROX_NONE); 
	// Custom contour tracer that operates on a raw pointer view of the edge image.
	uchar** arr = matToArr(edges); /* trik use pointers for images*/ findContours_(arr, &contours);
	// Disabled experiment: stitch together contours whose endpoints nearly touch.
	/*for (int i = 0; i < contours.size() - 1; i++){
		vector<Point> fst = contours[i];
		for (int j = i + 1; j < contours.size() - 1; j++){
			vector<Point> scd = contours[j];
			double endbeg = dist(fst[fst.size() - 1], scd[0]);
			double endend = dist(fst[fst.size() - 1], scd[scd.size() - 1]);
			double begbeg = dist(fst[0], scd[0]);
			double begend = dist(fst[0], scd[scd.size() - 1]);
			
			if (endbeg < 2){
				fst.insert(fst.end(), scd.begin(), scd.end());
				contours[i] = fst;
				contours.erase(contours.begin() + j);
			}
			if (begbeg < 2){
				reverse(fst.begin(), fst.end());
				fst.insert(fst.end(), scd.begin(), scd.end());
				contours[i] = fst;
				contours.erase(contours.begin() + j);
			}
			else
			if (endend < 2){
				fst.insert(fst.end(), scd.begin(), scd.end());
				contours[i] = fst;
				contours.erase(contours.begin() + j);
			}
			else
			if (begend < 2){
				scd.insert(scd.end(), fst.begin(), fst.end());
				contours[j] = scd;
				contours.erase(contours.begin() + i);
			}

		}
	}*/
	// Disabled debug visualization of the traced contours.
	/*RNG rng(12345);
	for each (vector<Point> c in contours){
		Scalar color = Scalar(rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255));
		for each(Point p in c) circle(image, p, 1, color, -1);
	}*/

	// Keep 4-vertex approximations that pass the quad-shape, area and nesting tests.
	for (int i = 0; i < contours.size(); i++)
	{
		approx = approximate(contours[i]);
		//for each (Point p in approx) circle(image, Point(p.x, p.y), 1, Scalar(0, 0, 255), -1); // for degug
		if (approx.size() == 4){
			//drawContours(image, contours, i, Scalar(255, 0, 0), CV_FILLED); //for debug
			// Area threshold (> 10 px) rejects tiny noise quads.
			if (isQuad(&approx) && abs(area(approx)) > 10){
				if (!inOtherContour(&approx)){
					quadList.push_back(vector<Point>(approx));
				}
			}
		}
	}

	// Fewer than two candidate quads cannot form a QR code — bail out early.
	if (quadList.size() < 2){
		return vector<FP>();
	}
	vector<FP> fps;
	for each(vector<Point> quad in quadList){

		// Expand the quad's bounding box (~2.8x, shifted by 0.7x) so the
		// pattern checks can see the whole finder pattern around the quad.
		Point min = minCoord(quad);
		Point max = maxCoord(quad);
		int x = min.x - 0.7*(max.x - min.x),
			y = min.y - 0.7*(max.y - min.y);
		if (x < 0) x = 0; if (y < 0) y = 0;
			
		int	w = 2.8 * (max.x - min.x),
			h = 2.8 * (max.y - min.y);
		// Reject candidates whose expanded box spans most of the frame,
		// then clamp the box to the image borders.
		if (h > 0.7*image.rows || w > 0.7*image.cols) continue;
		if (x + w > gray.cols) w = gray.cols - x - 1;
		if (h + y > gray.rows) h = gray.rows - y - 1;

		Mat partImg = gray(Rect(x, y, w, h));
		threshold(partImg, partImg, 128, 255, THRESH_OTSU); 
		// NOTE(review): quad[4] reads a 5th point although approx had exactly 4
		// vertices — presumably isQuad()/approximate() appends a center point;
		// TODO confirm, otherwise this is out-of-bounds.
		int dif = quad[4].y - y;
		if (dif >= h || dif <= 0) continue;
		if (singleHorizontalCheck(partImg, dif)) {
			fps.push_back(FP(quad[4].x, quad[4].y, module));
			}
		else {
			if (fullHorizontalCheck(partImg)) {
				fps.push_back(FP(quad[4].x, quad[4].y, module));				}
		}
			//imshow("Parts", partImg);//for debug
			//waitKey(1200);//for debug
		}
// Example #2
int main( int argc, char** argv ) {
	/// Detects plate-like rectangular regions in the lower half of an input
	/// image and writes every intermediate stage to output/*.jpg.
	/// Relies on file-scope globals declared elsewhere in this file:
	/// thresh, ratio, kernel_size (Canny parameters) and rng (cv::RNG).
	cv::Mat src, greyIm, histeqIm;

	// BUGFIX: argv[1] was dereferenced without checking argc (UB when the
	// program is started without arguments).
	if( argc < 2 ) {
		printf("Usage: %s <image>\n", argv[0]);
		return -1;
	}

	/// Load an image
	src = cv::imread( argv[1] );

	if( !src.data ) {
		printf("Input file? No? ouuuupsss thooooorryyyyy\n");
		return -1;
	}

	cv::Size s = src.size();
	int rows = s.height;
	int cols = s.width;
	// Setup a rectangle to define your region of interest (bottom half)
	cv::Rect myROI(0, rows/2, cols, rows/2);

	// Crop the full image to that image contained by the rectangle myROI
	// Note that this doesn't copy the data
	cv::Mat croppedImage = src(myROI);

	cv::imwrite("output/1_low_half.jpg", croppedImage);

	cv::cvtColor(croppedImage, greyIm, cv::COLOR_BGR2GRAY);

	cv::Size crop_size = croppedImage.size();
	int crop_rows = crop_size.height;
	int crop_cols = crop_size.width;

	cv::imwrite("output/2_grey_scale.jpg", greyIm);

	cv::equalizeHist( greyIm, histeqIm );

	cv::imwrite("output/3_hist_eq.jpg", histeqIm);

	std::vector<std::vector<cv::Point> > contours;
	std::vector<cv::Vec4i> hierarchy;

	// Reduce noise with kernel 3x3
	cv::Mat blurIm;
	blur(histeqIm, blurIm, cv::Size(3,3));

	cv::imwrite("output/4_blur.jpg", blurIm);

	// Canny detector
	cv::Mat edgesIm;
	Canny(blurIm, edgesIm, thresh, thresh*ratio, kernel_size);

	cv::imwrite("output/5_edge.jpg", edgesIm);

	// Find contours
	cv::findContours(edgesIm, contours, hierarchy, cv::RETR_TREE, cv::CHAIN_APPROX_SIMPLE, cv::Point(0,0));

	// Approximate contours to polygons + get bounding rects and circles
	std::vector<std::vector<cv::Point> > contours_poly(contours.size());
	std::vector<cv::Rect> boundRect(contours.size());
	std::vector<cv::Point2f>center(contours.size());
	std::vector<float>radius(contours.size());

	for (size_t i = 0; i < contours.size(); i++) {
		cv::approxPolyDP(cv::Mat(contours[i]), contours_poly[i], 3, true);
		boundRect[i] = cv::boundingRect(cv::Mat(contours_poly[i]));
		cv::minEnclosingCircle((cv::Mat)contours_poly[i], center[i], radius[i]);
	}

	// Draw contours and save up to kMaxPieces candidate regions.
	// BUGFIX: piece[]/hsvIm[]/hist[] are fixed-size arrays; the original wrote
	// piece[j] without any bound, overflowing the buffer when more than five
	// contours passed the size filter.
	const int kMaxPieces = 5;
	int j = 0;
	cv::Mat drawing = cv::Mat::zeros(edgesIm.size(), CV_8UC3);
	cv::Mat piece[kMaxPieces], hsvIm[kMaxPieces];
	for (size_t i = 0; i < contours.size(); i++) {
		// Keep only boxes with a plate-like aspect ratio and a plausible size
		// relative to the cropped frame.
		if (!((boundRect[i].height >= boundRect[i].width/5) && (boundRect[i].height <= boundRect[i].width/2)
			&& boundRect[i].height<=crop_rows/4 && boundRect[i].width<=crop_cols/2
			&& boundRect[i].height>=crop_rows/10 && boundRect[i].width>=crop_cols/6))
		continue;

		if (j < kMaxPieces) {
			cv::Rect roi = boundRect[i];
			piece[j] = croppedImage(roi);
			imwrite("output/contour"+std::to_string(j)+".jpg", piece[j]);
			j++;
		}

		cv::Scalar color = cv::Scalar(rng.uniform(0, 255), rng.uniform(0,255), rng.uniform(0,255));
		cv::drawContours(drawing, contours, (int)i, color, 2, 8, hierarchy, 0, cv::Point());
		cv::rectangle(drawing, boundRect[i].tl(), boundRect[i].br(), color, 2, 8, 0);
		//circle(drawing, center[i], (int)radius[i], color, 2, 8, 0);
	}

	imwrite("output/6_contours.jpg", drawing);

	// Hue/Saturation 2-D histogram parameters.
	int h_bins = 50; int s_bins = 60;
	int histSize[] = { h_bins, s_bins };

	float h_ranges[] = { 0, 180 };
	float s_ranges[] = { 0, 256 };

	const float* ranges[] = { h_ranges, s_ranges };

	int channels[] = { 0, 1 };

	cv::Mat hist[kMaxPieces];

	// Compute an H-S histogram for every saved piece.
	for (int i=0; i<j; i++){
		cvtColor(piece[i], hsvIm[i], cv::COLOR_BGR2HSV);
		imwrite("output/hsvIm"+std::to_string(i)+".jpg", hsvIm[i]);

		calcHist( &hsvIm[i], 1, channels, cv::Mat(), hist[i], 2, histSize, ranges, true, false );
		//normalize( hsvIm[i], hsvIm[i], 0, 1, cv::NORM_MINMAX, -1, cv::Mat() );
	}

	return 0;
}
/*
 * General videos processing. @TODO Make use of previous frames?
 *
 * Per-frame pipeline: grayscale -> histogram equalization -> Gaussian blur ->
 * Canny -> Hough lines -> vanishing point -> road shape -> overlay drawing.
 * Member state previousVanishingPoint / previousLines is used as a fallback
 * when detection fails on the current frame, and updated when it succeeds.
 * Returns the input frame with the overlays drawn onto it.
 */
Mat RoadDetection::processVideo(Mat rawFrame)
{
	Mat originalFrame, tmpFrame, grayFrame, blurredFrame, contoursFrame, houghFrame, sectionFrame, drawingFrame;
	Point vanishingPoint;

	vector<Line> houghLines, houghMainLines;
	vector<Point> roadShape;
	vector<Rect> vehicles;

	int sectionOffset;

	// convert to grayscale
	cvtColor(rawFrame, grayFrame, CV_BGR2GRAY);
	equalizeHist(grayFrame, grayFrame);

	// smooth and remove noise
	GaussianBlur(grayFrame, blurredFrame, Size(BLUR_KERNEL, BLUR_KERNEL), 0);

	// edge detection (canny, inverted)
	Canny(blurredFrame, contoursFrame, CANNY_MIN_THRESH, CANNY_MAX_THRESH);
	threshold(contoursFrame, contoursFrame, 128, 255, THRESH_BINARY);

	// hough transform
	houghLines = getHoughLines(contoursFrame, true);

	// vanishing point
	vanishingPoint = getVanishingPoint(houghLines, rawFrame.size());
	sectionOffset = vanishingPoint.y;

	// if we can't find a point, use the one from a previous frame
	// (note: sectionOffset deliberately keeps the y of the failed detection)
	if (vanishingPoint.x == 0 && vanishingPoint.y == 0)
	{
		vanishingPoint = previousVanishingPoint;
	}
	// if we can, save it for future use
	else
	{
		previousVanishingPoint = vanishingPoint;
	}

	// section frame (below vanishing point)
	sectionFrame = contoursFrame(CvRect(0, sectionOffset, contoursFrame.cols, contoursFrame.rows - sectionOffset));

	// re-apply hough transform to section frame
	houghLines = getHoughLines(sectionFrame, true);

	// shift lines downwards (section coordinates -> full-frame coordinates)
	houghLines = shiftLines(houghLines, sectionOffset);

	// best line matches
	houghMainLines = getMainLines(houghLines);

	// update vanishing point according to the new section;
	// cache/restore main lines across frames the same way as the point above
	if (houghMainLines.size() >= 2)
	{
		previousLines = houghMainLines;
	}
	else
	{
		houghMainLines = previousLines;
	}

	if (houghMainLines.size() >= 2)
	{
		Point intersection = getLineIntersection(houghMainLines[0], houghMainLines[1]);

		// only trust intersections that fall strictly inside the frame
		if (intersection.x > 0 && intersection.x < rawFrame.cols && intersection.y > 0 && intersection.y < rawFrame.rows)
		{
			vanishingPoint = intersection;
			sectionOffset = intersection.y;
		}

		// get road shape
		roadShape = getRoadShape(rawFrame, houghMainLines[0], houghMainLines[1], vanishingPoint);
	}

	// limit lines
	houghLines = getLimitedLines(houghLines, sectionOffset);
	houghMainLines = getLimitedLines(houghMainLines, sectionOffset);

	// drawing frame and vehicles
	drawingFrame = rawFrame(CvRect(0, sectionOffset, contoursFrame.cols, contoursFrame.rows - sectionOffset));
	// vehicles = getVehicles(drawingFrame);

	// drawing process
	drawLines(rawFrame, houghLines, Scalar(0, 0, 255), 2, 0);
	drawLines(rawFrame, houghMainLines, Scalar(20, 125, 255), 2, 0);
	drawRoadShape(rawFrame, roadShape, Scalar(20, 125, 255), 0.3);
	drawCircle(rawFrame, vanishingPoint, Scalar(20, 125, 255), 15, -1, 0);
	// drawRects(rawFrame, vehicles, Scalar(255, 0, 0), 1, sectionOffset);

	return rawFrame;
}
// Applies Canny edge detection to the ROI of `image` in the color space the
// parsed model requests, honoring the per-channel enable flags, then imports
// the processed pixels back into `image` at the ROI origin.
// Returns the same RawImage* it was given (modified in place).
// Throws runtime_error for the unsupported HSI color space.
RawImage* OpenCvCannyEdgeDetection::ProcessInput(CommandLineArgModel* arg, RawImage* image)
{
	OpenCvCannyEdgeDetectionModel* model = (OpenCvCannyEdgeDetectionModel*)arg->ParsedModel;

	Rectangle roi = model->Roi;
	Mat img = image->CloneToMat(roi);
	vector<Mat> channels;

	// Pre-blur to suppress noise before edge detection.
	blur(img, img, Size(model->Window * 3, model->Window * 3));

	// Runs Canny in place on one channel with the configured parameters.
	// Shared by every color-space branch (the original repeated this call six times).
	auto cannyInPlace = [model](Mat& channel) {
		Canny(channel, channel, 0, model->Threshold, model->Window);
	};

	switch (model->ColorSpace)
	{
		case GRAY:
			cvtColor(img, img, CV_BGR2GRAY);
			cannyInPlace(img);
			cvtColor(img, img, CV_GRAY2RGB);
			break;
		case RGB:
			split(img, channels);

			// In RGB mode a disabled channel is blanked to zero.
			if (model->ColorChannel1) cannyInPlace(channels[0]); else channels[0] = Scalar(0);
			if (model->ColorChannel2) cannyInPlace(channels[1]); else channels[1] = Scalar(0);
			if (model->ColorChannel3) cannyInPlace(channels[2]); else channels[2] = Scalar(0);

			merge(channels, img);
			break;
		case HSI:
			throw runtime_error("HSI is not support with OpenCvCannyEdgeDetection.");
		case YCRCB:
			cvtColor(img, img, CV_BGR2YCrCb);

			split(img, channels);

			// In YCrCb mode a disabled channel is left untouched (unlike RGB).
			if (model->ColorChannel1) cannyInPlace(channels[0]);
			if (model->ColorChannel2) cannyInPlace(channels[1]);
			if (model->ColorChannel3) cannyInPlace(channels[2]);

			merge(channels, img);

			cvtColor(img, img, CV_YCrCb2BGR);
			break;
	}

	image->Import(img, roi.X, roi.Y);

	return image;
}
// Example #5
// Segments items in gray image (img0).
// minPixelSize:
//   -1 -> return the largest region only
//   >0 -> drop regions with fewer than minPixelSize pixels
//    0 -> return all detected segments
// Returns a 0/255 mask the same size as img0.
Mat skinDetector::cannySegmentation(Mat img0, int minPixelSize)
{
	// LB: Zero pad image to remove edge effects when getting regions....
	const int padPixels = 20;
	// Rect border added at start...
	Rect tempRect;
	tempRect.x = padPixels;
	tempRect.y = padPixels;
	tempRect.width = img0.cols;
	tempRect.height = img0.rows;
	Mat img1 = Mat::zeros(img0.rows + (padPixels * 2), img0.cols + (padPixels * 2), CV_8UC1);
	img0.copyTo(img1(tempRect));

	// apply your filter
	Canny(img1, img1, 100, 200, 3); //100, 200, 3);

	// find the contours
	vector< vector<Point> > contours;
	findContours(img1, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);

	// Mask for segmented regions
	Mat mask = Mat::zeros(img1.rows, img1.cols, CV_8UC1);

	if (minPixelSize == -1)
	{ // Case of taking largest region
		// BUGFIX: guard against an empty contour list — minMaxLoc on an empty
		// Mat and drawContours with an undefined index were undefined behavior.
		if (!contours.empty())
		{
			vector<double> areas(contours.size());
			for (size_t i = 0; i < contours.size(); i++)
				areas[i] = contourArea(Mat(contours[i]));
			double max;
			Point maxPosition;
			// areas forms a single-column Mat, so maxPosition.y indexes the contour.
			minMaxLoc(Mat(areas), 0, &max, 0, &maxPosition);
			drawContours(mask, contours, maxPosition.y, Scalar(1), CV_FILLED);
		}
	}
	else
	{ // Case for using minimum pixel size
		for (size_t i = 0; i < contours.size(); i++)
		{
			if (contourArea(Mat(contours[i])) > minPixelSize)
				drawContours(mask, contours, (int)i, Scalar(1), CV_FILLED);
		}
	}
	// normalize so imwrite(...)/imshow(...) shows the mask correctly!
	normalize(mask.clone(), mask, 0.0, 255.0, CV_MINMAX, CV_8UC1);

	// Crop the padding back off before returning.
	Mat returnMask = mask(tempRect);

	// show the images
	if (verboseOutput)	imshow("Canny Skin: Img in", img0);
	if (verboseOutput)	imshow("Canny Skin: Mask", returnMask);
	if (verboseOutput)	imshow("Canny Skin: Output", img1);

	return returnMask;
}
// Locates the played (rectangular, non-square) pieces of every supported
// color: for each color the sharpened frame is HSV-filtered, edge-detected,
// and each large 4-vertex contour that is not a square is recorded via
// poscenter.ChooseSaveColorCenter and outlined on both the debug canvas
// (drawingContours) and imgSharp.
// Returns the debug canvas with all detected contours drawn.
Mat CColor::setColorPosition()
{
    Scalar colorContours;
    drawingContours = Mat::zeros(imgCanny.size(), CV_8UC3);

    // One entry per color, in the original processing order.
    // (The original repeated the identical detection block five times.)
    const struct ColorSpec { CColorsType type; Scalar draw; } specs[] = {
        { CColorsType::RED,    CV_RGB(255, 0, 0)     },
        { CColorsType::GREEN,  CV_RGB(0, 255, 0)     },
        { CColorsType::BLUE,   CV_RGB(0, 0, 255)     },
        { CColorsType::YELLOW, CV_RGB(255, 255, 0)   },
        { CColorsType::BLACK,  CV_RGB(100, 100, 100) },
    };

    for (const ColorSpec& spec : specs)
    {
        imgFHSV = Frame2HSV(imgSharp, spec.type);
        Canny(imgFHSV, imgCanny, 180, 120);
        findContours(imgCanny, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));

        for (size_t i = 0; i < contours.size(); i++) {
            // Polygon approximation with a tolerance of 2% of the perimeter.
            approxPolyDP(Mat(contours[i]), approx, arcLength(Mat(contours[i]), true)*0.02, true);
            if (fabs(contourArea(approx)) > 1000 && approx.size() == 4)
            {
                // Played pieces are rectangles, not squares.
                if (!fig.isSquare(approx)) {
                    poscenter.ChooseSaveColorCenter(spec.type, contours[i]);

                    fig.setContourRectangle(imgSharp, contours[i]);

                    colorContours = spec.draw;
                    drawContours(drawingContours, contours, (int)i, colorContours, 2, /*CV_AA*/8, hierarchy, 0, Point());
                    drawContours(imgSharp, contours, (int)i, colorContours, 2, /*CV_AA*/8, hierarchy, 0, Point());
                }
            }
        }
    }
//    imshow("drawingContours", drawingContours);
    return drawingContours;
}
// Scans the sharpened frame for the first played (rectangular, non-square)
// piece of any supported color and compares its color against colorAsked.
// On a match the piece is outlined in green + its own color; on a mismatch it
// is outlined in red. Either way the robot is notified via setMessage2Robot
// and a {asked, placed} pair is returned immediately.
// Returns {NONE, NONE} when no piece is found.
QVector<CColorsType> CColor::getCorAsked_CorPlaced(CColorsType colorAsked)
{
    // Detection order and draw colors preserved from the original, which
    // repeated the identical detection block once per color.
    const struct ColorSpec { CColorsType type; Scalar draw; } specs[] = {
        { CColorsType::YELLOW, CV_RGB(255, 255, 0) },
        { CColorsType::RED,    CV_RGB(255, 0, 0)   },
        { CColorsType::GREEN,  CV_RGB(0, 255, 0)   },
        { CColorsType::BLUE,   CV_RGB(0, 0, 255)   },
        { CColorsType::BLACK,  CV_RGB(0, 0, 0)     },
    };

    for (const ColorSpec& spec : specs)
    {
        imgFHSV = Frame2HSV(imgSharp, spec.type);
        Canny(imgFHSV, imgCanny, 180, 120);
        findContours(imgCanny, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));

        for (size_t i = 0; i < contours.size(); i++)
        {
            approxPolyDP(Mat(contours[i]), approx, arcLength(Mat(contours[i]), true)*0.02, true);
            if (fabs(contourArea(approx)) > 1000 && approx.size() == 4)
            {
                // Played pieces are rectangles (non-square quads).
                if ( ! fig.isSquare(approx))
                {
                    const bool rightAnswer = (colorAsked == spec.type);
                    if (rightAnswer)
                    {
                        fig.setContourRectangle(imgSharp, contours[i], CV_RGB(0, 255, 0)); //feedback on screen
                        drawContours(imgSharp, contours, (int)i, spec.draw, 2, /*CV_AA*/8, hierarchy, 0, Point());
                    }
                    else
                    {
                        fig.setContourRectangle(imgSharp, contours[i], Scalar(0, 0, 255)); //feedback on screen
                    }

                    setMessage2Robot(rightAnswer, colorAsked, spec.type);
                    return QVector<CColorsType>() << colorAsked << spec.type;
                }
            }
        }
    }

    return QVector<CColorsType>() << CColorsType::NONE  << CColorsType::NONE;  ///REVER
}
// Example #8
void DrImage::ConvMatrix(){
  double Average = 0.;
  double Count = 0.;
  Matrice ImIn(NWidth,NHeight);
  for(int h=0;h<NHeight;h++){
    for(int w=0;w<NWidth;w++){
      double Sum = 1.- .3333*(data[0][h*NWidth+w]+data[1][h*NWidth+w]+data[2][h*NWidth+w]);
      //double Sum = 1. - (data[0][h*NWidth+w]+data[1][h*NWidth+w]+data[2][h*NWidth+w]);
      ImIn.Set(w,h,Sum);
      Average += Sum;
      Count += 1.;
    }
  }
  Average /= Count;
  //---------Canny---------------------
  Matematica *Mat = new Matematica;
  Matrice Canny(5,5);
  Canny.FillCanny();
  Canny.Print();
  // Mat->ApplyFilter(&ImIn,&Canny);
  // Mat->ApplyFilter(&ImIn,&Canny);
  //-----------Edge-------------------
  SPLINE Weight;
  Weight.a0 = 0.;
  Weight.a1 = 1.; Weight.a2 = 0.;
  Weight.a3 = 0.; Weight.a4 = 0.;
  Matrice *Mask = new Matrice(Weight,3);
  Mask->Print();
  //Mat->ApplyFilter(&ImIn,Mask);
  // Mask->Transpose();
  // Mat->ApplyFilter(ImIn,Mask);
  //-------------Smooth----------------
  const int NMatSize = 5;
  Matrice GaussEdge(NMatSize,NMatSize);
  GaussEdge.FillGaussian(.5,1.);
  //double LatPos[5] = {.125,-.25,0.,.25,-.125};
  // double LatPos[3] = {-1.,0.,1.};
  // for(int w=0;w<NMatSize;w++){
  //   for(int h=0;h<NMatSize;h++){
  //     GaussEdge.Set(w,h,GaussEdge.Val(w,h)*LatPos[w]);
  //   }
  // }
  GaussEdge.Print();
  //Mat->ApplyFilter(ImIn,&GaussEdge);
  Matrice GaussSmooth(5,5);
  GaussSmooth.FillGaussian(.5,3.);
  // Mat->ApplyFilter(ImIn,&GaussSmooth);
  //------------PixDev------------------
  for(int h=0;h<NHeight;h++){
    for(int w=0;w<NWidth;w++){
      ImIn.Set(w,h,Average-ImIn.Val(w,h));
    }
  }
  int PixelDev = 5;
  double ValStep[3] = {0.,0.,0.};
  int ValNStep[3] = {0,0,0};
  for(int h=0;h<NHeight;h++){
    for(int w=0;w<NWidth;w++){
      ValNStep[0] = w - PixelDev;
      if(ValNStep[0] < 0 ) continue;
      if(ValNStep[0] >= NWidth) continue;
      ValNStep[1] = w;
      ValNStep[2] = w + PixelDev;
      if(ValNStep[2] < 0 ) continue;
      if(ValNStep[2] >= NWidth) continue;
      for(int d=0;d<3;d++){
	ValStep[d] = ImIn.Val(ValNStep[d],h);
 	if(d == 1){
	  ValStep[d] = ValStep[d] > 0. ? ValStep[d] : 0.;
	  continue;
	}
	ValStep[d] = ValStep[d] < 0. ? -ValStep[d] : 0.;
      }
      double Resp = ValStep[0]*ValStep[1]*ValStep[2];
      //double Resp = ValStep[1];
      //ImIn.Set(w,h,Resp);
    } 
  }
  char cImage[160];
  sprintf(cImage,"Canny.png");
  pngwriter ImageOut(NWidth,NHeight,1.0,cImage);
  FILE *Ciccia = fopen("Pos3d.dat","w");
  double NormH = 1./(double)NHeight;
  double NormW = 1./(double)NWidth;
  for(int h=0;h<NHeight;h++){
    for(int w=0;w<NWidth;w++){
      fprintf(Ciccia,"%lf %lf %lf\n",w*NormH,h*NormH,ImIn.Val(w,h));
      ImageOut.plot(w,h,ImIn.Val(w,h),ImIn.Val(w,h),ImIn.Val(w,h));
    }
  }
  fclose(Ciccia);
  ImageOut.close();
}
// Main processing loop: repeatedly pulls a frame from imageBuffer, applies the
// enabled image-processing steps while holding updateMembersMutex, converts
// the result to a QImage, and emits it to the GUI thread via newFrame().
// The loop exits when `stopped` (guarded by stoppedMutex) has been set.
void ProcessingThread::run()
{
    while(1)
    {
        /////////////////////////////////
        // Stop thread if stopped=TRUE //
        /////////////////////////////////
        stoppedMutex.lock();
        if (stopped)
        {
            stopped=false;
            stoppedMutex.unlock();
            break;
        }
        stoppedMutex.unlock();
        /////////////////////////////////
        /////////////////////////////////

        // Save processing time
        processingTime=t.elapsed();
        // Start timer (used to calculate processing rate)
        t.start();

        // Get frame from queue, store in currentFrame, set ROI
        currentFrame=Mat(imageBuffer->getFrame(),currentROI);

        updateMembersMutex.lock();
        ///////////////////
        // PERFORM TASKS //
        ///////////////////
        // Note: ROI changes will take effect on next frame
        if(resetROIFlag)
            resetROI();
        else if(setROIFlag)
            setROI();
        ////////////////////////////////////
        // PERFORM IMAGE PROCESSING BELOW //
        ////////////////////////////////////
        // Grayscale conversion
        if(grayscaleOn)
            cvtColor(currentFrame,currentFrameGrayscale,CV_BGR2GRAY);
        // Smooth (in-place operations); the grayscale/color branches apply the
        // same smoothType dispatch to whichever frame is active
        if(smoothOn)
        {
            if(grayscaleOn)
            {
                switch(smoothType)
                {
                    // BLUR
                    case 0:
                        blur(currentFrameGrayscale,currentFrameGrayscale,Size(smoothParam1,smoothParam2));
                        break;
                    // GAUSSIAN
                    case 1:
                        GaussianBlur(currentFrameGrayscale,currentFrameGrayscale,Size(smoothParam1,smoothParam2),smoothParam3,smoothParam4);
                        break;
                    // MEDIAN
                    case 2:
                        medianBlur(currentFrameGrayscale,currentFrameGrayscale,smoothParam1);
                        break;
                }
            }
            else
            {
                switch(smoothType)
                {
                    // BLUR
                    case 0:
                        blur(currentFrame,currentFrame,Size(smoothParam1,smoothParam2));
                        break;
                    // GAUSSIAN
                    case 1:
                        GaussianBlur(currentFrame,currentFrame,Size(smoothParam1,smoothParam2),smoothParam3,smoothParam4);
                        break;
                    // MEDIAN
                    case 2:
                        medianBlur(currentFrame,currentFrame,smoothParam1);
                        break;
                }
            }
        }
        // Dilate
        if(dilateOn)
        {
            if(grayscaleOn)
                dilate(currentFrameGrayscale,currentFrameGrayscale,Mat(),Point(-1,-1),dilateNumberOfIterations);
            else
                dilate(currentFrame,currentFrame,Mat(),Point(-1,-1),dilateNumberOfIterations);
        }
        // Erode
        if(erodeOn)
        {
            if(grayscaleOn)
                erode(currentFrameGrayscale,currentFrameGrayscale,Mat(),Point(-1,-1),erodeNumberOfIterations);
            else
                erode(currentFrame,currentFrame,Mat(),Point(-1,-1),erodeNumberOfIterations);
        }
        // Flip
        if(flipOn)
        {
            if(grayscaleOn)
                flip(currentFrameGrayscale,currentFrameGrayscale,flipCode);
            else
                flip(currentFrame,currentFrame,flipCode);
        }
        // Canny edge detection
        if(cannyOn)
        {
            // Frame must be converted to grayscale first if grayscale conversion is OFF
            if(!grayscaleOn)
                cvtColor(currentFrame,currentFrameGrayscale,CV_BGR2GRAY);

            Canny(currentFrameGrayscale,currentFrameGrayscale,
                  cannyThreshold1,cannyThreshold2,
                  cannyApertureSize,cannyL2gradient);
        }
        ////////////////////////////////////
        // PERFORM IMAGE PROCESSING ABOVE //
        ////////////////////////////////////

        // Convert Mat to QImage: Show grayscale frame [if either Grayscale or Canny processing modes are ON]
        if(grayscaleOn||cannyOn)
            frame=MatToQImage(currentFrameGrayscale);
        // Convert Mat to QImage: Show BGR frame
        else
            frame=MatToQImage(currentFrame);
        updateMembersMutex.unlock();

        // Update statistics
        updateFPS(processingTime);
        currentSizeOfBuffer=imageBuffer->getSizeOfImageBuffer();
        // Inform GUI thread of new frame (QImage)
        emit newFrame(frame);
    }
    qDebug() << "Stopping processing thread...";
}
// Estimates the image row where a reflection occurs and stores it (divided
// by 4) in the member `y_reflection`.
// Method: run Canny on the frame, build a per-row histogram of edge-pixel
// counts, smooth it, collect its concave (local-minimum) and convex
// (local-maximum) points, then choose the concave point best framed by
// strong maxima on both sides (score biased toward lower rows).
void SurfaceComputer::init(Mat& image0)
{
	// NOTE: shallow copy — image_bright shares pixel data with image0.
	Mat image_bright = image0;
	// Mat image_bright = Mat::zeros(image0.size(), CV_8UC3);

	// for (int i = 0; i < WIDTH_LARGE; ++i)
	// 	for (int j = 0; j < HEIGHT_LARGE; ++j)
	// 	{
	// 		int b = image0.ptr<uchar>(j, i)[0] + 100;
	// 		int g = image0.ptr<uchar>(j, i)[1] + 100;
	// 		int r = image0.ptr<uchar>(j, i)[2] + 100;

	// 		if (b > 255)
	// 			b = 255;
	// 		if (g > 255)
	// 			g = 255;
	// 		if (r > 255)
	// 			r = 255;

	// 		image_bright.ptr<uchar>(j, i)[0] = b;
	// 		image_bright.ptr<uchar>(j, i)[1] = g;
	// 		image_bright.ptr<uchar>(j, i)[2] = r;
	// 	}

    // Edge map of the (optionally brightened) frame.
    Mat image_canny;
    Canny(image_bright, image_canny, 20, 60, 3);

    // Default: assume the reflection is at the bottom of the image.
    y_reflection = HEIGHT_LARGE;

    // Histogram: number of Canny edge pixels in each row.
    int intensity_array[HEIGHT_LARGE];
    for (int j = 0; j < HEIGHT_LARGE; ++j)
    {
        int intensity = 0;
        for (int i = 0; i < WIDTH_LARGE; ++i)
            if (image_canny.ptr<uchar>(j, i)[0] > 0)
                ++intensity;

        intensity_array[j] = intensity;
    }

    // Debug visualization of the histogram polyline.
    Mat image_histogram = Mat::zeros(HEIGHT_LARGE, WIDTH_LARGE, CV_8UC1);

    // Extremum points stored as (x = smoothed intensity, y = row, z = row index).
    vector<Point3f> concave_pt_index_vec;
    vector<Point3f> convex_pt_index_vec;

    Point pt_old = Point(-1, -1);
    Point pt_old_old = Point(-1, -1);

    int index = 0;
    for (int j = 0; j < HEIGHT_LARGE; ++j)
    {
        int i = intensity_array[j];
        // Presumably smooths `i` in place with coefficient 0.1 under the
        // key "i" — TODO confirm against LowPassFilter::compute.
        low_pass_filter.compute(i, 0.1, "i");

        Point pt = Point(i, j);

        if (pt_old.x != -1)
        {
            line(image_histogram, pt, pt_old, Scalar(254), 1);

            // Classify the middle point of each sliding 3-point window as a
            // local minimum (concave) or local maximum (convex).
            if (pt_old_old.x != -1)
            {
                if (pt_old.x < pt_old_old.x && pt_old.x < pt.x)
                    concave_pt_index_vec.push_back(Point3f(pt_old.x, pt_old.y, index - 1));

                if (pt_old.x > pt_old_old.x && pt_old.x > pt.x)
                    convex_pt_index_vec.push_back(Point3f(pt_old.x, pt_old.y, index - 1));
            }
            pt_old_old = pt_old;
        }
        pt_old = pt;

        ++index;
    }

    // For each concave point, find the strongest convex point above it
    // (smaller row index) and below it; score by the product of both
    // prominences plus a bias toward lower rows (z * 100).
    int x_diff_sum_max = -1;
    for (Point3f& concave_pt_index : concave_pt_index_vec)
    {
        int before_x_max = -1;
        int after_x_max = -1;
        for (Point3f& convex_pt_index : convex_pt_index_vec)
        {
            if (convex_pt_index.z < concave_pt_index.z)
            {
                if (convex_pt_index.x > before_x_max)
                    before_x_max = convex_pt_index.x;
            }
            else
            {
                if (convex_pt_index.x > after_x_max)
                    after_x_max = convex_pt_index.x;
            }
        }
        // Only concave points with a maximum on BOTH sides are candidates.
        if (before_x_max != -1 && after_x_max != -1)
        {
            int x_diff_before = before_x_max - concave_pt_index.x;
            int x_diff_after = after_x_max - concave_pt_index.x;
            int x_diff_sum = (x_diff_before * x_diff_after) + (concave_pt_index.z * 100);

            if (x_diff_sum > x_diff_sum_max)
            {
                x_diff_sum_max = x_diff_sum;
                y_reflection = concave_pt_index.z;
            }
        }
    }
    // Downscale the detected row — presumably from the large to a smaller
    // working resolution (factor 4); TODO confirm.
    y_reflection /= 4;

 //    vector<Vec4i> lines(10000);
	// HoughLinesP(image_canny, lines, 1, CV_PI / 180, 20, 50, 10);
 //    for (size_t i = 0; i < lines.size(); ++i)
 //    {
 //        Vec4i l = lines[i];
 //        Point pt0 = Point(l[0], l[1]);
 //        Point pt1 = Point(l[2], l[3]);

 //        if (abs(pt0.y - pt1.y) <= 5 && pt0.y < y_reflection)
 //        	line(image_canny, pt0, pt1, Scalar(127), 3);
 //    }
}
// Driver for a lane/route-detection experiment.  Loads a grayscale image,
// runs a configurable pipeline (Gaussian blur, binarization, inversion,
// adaptive threshold, Laplacian, Canny, probabilistic Hough, final
// threshold — selected by the f1..f9 flags), draws a route line from the
// brightest point, and logs each stage's timing to "data.txt".
// Returns 0 on success, -1 when an input image cannot be loaded.
int main(int argc, char ** argv)
{
	// Stage names; used by savePictures() as output-file prefixes.
	string gauss = "Gaussino";
	string canny = "Canny";
	string hough = "Hough";
	string binarizar = "Binarizar";
	string Otsu = "Otsu";
	string image_name = "";
	int number;                       // used only by the commented-out batch loop below
	Point min, max, start;

	ofstream myfile;
	myfile.open("data.txt");
	myfile << "ESCREVE QUALQUER COISA\n";

	clock_t t1, t2;                   // t1 = whole run, t2 = per-stage timer
	double threshold1, threshold2;    // Canny thresholds
	double thres, minLength, maxGap;  // HoughLinesP parameters
	bool f1, f2, f3, f4, f5, f6, f7, f8, f9;
	string Result;                    // used only by the commented-out batch loop below
	ostringstream convert;
	float temp;                       // last measured stage time

	//for (int i = 1; i <= 6; i++){
		//number = i;
		//convert << number;
		//Result = convert.str();
		//image_name = "a" + Result + ".JPG";
		image_name = "a2.JPG";
		cout << image_name;

		myfile << image_name;
		myfile << "\n";

		t1 = clock();
		// Pipeline switches.
		f1 = false;  // fixed-threshold binarization
		f2 = true;   // Gaussian blur
		f3 = false;  // inversion
		f4 = false;  // adaptive threshold
		f5 = false;  // Laplacian
		f6 = true;   // final threshold
		f7 = true;   // Canny
		if (f7 == true){
			threshold1 = 10;
			threshold2 = 19;
		}
		f8 = false;  // (unused)
		f9 = true;   // Hough transform
		if (f9 == true){
			thres = 10;// 40
			minLength = 20; //50
			maxGap = 30; //80

			// Load the reference image (only its size is used) and the input
			// image, both as single-channel grayscale.
			Mat lara = imread("lara.JPG", CV_LOAD_IMAGE_GRAYSCALE);
			Mat I = imread(image_name, CV_LOAD_IMAGE_GRAYSCALE);
			// BUG FIX: also bail out when the reference image is missing;
			// resizing to an empty lara.size() would otherwise abort.
			if (I.empty() || lara.empty())
				return -1;
			resize(I, I, lara.size(), 1.0, 1.0, INTER_LINEAR);

			// Equalize the histogram to improve contrast before filtering.
			Mat I1;
			equalizeHist(I, I1);

			// aux is a shallow copy: stages that modify I1 in place are
			// visible through aux as well.
			Mat aux;
			aux = I1;

			if (I1.empty())
				return -1;

			// Canny output buffer (8-bit, single channel).
			Mat I2 = Mat::zeros(I1.size(), CV_8UC1);

			// Gaussian blur with growing odd kernel sizes.
			if (f2 == true) {
				t2 = clock();
				for (int i = 1; i < MAX_KERNEL_LENGTH; i = i + 2)
					GaussianBlur(I1, I1, Size(i, i), 0, 0, BORDER_DEFAULT);
				cout << "Guassiano tempo : ";
				temp = tempo(t2);
				savePictures(I1, image_name, gauss);
				myfile << "Gauss: ";
				myfile << temp;
				myfile << "\n";
			}

			// Fixed-threshold binarization at 125.
			if (f1 == true){
				t2 = clock();
				binarizacao(I1, 125);
				cout << "binarizacao : ";
				temp = tempo(t2);
				savePictures(I1, image_name, binarizar);
				myfile << "Binarizacao: ";
				myfile << temp;
				myfile << "\n";
			}

			// Pixel inversion.
			if (f3 == true){
				t2 = clock();
				inversao(I1);
				cout << "inversao : ";
				tempo(t2);
			}

			if (f4 == true){
				adaptiveThreshold(I1, I1, 255, ADAPTIVE_THRESH_GAUSSIAN_C, CV_THRESH_BINARY, 7, 0);
			}

			if (f5 == true)
				Laplacian(I1, I1, 125, 1, 1, 0, BORDER_DEFAULT);

			// Canny edge detection into I2.
			if (f7 == true){
				t2 = clock();
				Canny(I1, I2, threshold1, threshold2, 3, false);
				cout << "canny : ";
				temp = tempo(t2);
				savePictures(I2, image_name, canny);
				// BUG FIX: the original wrote `"Canny: " + (int)(temp * 1000)`,
				// which offsets the string-literal pointer instead of appending
				// the number (UB for offsets past the literal's end).  Stream
				// the label and the value separately.
				myfile << "Canny: ";
				myfile << (int)(temp * 1000);
				myfile << "\n";
			}

			// Probabilistic Hough transform over the edge image; draws into aux.
			if (f9 == true){
				t2 = clock();
				Hough(I2, aux, thres, minLength, maxGap);
				cout << "hough : ";
				temp = tempo(t2);
				savePictures(aux, image_name, hough);
				myfile << "Hough: ";
				myfile << temp;
				myfile << "\n";
			}

			// Final binary threshold.
			if (f6 == true){
				t2 = clock();
				threshold_type = THRESH_BINARY;

				threshold(aux, I1, 9, max_BINARY_value, threshold_type);
				cout << "Threshold : ";
				temp = tempo(t2);
				myfile << "Threshold/OTSU: ";
				myfile << temp;
				myfile << "\n";
			}

			string name = Otsu + image_name;
			imwrite(name, aux);
			ShowImage(I1, aux);

			// Route: vertical line from the brightest point down to the
			// bottom edge of the image.
			t2 = clock();
			max = maxPoint(aux);
			min = minPoint(aux);

			start.x = max.x;
			start.y = max.y;

			Point end;
			end.x = start.x;
			end.y = aux.size().height;

			MyLine(I, start, end, image_name, 0.3);
			temp = tempo(t2);
			ShowImage(I, aux);

			myfile << "Rota: ";
			myfile << temp;
			myfile << "\n";

			// Total time for the whole pipeline.
			temp = tempo(t1);
			cout << "Final time : ";
			myfile << "Final Time: ";
			myfile << temp;
			myfile << "\n";
		}
	//}

		myfile.close();
		return 0;
}
void RectangleDetector::findRectangles()
{
    std::cout << "Finding rectangles..." << std::endl;
    // Clear Rectangle Vectors
    allRectangles.clear();
    finalRectangles.clear();


    // Variable Declarations
    cv::Mat pyr;
    cv::Mat timg;
    cv::Mat gray0(image.size(), CV_8U);
    cv::Mat gray;

    cv::pyrDown(image, pyr, cv::Size(image.cols / 2, image.rows / 2));
    cv::pyrUp(pyr, timg, image.size());

    // Variable Declarations
    std::vector<std::vector<cv::Point> > contours;
    int ch[] = {0, 0};

    cv::mixChannels(&image, 1, &gray0, 1, ch, 1); // Extract Channel

    
    // Canny helps to catch squares with gradient shading
    // apply Canny. Take the upper threshold from slider
    // and set the lower to 0 (which forces edges merging)


    Canny(gray0, gray, 0, constList->detectionCannyThreshold, 5);

    // dilate canny output to remove potential
    // holes between edge segments
    dilate(gray, gray, cv::Mat(), cv::Point(-1,-1));

    findContours(gray, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE); // Find Contours

    // Variable Declarations
    std::vector<cv::Point> approx;

    // Test Each Contour
    for (size_t i = 0; i < contours.size(); ++i) {
    // approximate contour with accuracy proportional
    // to the contour perimeter
    cv::approxPolyDP(cv::Mat(contours.at(i)), approx, cv::arcLength(cv::Mat(contours.at(i)), true) * 0.02, true);

    // rectangular contours should have 4 vertices after approximation
    // relatively large area (to filter out noisy contours)
    // and be convex.
    // Note: absolute value of an area is used because
    // area may be positive or negative - in accordance with the
    // contour orientation
    if (approx.size() >= 4 &&
    //approx.size() <= 6 &&
    fabs(cv::contourArea(cv::Mat(approx))) > 200// &&
    //cv::isContourConvex(cv::Mat(approx))
    ) {
    double maxCosine = 0;

    for(int j = 2; j < 5; ++j) {
    // find the maximum cosine of the angle between joint edges
    double cosine = fabs(angle(approx.at(j%4), approx.at(j-2), approx.at(j-1)));
    maxCosine = MAX(maxCosine, cosine);
    }

    // if cosines of all angles are small
    // (all angles are ~90 degrees) then write quandrange
    // vertices to resultant sequence
    if(maxCosine < constList->detectionMaxCosine)
    allRectangles.push_back(approx);
    }
    }
    
    
    std::cout << allRectangles.size();
    if (allRectangles.size() == 0)
        foundRectangle = false;


}
// Builds the Hough-transform working images from the input frame: keeps a
// reference to the source, runs Canny edge detection into `dst`, and
// converts the edge map to BGR in `cdst` so colored lines can be drawn on it.
HoughLineTransform::HoughLineTransform(Mat srcInput)
{
	src = srcInput;                    // shallow copy — shares pixel data with the caller's Mat
	Canny(src, dst, 50, 200, 3);       // edge map: thresholds 50/200, 3x3 Sobel aperture
	cvtColor(dst, cdst, CV_GRAY2BGR);  // 3-channel copy of the edges for colored overlays
}
Exemple #14
0
// Searches a video capture for a QR code.  Each frame's contour hierarchy is
// scanned for the three "finder patterns" (contours with >= 5 nested
// children); their centroids and pairwise midpoints are marked, a fixed
// region is cropped and binarized, and the crop is SIFT-matched against a
// reference QR image whose homography is then used to warp the reference
// onto the crop.  Returns 1 once a pattern was found, 0 otherwise (ESC aborts).
int main()
{
	cv::Mat imCalibColor;
	cv::Mat imCalibGray;
	cv::vector<cv::vector<cv::Point> > contours;
	cv::vector<cv::Vec4i> hierarchy;
	cv::vector<cv::Point2f> pointQR;
	cv::Mat imCalibNext;
	cv::Mat imQR;
	cv::vector<cv::Mat> tabQR;

	int A = 0, B = 0, C = 0;   // contour indices of the three finder patterns
	char key = 0;              // BUG FIX: initialized — read by the loop condition on every pass
	int mark;                  // number of finder patterns found in the current frame
	bool patternFound = false;

	cv::VideoCapture vcap("../rsc/capture2.avi");

	// Load the four reference QR images as grayscale.
	for (int i = 1; i < 5; i++)
	{
		std::ostringstream oss;
		oss << "../rsc/QrCodes/QR" << i << ".jpg";
		imQR = cv::imread(oss.str());
		cv::cvtColor(imQR, imQR, CV_BGR2GRAY);
		std::cout<< "Bouh!!!!!!" << std::endl;
		tabQR.push_back(imQR);
	}

	do
	{
		// Skip empty frames at the start of the stream, then grab one.
		while(imCalibColor.empty())
		{
			vcap >> imCalibColor;
		}
		vcap >> imCalibColor;

		// Single-channel edge map of the frame.
		cv::Mat edges(imCalibColor.size(), CV_MAKETYPE(imCalibColor.depth(), 1));
		cv::cvtColor(imCalibColor, imCalibGray, CV_BGR2GRAY);
		Canny(imCalibGray, edges, 100, 200, 3);

		cv::findContours( edges, contours, hierarchy, cv::RETR_TREE, cv::CHAIN_APPROX_SIMPLE);

		cv::imshow("pointInteret", imCalibColor);

		mark = 0;

		// Centroid of every contour: (m10/m00, m01/m00).
		cv::vector<cv::Moments> mu(contours.size());
		cv::vector<cv::Point2f> mc(contours.size());

		for( int i = 0; i < contours.size(); i++ )
		{
			mu[i] = moments( contours[i], false );
			mc[i] = cv::Point2f( mu[i].m10/mu[i].m00 , mu[i].m01/mu[i].m00 );
		}

		// A QR finder pattern appears as a contour nested at least 5 levels
		// deep (hierarchy[k][2] is the index of the first child contour).
		for( int i = 0; i < contours.size(); i++ )
		{
			int k = i;
			int c = 0;

			while(hierarchy[k][2] != -1)
			{
				k = hierarchy[k][2];
				c = c + 1;
			}
			// BUG FIX: removed the original dead `if (hierarchy[k][2] != -1)
			// c = c + 1;` — the loop above only exits when that test is false.

			if (c >= 5)
			{
				if (mark == 0)		A = i;
				else if (mark == 1)	B = i;		// A is already found, assign current contour to B
				else if (mark == 2)	C = i;		// A and B are already found, assign current contour to C
				mark = mark + 1;
			}
		}

		// BUG FIX: test the pattern count instead of `A != 0 && B != 0 && C != 0`;
		// the old sentinel failed whenever a finder pattern was contour index 0.
		if (mark >= 3)
		{
			// Crop and binarize the (hard-coded) QR region of the frame.
			cv::Mat imagecropped = imCalibColor;
			cv::Rect ROI(280/*pointQR[0].x*/, 260/*pointQR[0].y*/, 253, 218);
			cv::Mat croppedRef(imagecropped, ROI);
			cv::cvtColor(croppedRef, imagecropped, CV_BGR2GRAY);
			cv::threshold(imagecropped, imagecropped, 180, 255, CV_THRESH_BINARY);

			// The three finder-pattern centroids...
			pointQR.push_back(mc[A]);
			cv::circle(imCalibColor, cv::Point(pointQR[0].x, pointQR[0].y), 3, cv::Scalar(0, 0, 255), 1, 8, 0);
			pointQR.push_back(mc[B]);
			cv::circle(imCalibColor, cv::Point(pointQR[1].x, pointQR[1].y), 3, cv::Scalar(0, 0, 255), 1, 8, 0);
			pointQR.push_back(mc[C]);
			cv::circle(imCalibColor, cv::Point(pointQR[2].x, pointQR[2].y), 3, cv::Scalar(0, 0, 255), 1, 8, 0);

			// ...and the midpoints of each centroid pair.
			cv::Point2f D(0.0f,0.0f);
			cv::Point2f E(0.0f,0.0f);
			cv::Point2f F(0.0f,0.0f);

			D.x = (mc[A].x + mc[B].x)/2;
			E.x = (mc[B].x + mc[C].x)/2;
			F.x = (mc[C].x + mc[A].x)/2;

			D.y = (mc[A].y + mc[B].y)/2;
			E.y = (mc[B].y + mc[C].y)/2;
			F.y = (mc[C].y + mc[A].y)/2;

			pointQR.push_back(D);
			cv::circle(imCalibColor, cv::Point(pointQR[3].x, pointQR[3].y), 3, cv::Scalar(0, 0, 255), 1, 8, 0);
			pointQR.push_back(E);
			cv::circle(imCalibColor, cv::Point(pointQR[4].x, pointQR[4].y), 3, cv::Scalar(0, 0, 255), 1, 8, 0);
			pointQR.push_back(F);
			cv::circle(imCalibColor, cv::Point(pointQR[5].x, pointQR[5].y), 3, cv::Scalar(0, 0, 255), 1, 8, 0);

			patternFound = true;
			std::cout << "patternfound" << std::endl;

			// SIFT keypoints + descriptors for the reference QR and the crop.
			cv::SiftFeatureDetector detector;
			cv::vector<cv::KeyPoint> keypoints1, keypoints2;
			detector.detect(tabQR[3], keypoints1);
			detector.detect(imagecropped, keypoints2);

			cv::Ptr<cv::DescriptorExtractor> descriptor = cv::DescriptorExtractor::create("SIFT");
			cv::Mat descriptors1, descriptors2;
			descriptor->compute(tabQR[3], keypoints1, descriptors1 );
			descriptor->compute(imagecropped, keypoints2, descriptors2 );

			// FLANN matching; keep matches within twice the smallest distance.
			cv::FlannBasedMatcher matcher;
			std::vector< cv::DMatch > matches;
			matcher.match( descriptors1, descriptors2, matches );
			double max_dist = 0; double min_dist = 100;

			for( int i = 0; i < descriptors1.rows; i++ )
			{
				double dist = matches[i].distance;
				if( dist < min_dist ) min_dist = dist;
				if( dist > max_dist ) max_dist = dist;
			}

			std::vector< cv::DMatch > good_matches;
			for( int i = 0; i < descriptors1.rows; i++ )
				if( matches[i].distance <= 2*min_dist )
					good_matches.push_back( matches[i]);
			cv::Mat imgout;
			drawMatches(tabQR[3], keypoints1, imagecropped, keypoints2, good_matches, imgout);

			// Homography from reference to crop, then warp the reference and
			// paste the crop over the left half of the result.
			std::vector<cv::Point2f> pt_img1;
			std::vector<cv::Point2f> pt_img2;
			for( int i = 0; i < (int)good_matches.size(); i++ )
			{
				pt_img1.push_back(keypoints1[ good_matches[i].queryIdx ].pt );
				pt_img2.push_back(keypoints2[ good_matches[i].trainIdx ].pt );
			}
			cv::Mat H = findHomography( pt_img1, pt_img2, CV_RANSAC );

			cv::Mat result;
			warpPerspective(tabQR[3],result,H,cv::Size(tabQR[3].cols+imagecropped.cols,tabQR[3].rows));
			cv::Mat half(result,cv::Rect(0,0,imagecropped.cols,imagecropped.rows));
			imagecropped.copyTo(half);
			imshow( "Result", result );

			break;
		}

		key = (char)cv::waitKey(67);
	}while(patternFound != true && key != 27);

	if(patternFound)
		imCalibNext = imCalibColor;

	return patternFound;
}
Exemple #15
0
// Extracts the left (keyPoint1) and right (keyPoint5) mouth-corner key
// points.  Candidate pixels are rows of rMidFinal that are noticeably
// darker than their vertical neighborhood; the candidates are rasterized,
// closed into contours via Otsu-derived Canny thresholds, and then scanned
// column-wise from both image halves for the outermost contour pixels.
//
// Params:
//   mouthImg                  BGR mouth region (assumed same size as
//                             rMidFinal — TODO confirm)
//   thresholdDifferenceToAvg  minimum darkness percentage vs. neighborhood avg
//   totalLineCheck            vertical window size for the darkness test
//   kp1Break / kp5Break       max vertical jump between consecutive column
//                             candidates before the corner scan stops
void KeyPointsDeliverer::extractMouthCornerKeyPoints(Mat &mouthImg, int thresholdDifferenceToAvg, int totalLineCheck, int kp1Break, int kp5Break)
{
    // Candidate points: pixels darker than the average of the
    // +-totalLineCheck/2 rows around them by more than the threshold.
    QList<PossibleKeyPoint> possibleKeyPoints;
    PossibleKeyPoint possibleKeyPoint;

    for (int i = 0; i < rMidFinal.cols; ++i) {
        for (int j = rMidFinal.rows - totalLineCheck; j > totalLineCheck / 2; --j) {

            int currentDiffToAvg = 0;

            // Sum the pixels totalLineCheck/2 rows above and below (j, i).
            for (int k = 1; k < totalLineCheck / 2 + 1; ++k) {
                currentDiffToAvg += rMidFinal.at<uchar>(j - k, i) + rMidFinal.at<uchar>(j + k, i);
            }
            currentDiffToAvg = currentDiffToAvg / totalLineCheck;

            // Percentage by which (j, i) is darker than the neighborhood average.
            if (currentDiffToAvg > 0) {
                currentDiffToAvg = 100 - (rMidFinal.at<uchar>(j, i) * 100 / currentDiffToAvg);
            }

            if (currentDiffToAvg > thresholdDifferenceToAvg) {
                possibleKeyPoint.differenceToAvg = currentDiffToAvg;
                possibleKeyPoint.keyPoint.x = i;
                possibleKeyPoint.keyPoint.y = j;

                possibleKeyPoints.append(possibleKeyPoint);
            }
        }
    }

    // Rasterize the candidates into a binary image; every third candidate
    // is also drawn onto rMidFinal for visualization.
    Mat contourImg(rMidFinal.rows, rMidFinal.cols, CV_8UC1, Scalar(0,0,0));
    Point p;
    for (int i = 0; i < possibleKeyPoints.size(); ++i) {
        p = possibleKeyPoints.at(i).keyPoint;

        if(i % 3 == 0)
          circle(rMidFinal, p, 1, Scalar(255,255,255));

        contourImg.at<uchar>(p.y, p.x) = 255;
    }

    // Otsu's threshold of the candidate image provides the Canny thresholds.
    Mat _img;
    double otsu_thresh_val = cv::threshold(
                contourImg, _img, 0, 255, CV_THRESH_BINARY | CV_THRESH_OTSU
                );

    Canny(contourImg, contourImg, otsu_thresh_val*0.5, otsu_thresh_val, 3, true);

    // Close the candidate cloud into connected contours.
    vector<vector<Point> > contours;
    vector<Vec4i> hierarchy;
    findContours( contourImg, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0) );
    for( uint i = 0; i< contours.size(); i++ )
    {
        drawContours( contourImg, contours, i, Scalar(255,255,255), 1, 8, hierarchy, 1, Point() );
    }

    // Mean luminance / pseudo-hue of the left mouth half.
    // NOTE(review): these means are computed but never read below — kept to
    // preserve any side effects of the imageProcessing calls; confirm before
    // removing.
    double luminanceMean = 0.0;
    double pseudoHueMean = 0.0;
    double pseudoHuePxl = 0.0;
    int luminancePxl = 0;

    for (int i = 0; i < mouthImg.cols/2; ++i) {
        for (int j = 0; j < mouthImg.rows; ++j) {
            pseudoHuePxl = imageProcessing.pseudoHuePxl(mouthImg, j, i);
            luminancePxl = imageProcessing.luminancePxl(mouthImg, j, i);

            luminanceMean += luminancePxl;
            pseudoHueMean += pseudoHuePxl;
        }
    }
    luminanceMean /= (mouthImg.cols/2*mouthImg.rows);
    pseudoHueMean /= (mouthImg.cols/2*mouthImg.rows);

    // Left corner scan: walk the left 60% of the columns right-to-left and
    // record the first contour pixel found in each column (bottom-up).
    QList<PossibleKeyPoint> pKPoints;
    PossibleKeyPoint pKPoint;

    for (int i = mouthImg.cols/2-(mouthImg.cols/2*0.4); i > 0; --i) {
        for (int j = mouthImg.rows-(mouthImg.rows/2*0.4); j > 0; --j) {
            pseudoHuePxl = imageProcessing.pseudoHuePxl(mouthImg, j, i);   // NOTE(review): result unused
            luminancePxl = imageProcessing.luminancePxl(mouthImg, j, i);   // NOTE(review): result unused

            if(contourImg.at<uchar>(j,i) == 255){
                pKPoint.keyPoint.x = i;
                pKPoint.keyPoint.y = j;
                pKPoints.append(pKPoint);
                break;
            }
        }
    }

    // keyPoint1 = leftmost candidate seen before the first vertical jump
    // larger than kp1Break.
    keyPoint1.x = 1000;
    for (int i = 0; i < pKPoints.size(); ++i) {
        int diffY = 0;

        if(i > 0){
            diffY = abs(pKPoints.at(i).keyPoint.y - pKPoints.at(i-1).keyPoint.y);
        }

        if(diffY > kp1Break){
            break;
        }

        if(keyPoint1.x > pKPoints.at(i).keyPoint.x){
            keyPoint1.x = pKPoints.at(i).keyPoint.x;
            keyPoint1.y = pKPoints.at(i).keyPoint.y;
        }
    }

    // Same statistics for the right mouth half (also unused below).
    luminanceMean = 0.0;
    pseudoHueMean = 0.0;
    for (int i = mouthImg.cols/2; i < mouthImg.cols; ++i) {
        for (int j = 0; j < mouthImg.rows-(mouthImg.rows/2*0.4); ++j) {
            pseudoHuePxl = imageProcessing.pseudoHuePxl(mouthImg, j, i);
            luminancePxl = imageProcessing.luminancePxl(mouthImg, j, i);

            luminanceMean += luminancePxl;
            pseudoHueMean += pseudoHuePxl;
        }
    }
    luminanceMean /= (mouthImg.cols/2*mouthImg.rows);
    pseudoHueMean /= (mouthImg.cols/2*mouthImg.rows);

    pKPoints.clear();

    // Right corner scan: walk the right 35% of the columns left-to-right
    // and record the first contour pixel found in each column (bottom-up).
    for (int i = mouthImg.cols/2+(mouthImg.cols/2*0.3); i < mouthImg.cols; ++i) {
        // BUG FIX: start at rows - 1; the original started at j == rows and
        // read one row past the end of contourImg on the first iteration.
        for (int j = mouthImg.rows - 1; j > 0; --j) {
            pseudoHuePxl = imageProcessing.pseudoHuePxl(mouthImg, j, i);   // NOTE(review): result unused
            luminancePxl = imageProcessing.luminancePxl(mouthImg, j, i);   // NOTE(review): result unused

            if(contourImg.at<uchar>(j,i) == 255){
                pKPoint.keyPoint.x = i;
                pKPoint.keyPoint.y = j;
                pKPoints.append(pKPoint);
                break;
            }
        }
    }

    // keyPoint5 = rightmost candidate seen before the first vertical jump
    // larger than kp5Break.
    Point kpTmp;
    kpTmp.x = 0;
    kpTmp.y = 0;
    for (int i = 0; i < pKPoints.size(); ++i) {
        int diffY = 0;

        if(i > 0){
            diffY = abs(pKPoints.at(i).keyPoint.y - pKPoints.at(i-1).keyPoint.y);
        }

        if(diffY > kp5Break){
            break;
        }

        if(kpTmp.x < pKPoints.at(i).keyPoint.x){
            kpTmp.x = pKPoints.at(i).keyPoint.x;
            keyPoint5.x = pKPoints.at(i).keyPoint.x;
            keyPoint5.y = pKPoints.at(i).keyPoint.y;
        }
    }

}
// Computes the transformation between the camera image and the field model.
// Pipeline: blur + grayscale + Canny -> probabilistic Hough segments ->
// segment extension -> corner detection/filtering -> corner mapping and
// field match, which fills H.
//
// Params:
//   src       input BGR frame
//   imgDst    optional debug canvas; overlays are drawn only when it has
//             at least one column
//   modelBots model bot positions, stored in the member botPosField
//   H         output transformation, filled by findFieldMatch()
void FieldLineDetector::findTransformation(cv::Mat& src, cv::Mat& imgDst,
		std::vector<cv::Point2f>& modelBots, cv::Mat& H)
{
	this->botPosField = modelBots;

	// Blur + grayscale + Canny to get an edge image for the Hough transform.
	Mat imgBw;
	blur(src, imgBw, Size(5, 5));
	cvtColor(imgBw, imgBw, CV_BGR2GRAY);

	Mat imgEdges;
	Canny(imgBw, imgEdges, 50, 100, 3);

	std::vector<cv::Vec4i> lines;
	HoughLinesP(imgEdges, lines, 1, CV_PI / 180, min_threshold + p_trackbar,
			minLineLength, maxLineGap);

	// Extend each segment at both ends by scaleFactor/10 of its own length
	// so that segments belonging to the same field line overlap.
	for (size_t i = 0; i < lines.size(); i++)
	{
		cv::Vec4i v = lines[i];
		cv::Point2f p1 = Point2f(v[0], v[1]);
		cv::Point2f p2 = Point2f(v[2], v[3]);
		cv::Point2f p1p2 = p2 - p1;

		cv::Point2f scaleP2 = p2 + p1p2 * (scaleFactor / 10.0f);
		cv::Point2f scaleP1 = p1 - p1p2 * (scaleFactor / 10.0f);

		lines[i][0] = scaleP1.x;
		lines[i][1] = scaleP1.y;
		lines[i][2] = scaleP2.x;
		lines[i][3] = scaleP2.y;
	}

	createThresholdedImg(src);

	// Corner detection and matching against the field model.
	detectCorners(lines);
	filterCorners();
	findNeighbors(lines);
	findCornerMapping(mappedEdges);
	for (size_t i = 0; i < mappedEdges.size(); i++)
	{
		cout << (*mappedEdges[i]) << endl;
	}
	findFieldMatch(mappedEdges, H);

	// Debug drawing (only when a destination image was supplied).
	if (imgDst.cols > 0)
	{
		// Detected (extended) Hough segments.
		for (size_t i = 0; i < lines.size(); i++)
		{
			cv::Vec4i v = lines[i];
			cv::line(imgDst, cv::Point(v[0], v[1]), cv::Point(v[2], v[3]),
					cv::Scalar(0, 255, 0), 2);
		}

		// All corner candidates.
		for (size_t i = 0; i < cornerBuffer.size(); i++)
		{
			cv::circle(imgDst, cornerBuffer.at(i), 1, cv::Scalar(255, 0, 0), 2);
		}

		// Corners that survived filtering.
		for (size_t i = 0; i < detectedCorners.size(); i++)
		{
			circle(imgDst, detectedCorners[i]->point, (int) 20,
					Scalar(0, 255, 255), 1);
		}

		// Corner coordinates as text labels next to each filtered corner.
		for (size_t i = 0; i < detectedCorners.size(); i++)
		{
			stringstream ss;
			ss << detectedCorners[i]->point;
			putText(imgDst, ss.str(),
					detectedCorners[i]->point + Point2f(0, 10),
					FONT_HERSHEY_PLAIN, 1, Scalar(250, 0, 0));
		}
	}
}
// Process an image containing a line and return the angle with respect to NAO.
// Pipeline: grayscale + blur -> Canny -> contours -> drop contours with zero
// area, short perimeter, or invalid centroid -> pick the longest remaining
// contour and compute its angle.  Returns 90.0 ("go straight") when nothing
// usable is found.  Reads `src`; mutates the members src_gray, contoursClean,
// area, length, punto, puntoMax and angleToALine; shows debug windows when
// `local` is false.
double NaoVision::calculateAngleToBlackLine() {
    // Convert image to gray and blur it.
    cvtColor(src, src_gray, CV_BGR2GRAY);
    blur(src_gray, src_gray, Size(3,3));

    if(!local)
        imshow("src", src);

    Mat canny_output;
    vector<vector<Point> > contours;
    vector<Vec4i> hierarchy;

    // Detect edges using canny.
    Canny(src_gray, canny_output, thresh, thresh * 2, 3);

    // Find contours.
    findContours(canny_output, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));

    // Get the moments.
    vector<Moments> mu(contours.size());

    for(int i = 0; i < contours.size(); i++)
        mu[i] = moments(contours[i], false);

    // Get the mass centers.
    // NOTE(review): contours with m00 == 0 produce NaN centers here; they
    // are filtered out by the `area != 0` test below.
    vector<Point2f> mc( contours.size());

    for(int i = 0; i < contours.size(); i++)
        mc[i] = Point2f( mu[i].m10/mu[i].m00 , mu[i].m01/mu[i].m00);

    // Eliminate contours without area.
    contoursClean.clear();
    int indMax = 0;
    int lengthMax = 0;

    for(int i = 0; i < contours.size(); i++) {
        area = mu[i].m00;
        length = arcLength(contours[i], true);
        punto = mc[i];

        if(area != 0 && length > 200 && punto.x > 0 && punto.y > 0)
            contoursClean.push_back(contours.at(i));
    }

    if(contoursClean.size() != 0) {
        // Get moments and mass for new vector.
        vector<Moments> muClean(contoursClean.size());

        for(int i = 0; i < contoursClean.size(); i++)
            muClean[i] = moments(contoursClean[i], false);

        // Get the mass centers.
        vector<Point2f> mcClean( contoursClean.size());

        for(int i = 0; i < contoursClean.size(); i++)
            mcClean[i] = Point2f(muClean[i].m10/muClean[i].m00, muClean[i].m01/muClean[i].m00);

        // NOTE(review): this loop only leaves the members `punto` and
        // `length` set to the values of the LAST clean contour — confirm
        // that downstream consumers expect exactly that.
        for(int i = 0; i < contoursClean.size(); i++) {
            punto = mcClean[i];
            length = arcLength(contoursClean[i], true);
        }

        // Find the longest.
        // NOTE(review): lengthMax is recomputed via arcLength() on every
        // iteration, costing an extra O(n) arcLength calls.
        for(int i = 0; i < contoursClean.size(); i++) {
            length = arcLength(contoursClean[i], true);
            lengthMax = arcLength(contoursClean[indMax], true);

            if(i > 0) {
                if(length  > lengthMax)
                    indMax = i;
            } else
                indMax = 0;
        }

        // Draw contours.
        Mat drawing = Mat::zeros(canny_output.size(), CV_8UC3);

        Scalar color = Scalar( rng.uniform(0, 255), rng.uniform(0,255), rng.uniform(0,255));
        drawContours( drawing, contoursClean, indMax, color, 2, 8, hierarchy, 0, Point());
        circle(drawing, mcClean[indMax], 4, color, 5, 8, 0 );

        // Calculate the angle of the line.
        angleToALine = getAngleDegrees(contoursClean[indMax], drawing);

        puntoMax = mcClean[indMax];
        lengthMax = arcLength(contoursClean[indMax], true);

        // Show in a window.
        if(!local) {
            namedWindow("Contours", CV_WINDOW_AUTOSIZE);
            imshow("Contours", drawing);

            // Draw grid.
            line(drawing, Point(260,0), Point(260, drawing.rows), Scalar(255,255,255));
            line(drawing, Point(umbral,0), Point(umbral, drawing.rows), Scalar(255,255,255));
            line(drawing, Point((drawing.cols/2),0), Point((drawing.cols/2), drawing.rows), Scalar(255,255,255));
            line(drawing, Point(0,120), Point(320,120), Scalar(255,255,255));
            imshow("Contours", drawing);
        }
    }
    else { // Go straight.
        angleToALine = 90.0;
    }

    return angleToALine;
}
// Projects every octree voxel into each silhouette image, collects the 2-D
// endpoints of each voxel's 12 bounding-box edges, then extracts the largest
// Canny contour of the silhouette, draws contour and box edges into a canvas,
// shows/saves the canvas, and appends the largest contour area (one value per
// image) to "larget_boundingbox_contour.txt".
// NOTE(review): Start_point, End_point, PointStart and PointEnd are not
// declared in this function — presumably file-scope globals; confirm ownership.
void FindLargest_ProjectionVoxel(int ImageNum, vector<OctVoxel>& Octree, vector<cv::Mat>& Silhouette, Cpixel*** vertexII, CMatrix* ART){

	int thresh = 70;	// Canny lower threshold
	int max_thresh = 210;	// Canny upper threshold
	RNG rng(12345);		// fixed seed -> reproducible contour colour per run

	Mat src_gray;
	Mat drawing;

	double scale(0.7);	// NOTE(review): unused here (the resize call below is commented out)
	Size ssize;		// NOTE(review): unused here
	CVector M(4);		//Homogeneous coordinates of a vertex (x,y,z,1), world coordinate
	CVector m(4);		//Image coordinates (normalized) expressed in homogeneous coordinates
	M[3] = 1.0;
	//8 vertices world coordinates of the voxel (x,y,z)
	CVector3d vertexW[8];

	// Per-image largest contour areas are appended to this file.
	ofstream fout("larget_boundingbox_contour.txt");

	// Vertex-index pairs forming the 12 edges of a box.
	int Boundingbox_line[12][2] = { { 0, 1 }, { 1, 2 }, { 2, 3 }, { 3, 0 },
	{ 0, 4 }, { 1, 5 }, { 2, 6 }, { 3, 7 },
	{ 4, 5 }, { 5, 6 }, { 6, 7 }, { 7, 4 } };

	//---------------------------------------------------------------	
	for (auto h(0); h < ImageNum; h++){
		//src_gray = Silhouette[h];         	
		Silhouette[h].copyTo(src_gray);
		cout << "Silhouette_" << h << endl;

		for (auto j(0); j < Octree.size(); j++){

			// World coordinates of the 8 (rotated) voxel corners.
			Octree[j].SetVertexWorld_Rotated(vertexW);
			for (int k = 0; k < 8; ++k){	//8 vertices of the voxel
				M[0] = vertexW[k].x;
				M[1] = vertexW[k].y;
				M[2] = vertexW[k].z;
				// Project with camera h's projection matrix.
				m = ART[h] * M;
				vertexII[h][j][k].setPixel_u_v((int)(m[0] / m[2]), (int)(m[1] / m[2]));  // normalize
			}

			//-------------------------------------- bounding box ------------------------
			for (auto k(0); k < 12; k++){
				//Collect the 2-D endpoints of the 12 box edges of this voxel
				//(drawn later onto the contour canvas).
				Start_point.x = vertexII[h][j][Boundingbox_line[k][0]].getPixel_u();
				Start_point.y = vertexII[h][j][Boundingbox_line[k][0]].getPixel_v();
				PointStart.push_back(Start_point);
				End_point.x = vertexII[h][j][Boundingbox_line[k][1]].getPixel_u();
				End_point.y = vertexII[h][j][Boundingbox_line[k][1]].getPixel_v();
				PointEnd.push_back(End_point);

				//line(src_gray, Start_point, End_point, Scalar(225, 225,255), 2.0, CV_AA);
			}
		}
		

		Mat canny_output;
		vector<vector<Point> > contours;
		vector<Vec4i> hierarchy;

		double max_contour_area(0.0);
		int largest_contour_index(0);
		
		/// Detect edges using canny
		Canny(src_gray, canny_output, thresh, max_thresh, 3);
		/// Find contours
		//findContours(canny_output, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));
		findContours(canny_output, contours, hierarchy, CV_RETR_CCOMP, CV_CHAIN_APPROX_NONE, Point(0, 0));

		/// Draw all collected bounding-box edges onto a fresh canvas.
		drawing = Mat::zeros(canny_output.size(), CV_8UC3);

		for (auto n(0); n < PointEnd.size(); n++){
			line(drawing, PointStart[n], PointEnd[n], Scalar(225, 225, 225), 1.0, 1, 0);
		}

		/// Get the moments
		vector<Moments> mu(contours.size());
		for (int i = 0; i < contours.size(); i++)
		{
			mu[i] = moments(contours[i], false);
			//cout << "# of contour points: " << contours[i].size() << endl;
			for (int j = 0; j < contours[i].size(); j++)
			{
				//cout << "Point(x,y)=" <<i<<" j "<<j<<" "<< contours[i][j] << endl;
			}
		}
		////  Get the mass centers:
		//// NOTE(review): mc is NaN when mu[i].m00 == 0 (degenerate contour);
		//// harmless here since mc is only used in commented-out drawing code.
		vector<Point2f> mc(contours.size());
		for (int i = 0; i < contours.size(); i++)
		{
			mc[i] = Point2f(mu[i].m10 / mu[i].m00, mu[i].m01 / mu[i].m00);
		}
		//// ---------- - Find the convex hull object for each contour
			vector<vector<Point>>hull(contours.size());
		for (int i = 0; i < contours.size(); i++){
			convexHull(Mat(contours[i]), hull[i], false);
		}				
		
		// Calculate the area with the moments 00 and compare with the result of the OpenCV function
		//printf("\t Info: Area and Contour Length \n");

		//cout << "contours.size() " << contours.size() << endl;
		double countour_Area(0.0);
		double arc_Length(0.0);

		// Scan all contours for the one with the largest area.
		for (int i = 0; i < contours.size(); i++)
		{
			countour_Area = (double)contourArea(contours[i]);
			arc_Length = (double)arcLength(contours[i], true);

			//cout << "contourArea [" << i << "] " << ": Moment " << mu[i].m00 
			//	 << " OpenCV " << countour_Area << " arcLength " << arc_Length << endl;		
			//cout << "countour_Area "<< countour_Area << " " << endl;

			if (countour_Area > max_contour_area){
				max_contour_area = countour_Area;
				largest_contour_index = i;
			}

			//------- draw all contour ---------------
			//Scalar color = Scalar(rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255));
			//drawContours(drawing, contours, i, color, 2, 8, hierarchy, 0, Point());
			//circle(drawing, mc[i], 4, color, -1, 8, 0);
			//drawContours(drawing, hull, i, color, 1, 8, vector<Vec4i>(), 0, Point());
			//drawContours(drawing, contours, i, Scalar(255, 255, 255), 0.10, 8, hierarchy, 0, Point());

		}
		//------- draw largest contour ---------------
		Scalar color = Scalar(rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255));
		drawContours(drawing, contours, largest_contour_index, color, 2, 8, hierarchy, 0, Point());
		//circle(drawing, mc[largest_contour_index], 4, color, -1, 8, 0);		
		//drawContours(drawing, contours, largest_contour_index, Scalar(0, 255, 255), 2, 8, hierarchy, 0, Point());
		//drawContours(drawing, hull, largest_contour_index, color, 2, 8, vector<Vec4i>(), 0, Point());
		//drawContours(drawing, contours, largest_contour_index, Scalar(255, 255, 255), 1, 8, hierarchy, 0, Point());

		// Log the area of the largest contour of this silhouette.
		fout << max_contour_area << endl;
		cout << "max_contour_area " << max_contour_area << endl;	
		
		//----------------------- Show in a window --------------------------------------
		//resize(drawing, drawing, ssize, INTER_NEAREST);
		namedWindow("Contours", CV_WINDOW_AUTOSIZE);
		imshow("Contours", drawing);

		//output white boundary
		imwrite("../../data2016/input/newzebra/contour_voxel/contour_voxel" + to_string(h) + ".bmp", drawing);

		waitKey(0);
		// NOTE(review): destroys a window named "silhouette", but the window
		// shown above is "Contours" — possibly a leftover name; confirm.
		destroyWindow("silhouette");

		// Reset the per-image edge-point buffers (file-scope state).
		PointStart.clear();
		PointStart.shrink_to_fit();
		PointEnd.clear();
		PointEnd.shrink_to_fit();
	}

	//getchar();
}
/// Tries each piece colour in priority order (YELLOW, RED, GREEN, BLUE,
/// BLACK): segments the sharpened frame by that colour, looks for a large
/// non-square quadrilateral (played pieces are rectangles) and reads the
/// board-slot colour under it.  Returns {boardColor, pieceColor} for the
/// first piece found, or {NONE, NONE} when nothing is detected.
/// The five per-colour blocks of the original were byte-identical except for
/// the colour constant, so they are folded into one helper below.
QVector<CColorsType> CColor::getCorBoard_CorPlaced()
{
    QVector<CColorsType> result;

    // Detect a played (rectangular, non-square) piece of `pieceColor`.
    // Fills `result` and returns true when one is found.
    auto detectPiece = [&](CColorsType pieceColor) -> bool {
        imgFHSV = Frame2HSV(imgSharp, pieceColor);
        Canny(imgFHSV, imgCanny, 180, 120);
        findContours(imgCanny, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));
        for (size_t i = 0; i < contours.size(); i++)
        {
            // Polygonal approximation with 2% of the perimeter as tolerance.
            approxPolyDP(Mat(contours[i]), approx, arcLength(Mat(contours[i]), true)*0.02, true);
            // Area filter rejects noise; played pieces approximate to 4 corners.
            if (fabs(contourArea(approx)) > 1000 && approx.size() == 4)
            {
                if (!fig.isSquare(approx)) // pieces are rectangles, board slots are squares
                {
                    CColorsType colorBoard = poscenter.getBoardColor(pieceColor);
                    // First argument: true = piece sits on its matching slot.
                    setMessage2Robot(colorBoard == pieceColor, colorBoard, pieceColor);
                    result = QVector<CColorsType>() << colorBoard << pieceColor;
                    return true;
                }
            }
        }
        return false;
    };

    // Same detection order as the original hand-unrolled version.
    const CColorsType order[] = {
        CColorsType::YELLOW, CColorsType::RED, CColorsType::GREEN,
        CColorsType::BLUE, CColorsType::BLACK
    };
    for (CColorsType c : order) {
        if (detectPiece(c))
            return result;
    }

    return QVector<CColorsType>() << CColorsType::NONE  << CColorsType::NONE;  ///REVER
}
/** @function thresh_callback
 *  @brief  Finds the largest contour (by area) in src_gray via Canny edge
 *          detection, draws its outline, mass centre and convex hull into
 *          `drawing`, and logs the area to "larget_contour_area.txt".
 *  @param  src_gray  single-channel input image.
 *  @param  drawing   out: canvas reallocated to the Canny output size.
 *  @param  scale     factor applied to the drawing size to compute ssize.
 *  @param  ssize     out: drawing size scaled by `scale`.
 */
void thresh_callback(Mat src_gray, Mat& drawing, double scale, Size& ssize)
{
	RNG rng(12345);		// fixed seed -> deterministic contour colour
	int thresh = 100;
	ofstream fout("larget_contour_area.txt");

	Mat canny_output;
	vector<vector<Point> > contours;
	vector<Vec4i> hierarchy;

	double max_contour_area(0.0);
	int largest_contour_index(0);

	/// Detect edges using canny
	Canny(src_gray, canny_output, thresh, thresh * 2, 3);
	/// Find contours
	findContours(canny_output, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));

	/// Moments, needed for the mass centres below.
	vector<Moments> mu(contours.size());
	for (size_t i = 0; i < contours.size(); i++)
		mu[i] = moments(contours[i], false);

	/// Mass centres.  Guard against m00 == 0 (degenerate contour), which in
	/// the original produced NaN coordinates fed to circle().
	vector<Point2f> mc(contours.size());
	for (size_t i = 0; i < contours.size(); i++)
	{
		if (mu[i].m00 != 0.0)
			mc[i] = Point2f(mu[i].m10 / mu[i].m00, mu[i].m01 / mu[i].m00);
		else
			mc[i] = Point2f(0.f, 0.f);
	}

	/// Convex hull of every contour (only the largest one is drawn).
	vector<vector<Point> > hull(contours.size());
	for (size_t i = 0; i < contours.size(); i++)
		convexHull(Mat(contours[i]), hull[i], false);

	//------------ Prepare the output canvas and the scaled size.
	drawing = Mat::zeros(canny_output.size(), CV_8UC3);
	ssize = Size((int)(drawing.size().width * scale), (int)(drawing.size().height * scale));

	/// Locate the contour with the largest area.
	for (size_t i = 0; i < contours.size(); i++)
	{
		double contour_area = contourArea(contours[i]);
		if (contour_area > max_contour_area) {
			max_contour_area = contour_area;
			largest_contour_index = (int)i;
		}
	}

	//------- draw largest contour ---------------
	if (contours.size() > 0) {
		Scalar color = Scalar(rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255));
		drawContours(drawing, contours, largest_contour_index, color, 1, 8, hierarchy, 0, Point());
		circle(drawing, mc[largest_contour_index], 4, color, -1, 8, 0);
		drawContours(drawing, hull, largest_contour_index, color, 1, 8, vector<Vec4i>(), 0, Point());
	}
	fout << max_contour_area << endl;
	cout << "max_contour_area " << max_contour_area << endl;

}
///Level 1
void CColor::setALLBoardColorPosition()
{
    Scalar colorContours;
    imgFHSVBoard = Frame2HSV(imgSharp, CColorsType::NOTWHITE);
    ///setColorPosition2Board(CColorsType::RED);
    imgFHSV = Frame2HSV(imgSharp, CColorsType::RED);
    bitwise_and(imgFHSVBoard, imgFHSV, imgFComper); //sumar a figura imgFHSVBoard com imgFHSV para obter só a cor
    Canny(imgFComper, imgCanny, 180, 120);
    findContours(imgCanny, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));

    drawingContours = Mat::zeros(imgCanny.size(), CV_8UC3);
    for (size_t i = 0; i< contours.size(); i++) {
        approxPolyDP(Mat(contours[i]), approx, arcLength(Mat(contours[i]), true)*0.02, true);// 5, true); //
        if(fabs(contourArea(approx)) > 1000 && approx.size() == 4)
        {
            if(fig.isSquare(approx)) {
                poscenter.ChooseSaveBoardColorCenter( CColorsType::RED, contours[i]);

                colorContours = CV_RGB(255, 0, 0);
                drawContours(drawingContours, contours, (int)i, colorContours, 2, /*CV_AA*/8, hierarchy, 0, Point());
            }
        }
    }

    ///setColorPosition2Board(CColorsType::GREEN);
    imgFHSV = Frame2HSV(imgSharp, CColorsType::GREEN);
    bitwise_and(imgFHSVBoard, imgFHSV, imgFComper); //sumar a figura imgFHSVBoard com imgFHSV para obter só a cor
    Canny(imgFComper, imgCanny, 180, 120);
    findContours(imgCanny, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));

    for (size_t i = 0; i< contours.size(); i++) {
        approxPolyDP(Mat(contours[i]), approx, arcLength(Mat(contours[i]), true)*0.02, true);// 5, true); //
        if(fabs(contourArea(approx)) > 1000 && approx.size() == 4)
        {
            if(fig.isSquare(approx)) {
                poscenter.ChooseSaveBoardColorCenter( CColorsType::GREEN, contours[i]);

                colorContours = CV_RGB(0, 255, 0);
                drawContours(drawingContours, contours, (int)i, colorContours, 2, /*CV_AA*/8, hierarchy, 0, Point());
            }
        }
    }

    ///setColorPosition2Board(CColorsType::BLUE);
    imgFHSV = Frame2HSV(imgSharp, CColorsType::BLUE);
    bitwise_and(imgFHSVBoard, imgFHSV, imgFComper); //sumar a figura imgFHSVBoard com imgFHSV para obter só a cor
    Canny(imgFComper, imgCanny, 180, 120);
    findContours(imgCanny, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));

    for (size_t i = 0; i< contours.size(); i++) {
        approxPolyDP(Mat(contours[i]), approx, arcLength(Mat(contours[i]), true)*0.02, true);// 5, true); //
        if(fabs(contourArea(approx)) > 1000 && approx.size() == 4)
        {
            if(fig.isSquare(approx)) {
                poscenter.ChooseSaveBoardColorCenter( CColorsType::BLUE, contours[i]);

                colorContours = CV_RGB(0, 0, 255);
                drawContours(drawingContours, contours, (int)i, colorContours, 2, /*CV_AA*/8, hierarchy, 0, Point());
            }
        }
    }

    ////setColorPosition2Board(CColorsType::YELLOW);
    imgFHSV = Frame2HSV(imgSharp, CColorsType::YELLOW);
    bitwise_and(imgFHSVBoard, imgFHSV, imgFComper); //sumar a figura imgFHSVBoard com imgFHSV para obter só a cor
    Canny(imgFComper, imgCanny, 180, 120);
    findContours(imgCanny, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));

    for (size_t i = 0; i< contours.size(); i++) {
        approxPolyDP(Mat(contours[i]), approx, arcLength(Mat(contours[i]), true)*0.02, true);// 5, true); //
        if(fabs(contourArea(approx)) > 1000 && approx.size() == 4)
        {
            if(fig.isSquare(approx)) {
                poscenter.ChooseSaveBoardColorCenter( CColorsType::YELLOW, contours[i]);

                colorContours = CV_RGB(255, 255, 0);
                drawContours(drawingContours, contours, (int)i, colorContours, 2, /*CV_AA*/8, hierarchy, 0, Point());
            }
        }
    }

    ///setColorPosition2Board(CColorsType::BLACK);
    imgFHSV = Frame2HSV(imgSharp, CColorsType::BLACK );
    bitwise_and(imgFHSVBoard, imgFHSV, imgFComper); //sumar a figura imgFHSVBoard com imgFHSV para obter só a cor
    Canny(imgFComper, imgCanny, 180, 120);
    findContours(imgCanny, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));

    for (size_t i = 0; i< contours.size(); i++) {
        approxPolyDP(Mat(contours[i]), approx, arcLength(Mat(contours[i]), true)*0.02, true);// 5, true); //
        if(fabs(contourArea(approx)) > 1000 && approx.size() == 4)
        {
            if(fig.isSquare(approx)) {
                poscenter.ChooseSaveBoardColorCenter( CColorsType::BLACK, contours[i]);

                colorContours = CV_RGB(255, 255, 0);
                drawContours(drawingContours, contours, (int)i, colorContours, 2, /*CV_AA*/8, hierarchy, 0, Point());
            }
        }
    }

    //imshow("drawingContours", drawingContours);
}
// Wall-following behaviour: a PI controller on the side IR sensor pair keeps
// the robot parallel to the target wall while a P controller regulates the
// lateral distance.  The camera image is scanned with a Hough transform; once
// enough lines are detected the odometry is re-anchored to the wall, the robot
// stops and control returns to the previous state.
void AndarPelaParedeAteLinha::execute(Robotino *robotino)
{
    // FIX: Vy and w must be zero-initialised — when paredeAlvo matches
    // neither branch below, the original passed them to setVelocity()
    // uninitialised (undefined behaviour).
    float Vx = 200, Vy = 0, w = 0, distParede;
    float erroDist = 0;
    int paredeAlvo = robotino->paredeAlvo();

    // Geometry constants of the angled IR pair (sensors mounted at 20 deg).
    static float a = std::sin(60*PI/180)/std::sin(80*PI/180);
    static float cos20 = std::cos(20*PI/180);
    static float K = R*(a-1);
    static float erro_int = 0;          // PI integrator, persists across calls

    float e1 = robotino->irDistance(Robotino::IR_ESQUERDO_1);
    float e2 = robotino->irDistance(Robotino::IR_ESQUERDO_2);
    float ref_e1 = e2*a+K;              // expected left-front reading when parallel
    float d1 = robotino->irDistance(Robotino::IR_DIREITO_1);
    float d2 = robotino->irDistance(Robotino::IR_DIREITO_2);
    float ref_d1 = 1.15*(d2*a+K);       // right side carries an empirical 1.15 gain
    float distancia_da_esquerda, distancia_da_direita;
    float erro;

    vector<Vec4i> lines;
    Mat img, cdst;
    int num_linha = 0;
    int min_Hough = 70, dist_Hough = 50;
    int min_canny =150 , max_canny = 3*min_canny;

    distParede = robotino->getRefDistParede();
    distParede += R;                    // reference measured from the robot centre

    // --- Line detection in the camera image (floor line) -----------------
    img = robotino->getImage();

    cvtColor( img, cdst, CV_BGR2GRAY );

    Canny( cdst, cdst, (double)min_canny, (double)max_canny, 3 );
    convertScaleAbs(cdst, cdst);

    //cv::imshow("Canny",cdst);
    //cv::waitKey(1);

    threshold(cdst, cdst, (double)5, (double)255, CV_THRESH_BINARY);

    HoughLinesP(cdst, lines, 1, CV_PI/180, min_Hough, min_Hough, dist_Hough );

    cvtColor( cdst, cdst, CV_GRAY2BGR );

    // --- Wall on the LEFT side -------------------------------------------
    if (paredeAlvo == Robotino::NORTEN90  || paredeAlvo == Robotino::OESTE0 || paredeAlvo == Robotino::SUL90 || paredeAlvo == Robotino::LESTE180){

        // PI on the parallelism error, P on the lateral distance.
        erro = (e1-ref_e1);
        erro_int += erro*dt;
        w = Kp*erro+Ki*erro_int;

        distancia_da_esquerda = ((e1+ref_e1+2*R)*cos20)/2;
        erroDist = (distancia_da_esquerda) - distParede;
        Vy = Kpy*erroDist;

        std::cout << "erro dist: " << erroDist << "\n";

        std::cout<< "Esquerda 1: " << e1 << std::endl;
        std::cout<< "RefEsquerda 1: " << ref_e1 << std::endl;
        std::cout<< "Esquerda 2: " << e2 << std::endl;

        std::cout << "Distância da esquerda: " << distancia_da_esquerda << "\n";

        // Re-anchor odometry to the wall when the floor line is visible.
        if (lines.size() > numeroLinhasMin){
            if (paredeAlvo == Robotino::OESTE0) {
                robotino->setOdometry(robotino->odometryX(),-(distancia_da_esquerda*10+15),0);
            }
            if (paredeAlvo == Robotino::NORTEN90) {
                robotino->setOdometry((robotino->getAlturaMapa())*10 -(distancia_da_esquerda*10+15),robotino->odometryY(),-90);
            }
             if (paredeAlvo == Robotino::SUL90) {
                robotino->setOdometry((distancia_da_esquerda*10+15),robotino->odometryY(),90);
            }
             if (paredeAlvo == Robotino::LESTE180) {
               robotino->setOdometry(robotino->odometryX(),-((robotino->getLarguraMapa())*10 -(distancia_da_esquerda*10+15)),180);
            }
        }

    // --- Wall on the RIGHT side ------------------------------------------
    }else if (paredeAlvo == Robotino::SULN90  || paredeAlvo == Robotino::LESTE0 || paredeAlvo == Robotino::NORTE90  || paredeAlvo == Robotino::OESTE180) {

        erro = (d1-ref_d1);
        erro_int += erro*dt;
        w = -Kp*erro-Ki*erro_int;       // sign inverted: wall on the other side

        distancia_da_direita = ((d1+ref_d1+2*R)*cos20)/2;
        erroDist = distParede - ( distancia_da_direita );
        Vy = Kpy*erroDist;

        std::cout<< "Direita 1: " << d1 << std::endl;
        std::cout<< "RefDireita 1: " << ref_d1 << std::endl;
        std::cout<< "Direita 2: " << d2 << std::endl;


        std::cout << "Distância da direita: " << distancia_da_direita << "\n";

        if (lines.size() > numeroLinhasMin){

            if (paredeAlvo == Robotino::SULN90) {
                robotino->setOdometry((distancia_da_direita*10+15),robotino->odometryY(),-90);
            }
            if (paredeAlvo == Robotino::LESTE0) {
                robotino->setOdometry(robotino->odometryX(),-((robotino->getLarguraMapa()) * 10-(distancia_da_direita*10+15)),0);
            }
            if (paredeAlvo == Robotino::NORTE90) {
                robotino->setOdometry((robotino->getAlturaMapa()*10 - (distancia_da_direita*10+15)),robotino->odometryY(),90);
            }
            if (paredeAlvo == Robotino::OESTE180) {
                robotino->setOdometry(robotino->odometryX(),-((distancia_da_direita*10+15)),180);
            }
        }
    }



    // Far from any wall: drive straight ahead, otherwise apply the controller.
    if(distParede > 99){
        robotino->setVelocity(Vx,0,0);
    }else{
        robotino->setVelocity(Vx,Vy,w);
    }

    // Count detected lines (the positional filter is kept disabled, as in
    // the original).
    for( size_t i = 0; i < lines.size(); i++ ){
      //if (lines[i][3]  > 100 || lines[i][1]  > 100){
        num_linha++;
      //}
    }

    // Enough lines -> the target line was reached: stop and go back.
    if (num_linha > numeroLinhasMin){
        robotino->setVelocity(0,0,0);

        robotino->change_state(robotino->previous_state());
    }

}
Exemple #23
0
// Finds rectangle (page) candidates in one image channel.  The image is
// binarised at several threshold levels (or at the single level given by
// `threshold`), contours are extracted at each level, and convex
// quadrilaterals passing the area/side/angle filters are collected into
// `rects` in original-image coordinates.  Candidates spanning almost the
// whole image (border artefacts) are discarded.
void DkPageSegmentation::findRectangles(const cv::Mat& img, std::vector<DkPolyRect>& rects, int channel, int threshold) const {

	cv::Mat imgL;
	cv::normalize(img, imgL, 255, 0, cv::NORM_MINMAX);

	// downscale
	if (scale != 1.0f)
		cv::resize(imgL, imgL, cv::Size(), scale, scale, CV_INTER_LINEAR);

	std::vector<std::vector<cv::Point> > contours;

	int threshStep = dsc::round(255.0 / numThresh);
	// FIX: with numThresh > ~510 the rounded step becomes 0 and the
	// threshold loop below (idx += threshStep) would never terminate.
	if (threshStep < 1)
		threshStep = 1;

	cv::Mat gray;
	std::vector<DkPolyRect> rectsL;

	// Threshold levels to evaluate.
	std::vector<int> indexes;
	if (threshold == -1) {

		// use less thresholds for a/b channels
		if (channel > 0)
			threshStep *= 2;

		for (int idx = 0; idx < 255; idx += threshStep)
			indexes.push_back(idx);
	}
	else
		indexes.push_back(threshold);

	// try several threshold levels
	for (int thr : indexes) {

		if (thr == 0) {

			// Level 0: use Canny edges instead of a plain threshold.
			int thresh = 80;
			Canny(imgL, gray, thresh, thresh*3, 5);
			// dilate canny output to remove potential
			// holes between edge segments
			//dilate(gray, gray, cv::Mat(), cv::Point(-1,-1));
		}
		else
			gray = imgL >= thr;

		cv::erode(gray, gray, cv::Mat(), cv::Point(-1,-1));

		// find contours and store them all as a list
		findContours(gray, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);

		// Optionally replace contours by their convex hulls (looser match).
		if (looseDetection) {
			std::vector<std::vector<cv::Point> > hull;
			for (int i = 0; i < (int)contours.size(); i++) {

				double cArea = contourArea(cv::Mat(contours[i]));

				if (fabs(cArea) > mMinArea*scale*scale && (!mMaxArea || fabs(cArea) < mMaxArea*(scale*scale))) {
					std::vector<cv::Point> cHull;
					cv::convexHull(cv::Mat(contours[i]), cHull, false);
					hull.push_back(cHull);
				}
			}

			contours = hull;
		}

		std::vector<cv::Point> approx;

		// test each contour
		for (size_t i = 0; i < contours.size(); i++) {
			// approximate the contour with accuracy proportional
			// to the contour perimeter
			approxPolyDP(cv::Mat(contours[i]), approx, arcLength(cv::Mat(contours[i]), true)*0.02, true);

			double cArea = contourArea(cv::Mat(approx));

			// square contours should have 4 vertices after approximation,
			// relatively large area (to filter out noisy contours)
			// and be convex.
			// Note: absolute value of an area is used because
			// area may be positive or negative - in accordance with the
			// contour orientation
			if (approx.size() == 4 &&
				fabs(cArea) > mMinArea*scale*scale &&
				(!mMaxArea || fabs(cArea) < mMaxArea*scale*scale) &&
				isContourConvex(cv::Mat(approx))) {

				DkPolyRect cr(approx);

				// if cosines of all angles are small
				// (all angles are ~90 degree)
				if ((!maxSide || cr.maxSide() < maxSide*scale) &&
					cr.getMaxCosine() < 0.3 ) {

					cr.setChannel(channel);
					cr.setThreshold(thr);
					rectsL.push_back(cr);
				}
			}
		}
	}

	// Map candidates back to original-image coordinates.
	for (size_t idx = 0; idx < rectsL.size(); idx++)
		rectsL[idx].scale(1.0f/scale);


	// filter rectangles which are found because of the image border
	for (const DkPolyRect& p : rectsL) {

		DkBox b = p.getBBox();

		if (b.size().height < img.rows*maxSideFactor &&
			b.size().width < img.cols*maxSideFactor) {
			rects.push_back(p);
		}
	}
}
Exemple #24
0
// Scans `img` for circle-shaped contours: Canny -> contours, then a series of
// normalized-central-moment ratio tests (rotational symmetry of a circle)
// plus an area/enclosing-circle ratio test.  Each accepted centre/radius is
// appended to this->center / this->radius (duplicates within 20 px are
// skipped; at most `max` circles are kept).  Returns true when at least one
// circle was added.
bool ShapeFilter::findCirc(cv::Mat img){
    //getting the contours
    cv::Mat canny;
    std::vector<std::vector<cv::Point> > contours;
    std::vector<cv::Vec4i> hierarchy;

    float radius;
    cv::Point2f center;
    this->radius.clear();
    this->center.clear();

    int thresh = 100;
    // Detect edges using canny
    Canny(img, canny, thresh, thresh*2, 3 );
    // Find contours
    findContours( canny, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, cv::Point(0, 0) );

    int circlesFound = 0;
    //constants
    unsigned int minPoints = 6;   // minimum contour points for a meaningful fit
    double minArea = 10;          // minimum contour area
    float minRad = 100;           // NOTE(review): despite the name, this is used
                                  // as an UPPER bound on the radius below, and
                                  // the corresponding log message says "too
                                  // small" — confirm which of the three is the
                                  // intended semantics.

    // FIX: iterate by const reference — the original copied every contour
    // (a whole vector<Point>) once per iteration.
    for (const std::vector<cv::Point>& co : contours){
        if (co.size() < minPoints){
            println("Circle Not enough Points");
            continue;
        }

        double area = cv::contourArea(co);
        if (area < minArea) {
            println ("Circle not enough area " + std::to_string(area));
            continue;
        }

        // Normalized central moments of the contour.
        cv::Moments cvmoments = moments(co, false);
        double nu11 = cvmoments.nu11;
        // FIX: the original swapped these two assignments (nu20 held nu02 and
        // vice versa).  Every expression below is symmetric in the pair
        // (ratio normalised to >= 1, MEAN2 of both), so results are
        // unchanged — this is a readability/correct-naming fix only.
        double nu20 = cvmoments.nu20;
        double nu02 = cvmoments.nu02;
        double nu21 = cvmoments.nu21;
        double nu12 = cvmoments.nu12;
        double nu03 = cvmoments.nu03;
        double nu30 = cvmoments.nu30;

        // Each ratio is normalised to >= 1 so the thresholds are one-sided.
        double r03 = fabs(nu30 / nu03);
        r03 = (r03 > 1) ? r03 : 1.0/r03;
        double r12 = fabs(nu12 / nu21);
        r12 = (r12 > 1) ? r12 : 1.0/r12;
        double r02 = fabs(nu02 / nu20);
        r02 = (r02 > 1) ? r02 : 1.0/r02;

        double r11 = fabs( MEAN2(nu02,nu20) / nu11);
        double R = MEAN2(nu20,nu02) / std::max((MEAN2(nu21,nu12)), (MEAN2(nu30,nu03)));
        // Symmetry thresholds for a circle (empirical).
        bool pass = (r03 <= 25.0) && (r12 <= 12.0) && (r02 <= 12.0) && (r11 > 2.5) && (R > 25);

        if (!pass){
            println("Circle failed math test");
            continue;
        }

        // Radius and centre of the minimal enclosing circle.
        cv::minEnclosingCircle(co, center, radius);

        // NOTE(review): message disagrees with the check (see minRad above);
        // both kept byte-identical to preserve behaviour and log output.
        if (radius > minRad || radius < 0) {
            println("Circle radius too small");
            continue;
        }

        // A true circle fills ~pi*r^2 of its enclosing circle (ratio ~1).
        double area_ratio = area / (CV_PI*radius*radius);
        if (area_ratio < 0.7) {
            println("Circle fail Area");
            continue;
        }

        // Skip circles already recorded (within 20 px in both x and y).
        bool repeat = false;
        for (unsigned int i = 0; i < this->center.size(); i++){
            cv::Point2f c = this->center[i];
            if (std::abs(c.x-center.x) < 20 && std::abs(c.y - center.y) < 20){
                repeat = true;
                break;
            }
        }

        if (!repeat){
            //check if i found the number of circles requested
            if (this->center.size() < max){
                println("Found circle");
                this->radius.push_back(radius);
                this->center.push_back(center);
                circlesFound++;
            }else{
                println("Already found enough circles");
            }
        }else{
            println("Already found this circle");
        }
    }
    return circlesFound != 0;
}
void MainWindow::capturePhoto() {

    if (photosCaptured < QString(ui->numPhotoLineEdit->text()).toInt()) {

        try {

            CGrabResultPtr ptrGrabResult;
            CInstantCamera Camera(CTlFactory::GetInstance().CreateDevice(camera));

            if (Camera.GrabOne(QString(ui->delayLineEdit->text()).toInt(), ptrGrabResult)) {

                time_t t = time(0);
                struct tm now = *localtime(&t);
                strftime(timebuf, sizeof(timebuf), "%Y-%m-%d %X", &now);
                strftime(filebuf, sizeof(filebuf), "PlanktonTracker_Files/%Y-%m-%d_%X.png", &now);

                CImagePersistence::Save(ImageFileFormat_Png, filebuf, ptrGrabResult);

                QImage img = QImage(filebuf);
                QImage scaledImage = img.scaled(ui->imageWidget->size(), Qt::KeepAspectRatio, Qt::SmoothTransformation);
                ui->imageWidget->setPixmap(QPixmap::fromImage(scaledImage));

                // Find contours
                cv::Mat im = cv::imread(filebuf, cv::IMREAD_COLOR);
                cv::Mat gray;
                cvtColor(im, gray, CV_BGR2GRAY);
                Canny(gray, gray, cv_minContourTresh, cv_minContourTresh * 2, 3);

                vector<vector<Point> > contours;
                vector<Vec4i> hierarchy;
                findContours(gray, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));

                // Draw contours
                // RNG rng(12345);
                // Mat drawing = Mat::zeros(gray.size(), CV_8UC3);

                contoursCount = 0;
                for (int i = 0; i < contours.size(); i++) {
                    // Scalar color = Scalar( rng.uniform(0, 255), rng.uniform(0,255), rng.uniform(0,255) );
                    // drawContours( drawing, contours, i, color, 2, 8, hierarchy, 0, Point() );
                    double area = contourArea(contours[i], false);
                    if (area >= cv_minContourArea) {
                        ++contoursCount;
                        //cout << i << " area:  " << area << "\t|\t count: " << contoursCount <<endl;
                    }
                }

                csvFile.open("PlanktonTracker_Files/output.csv", std::ios_base::app | std::ios_base::out);
                if (csvFile.is_open()) {
                    csvFile << timebuf << "," << filebuf << "," << contoursCount << "\n";
                    csvFile.close();
                }

                // imshow( "Result window", drawing );
                // waitKey(0);

                ++photosCaptured;

            }

        } catch (GenICam::GenericException &e) {
            QString status = QString("Could not grab an image: %1").arg((QString)e.GetDescription());
            ui->statusBar->showMessage(status);
        }


    } else {

        stopCapture();

    }

}
Exemple #26
0
void KeyPointsDeliverer::extractCupidsBowKeyPoints(int thresholdDifferenceToAvg, int totalLineCheck)
{
    QList<PossibleKeyPoint> possibleKeyPoints;
    PossibleKeyPoint possibleKeyPoint;

    for (int i = 0; i < rTopFinal.cols; ++i) {
        for (int j = rTopFinal.rows/2; j > totalLineCheck/2; --j) {

            int currentDiffToAvg = 0;

            for (int k = 1; k < totalLineCheck/2 + 1; ++k) {
                currentDiffToAvg += rTopFinal.at<uchar>(j-k,i) + rTopFinal.at<uchar>(j+k,i);

            }
            currentDiffToAvg = currentDiffToAvg / totalLineCheck;

            if(currentDiffToAvg > 0){
                currentDiffToAvg = 100 - (rTopFinal.at<uchar>(j,i) * 100 / currentDiffToAvg);
            }

            if(currentDiffToAvg > thresholdDifferenceToAvg){
                possibleKeyPoint.differenceToAvg = currentDiffToAvg;
                possibleKeyPoint.keyPoint.x  = i;
                possibleKeyPoint.keyPoint.y  = j;
                possibleKeyPoints.append(possibleKeyPoint);
            }
        }
    }

    Mat contourImg(rTopFinal.rows, rTopFinal.cols, CV_8UC1, Scalar(0,0,0));
    Point p;
    for (int i = 0; i < possibleKeyPoints.size(); ++i) {

      if(i % 3 == 0)
        circle(rTopFinal, p, 1, Scalar(255,255,255));

        p = possibleKeyPoints.at(i).keyPoint;
        contourImg.at<uchar>(p.y, p.x) = 255;
    }
    Mat _img;
    double otsu_thresh_val = cv::threshold(
                contourImg, _img, 0, 255, CV_THRESH_BINARY | CV_THRESH_OTSU
                );

    Canny(contourImg, contourImg, otsu_thresh_val*0.5, otsu_thresh_val);

    vector<vector<Point> > contours;
    vector<Vec4i> hierarchy;
    findContours( contourImg, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0) );
    for( uint i = 0; i< contours.size(); i++ ){
        drawContours( contourImg, contours, i, Scalar(255,255,255), 1, 8, hierarchy, 1, Point() );
    }


    keyPoint2.y = 1000;
    for (int i = 0; i < rTopFinal.rows; ++i) {
        for (int j = rTopFinal.cols/2; j > 0; --j) {
            if(contourImg.at<uchar>(i,j) == 255){
                if(keyPoint2.y >= i){
                    keyPoint2.y = i;
                    keyPoint2.x = j;
                }
            }
        }
    }


    keyPoint4.y = 1000;
    for (int i = 0; i < rTopFinal.rows; ++i) {
        for (int j = rTopFinal.cols/2; j < rTopFinal.cols; ++j) {
            if(contourImg.at<uchar>(i,j) == 255){
                if(keyPoint4.y >= i){
                    keyPoint4.y = i;
                    keyPoint4.x = j;
                }
            }
        }
    }


    keyPoint3.y = 0;
    int kp2kp3Width = keyPoint4.x  - keyPoint2.x;
    kp2kp3Width = kp2kp3Width/2;

    for (int i = keyPoint2.x; i < keyPoint4.x; ++i) {
        for (int j = 0; j < keyPoint1.y-14; ++j) {
            if(contourImg.at<uchar>(j,i) == 255){
                if(keyPoint3.y <= j &&  i <= (keyPoint2.x + kp2kp3Width + (kp2kp3Width*0.2)) ){
                    keyPoint3.y = j;
                    keyPoint3.x = i;
                }
            }
        }
    }
}
Exemple #27
0
  // Detects candidate plate-edge lines in inputImage via Canny + Hough,
  // masking out the detected character regions first, and accumulates the
  // results into this->horizontalLines / this->verticalLines.
  void PlateLines::processImage(Mat inputImage, vector<TextLine> textLines, float sensitivity)
  {
    if (this->debug)
      cout << "PlateLines findLines" << endl;

    timespec startTime;
    getTimeMonotonic(&startTime);


    // Ignore input images that are pure white or pure black
    Scalar avgPixelIntensity = mean(inputImage);
    if (avgPixelIntensity[0] >= 252 || avgPixelIntensity[0] <= 3)
      return;

    // Do a bilateral filter to clean the noise but keep edges sharp
    Mat smoothed(inputImage.size(), inputImage.type());
    adaptiveBilateralFilter(inputImage, smoothed, Size(3,3), 45, 45);

    // NOTE: a morphology structuring element used to be computed here but was
    // never applied; it has been removed as dead code.

    Mat edges(inputImage.size(), inputImage.type());
    Canny(smoothed, edges, 66, 133);

    // Create a mask that is dilated based on the detected characters, so
    // character strokes do not contribute spurious edges.
    Mat mask = Mat::zeros(inputImage.size(), CV_8U);

    for (unsigned int i = 0; i < textLines.size(); i++)
    {
      vector<vector<Point> > polygons;
      polygons.push_back(textLines[i].textArea);
      fillPoly(mask, polygons, Scalar(255,255,255));
    }

    // Slightly grow the character regions, then invert so characters are 0.
    dilate(mask, mask, getStructuringElement( 1, Size( 1 + 1, 2*1+1 ), Point( 1, 1 ) ));
    bitwise_not(mask, mask);

    // AND canny edges with the character mask
    bitwise_and(edges, mask, edges);

    // Hough-detect horizontal and vertical line candidates and accumulate them.
    vector<PlateLine> hlines = this->getLines(edges, sensitivity, false);
    vector<PlateLine> vlines = this->getLines(edges, sensitivity, true);
    for (unsigned int i = 0; i < hlines.size(); i++)
      this->horizontalLines.push_back(hlines[i]);
    for (unsigned int i = 0; i < vlines.size(); i++)
      this->verticalLines.push_back(vlines[i]);

    // if debug is enabled, draw the image
    if (this->debug)
    {
      Mat debugImgHoriz(edges.size(), edges.type());
      Mat debugImgVert(edges.size(), edges.type());
      edges.copyTo(debugImgHoriz);
      edges.copyTo(debugImgVert);
      cvtColor(debugImgHoriz,debugImgHoriz,CV_GRAY2BGR);
      cvtColor(debugImgVert,debugImgVert,CV_GRAY2BGR);

      for( size_t i = 0; i < this->horizontalLines.size(); i++ )
      {
        line( debugImgHoriz, this->horizontalLines[i].line.p1, this->horizontalLines[i].line.p2, Scalar(0,0,255), 1, CV_AA);
      }

      for( size_t i = 0; i < this->verticalLines.size(); i++ )
      {
        line( debugImgVert, this->verticalLines[i].line.p1, this->verticalLines[i].line.p2, Scalar(0,0,255), 1, CV_AA);
      }

      vector<Mat> images;
      images.push_back(debugImgHoriz);
      images.push_back(debugImgVert);

      Mat dashboard = drawImageDashboard(images, debugImgVert.type(), 1);
      displayImage(pipelineData->config, "Hough Lines", dashboard);
    }

    if (pipelineData->config->debugTiming)
    {
      timespec endTime;
      getTimeMonotonic(&endTime);
      cout << "Plate Lines Time: " << diffclock(startTime, endTime) << "ms." << endl;
    }

  }
Exemple #28
0
void KeyPointsDeliverer::extractLowerLipKeyPoint(int thresholdDifferenceToAvg, int totalLineCheck)
{
    QList<PossibleKeyPoint> possibleKeyPoints;
    PossibleKeyPoint possibleKeyPoint;

    QList<QList<int> > val;
    int aPixelsumme;
    int dPixeldurchschnitt;
    int diffPixeldurchschnitt;
    int rlow;

    for (int i = 0; i < rLowFinal.cols; ++i) {
        for (int j = rLowFinal.rows/2; j < rLowFinal.rows-totalLineCheck/2; ++j) {

            QList<int> brabs;
            int currentDiffToAvg = 0;

            for (int k = 1; k < totalLineCheck/2 + 1; ++k) {
                currentDiffToAvg += rLowFinal.at<uchar>(j-k,i) + rLowFinal.at<uchar>(j+k,i);
            }
            aPixelsumme =  currentDiffToAvg;
            currentDiffToAvg = currentDiffToAvg / totalLineCheck;

            dPixeldurchschnitt  = currentDiffToAvg;

            if(currentDiffToAvg > 0){
                currentDiffToAvg = 100 - (rLowFinal.at<uchar>(j,i) * 100 / currentDiffToAvg);
            }
            diffPixeldurchschnitt = currentDiffToAvg;
            rlow = rLowFinal.at<uchar>(j,i);

            if(currentDiffToAvg > thresholdDifferenceToAvg){
              brabs << aPixelsumme << dPixeldurchschnitt << diffPixeldurchschnitt << rlow;
              val << brabs;

                possibleKeyPoint.differenceToAvg = currentDiffToAvg;
                possibleKeyPoint.keyPoint.x  = i;
                possibleKeyPoint.keyPoint.y  = j;
                possibleKeyPoints.append(possibleKeyPoint);
            }
        }
    }

//    foreach (QList<int> v, val) {
//      ROS_INFO("#################1##################");
//      ROS_INFO("aPixelsumme %d", v.at(0));
//      ROS_INFO("dPixeldurchschnitt %d", v.at(1));
//      ROS_INFO("diffPixeldurchschnitt %d Rlow %d", v.at(2), v.at(3));


//      ROS_INFO("#################2##################");
//    }

    Mat contourImg(rLowFinal.rows, rLowFinal.cols, CV_8UC1, Scalar(0,0,0));
    Point p;
    for (int i = 0; i < possibleKeyPoints.size(); ++i) {

      if(i % 3 == 0)
        circle(rLowFinal, p, 1, Scalar(255,255,255));

        p = possibleKeyPoints.at(i).keyPoint;
        contourImg.at<uchar>(p.y, p.x) = 255;
    }
    Mat _img;
    double otsu_thresh_val = cv::threshold(
                contourImg, _img, 0, 255, CV_THRESH_BINARY | CV_THRESH_OTSU
                );
    Canny(contourImg, contourImg, otsu_thresh_val*0.5, otsu_thresh_val);

    vector<vector<Point> > contours;
    vector<Vec4i> hierarchy;
    findContours( contourImg, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0) );
    for( uint i = 0; i< contours.size(); i++ ){
        drawContours( contourImg, contours, i, Scalar(255,255,255), 1, 8, hierarchy, 1, Point() );
    }


    Point kpTmp;
    kpTmp.y = 0;
    int kp2kp3Width = keyPoint4.x - keyPoint2.x;
    kp2kp3Width = kp2kp3Width/2;

    for (int i = keyPoint2.x; i < keyPoint4.x; ++i) {
        for (int j = rLowFinal.rows-(rLowFinal.rows*0.2); j > keyPoint1.y; --j) {
            if(contourImg.at<uchar>(j, i) == 255){
                if(kpTmp.y <= j && i <= (keyPoint2.x + kp2kp3Width)){
                    kpTmp.y = j;
                    keyPoint6.y = j;
                    keyPoint6.x = i;
                    break;
                }
            }
        }
    }
}
/*
 * General image processing. Returns the original image with objects drawn on top of it.
 */
Mat RoadDetection::processImage()
{
	// Working frames (the previously declared grayFrame/houghFrame were never
	// used and have been removed).
	Mat rawFrame, tmpFrame, blurredFrame, contoursFrame, sectionFrame;
	Point vanishingPoint;

	vector<Line> houghLines, houghMainLines;
	vector<Point> roadShape;

	int sectionOffset;

	// save a copy of the original frame
	original.copyTo(rawFrame);

	// smooth and remove noise
	GaussianBlur(rawFrame, blurredFrame, Size(BLUR_KERNEL, BLUR_KERNEL), 0, 0);

	// edge detection (canny, inverted)
	Canny(blurredFrame, contoursFrame, CANNY_MIN_THRESH, CANNY_MAX_THRESH);
	threshold(contoursFrame, contoursFrame, 128, 255, THRESH_BINARY);

	// hough transform lines
	houghLines = getHoughLines(contoursFrame, true);

	// vanishing point estimate from the full-frame lines
	vanishingPoint = getVanishingPoint(houghLines, rawFrame.size());
	sectionOffset = vanishingPoint.y;

	// section frame (below vanishing point)
	contoursFrame.copyTo(tmpFrame);
	sectionFrame = tmpFrame(CvRect(0, vanishingPoint.y, contoursFrame.cols, contoursFrame.rows - vanishingPoint.y));

	// re-apply hough transform to section frame
	houghLines = getHoughLines(sectionFrame, true);

	// shift lines downwards so their coordinates map back to the full frame
	houghLines = shiftLines(houghLines, sectionOffset);

	houghLines = getFilteredLines(houghLines);

	// best line matches
	houghMainLines = getMainLines(houghLines);

	// refine the vanishing point using the two main lines, if available
	if (houghMainLines.size() >= 2)
	{
		Point intersection = getLineIntersection(houghMainLines[0], houghMainLines[1]);

		if (intersection.x > 0 && intersection.y >= 0)
		{
			vanishingPoint = intersection;
			sectionOffset = intersection.y;
		}

		// get road shape
		roadShape = getRoadShape(rawFrame, houghMainLines[0], houghMainLines[1], vanishingPoint);
	}

	// limit lines so they do not extend above the vanishing point
	houghLines = getLimitedLines(houghLines, sectionOffset);
	houghMainLines = getLimitedLines(houghMainLines, sectionOffset);

	// drawing process
	drawLines(rawFrame, houghLines, Scalar(0, 0, 255), 2, 0);
	drawLines(rawFrame, houghMainLines, Scalar(20, 125, 255), 2, 0);
	drawRoadShape(rawFrame, roadShape, Scalar(20, 125, 255), 0.4);
	drawCircle(rawFrame, vanishingPoint, Scalar(20, 125, 255), 15, -1, 0);

	return rawFrame;
}
/*
 * Runs the full processing pipeline on one multispectral frame:
 * optional dark-image subtraction, quotient-filter skin detection
 * (delegated to worker threads via signals), automatic contrast and
 * 8-bit downscaling, edge detection, distance estimation (also threaded),
 * motion suppression and result composition. Emits finishedProcessing()
 * with the original frame, its 8-bit version and all result images.
 * The worker results arrive while this method spins the Qt event loop.
 */
void ImageProcessor::process(MultispectralImage frame)
{
	MultispectralImage frame8Bit;   // 8-bit version of the frame, for display
	QList<imgDesc> results;         // labelled result images to hand to the GUI

	quint8 i;

	Mat filterMask;                 // combined skin-detection mask
	Mat maskedFCImage;              // false-colour image masked to skin pixels
	double maxVal = 0;
	double maxTemp = 0.0;

	Mat temp;
	double spread;                  // 16->8 bit scale factor

	Mat motionMask;

	errorOccurred = false;

	lockConfig.lockForRead();

	//main processing tasks
	//*********************

	//subtract dark image, if enabled
	if(myConfig.calibration.subtractDark && !frame.getDarkSubtracted())
	{
		// channel 0 is the dark image itself, so start at 1
		for(i = 1; i < frame.getChannelCount(); i++)
		{
			//subtract dark image from current image
			Mat tmp;
			cv::subtract(frame.getImageByChannelNumber(i),
						 frame.getDarkImage(),
						 tmp);

			//set result as new channel image
			frame.setChannelImage(frame.getWavebands().at(i), tmp);
		}

		frame.setDarkSubtracted(true);
	}

	//perform skin detection by using quotient filters, if enabled
	if(myConfig.detectSkinByQuotient && (myConfig.quotientFilters.size() > 0))
	{
		//clear result list
		skinDetectionResults.clear();

		//signal processing of all filters (results collected further below)
		emit doSkinDetection(frame);
	}

	//if image depth is more than 8bit, image has to be resampled to be displayed
	if(frame.getDepth() > 8)
	{
		//if automatic contrast is enabled, find the brightest spot in all channels
		if(myConfig.contrastAutomatic)
		{
			//iterate through all bands (except dark) to find maximum value
			for(i = 1; i < frame.getChannelCount(); i++)
			{
				minMaxLoc(frame.getImageByChannelNumber(i), NULL, &maxTemp);
				if ( maxTemp > maxVal )
				{
					maxVal = maxTemp;
				}
			}

			//subtract contrast dark offset from maximum
			maxVal -= myConfig.contrastOffset;

			//slowly increase or decrease contrast value
			// (keeps the displayed maximum roughly in the 220..250 range,
			//  adjusting by 1/10 of the difference per frame to avoid flicker)
			if((maxVal / myConfig.contrastValue) < 220)
			{
				myConfig.contrastValue -= (myConfig.contrastValue - (maxVal / 255)) / 10;
			}
			else if((maxVal / myConfig.contrastValue) > 250)
			{
				myConfig.contrastValue += ((maxVal / 255) - myConfig.contrastValue) / 10;
			}
		}

		//calculate spread factor
		spread = 1.0 / (double)myConfig.contrastValue;

		//configure GUI image object
		frame8Bit.setSize(frame.getWidth(), frame.getHeight());
		frame8Bit.setDepth(8);

		//scale down every band
		for (i = 0; i < frame.getChannelCount(); i++)
		{
			//subtract contrast offset, if enabled
			Mat tempOffset;
			if(myConfig.contrastOffset > 0)
			{
				subtract(frame.getImageByChannelNumber(i),
						 Scalar(myConfig.contrastOffset),
						 tempOffset);
			}
			else
			{
				tempOffset = frame.getImageByChannelNumber(i);
			}

			//convert to 8 bit using spread factor
			// NOTE(review): rtype 8 is CV_8UC2, not CV_8U (0) — presumably
			// CV_8U was intended here; verify against OpenCV type constants.
			tempOffset.convertTo(temp, 8, spread );
			frame8Bit.setChannelImage(frame.getWavebands().at(i), temp.clone());
		}
	}
	else
	{
		frame8Bit = frame;
	}

	//detect edges
	if(myConfig.edgeDetection)
	{
		// one edge image per waveband
		QMapIterator<qint16, Mat> it(frame8Bit.getImages());
		while(it.hasNext())
		{
			it.next();

			Mat edges = doEdgeDetection(it.value(), myConfig.edgeThreshold);

			struct imgDesc edgeResult;
			edgeResult.desc = QString("Edges %1nm").arg(it.key());
			edgeResult.img = edges;
			results.append(edgeResult);
		}
	}

	//Estimate distance (in separate thread)
	if (myConfig.estimateDistance)
	{
		//make edge mask on selected image
		Mat edges;
		if(autoSelectCannyImage) //automatically select sharpest band image for edge detection
		{
			Canny(frame8Bit.getImageByChannelNumber(lastSharpestBand), edges, cannyLowThresh, cannyHighThresh);
		}
		else //use band image selected by the user (in GUI)
		{
			Canny(frame8Bit.getImageByChannelNumber(cannyImage), edges, cannyLowThresh, cannyHighThresh);
		}

		//emit signals to distance estimation thread
		distEstimationResults.clear();
		emit setDistEstimParams((int)myConfig.sharpMetric, edges, myConfig.sharpnessNbrhdSize, medianKernel);
		emit doDistanceEstimation(frame8Bit);

		//wait for thread to finish
		// (busy-wait pumping the event loop until a result arrives or a
		//  worker reports an error)
		while (!errorOccurred && distEstimationResults.size() < 1) //frame8Bit.getChannelCount()-1)
		{
			QCoreApplication::processEvents();
		}
		if(errorOccurred)
		{
			emit errorProcessing(ImageSourceException("Error in task: estimateDistanceByChromAberr."));
			return;
		}

		//append distance estimation result to results in order to display them
		if(!distEstimationResults.empty())
		{
			//get 8 bit image from 1st list entry (at position 0)
			results.append(distEstimationResults.at(0));
		}
	}

	//wait for threads to finish:
	//***************************

	//wait until all threads are finished, get results and delete them

	if(myConfig.detectSkinByQuotient && (myConfig.quotientFilters.size() > 0))
	{
		maskedFCImage = Mat::zeros(frame8Bit.getDarkImage().rows,
								   frame8Bit.getDarkImage().cols, CV_8UC3);

		//wait until all threads are finished and get results
		while(!errorOccurred &&
			  (myConfig.quotientFilters.size() > skinDetectionResults.size()))
		{
			QCoreApplication::processEvents(QEventLoop::AllEvents);
		}
		if(errorOccurred)
		{
			emit errorProcessing(ImageSourceException("Error in task: detectSkinByQuotients."));
			return;
		}
		//multiply (cut) the filter masks
		// (logical AND of all per-filter masks: a pixel stays set only if
		//  every quotient filter marked it as skin)
		filterMask = skinDetectionResults.at(0);
		for(i = 1; i < skinDetectionResults.size(); i++ )
		{
			multiply(filterMask, skinDetectionResults.at(i),
					 filterMask, 1.0);
		}

		//remove positive pixels with motion artifacts
		if(myConfig.suppressMotion && (lastFrame.getChannelCount() == frame.getChannelCount()))
		{
			motionMask = Mat::ones(maskedFCImage.rows, maskedFCImage.cols, CV_8UC1);

			for(i= 0; i < frame.getChannelCount(); i++)
			{
				Mat diffF, threshF, thresh;
				Mat curF, prevF;

				//get frame channels and convert to float
				frame.getImageByChannelNumber(i).convertTo(curF, CV_32F);
				lastFrame.getImageByChannelNumber(i).convertTo(prevF, CV_32F);

				//calculate absolute difference between current and previous frame
				absdiff(curF, prevF, diffF);

				//threshold the absolute difference
				// (pixels that changed less than motionThreshold become 1)
				threshold(diffF, threshF, myConfig.motionThreshold, 1.0, THRESH_BINARY_INV);

				//convert to 8 bit unsigned
				threshF.convertTo(thresh, CV_8U);

				//update motion mask with new thresholded difference mask
				multiply(motionMask, thresh, motionMask);
			}

			//now multiply motion mask with filter mask to remove positive filter results
			//where there was motion detected
			multiply(motionMask, filterMask, filterMask);

			//add motion mask to results
			struct imgDesc motionResult;
			motionResult.desc = "Motion";
			threshold(motionMask, motionResult.img, 0, 255, THRESH_BINARY_INV) ;
			results.append(motionResult);
		}

		//Morph result:
		// (morphological opening removes small speckle from the skin mask)
		if(myConfig.morphResult)
		{
			Mat element(4,4,CV_8U,Scalar(1));
			morphologyEx(filterMask, filterMask, MORPH_OPEN, element);
		}

		//set mask on top of (8bit) false colour image
		bitwise_or(maskedFCImage,
				   frame8Bit.getFalseColorImage(myConfig.falseColorChannels),
				   maskedFCImage, filterMask);

		if(myConfig.showMaskContours)
		{
			vector<vector<Point> > contours;
			CvScalar green = CV_RGB(0,255,0);
			//CvScalar blue = CV_RGB(0,0,255);

			findContours(filterMask, contours,
						 CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);

			drawContours(maskedFCImage, contours, -1, green, 2, 8);
		}

		struct imgDesc skinMask;
		struct imgDesc skinResult;

		skinMask.desc = "QF Mask";
		threshold(filterMask, skinMask.img, 0, 255, THRESH_BINARY) ;
		results.append(skinMask);

		skinResult.desc = "Masked FC Image";
		skinResult.img = maskedFCImage;
		results.append(skinResult);
	}

	lockConfig.unlock();

	emit finishedProcessing(frame, frame8Bit, results);

	// keep the frame for motion suppression on the next call
	lastFrame = frame;
}