Exemple #1
0
/*
 * Load a grayscale image named on the command line, detect edges with
 * Canny, run a Hough line transform and display the detected lines.
 *
 * argv[1] must be the path of the image to analyse.
 * Returns 0 on success, -1 when the path is missing or cannot be loaded.
 */
int doHoughLine(int argc, char** argv)
{
	char *imagePath;                 /* fixed typo: was "iamgePath" */
	IplImage* src;
	IplImage* dst;
	IplImage* color_dst;
	CvMemStorage* storage;
	CvSeq* lines = 0;
	int i;

	if( argc < 2 )                   /* guard: no image path supplied */
		return -1;

	imagePath = argv[1];
	src = cvLoadImage( imagePath, 0 );   /* 0 = force single-channel load */
	if( !src )
		return -1;

	storage = cvCreateMemStorage(0);
	dst = cvCreateImage( cvGetSize(src), 8, 1 );
	color_dst = cvCreateImage( cvGetSize(src), 8, 3 );

	cvCanny( src, dst, 50, 200, 3 );
	cvCvtColor( dst, color_dst, CV_GRAY2BGR );
#if 0
	/* Standard Hough: each element is a (rho, theta) pair. */
	lines = cvHoughLines2( dst, storage, CV_HOUGH_STANDARD, 1, CV_PI/180, 100, 0, 0 );

	for( i = 0; i < MIN(lines->total,100); i++ )
	{
		float* line = (float*)cvGetSeqElem(lines,i);
		float rho = line[0];
		float theta = line[1];
		CvPoint pt1, pt2;
		double a = cos(theta), b = sin(theta);
		double x0 = a*rho, y0 = b*rho;
		/* Extend 1000 px either side of the closest point to the origin
		   so the drawn line spans the whole image. */
		pt1.x = cvRound(x0 + 1000*(-b));
		pt1.y = cvRound(y0 + 1000*(a));
		pt2.x = cvRound(x0 - 1000*(-b));
		pt2.y = cvRound(y0 - 1000*(a));
		cvLine( color_dst, pt1, pt2, CV_RGB(255,0,0), 3, CV_AA, 0 );
	}
#else
	/* Probabilistic Hough: each element is a pair of segment end points. */
	lines = cvHoughLines2( dst, storage, CV_HOUGH_PROBABILISTIC, 1, CV_PI/180, 50, 50, 10 );
	for( i = 0; i < lines->total; i++ )
	{
		CvPoint* line = (CvPoint*)cvGetSeqElem(lines,i);
		cvLine( color_dst, line[0], line[1], CV_RGB(255,0,0), 3, CV_AA, 0 );
	}
#endif
	cvNamedWindow( "Source", 1 );
	cvShowImage( "Source", src );

	cvNamedWindow( "Hough", 1 );
	cvShowImage( "Hough", color_dst );

	cvWaitKey(0);

	/* Release everything we allocated (the original leaked all of it). */
	cvReleaseImage( &src );
	cvReleaseImage( &dst );
	cvReleaseImage( &color_dst );
	cvReleaseMemStorage( &storage );

	return 0;
}
Exemple #2
0
/*
 * Visualise probabilistic Hough line detection for the given image.
 * Converts img to an IplImage, runs Canny + cvHoughLines2, and shows the
 * source and the detected segments until a key is pressed.
 */
void demo(Image &img) {
	IplImage *src = ImageToCv(img);

    IplImage* dst = cvCreateImage(cvGetSize(src), 8, 1);
    IplImage* color_dst = cvCreateImage(cvGetSize(src), 8, 3);
    CvMemStorage* storage = cvCreateMemStorage(0);
    CvSeq* lines = 0;
    int i;
    cvCanny(src, dst, 50, 200, 3);
    cvCvtColor(dst, color_dst, CV_GRAY2BGR);

    lines = cvHoughLines2(dst, storage, CV_HOUGH_PROBABILISTIC, 1, CV_PI/180, 80, 30, 10);
    /* Each probabilistic-Hough element is a pair of segment end points. */
    for(i = 0; i < lines->total; i++) {
        CvPoint* line = (CvPoint*)cvGetSeqElem(lines,i);
        cvLine( color_dst, line[0], line[1], CV_RGB(255,0,0), 3, 8);
    }

    cvNamedWindow("Source", 1);
    cvShowImage("Source", src);

    cvNamedWindow("Hough", 1);
    cvShowImage("Hough", color_dst);

    cvWaitKey(0);

    cvReleaseImage(&src);
    cvReleaseImage(&dst);
    cvReleaseImage(&color_dst);
    cvReleaseMemStorage(&storage);   /* BUG FIX: storage was leaked */

    cvDestroyWindow("Source");
	cvDestroyWindow("Hough");
}
Exemple #3
0
/* Legacy compatibility wrapper: multi-scale Hough transform writing
   (rho, theta) pairs into a caller-supplied float buffer. */
int  cvHoughLinesSDiv( CvArr* image, double rho, int srn,
                       double theta, int stn, int threshold,
                       float* lines, int linesNumber )
{
    /* View the caller's buffer as a 1 x linesNumber CV_32FC2 matrix;
       cvHoughLines2 records how many lines it stored via the matrix
       header, so mat.cols afterwards is the result count. */
    CvMat mat = cvMat( 1, linesNumber, CV_32FC2, lines );

    cvHoughLines2( image, &mat, CV_HOUGH_MULTI_SCALE,
                   rho, theta, threshold, srn, stn );

    return mat.cols;
}
Exemple #4
0
/* Legacy compatibility wrapper: standard Hough transform writing
   (rho, theta) pairs into a caller-supplied float buffer. */
int  cvHoughLines( CvArr* image, double rho,
                   double theta, int threshold,
                   float* lines, int linesNumber )
{
    /* View the caller's buffer as a 1 x linesNumber CV_32FC2 matrix;
       cvHoughLines2 records how many lines it stored via the matrix
       header, so mat.cols afterwards is the result count. */
    CvMat mat = cvMat( 1, linesNumber, CV_32FC2, lines );

    cvHoughLines2( image, &mat, CV_HOUGH_STANDARD,
                   rho, theta, threshold, 0, 0 );

    return mat.cols;
}
Exemple #5
0
/* Legacy compatibility wrapper: probabilistic Hough transform writing
   segment end points (x1,y1,x2,y2) into a caller-supplied int buffer. */
int  cvHoughLinesP( CvArr* image, double rho,
                    double theta, int threshold,
                    int lineLength, int lineGap,
                    int* lines, int linesNumber )
{
    /* View the caller's buffer as a 1 x linesNumber CV_32SC4 matrix;
       cvHoughLines2 records how many segments it stored via the matrix
       header, so mat.cols afterwards is the result count. */
    CvMat mat = cvMat( 1, linesNumber, CV_32SC4, lines );

    cvHoughLines2( image, &mat, CV_HOUGH_PROBABILISTIC,
                   rho, theta, threshold, lineLength, lineGap );

    return mat.cols;
}
Exemple #6
0
void houghDetection() {
	IplImage *temporaryImage = cvCreateImage(cvGetSize(objectImage), IPL_DEPTH_32F, 1);
	IplImage *houghImage = cvLoadImage("object.jpg", CV_LOAD_IMAGE_ANYDEPTH | CV_LOAD_IMAGE_ANYCOLOR);
	IplImage *cannyImage = _canny(objectImage);
	CvMemStorage *storage = cvCreateMemStorage(0);
	CvSeq *lines = NULL;
	
	lines = cvHoughLines2(cannyImage, storage, CV_HOUGH_PROBABILISTIC, 1, (CV_PI/180), 50, 50, 10);
	
	for ( int i = 0; i < lines->total; i++ ) {
		CvPoint *line = (CvPoint *)cvGetSeqElem(lines, i);
		cvLine(houghImage, line[0], line[1], CV_RGB(255,0,0), 1, 1, 0);
	}
	
	cvShowImage(windowHoughName, houghImage);
	
	cvReleaseImage(&houghImage);
}
Exemple #7
0
/*
 * Detect straight segments in src with Canny + probabilistic Hough,
 * print each segment's inclination (radians), and save/show the
 * intermediate edge map ("canny.png") and overlay ("hough.png").
 */
void calcNecessaryImageRotation(IplImage *src) {
#define MAX_LINES 100  /* kept for compatibility; unused in this function */
  CvMemStorage* storage = cvCreateMemStorage(0);
  CvSize img_sz = cvGetSize( src );

  IplImage* color_dst = cvCreateImage( img_sz, 8, 3 );
  IplImage* dst = cvCreateImage( img_sz, 8, 1 );
  CvSeq* lines = 0;
  int i;

  cvCanny( src, dst, 50, 200, 3 );
  cvCvtColor( dst, color_dst, CV_GRAY2BGR );

  cvSaveImage("canny.png", dst);

  lines = cvHoughLines2( dst,
                         storage,
                         CV_HOUGH_PROBABILISTIC,
                         1,
                         CV_PI/180,
                         80,
                         30,
                         10 );
  for( i = 0; i < lines->total; i++ )
  {
      CvPoint* line = (CvPoint*)cvGetSeqElem(lines,i);
      cvLine( color_dst, line[0], line[1], CV_RGB(255,0,0), 1, 8 );
      /* Inclination of the segment; dx == 0 yields +/-inf -> atan gives
         +/-pi/2, and dx == dy == 0 prints NaN (degenerate segment). */
      printf("%f\n", atan((double)(line[1].y-line[0].y) / (double)(line[1].x-line[0].x)));
  }

  // TODO(kroo): build up a smoothed histogram (cvHistogram)
  // TODO(kroo): find two peaks, assert that they are separated by roughly 90˚
  // TODO(kroo): find smallest rotation necessary to cause the lines to point straight up/down/left/right

  cvSaveImage("hough.png", color_dst);

  cvNamedWindow( "Hough Transform", 1 );
  cvShowImage( "Hough Transform", color_dst );

  cvWaitKey(0);

  /* Release per-call allocations (the original leaked all of these). */
  cvReleaseImage( &dst );
  cvReleaseImage( &color_dst );
  cvReleaseMemStorage( &storage );
}
/*
 * Crop the strip to the right of the detected Gandhiji region, edge-detect
 * it and run a Hough transform to visualise the dominant line directions.
 * Windows "imgLine", "edgedest" and "Hough" show the intermediate stages.
 */
void rotate(IplImage *img)
{
	CvRect rect;
	IplImage *imgLine;

	/* Region of interest: just right of the matched region, padded
	   vertically by 5 px above and 15 px below. */
	rect.x = GlobalGandhiji.x + GlobalGandhiji.width;
	rect.y = GlobalGandhiji.y - 5;
	rect.width = (int)((GlobalGandhiji.width) - 5);
	rect.height = GlobalGandhiji.height + 15;

	/* Proceed only with a valid match and a rectangle fully inside img. */
	if(GlobalGandhiji.matchval != -1 && rect.x > 0 && rect.y > 0 &&
	   rect.y + rect.height <= img->height && rect.x + rect.width <= img->width)
	{
		imgLine = cropRectangle(img, rect);
		cvNamedWindow("imgLine", 1);
		cvShowImage("imgLine", imgLine);

		IplImage* src1 = cvCreateImage(cvGetSize(imgLine), 8, 1);
		cvCvtColor(imgLine, src1, CV_RGB2GRAY);

		IplImage* dst = cvCreateImage(cvGetSize(src1), 8, 1);
		IplImage* color_dst = cvCreateImage(cvGetSize(src1), 8, 3);
		CvMemStorage* storage = cvCreateMemStorage(0);
		CvSeq* lines = 0;
		int i;

		cvCanny(src1, dst, 50, 150, 3);
		//cvDilate( dst, dst, 0, 1 );
		cvNamedWindow("edgedest", 1);
		cvShowImage("edgedest", dst);
		cvCvtColor(dst, color_dst, CV_GRAY2BGR);
#if 1
		/* Standard Hough: each element is a (rho, theta) pair. */
		lines = cvHoughLines2(dst, storage, CV_HOUGH_STANDARD, 1, CV_PI/180, 30, 0, 0);
		for(i = 0; i < MIN(lines->total, 100); i++)
		{
			float* line = (float*)cvGetSeqElem(lines, i);
			float rho = line[0];
			float theta = line[1];

			printf("theta = %f",(theta*180/3.142));
			CvPoint pt1, pt2;
			double a = cos(theta), b = sin(theta);
			double x0 = a*rho, y0 = b*rho;
			printf("a= %f  b=%f  x0=%f  y0=%f roh=%f\n", a,b,x0,y0,rho);
			/* Extend 1000 px either side so the line spans the image. */
			pt1.x = cvRound(x0 + 1000*(-b));
			pt1.y = cvRound(y0 + 1000*(a));
			pt2.x = cvRound(x0 - 1000*(-b));
			pt2.y = cvRound(y0 - 1000*(a));
			printf("    x1 = %d, y1 = %d",pt1.x,pt1.y);
			printf("    x2 = %d, y2 = %d\n\n",pt2.x,pt2.y);

			//if((theta*180/3.142) < 100 && (theta*180/3.142) > 79 )
			cvLine(color_dst, pt1, pt2, CV_RGB(255,0,0), 3, 8);
		}
#else
		/* Probabilistic Hough: each element is a pair of end points. */
		lines = cvHoughLines2(dst, storage, CV_HOUGH_PROBABILISTIC, 1, CV_PI/180, 30, 0, 0);
		for(i = 0; i < lines->total; i++)
		{
			CvPoint* line = (CvPoint*)cvGetSeqElem(lines, i);
			cvLine(color_dst, line[0], line[1], CV_RGB(255,0,0), 3, 8);
		}
#endif
		cvNamedWindow("Hough", 1);
		cvShowImage("Hough", color_dst);

		/* Release per-call allocations (the original leaked all of these). */
		cvReleaseImage(&src1);
		cvReleaseImage(&dst);
		cvReleaseImage(&color_dst);
		cvReleaseMemStorage(&storage);
		/* NOTE(review): assumes cropRectangle returns a newly allocated
		   image owned by the caller -- TODO confirm. */
		cvReleaseImage(&imgLine);
	}
}
/*
 * Main loop: load a frame, denoise it, build a Laplacian-gradient mask,
 * derive an Otsu threshold from the masked pixel histogram, binarize and
 * thin each row's first white run to one pixel, then estimate the dominant
 * line angle with a probabilistic Hough transform and send it as a
 * steering command.
 *
 * Keys: 'a' widens the thinning window, 'z' narrows it, 'x' sends BRAKE
 * and exits the loop.
 */
int _tmain(int argc, _TCHAR* argv[])
{
	//CvCapture *capture = cvCaptureFromCAM(0);

	int t = 10;
	double time = 0;
	while(1)
	{
		clock_t begin = clock();
		//------------------------ Original Camera Feed -----------------------------------------//
		//IplImage *img = cvQueryFrame(capture);
		IplImage *img = cvLoadImage("../../Data/13 Oct/img13.jpg");
		if(!img)	// guard: stop cleanly if the frame cannot be loaded
			break;
		//cvNamedWindow("Camera Feed", 0);
		//cvShowImage("Camera Feed", img);

		//------------------------ Finding Gray Scale Image -------------------------------------//
		IplImage *gray = cvCreateImage(cvGetSize(img), 8, 1);
		cvCvtColor(img, gray, CV_BGR2GRAY);

		//------------------------ Removing Salt and Pepper Noise -------------------------------//
		//cvSmooth(gray, gray, CV_MEDIAN, 3, 3, 0.0, 0.0);		// For really poor images
		cvMorphologyEx(gray, gray, NULL, NULL, CV_MOP_OPEN, 1);	// Removes Salt Noise
		for(int y = 0; y < gray->height; y++){
			// Invert in one pass so the remaining pepper noise becomes salt...
			uchar* ptr = (uchar*) (gray->imageData + y * gray->widthStep);
			for(int x = 0; x < gray->width; x++)
				ptr[x] = 255-ptr[x];
		}
		cvMorphologyEx(gray, gray, NULL, NULL, CV_MOP_OPEN, 1);	// ...and a second opening removes it.

		//------------------------ Finding Gradient Image ---------------------------------------//
		// BUG FIX: the row table holds pointers, so it must be allocated with
		// sizeof(int *); the original's sizeof(int) under-allocated on 64-bit
		// builds (heap overflow when storing row pointers).
		int **A = (int **) calloc(gray->height, sizeof(int *));

		IplImage *gradient = cvCreateImage(cvGetSize(gray), 8, 1);
		float min = 255, max = 0;
		for(int i = 1; i < gray->height-1; i++)
		{
			A[i] = (int*) calloc(gray->width, sizeof(int));
			uchar* ptr1 = (uchar*) (gray->imageData + (i-1)*gray->widthStep);
			uchar* ptr2 = (uchar*) (gray->imageData + i*gray->widthStep);
			uchar* ptr3 = (uchar*) (gray->imageData + (i+1)*gray->widthStep);
			uchar* ptr = (uchar*) (gradient->imageData + i*gradient->widthStep);
			for(int j = 1; j < gray->width-1; j++)
			{
				// 5-point Laplacian: up + left - 4*centre + right + down.
				A[i][j] = (int)ptr1[j] + (int)ptr2[j-1] + (int)(-4)*ptr2[j] + (int)ptr2[j+1] + (int)ptr3[j];
				if(A[i][j] <= min)
					min = (int)A[i][j];
				if(A[i][j] >= max)
					max = (int)A[i][j];
			}
		}
		//printf("max:%f min:%f\n", max, min);
		int *bins = (int *)calloc(256, sizeof(int));	// calloc zero-initializes
		float range = (max > min) ? (max - min) : 1;	// guard: flat image would divide by zero
		for(int i = 1; i < gradient->height-1; i++)
		{
			uchar* ptr = (uchar*) (gradient->imageData + i*gradient->widthStep);
			for(int j = 1; j < gradient->width-1; j++)
			{
				// Normalize the Laplacian response to 0..255 and histogram it.
				ptr[j] = (int)(A[i][j]-min)/range*255;
				bins[ptr[j]]++;
			}
			free(A[i]);	// row no longer needed; rows 0 and height-1 stay NULL
		}
		free(A);

		//------------ Applying 90 Percentile Threshold to get a Laplacian Edge Mask ------------//
		float sum = 0;
		int pth = 255;
		float percentile = 10;
		// Walk down from 255 until the top `percentile` percent of pixels
		// are above pth.
		while((pth >= 0) && (sum < gradient->imageSize*percentile/100))
			sum += bins[pth--];
		//printf("Percentile Threshold:%d\n", pth);

		//--------------- Applying the Laplacian Edge Mask to Original Gray Image ---------------//
		memset(bins, 0, 256*sizeof(int));	// reuse the histogram for masked gray values
		for(int y = 0; y < gray->height; y++){
			uchar* ptr = (uchar*) (gray->imageData + y * gray->widthStep);
			uchar* ptr1 = (uchar*) (gradient->imageData + y * gradient->widthStep);
			for(int x = 0; x < gray->width; x++){
				if(ptr1[x] > pth){
					bins[ptr[x]]++;
				}
			}
		}
		cvReleaseImage(&gradient);

		//------------- Thresholding the Original Gray Image using The Otsu Threshold -----------//
		int threshold = 255-OtsuThreshold(bins, 256);
		free(bins);

		//------------- Filtering points to get better line fittings ----------------------------//
		cvCvtColor(img, gray, CV_BGR2GRAY);	// re-read the unmodified gray frame
		cvReleaseImage(&img);	// BUG FIX: the frame was leaked every iteration

		for(int i = 0, d = t; i < gray->height; i++)
		{
			uchar* ptr = (uchar*) (gray->imageData + i*gray->widthStep);
			// Binarize the row...
			for(int j = 0; j < gray->width; j++)
				ptr[j] = (ptr[j] > threshold) ? 255 : 0;
			// ...then collapse the first white run to its single centre pixel.
			for(int x = 0; x < gray->width; x++)
				if(ptr[x] == 255)
				{
					// BUG FIX: clamp to the row so the blanking loop cannot
					// write past the end of the image line (the original
					// wrote up to d-1 bytes beyond it near the right edge).
					for(int k = 0; k < d && x+k < gray->width; k++)
						ptr[x+k] = 0;
					if(x + d/2 < gray->width)
						ptr[x+d/2] = 255;
					break;
				}
		}

		//---------------------------------- Hough Lines ----------------------------------------//
		CvMemStorage* line_storage = cvCreateMemStorage(0);
		CvSeq* results = cvHoughLines2(gray, line_storage, CV_HOUGH_PROBABILISTIC, 1, CV_PI/180, 40, 5, 200);
		cvReleaseImage(&gray);
		printf("lines:%d\tt=%d\n", results->total, t);

		float angle = 0.0, temp;
		for(int i = 0; i < results->total; i++)
		{
			CvPoint* line = (CvPoint*)cvGetSeqElem(results, i);
			// Orient each segment consistently before measuring its slope.
			if(line[0].y > line[1].y)
				temp = atan((line[0].y - line[1].y + 0.0) / (line[0].x - line[1].x));
			else
				temp = atan((line[1].y - line[0].y + 0.0) / (line[1].x - line[0].x));
			// Map the slope angle to a signed deviation from vertical.
			if(temp < 0)
				angle += 90 + 180/3.14*temp;
			else
				angle += 180/3.14*temp - 90;
		}
		if(results->total > 0)	// guard: no lines -> avoid division by zero
			printf("angle: %f\n", angle = angle/results->total);
		cvReleaseMemStorage(&line_storage);

		send(FORWARD, angle);

		clock_t end = clock();
		time = diffclock(end, begin);
		printf("Time Taken: %f fps\n", 1000/(time));

		int c = cvWaitKey(0);
		if(c == 'a')
			t++;
		else if(c == 'z')
			t--;
		else if(c == 'x')
		{
			send(BRAKE, 0);
			break;
		}
	}
	int c = cvWaitKey(0);

	return 0;
}
Exemple #10
0
/*
 * Detect the four white boundary lines of the goal arena: isolate pixels
 * bright in all three channels, run a standard Hough transform, average
 * the lines into left/right/top/bottom representatives, intersect them to
 * get the four corners, and mask the image to that quadrilateral.
 */
int main()
{
    IplImage* img = cvLoadImage("goal_arena.bmp");
    if(!img)                         // guard: input image missing
        return -1;
    CvSize imgSize = cvGetSize(img);
    IplImage* detected = cvCreateImage(imgSize, 8, 1);

    IplImage* imgBlue = cvCreateImage(imgSize, 8, 1);
    IplImage* imgGreen = cvCreateImage(imgSize, 8, 1);
    IplImage* imgRed = cvCreateImage(imgSize, 8, 1);

    // White pixels are bright in all three channels.
    cvSplit(img, imgBlue, imgGreen, imgRed, NULL);
    cvAnd(imgGreen, imgBlue, detected);
    cvAnd(detected, imgRed, detected);
    cvErode(detected, detected);
    cvDilate(detected, detected);    // Opening

    // cvThreshold(detected, detected, 100, 250, CV_THRESH_BINARY);
    CvMat* lines = cvCreateMat(100, 1, CV_32FC2);
    cvHoughLines2(detected, lines, CV_HOUGH_STANDARD, 1, 0.001, 100);

    // Accumulators for the averaged end points of each boundary class.
    CvPoint left1 = cvPoint(0, 0);
    CvPoint left2 = cvPoint(0, 0);
    CvPoint right1 = cvPoint(0, 0);
    CvPoint right2 = cvPoint(0, 0);
    CvPoint top1 = cvPoint(0, 0);
    CvPoint top2 = cvPoint(0, 0);
    CvPoint bottom1 = cvPoint(0, 0);
    CvPoint bottom2 = cvPoint(0, 0);

    int numLines = lines->rows;
    int numTop = 0;
    int numBottom = 0;
    int numLeft = 0;
    int numRight = 0;

    for(int i = 0; i < numLines; i++)
    {
        CvScalar dat = cvGet1D(lines, i);
        double rho = dat.val[0];
        double theta = dat.val[1];
        if(theta == 0.0)
            continue;                // would divide by sin(0) below
        double degrees = theta*180.0/(3.1412);

        // End points where the line crosses x = 0 and x = width.
        CvPoint pt1 = cvPoint(0, rho/sin(theta));
        CvPoint pt2 = cvPoint(img->width, (-img->width/tan(theta)) + rho/sin(theta));

        // BUG FIX: abs() truncates a double to int; use fabs() for the
        // distance-from-origin test.
        if(fabs(rho) < 50.0)
        {
            if(degrees > 45.0 && degrees < 135.0)
            {
                // Near-horizontal line close to the origin: top edge.
                numTop++;
                top1.x += pt1.x;
                top1.y += pt1.y;
                top2.x += pt2.x;
                top2.y += pt2.y;
            }
            else
            {
                // Near-vertical line close to the origin: left edge.
                numLeft++;
                left1.x += pt1.x;
                left1.y += pt1.y;
                left2.x += pt2.x;
                left2.y += pt2.y;
            }
        }
        else
        {
            // Far from the origin: bottom or right edge.
            if(degrees > 45.0 && degrees < 135.0)
            {
                numBottom++;
                bottom1.x += pt1.x;
                bottom1.y += pt1.y;
                bottom2.x += pt2.x;
                bottom2.y += pt2.y;
            }
            else
            {
                numRight++;
                right1.x += pt1.x;
                right1.y += pt1.y;
                right2.x += pt2.x;
                right2.y += pt2.y;
            }
        }
    }

    // Average the accumulated end points; guard each class against zero
    // members (the original divided by zero when an edge had no lines).
    if(numLeft > 0)  { left1.x /= numLeft;   left1.y /= numLeft;
                       left2.x /= numLeft;   left2.y /= numLeft; }
    if(numRight > 0) { right1.x /= numRight; right1.y /= numRight;
                       right2.x /= numRight; right2.y /= numRight; }
    if(numTop > 0)   { top1.x /= numTop;     top1.y /= numTop;
                       top2.x /= numTop;     top2.y /= numTop; }
    if(numBottom > 0){ bottom1.x /= numBottom; bottom1.y /= numBottom;
                       bottom2.x /= numBottom; bottom2.y /= numBottom; }

    cvLine(img, left1, left2, CV_RGB(255, 0,0), 1);
    cvLine(img, right1, right2, CV_RGB(255, 0,0), 1);
    cvLine(img, top1, top2, CV_RGB(255, 0,0), 1);
    cvLine(img, bottom1, bottom2, CV_RGB(255, 0,0), 1);

    // Express each averaged line as A*x + B*y = C for intersection.
    double leftA = left2.y-left1.y;
    double leftB = left1.x-left2.x;
    double leftC = leftA*left1.x + leftB*left1.y;

    double rightA = right2.y-right1.y;
    double rightB = right1.x-right2.x;
    double rightC = rightA*right1.x + rightB*right1.y;

    double topA = top2.y-top1.y;
    double topB = top1.x-top2.x;
    double topC = topA*top1.x + topB*top1.y;

    double bottomA = bottom2.y-bottom1.y;
    double bottomB = bottom1.x-bottom2.x;
    double bottomC = bottomA*bottom1.x + bottomB*bottom1.y;

    // Corner = intersection of two edges; det == 0 means the pair is
    // parallel/degenerate, so fall back to (0,0) instead of dividing by 0.
    double detTopLeft = leftA*topB - leftB*topA;
    CvPoint ptTopLeft = (detTopLeft != 0)
        ? cvPoint((topB*leftC - leftB*topC)/detTopLeft, (leftA*topC - topA*leftC)/detTopLeft)
        : cvPoint(0, 0);

    double detTopRight = rightA*topB - rightB*topA;
    CvPoint ptTopRight = (detTopRight != 0)
        ? cvPoint((topB*rightC-rightB*topC)/detTopRight, (rightA*topC-topA*rightC)/detTopRight)
        : cvPoint(0, 0);

    double detBottomRight = rightA*bottomB - rightB*bottomA;
    CvPoint ptBottomRight = (detBottomRight != 0)
        ? cvPoint((bottomB*rightC-rightB*bottomC)/detBottomRight, (rightA*bottomC-bottomA*rightC)/detBottomRight)
        : cvPoint(0, 0);

    double detBottomLeft = leftA*bottomB-leftB*bottomA;
    CvPoint ptBottomLeft = (detBottomLeft != 0)
        ? cvPoint((bottomB*leftC-leftB*bottomC)/detBottomLeft, (leftA*bottomC-bottomA*leftC)/detBottomLeft)
        : cvPoint(0, 0);

    // Mark the corners (a zero-length line draws a thick dot).
    cvLine(img, ptTopLeft, ptTopLeft, CV_RGB(0,255,0), 5);
    cvLine(img, ptTopRight, ptTopRight, CV_RGB(0,255,0), 5);
    cvLine(img, ptBottomRight, ptBottomRight, CV_RGB(0,255,0), 5);
    cvLine(img, ptBottomLeft, ptBottomLeft, CV_RGB(0,255,0), 5);

    IplImage* imgMask = cvCreateImage(imgSize, 8, 3);
    cvZero(imgMask);
    // BUG FIX: stack array instead of the original leaked `new CvPoint[4]`.
    CvPoint pts[4];
    pts[0] = ptTopLeft;
    pts[1] = ptTopRight;
    pts[2] = ptBottomRight;
    pts[3] = ptBottomLeft;
    cvFillConvexPoly(imgMask, pts, 4, cvScalar(255,255,255));
    cvAnd(img, imgMask, img);

    cvNamedWindow("Original");
    cvNamedWindow("Detected");

    cvShowImage("Original", img);
    cvShowImage("Detected", detected);
    cvWaitKey(0);

    // Release everything (the original leaked all allocations).
    cvReleaseMat(&lines);
    cvReleaseImage(&imgMask);
    cvReleaseImage(&imgBlue);
    cvReleaseImage(&imgGreen);
    cvReleaseImage(&imgRed);
    cvReleaseImage(&detected);
    cvReleaseImage(&img);

    return 0;
}
// Background thread entry point (Windows _beginthreadex / POSIX pthread
// signature selected by the preprocessor): repeatedly filters the shared
// board image and extracts X/Y Hough lines until _EnableThread is cleared.
UINT WINAPI
//DWORD WINAPI
#elif defined(POSIX_SYS)
// using pthread
void *
#endif
	ChessRecognition::HoughLineThread(
#if defined(WINDOWS_SYS)
	LPVOID
#elif defined(POSIX_SYS)
	void *
#endif
	Param) {
	// Worker thread that runs in the background.
	// The owning ChessRecognition instance arrives via the thread parameter.
	ChessRecognition *_TChessRecognition = (ChessRecognition *)Param;
	_TChessRecognition->_HoughLineBased = new HoughLineBased();

	CvSeq *_TLineX, *_TLineY;
	// 1-D derivative kernel applied along each axis to emphasise edges.
	double _TH[] = { -1, -7, -15, 0, 15, 7, 1 };

	CvMat _TDoGX = cvMat(1, 7, CV_64FC1, _TH);
	CvMat* _TDoGY = cvCreateMat(7, 1, CV_64FC1);
	cvTranspose(&_TDoGX, _TDoGY); // transpose(&DoGx) -> DoGy

	double _TMinValX, _TMaxValX, _TMinValY, _TMaxValY, _TMinValT, _TMaxValT;
	int _TKernel = 1;

	// Allocate the intermediate images used by the Hough pipeline.
	IplImage *iplTemp = cvCreateImage(cvSize(_TChessRecognition->_Width, _TChessRecognition->_Height), IPL_DEPTH_32F, 1);
	IplImage *iplDoGx = cvCreateImage(cvGetSize(iplTemp), IPL_DEPTH_32F, 1);
	IplImage *iplDoGy = cvCreateImage(cvGetSize(iplTemp), IPL_DEPTH_32F, 1);
	IplImage *iplDoGyClone = cvCloneImage(iplDoGy);
	IplImage *iplDoGxClone = cvCloneImage(iplDoGx);
	IplImage *iplEdgeX = cvCreateImage(cvGetSize(iplTemp), 8, 1);
	IplImage *iplEdgeY = cvCreateImage(cvGetSize(iplTemp), 8, 1);

	CvMemStorage* _TStorageX = cvCreateMemStorage(0), *_TStorageY = cvCreateMemStorage(0);

	while (_TChessRecognition->_EnableThread != false) {
		// Grab the current frame; the mutex keeps this read in sync with
		// the main loop that writes the image.
		_TChessRecognition->_ChessBoardDetectionInternalImageProtectMutex.lock();
		//EnterCriticalSection(&(_TChessRecognition->cs));
		cvConvert(_TChessRecognition->_ChessBoardDetectionInternalImage, iplTemp);
		//LeaveCriticalSection(&_TChessRecognition->cs);
		_TChessRecognition->_ChessBoardDetectionInternalImageProtectMutex.unlock();

		// Filter along each axis to bring out X- and Y-direction lines.
		cvFilter2D(iplTemp, iplDoGx, &_TDoGX); // horizontal gradient response
		cvFilter2D(iplTemp, iplDoGy, _TDoGY);
		cvAbs(iplDoGx, iplDoGx);
		cvAbs(iplDoGy, iplDoGy);

		// Normalize each image by its own max over the frame.
		cvMinMaxLoc(iplDoGx, &_TMinValX, &_TMaxValX);
		cvMinMaxLoc(iplDoGy, &_TMinValY, &_TMaxValY);
		cvMinMaxLoc(iplTemp, &_TMinValT, &_TMaxValT);
		cvScale(iplDoGx, iplDoGx, 2.0 / _TMaxValX); // normalize
		cvScale(iplDoGy, iplDoGy, 2.0 / _TMaxValY);
		cvScale(iplTemp, iplTemp, 2.0 / _TMaxValT);

		cvCopy(iplDoGy, iplDoGyClone);
		cvCopy(iplDoGx, iplDoGxClone);

		// Thin the responses with non-maximum suppression before Hough.
		_TChessRecognition->_HoughLineBased->NonMaximumSuppression(iplDoGx, iplDoGyClone, _TKernel);
		_TChessRecognition->_HoughLineBased->NonMaximumSuppression(iplDoGy, iplDoGxClone, _TKernel);

		cvConvert(iplDoGx, iplEdgeY); // convert back to IPL_DEPTH_8U
		cvConvert(iplDoGy, iplEdgeX);

		double rho = 1.0; // distance resolution in pixel-related units.
		double theta = 1.0; // angle resolution measured in radians.
		int threshold = 20;

		if (threshold == 0)
			threshold = 1;

		// Run the standard Hough transform on each thinned edge map.
		_TLineX = cvHoughLines2(iplEdgeX, _TStorageX, CV_HOUGH_STANDARD, 1.0 * rho, CV_PI / 180 * theta, threshold, 0, 0);
		_TLineY = cvHoughLines2(iplEdgeY, _TStorageY, CV_HOUGH_STANDARD, 1.0 * rho, CV_PI / 180 * theta, threshold, 0, 0);

		// Convert the CvSeq results into vectors (under lock).
		_TChessRecognition->_Vec_ProtectionMutex.lock();
		_TChessRecognition->_HoughLineBased->CastSequence(_TLineX, _TLineY);
		_TChessRecognition->_Vec_ProtectionMutex.unlock();

		Sleep(2);
	}

	// Release the transposed kernel matrix.
	cvReleaseMat(&_TDoGY);

	// Release the intermediate images.
	cvReleaseImage(&iplTemp);
	cvReleaseImage(&iplDoGx);
	cvReleaseImage(&iplDoGy);
	cvReleaseImage(&iplDoGyClone);
	cvReleaseImage(&iplDoGxClone);
	cvReleaseImage(&iplEdgeX);
	cvReleaseImage(&iplEdgeY);

	// Release the memory storages used by cvHoughLines2.
	cvReleaseMemStorage(&_TStorageX);
	cvReleaseMemStorage(&_TStorageY);

	delete _TChessRecognition->_HoughLineBased;
#if defined(WINDOWS_SYS)
	_endthread();
#elif defined(POSIX_SYS)

#endif
	_TChessRecognition->_EndThread = true;
	return 0;
}
void main(int argc, char** argv)
{
	cvNamedWindow("src",0 );
	cvNamedWindow("warp image",0 );
	cvNamedWindow("warp image (grey)",0 );
	cvNamedWindow("Smoothed warped gray",0 );
	cvNamedWindow("threshold image",0 );
	cvNamedWindow("canny",0 );
	cvNamedWindow("final",1 );
		
	CvPoint2D32f srcQuad[4], dstQuad[4];
	CvMat* warp_matrix = cvCreateMat(3,3,CV_32FC1);
	float Z=1;

	dstQuad[0].x = 216; //src Top left
	dstQuad[0].y = 15;
	dstQuad[1].x = 392; //src Top right
	dstQuad[1].y = 6;
	dstQuad[2].x = 12; //src Bottom left
	dstQuad[2].y = 187;
	dstQuad[3].x = 620; //src Bot right
	dstQuad[3].y = 159;

	srcQuad[0].x = 100; //dst Top left
	srcQuad[0].y = 120;
	srcQuad[1].x = 540; //dst Top right
	srcQuad[1].y = 120;
	srcQuad[2].x = 100; //dst Bottom left
	srcQuad[2].y = 360;
	srcQuad[3].x = 540; //dst Bot right
	srcQuad[3].y = 360;

	cvGetPerspectiveTransform(srcQuad, dstQuad,	warp_matrix);
	
	//CvCapture *capture = cvCaptureFromCAM(0);
	/*double fps = cvGetCaptureProperty(capture, CV_CAP_PROP_FPS);
	IplImage* image = cvRetrieveFrame(capture);
	CvSize imgSize;
    imgSize.width = image->width;
    imgSize.height = image->height;
	CvVideoWriter *writer = cvCreateVideoWriter("out.avi", CV_FOURCC('M', 'J', 'P', 'G'), fps, imgSize);*/
	int ik=0;
	while(1)
	{
		//IplImage* img = cvQueryFrame(capture);
		IplImage* img = cvLoadImage( "../../Data/6 Dec/009.jpg", CV_LOAD_IMAGE_COLOR);
		cvShowImage( "src", img );
		//cvWriteFrame(writer, img);
		//cvSaveImage(nameGen(ik++), img, 0);
		
		IplImage* warp_img = cvCloneImage(img);
		CV_MAT_ELEM(*warp_matrix, float, 2, 2) = Z;
		cvWarpPerspective(img, warp_img, warp_matrix, CV_INTER_LINEAR | CV_WARP_INVERSE_MAP | CV_WARP_FILL_OUTLIERS);
		cvShowImage( "warp image", warp_img );

		IplImage* grayimg = cvCreateImage(cvGetSize(warp_img),IPL_DEPTH_8U,1);
		cvCvtColor( warp_img, grayimg, CV_RGB2GRAY );
		cvShowImage( "warp image (grey)", grayimg );
		
		cvSmooth(grayimg, grayimg, CV_GAUSSIAN, 3, 3, 0.0, 0.0);
		cvShowImage( "Smoothed warped gray", grayimg );
		
		IplImage* thresholded_img=simplethreshold(grayimg, 220);
		cvShowImage("threshold image",thresholded_img);

		//grayimg = doCanny( thresholded_img, 50, 100, 3 );
		grayimg = cvCloneImage(thresholded_img);
		cvShowImage("canny",grayimg);

		IplImage* finalimg = cvCreateImage(cvGetSize(grayimg),IPL_DEPTH_8U,3);
		CvMemStorage* line_storage=cvCreateMemStorage(0);

		CvSeq* results =  cvHoughLines2(grayimg,line_storage,CV_HOUGH_PROBABILISTIC,10,CV_PI/180*5,350,100,10);
		double angle = 0.0, temp;
		double lengthSqd, wSum=0;
		double xc = 0, yc = 0;
		for( int i = 0; i < results->total; i++ )
		{
			CvPoint* line = (CvPoint*)cvGetSeqElem(results,i);
			cvLine( finalimg, line[0], line[1], CV_RGB(0,0,255), 1, CV_AA, 0 );
			//lengthSqd = (line[0].x - line[1].x)*(line[0].x - line[1].x) + (line[0].y - line[1].y)*(line[0].y - line[1].y);
			wSum += 1;//lengthSqd;
			if(line[0].y > line[1].y)
				temp = atan((line[0].y - line[1].y + 0.0) / (line[0].x - line[1].x));
			else
				temp = atan((line[1].y - line[0].y + 0.0) / (line[1].x - line[0].x));
			if(temp < 0)
				angle += (90 + 180/3.14*temp)/* * lengthSqd*/;
			else
				angle += (180/3.14*temp - 90)/* * lengthSqd*/;
			xc += line[0].x + line[1].x;
			yc += line[0].y + line[1].y;
		}
		angle=angle/wSum;
		//angle+=10;
		printf("total: %d, angle: % f\n", results->total, angle);

		xc /= 2*results->total;
		yc /= 2*results->total;
		double m = (angle != 0) ? 1/tan(angle*3.14/180) : 100;	// 100 represents a very large slope (near vertical)
		m=-m;

		double x1, y1, x2, y2;	// The Center Line
		y1 = 0;
		y2 = finalimg->height;
		x1 = xc + (y1-yc)/m;
		x2 = xc + (y2-yc)/m; 
		cvLine(finalimg, cvPoint(x1, y1), cvPoint(x2, y2), CV_RGB(0,255,0), 1, CV_AA, 0);
		printf("point: %f\t%f\n", xc, yc);

		double lx=0, ly=0, lm=0, lc=0, rx=0, ry=0, rm=0, rc=0;
		for( int i = 0; i < results->total; i++ )
		{
			CvPoint* line = (CvPoint*)cvGetSeqElem(results,i);
			double xm = (line[0].x + line[1].x)/2.0, ym = (line[0].y + line[1].y)/2.0;
			if(ym - yc - m*(xm - xc) > 0)
			{
				lx += xm;
				ly += ym;
				lm += (line[1].y - line[0].y)/(line[1].x - line[0].x+0.0001);
				lc++;
			}
			else
			{
				rx += xm;
				ry += ym;
				rm += (line[1].y - line[0].y)/(line[1].x - line[0].x+0.0001);
				rc++;
			}
		}

		// The Left Line
		lx /= lc;	ly /= lc;	lm /= lc;
		rx /= rc;	ry /= rc;	rm /= rc;
		printf("lins: %f\t%f\t%f\n", lx, ly, lm);
		printf("lins: %f\t%f\t%f\n", rx, ry, rm);
		y1 = 0;
		y2 = finalimg->height-5;
		x1 = lx + (y1-ly)/lm;
		x2 = lx + (y2-ly)/lm; 
		cvLine(finalimg, cvPoint(x1, y1), cvPoint(x2, y2), CV_RGB(255,255,0), 1, CV_AA, 0);

		// The Right Line
		y1 = 0;
		y2 = finalimg->height-5;
		x1 = rx + (y1-ry)/rm;
		x2 = rx + (y2-ry)/rm; 
		cvLine(finalimg, cvPoint(x1, y1), cvPoint(x2, y2), CV_RGB(0,255,255), 1, CV_AA, 0);

		// The Center Point
		CvPoint vpt = cvPoint(finalimg->width/2, 416);
		printf("center point: %d\t%d\n", vpt.x, vpt.y);
		
		// The Dl and Dr
		int dl = vpt.x - lx + (ly-vpt.y+0.0)/lm;
		int dr = (vpt.y-ry+0.0)/rm + rx - vpt.x;
		printf("dl-dr: %d\n", dl-dr);

		cvShowImage("final",finalimg);

		if(dl-dr < SAFEZONE_LL)	// Assume that the bot lies just on the boundary of the safe zone
		{
			if(angle < -10)
			{
				navCommand(7, angle);
			}
			else
			{
				navCommand(7, angle);
			}
		}	
		else if(dl-dr > SAFEZONE_RL)
		{
			if(angle > 10)
			{
				navCommand(-7, angle);
			}
			else
			{
				navCommand(-7, angle);
			}
		}
		else
		{
			if((angle < 10) && (angle > -10))
			{
				navCommand(angle, angle);
			}
			else
			{
				navCommand(0, angle);
			}
		}

		cvWaitKey(0);
	}
}
Exemple #13
0
/**
 * Process one laser scan: project it into a point cloud, rasterize the nearby
 * points into rawData_, detect the left/right crop rows with a standard Hough
 * transform, smooth the estimates with 10-sample rolling means, and publish
 * the row message (rows_) plus RViz markers.
 *
 * Fixes over the previous revision:
 *  - rows_.var used `5^2`, `10^2`, `4000^2`; operator^ is bitwise XOR in C++
 *    (5^2 == 7), so the intended squares are written as literals now.
 *  - abs() on a float resolves to the integer overload and truncates; the
 *    lateral-distance filter uses fabs() instead.
 */
void MaizeDetector::processLaserScan(
	const sensor_msgs::LaserScan::ConstPtr& laser_scan) {
	float rthetamean = 0, rrhomean = 0, lthetamean = 0, lrhomean = 0, theta = 0, rho = 0;
	double x0 = 0, y0 = 0, a, b;
	int lmc = 0, rmc = 0;	/* number of Hough lines averaged per side */

	/* static so the rolling-mean buffers index consistently across scans */
	static int right_count = 0;
	static int left_count = 0;

	clearRawImage();

	tf::TransformListener listener_;
	sensor_msgs::PointCloud cloud;

	try
	{
		projector_.projectLaser(*laser_scan, cloud);
		// projector_.transformLaserScanToPointCloud("base_link",*laser_scan,cloud,listener_);
	}
	catch (tf::TransformException& e)
	{
		std::cout << e.what();
		return;
	}

	int size = cloud.points.size();

	for (int i = 0; i < size; i++) {
		//if (((abs(cloud.points[i].y))-((cloud.points[i].x-300)*0.5))<0.65) {
		/* keep points within 0.65 m laterally and in front of the scanner;
		 * fabs() because int abs() would truncate the float to 0 or 1 */
		if (fabs(cloud.points[i].y) < 0.65 && cloud.points[i].x > 0) {

			/* metres -> pixels at 50 px/m, origin shifted to (300,300) */
			point1.x = ((int)(cloud.points[i].x * 50) + 300);
			point1.y = ((int)(cloud.points[i].y * 50) + 300);
			point2.x = point1.x-4;
			point2.y = point1.y;
			cvLine(rawData_, point1, point2, CV_RGB(255,255,255), 3, CV_AA, 0);

		}
	}

	cvCvtColor(rawData_, workingImg_, CV_BGR2GRAY);
	cvDilate(rawData_,rawData_,NULL,6);
	cvErode(rawData_,rawData_,NULL,4);

	lines_ = cvHoughLines2(workingImg_, storage_, CV_HOUGH_STANDARD, 1, CV_PI/ 180, 60, 0, 0);
	//lines_ = cvHoughLines2(workingImg_, storage_, CV_HOUGH_PROBABILISTIC, 1, CV_PI/360, 30, 10, 30);

	/* Split the strongest (up to 20) Hough lines into two groups depending on
	 * whether they cross the lower or the upper half of the vertical centre
	 * line, accumulating rho/theta for a per-group mean line. */
	for(int i = 0; i < MIN(lines_->total,20); i++){

		float* line = (float*) cvGetSeqElem(lines_, i);
		rho = line[0];
		theta = line[1];

		a = cos(theta);
		b = sin(theta);
		x0 = a * rho;
		y0 = b * rho;
		point1.x = cvRound(x0 + 600 * (-b));
		point1.y = cvRound(y0 + 600 * (a));
		point2.x = cvRound(x0 - 600 * (-b));
		point2.y = cvRound(y0 - 600 * (a));
		point3.x = 300, point3.y = 300;
		point4.x = 300, point4.y = 600;	/* lower half of the centre line */
		point5.x = 300, point5.y = 0;	/* upper half of the centre line */

		cvLine(rawData_, point3, point4, CV_RGB(0,0,255), 1, CV_AA, 0);
		cvLine(rawData_, point3, point5, CV_RGB(0,0,255), 1, CV_AA, 0);

		if (intersect(point1, point2, point3, point4)) {
			rrhomean += rho;
			rthetamean += theta;
			rmc++;
		} else if (intersect(point1, point2, point3, point5)) {
			lrhomean += rho;
			lthetamean += theta;
			lmc++;
		}
	}
	/* NOTE(review): lmc/rmc may be 0 here, making the means NaN; the
	 * intersect() tests below then fail so the side is marked invalid —
	 * TODO confirm that intersect() rejects NaN endpoints. */
	theta = lthetamean / lmc;
	rho = lrhomean / lmc;

	a = cos(theta);
	b = sin(theta);
	x0 = a * rho;
	y0 = b * rho;
	point1.x = cvRound(x0 + 600 * (-b)), point1.y = cvRound(y0 + 600 * (a));
	point2.x = cvRound(x0 - 600 * (-b)), point2.y = cvRound(y0 - 600 * (a));

	point4.x = 300;
	point4.y = 300;

	point5.x = point4.x + 800 * sin(CV_PI - (theta - CV_PI / 2));
	point5.y = point4.y + 800 * cos(CV_PI - (theta - CV_PI / 2));
	cvLine(rawData_, point5, point4, CV_RGB(255,255,255), 1, CV_AA, 0);

	rows_.header.stamp = ros::Time::now();
	rows_.leftvalid = false;
	rows_.rightvalid = false;

	// detect a valid right row line
	if (intersect(point1, point2, point4, point5)) {

		/* distance in cm from image centre (pixel pitch is 2 cm) */
		right_distance_ = sqrt(((intersection_.y - 300) * (intersection_.y- 300)) + ((intersection_.x - 300) * (intersection_.x - 300)))* 2;
		right_angle_ = (theta) - CV_PI / 2;
		right_count++;

		/* 10-sample rolling mean of angle and distance */
		right_angle_rolling_mean_[right_count%10] = right_angle_;
		right_dist_rolling_mean_[right_count%10] = right_distance_;
		right_angle_ = 0;
		right_distance_ = 0;

		for(int i=0;i < 10;i++){
			right_angle_ += right_angle_rolling_mean_[i];
			right_distance_ += right_dist_rolling_mean_[i];
		}

		right_angle_ = right_angle_/10;
		right_distance_ = right_distance_ /10;

		ROS_WARN("right_distance_: %f",right_distance_);
		cvLine(rawData_, point1, point2, CV_RGB(0,255,0), 1, CV_AA, 0);

		marker_r.header.frame_id = "/laser_front_link";
		marker_r.header.stamp = ros::Time::now();

		marker_r.ns = "kf_shapes";

		// Marker type/action for the right-row cube.
		marker_r.type = visualization_msgs::Marker::CUBE;
		marker_r.action = visualization_msgs::Marker::ADD;

		// Full 6DOF pose relative to the frame/time specified in the header.
		marker_r.id = 2;
		geometry_msgs::Quaternion pose_quat = tf::createQuaternionMsgFromYaw(right_angle_);
		marker_r.pose.position.x = 0;
		marker_r.pose.position.y = -((float) right_distance_) / 100;	/* cm -> m, right side is -y */
		marker_r.pose.position.z = 0;
		marker_r.pose.orientation = pose_quat;

		// Scale of the marker -- 1x1x1 means 1 m on a side.
		marker_r.scale.x = 10.0;
		marker_r.scale.y = 0.1;
		marker_r.scale.z = 0.5;
		marker_r.color.r = 0.0f;
		marker_r.color.g = 1.0f;
		marker_r.color.b = 0.0f;
		marker_r.color.a = 0.5;
		marker_r.lifetime = ros::Duration(.1);
		marker_r_pub.publish(marker_r);

		rows_.rightvalid = true;
		rows_.rightdistance = ((float) right_distance_) / 100;
		rows_.rightangle = right_angle_;

	}else{
		rows_.rightvalid = false;
		rows_.rightdistance = 0;
		rows_.rightangle = 0;

	}

	/* Same procedure for the other side, using the other group's mean. */
	theta = rthetamean / rmc;
	rho = rrhomean / rmc;

	a = cos(theta);
	b = sin(theta);
	x0 = a * rho;
	y0 = b * rho;
	point1.x = cvRound(x0 + 600 * (-b)), point1.y = cvRound(y0 + 600 * (a));
	point2.x = cvRound(x0 - 600 * (-b)), point2.y = cvRound(y0 - 600 * (a));

	point4.x = 300;
	point4.y = 300;
	point5.x = point4.x - 800 * sin(CV_PI - (theta - CV_PI / 2));
	point5.y = point4.y - 800 * cos(CV_PI - (theta - CV_PI / 2));

	cvLine(rawData_, point5, point4, CV_RGB(255,255,255), 1, CV_AA, 0);

	// detect a valid left row line
	if (intersect(point1, point2, point4, point5)) {

		left_distance_ = sqrt(((intersection_.y - 300) * (intersection_.y- 300)) + ((intersection_.x - 300) * (intersection_.x - 300)))* 2;
		left_angle_ = (theta) - CV_PI / 2;
		left_count++;

		left_angle_rolling_mean_[left_count%10] = left_angle_;
		left_dist_rolling_mean_[left_count%10] = left_distance_;
		left_angle_ = 0;
		left_distance_ = 0;

		for(int i=0;i < 10;i++){
			left_angle_ += left_angle_rolling_mean_[i];
			left_distance_ += left_dist_rolling_mean_[i];
		}

		left_angle_ = left_angle_/10;
		left_distance_ = left_distance_ /10;


		ROS_WARN("left_distance_: %f",left_distance_);

		marker_r.id = 1;
		geometry_msgs::Quaternion pose_quat = tf::createQuaternionMsgFromYaw(left_angle_);

		marker_r.color.r = 0.0f;
		marker_r.color.g = 0.0f;
		marker_r.color.b = 1.0f;
		marker_r.pose.position.x = 0;
		marker_r.pose.position.y = ((float) left_distance_) / 100;	/* cm -> m, left side is +y */
		marker_r.pose.position.z = 0;
		marker_r.pose.orientation = pose_quat;
		marker_r_pub.publish(marker_r);

		cvLine(rawData_, point1, point2, CV_RGB(0,255,255), 1, CV_AA, 0);

		rows_.leftvalid = true;
		rows_.leftdistance = ((float) left_distance_) / 100;
		rows_.leftangle = left_angle_;

	}else{
		rows_.leftvalid = false;
		rows_.leftdistance = 0;
		rows_.leftangle = 0;
	}

	/* Fuse the per-side estimates into a single error signal; the variance
	 * encodes confidence (both rows < one row < no row). */
	if (rows_.leftvalid && rows_.rightvalid){
		e_angle = (rows_.leftangle + rows_.rightangle) / 2;
		e_distance = (rows_.leftdistance - rows_.rightdistance);

		row_dist_est =( (rows_.leftdistance + rows_.rightdistance) + row_dist_est)/2;

		ROS_INFO("2ROWS row_dist_est %f, e_dist %f",row_dist_est,e_distance);

		rows_.error_angle = e_angle;
		rows_.error_distance = e_distance;
		rows_.var = 25;	/* was 5^2 — ^ is XOR (==7); 5 squared intended */

		marker_r.id = 3;
		geometry_msgs::Quaternion pose_quat = tf::createQuaternionMsgFromYaw(e_angle);

		marker_r.color.r = 0.0f;
		marker_r.color.g = 1.0f;
		marker_r.color.b = 1.0f;

		marker_r.pose.orientation = pose_quat;
		marker_r.pose.position.x = 0;
		marker_r.pose.position.y = (e_distance);
		marker_r.pose.position.z = 0;
		marker_r_pub.publish(marker_r);
	}else if (rows_.leftvalid && !rows_.rightvalid){
		e_angle = (rows_.leftangle);
		e_distance = 0;//row_dist_est/2-(rows_.leftdistance);

		rows_.error_angle = e_angle;
		rows_.error_distance = e_distance;//e_distance-(0.75/2);
		rows_.var = 100;	/* was 10^2 — ^ is XOR (==8); 10 squared intended */

		marker_r.id = 3;

		ROS_INFO("LEFTROW row_dist_est %f, e_dist %f",row_dist_est,e_distance);


		geometry_msgs::Quaternion pose_quat = tf::createQuaternionMsgFromYaw(e_angle);

		marker_r.color.r = 0.0f;
		marker_r.color.g = 1.0f;
		marker_r.color.b = 1.0f;

		marker_r.pose.orientation = pose_quat;
		marker_r.pose.position.x = 0;
		marker_r.pose.position.y = (e_distance);
		marker_r.pose.position.z = 0;
		marker_r_pub.publish(marker_r);
	}else if (!rows_.leftvalid && rows_.rightvalid){

		e_angle = (rows_.rightangle);
		e_distance = 0;//row_dist_est/2-(rows_.rightdistance);

		ROS_INFO("LRIGHTROW row_dist_est %f, e_dist %f",row_dist_est,e_distance);

		rows_.error_angle = (0.75/2)-e_angle;
		rows_.error_distance = e_distance;//e_distance-(0.75/2);
		rows_.var = 100;	/* was 10^2 — ^ is XOR (==8); 10 squared intended */

		marker_r.id = 3;
		geometry_msgs::Quaternion pose_quat = tf::createQuaternionMsgFromYaw(e_angle);

		marker_r.color.r = 0.0f;
		marker_r.color.g = 1.0f;
		marker_r.color.b = 1.0f;

		marker_r.pose.orientation = pose_quat;
		marker_r.pose.position.x = 0;
		marker_r.pose.position.y = (e_distance);
		marker_r.pose.position.z = 0;
		marker_r_pub.publish(marker_r);
	}else{
		e_angle = 0;
		e_distance = 0;

		ROS_INFO("e_angle %f, e_dist %f",e_angle,e_distance);
		ROS_INFO("NOROW row_dist_est %f, e_dist %f",row_dist_est,e_distance);

		rows_.error_angle = e_angle;
		rows_.error_distance = e_distance;
		rows_.var = 16000000;	/* was 4000^2 — ^ is XOR (==4002); 4000 squared intended */

		marker_r.id = 3;
		geometry_msgs::Quaternion pose_quat = tf::createQuaternionMsgFromYaw(e_angle);

		marker_r.color.r = 0.0f;
		marker_r.color.g = 1.0f;
		marker_r.color.b = 1.0f;

		marker_r.pose.orientation = pose_quat;
		marker_r.pose.position.x = 0;
		marker_r.pose.position.y = (e_distance);
		marker_r.pose.position.z = 0;
		marker_r_pub.publish(marker_r);
	}

	//cvLine(rawData_, cvPoint(0,300+150), cvPoint(600,300+150), CV_RGB(255,255,255), 1, CV_AA, 0);
	//cvLine(rawData_, cvPoint(0,300-150), cvPoint(600,300-150), CV_RGB(255,255,255), 1, CV_AA, 0);
	row_pub.publish(rows_);

	//cvShowImage("TEST", rawData_);
	//cvWaitKey(10);
	//pc_pub.publish(cloud);

}
Exemple #14
0
/**
 * Inspect frame2 for the four edge "spots" (bright/dark saturated patches at
 * the middle of each frame edge) and probe the frame for pairs of mutually
 * perpendicular Hough segments.
 *
 * Returns a pointer to a 6-element array:
 *   xy[0] = number of triggered edge spots,
 *   xy[1..4] = up / right / down / left trigger flags,
 *   xy[5] = 1 when enough perpendicular segment pairs were found.
 *
 * Fixes over the previous revision:
 *  - the result buffer was a local array and `return xy;` handed the caller
 *    a dangling pointer (UB); the buffer is static now. Consequence: the
 *    function is not reentrant — each call overwrites the previous result.
 *  - the per-line loops indexed the fixed 1000-entry Vect arrays with the
 *    unclamped lines->total (the original comment itself warned about the
 *    overflow); the count is clamped to the array capacity.
 */
unsigned short int* find2 (IplImage* frame2)
{
	//-------------------------------- Algorithm area ----------------------------------------------------
	static unsigned short int xy[6];
	memset(xy, 0, sizeof(xy));

	/* mean brightness of a block at the middle of each edge */
	int up = brightness_spot (frame2, (frame2->width/2)-block_size/2, delta_edge);
	int right = brightness_spot (frame2, frame2->width-block_size-delta_edge, (frame2->height/2)-block_size/2);
	int down = brightness_spot (frame2, (frame2->width/2)-block_size/2, frame2->height-block_size-delta_edge);
	int left = brightness_spot (frame2, delta_edge, (frame2->height/2)-block_size/2);

	/* A spot triggers when it is near full white (getW) or full black (getB).
	 * A red circle marks the spot; the inner circle shows which extreme. */
	if ( up>=getW-delta_brightness_max || up<=getB+delta_brightness_max)
	{
		xy[0]++;
		xy[1]=1;
		CvPoint pt = cvPoint( cvRound( frame2->width/2 ), cvRound( delta_edge*3 ) );
		cvCircle(frame2, pt, cvRound( Radius ), CV_RGB(255,0,0), 10 );

		if ( up>=getW-delta_brightness_max )
		{
			cvCircle(frame2, pt, cvRound( Radius/2 ), CV_RGB(255,255,255), 5 );
		}
		else
		{
			cvCircle(frame2, pt, cvRound( Radius/2 ), CV_RGB(0,0,0), 5 );
		}
	}
	//if ( abs(right-w)<=delta_brightness_max || abs(right-b)<=delta_brightness_max)
	if ( right>=getW-delta_brightness_max || right<=getB+delta_brightness_max)
	{
		xy[0]++;
		xy[2]=1;
		CvPoint pt = cvPoint( cvRound(frame2->width - delta_edge*3  ), cvRound( frame2->height/2 ) );
		cvCircle(frame2, pt, cvRound( Radius ), CV_RGB(255,0,0), 10 );

		if ( right>=getW-delta_brightness_max)
		{
			cvCircle(frame2, pt, cvRound( Radius/2 ), CV_RGB(255,255,255), 5 );
		}
		else
		{
			cvCircle(frame2, pt, cvRound( Radius/2 ), CV_RGB(0,0,0), 5 );
		}
	}
	//if ( abs(down-w)<=delta_brightness_max || abs(down-b)<=delta_brightness_max)
	if ( down>=getW-delta_brightness_max || down<=getB+delta_brightness_max)
	{
		xy[0]++;
		xy[3]=1;
		CvPoint pt = cvPoint( cvRound( frame2->width/2 ), cvRound(frame2->height - delta_edge*3 ) );
		cvCircle(frame2, pt, cvRound( Radius ), CV_RGB(255,0,0), 10 );

		if ( down>=getW-delta_brightness_max)
		{
			cvCircle(frame2, pt, cvRound( Radius/2 ), CV_RGB(255,255,255), 5 );
		}
		else
		{
			cvCircle(frame2, pt, cvRound( Radius/2 ), CV_RGB(0,0,0), 5 );
		}

	}
	//if ( abs(left-w)<=delta_brightness_max || abs(left-b)<=delta_brightness_max)
	if ( left>=getW-delta_brightness_max || left<=getB+delta_brightness_max)
	{
		xy[0]++;
		xy[4]=1;
		CvPoint pt = cvPoint( cvRound( delta_edge*3 ), cvRound(frame2->height/2 ) );
		cvCircle(frame2, pt, cvRound( Radius ), CV_RGB(255,0,0), 10 );
		if (left>=getW-delta_brightness_max)
		{
			cvCircle(frame2, pt, cvRound( Radius/2 ), CV_RGB(255,255,255), 5 );
		}
		else
		{
			cvCircle(frame2, pt, cvRound( Radius/2 ), CV_RGB(0,0,0), 5 );
		}
	}

	/* Direction vectors of the detected segments; capacity is fixed, so the
	 * number of processed lines is clamped to it below. */
	class Vect
	{
	public: int x[1000], y[1000];
		Vect()
		{
			for (int i=0; i<1000; i++)
			{
				x[i]=0;
				y[i]=0;
			}
		}
	} Vec;

	IplImage* dst=0;
	// memory storage for the detected lines
	CvMemStorage* storage = cvCreateMemStorage(0);
	CvSeq* lines = 0;
	int i = 0;
	dst = cvCreateImage( cvGetSize(frame2), 8, 1 );
	// edge detection
	cvCanny( frame2, dst, 50, 100, 3 );

	// line detection
	lines = cvHoughLines2( dst, storage, CV_HOUGH_PROBABILISTIC, 1, CV_PI/180, 50, 200, 50 );

	// clamp to the Vect capacity to avoid writing past x[1000]/y[1000]
	int total = lines->total < 1000 ? lines->total : 1000;

	std::list<short unsigned int> MXX,MYY;
	// Draw the detected segments and record their direction vectors.
	for( i = 0; i < total; i++ ){
		CvPoint* line = (CvPoint*)cvGetSeqElem(lines,i);
		cvLine( frame2, line[0], line[1], CV_RGB(255,0,0), 3, CV_AA, 0 );
		Vec.x[i]=line[1].x-line[0].x;	// segment direction vector (x1-x0, y1-y0)
		Vec.y[i]=line[1].y-line[0].y;

		MXX.push_back(line[0].x);
		MXX.push_back(line[1].x);
		MYY.push_back(line[0].y);
		MYY.push_back(line[1].y);
	}

	int countPer=0;

	/* Two segments count as perpendicular when the dot product of their
	 * direction vectors is within +/-Os of zero. */
	const short int Os=100;
	for (int i=0; i<total; i++)
	{
		for (int j=i+1; j<total; j++)
		{
			if  ( ((Vec.x[i])*(Vec.x[j])+(Vec.y[i])*(Vec.y[j]))>=-Os && ((Vec.x[i])*(Vec.x[j])+(Vec.y[i])*(Vec.y[j]))<=Os)
			{
				countPer++;
			}
		}
	}

	if (total!=0 && countPer>4)
	{
		/* centroid of all segment endpoints (kept for the optional
		 * cross-hair drawing below) */
		int MX=0,MY=0;
		for(std::list<short unsigned int>::iterator i=MXX.begin();i!=MXX.end();i++)
		{
			MX+=(*i);
		}
		for(std::list<short unsigned int>::iterator i=MYY.begin();i!=MYY.end();i++)
		{
			MY+=(*i);
		}
		MX/=(total*2); MY/=(total*2);
		//const short int k2=10;

		// Draw a cross-hair at the centroid (disabled).
		/*CvPoint* line=new CvPoint();
		line[0].x=MX-k2;
		line[0].y=MY;
		line[1].x=MX+k2;
		line[1].y=MY;
		cvLine( frame2, line[0], line[1], CV_RGB(0,255,0), 3, CV_AA, 0 );
		line[0].x=MX;
		line[0].y=MY-k2;
		line[1].x=MX;
		line[1].y=MY+k2;
		cvLine( frame2, line[0], line[1], CV_RGB(0,255,0), 3, CV_AA, 0 );
		*/

		xy[5]=1;
	}

	// release resources
	cvReleaseMemStorage(&storage);
	cvReleaseImage(&dst);
	MXX.clear();
	MYY.clear();
	//----------------------------------------------------------------------------------------------------

	return xy;
}
Exemple #15
0
/*
 * cv.jit.lines matrix_calc: run Canny edge detection followed by a
 * probabilistic Hough transform on the 1-plane char input matrix, and write
 * one 4-plane int32 cell per detected segment (x1, y1, x2, y2) into the
 * output matrix. Returns a Jitter error code; both matrices are locked for
 * the duration of the call.
 */
t_jit_err cv_jit_lines_matrix_calc(t_cv_jit_lines *x, void *inputs, void *outputs)
{
	t_jit_err err = JIT_ERR_NONE;
	long in_savelock,out_savelock;
	t_jit_matrix_info in_minfo,out_minfo;
	char *in_bp,*out_bp;
	long i,dimcount,dim[JIT_MATRIX_MAX_DIMCOUNT];
	void *in_matrix,*out_matrix;
	t_int32 *out;

	double thresh1, thresh2, theta, rho;
	int houghThresh;

	CvMat source;
	CvPoint *ln;

	in_matrix 	= jit_object_method(inputs,_jit_sym_getindex,0);
	out_matrix 	= jit_object_method(outputs,_jit_sym_getindex,0);

	if (x&&in_matrix&&out_matrix) {

		/* lock both matrices, remembering the previous lock state so it can
		 * be restored at "out:" on every exit path */
		in_savelock = (long) jit_object_method(in_matrix,_jit_sym_lock,1);
		out_savelock = (long) jit_object_method(out_matrix,_jit_sym_lock,1);

		jit_object_method(in_matrix,_jit_sym_getinfo,&in_minfo);
		jit_object_method(out_matrix,_jit_sym_getinfo,&out_minfo);

		jit_object_method(in_matrix,_jit_sym_getdata,&in_bp);

		if (!in_bp) { err=JIT_ERR_INVALID_INPUT; goto out;}

		//compatible types? (Canny needs an 8-bit single-channel image)
		if (in_minfo.type!=_jit_sym_char) {
			err=JIT_ERR_MISMATCH_TYPE;
			goto out;
		}

		//compatible planes? (1-plane greyscale in, 4-plane line coords out)
		if ((in_minfo.planecount!=1)||(out_minfo.planecount!=4)) {
			err=JIT_ERR_MISMATCH_PLANE;
			goto out;
		}

		//get dimensions/planecount
		dimcount   = in_minfo.dimcount;
		for (i=0;i<dimcount;i++) {
			dim[i] = MIN(in_minfo.dim[i],out_minfo.dim[i]);
		}

		//Convert input matrix to OpenCV matrices (header only, no copy)
		cvJitter2CvMat(in_matrix, &source);

		//Adjust size of edge matrix if need be
		if((x->edges->rows != source.rows)||(x->edges->cols != source.cols))
		{
			cvReleaseMat(&(x->edges));
			x->edges = cvCreateMat( source.rows, source.cols, CV_8UC1 );
		}

		//Calculate parameter values for Hough and Canny algorithms:
		//Canny hysteresis thresholds are a fixed band around x->threshold.
		thresh1 = x->threshold - THRESHOLD_RANGE;
		thresh2 = x->threshold + THRESHOLD_RANGE;
		CLIP_ASSIGN(thresh1,0,255);
		CLIP_ASSIGN(thresh2,0,255);

		//resolution scales both the angular and the distance quantization
		theta = CV_PI /  (180 / (double)x->resolution);
		rho = (double)x->resolution;

		houghThresh = x->sensitivity;

		x->gap = MAX(0,x->gap);
		x->length = MAX(0,x->length);

		//calculate edges using Canny algorithm
		cvCanny( &source, x->edges, thresh1, thresh2, 3 );

		//Find lines using the probabilistic Hough transform method
		x->lines = cvHoughLines2( x->edges, x->storage, CV_HOUGH_PROBABILISTIC, rho, theta, houghThresh, x->length, x->gap );

		//Transfer line information to output matrix

		//First adjust matrix size: one cell per detected line.
		//NOTE(review): when no lines are found dim[0] becomes 0 — verify the
		//Jitter matrix accepts a zero-length dimension here.
		out_minfo.dim[0] = x->lines->total;
		jit_object_method(out_matrix,_jit_sym_setinfo,&out_minfo);
		jit_object_method(out_matrix,_jit_sym_getinfo,&out_minfo);

		jit_object_method(out_matrix,_jit_sym_getdata,&out_bp);
		if (!out_bp) { err=JIT_ERR_INVALID_OUTPUT; goto out;}

		//Copy... each segment's endpoints into the 4 planes of one cell.
		out = (t_int32 *)out_bp;

		for( i = 0; i < x->lines->total; i++ )
       	 	{
            		ln = (CvPoint*)cvGetSeqElem(x->lines,i);
            		out[0] = ln[0].x;
            		out[1] = ln[0].y;
            		out[2] = ln[1].x;
            		out[3] = ln[1].y;
            		
            		out+=4;
        	}
		
	} else {
		return JIT_ERR_INVALID_PTR;
	}
	
out:
	/* restore the callers' lock state on both matrices */
	jit_object_method(out_matrix,gensym("lock"),out_savelock);
	jit_object_method(in_matrix,gensym("lock"),in_savelock);
	return err;
}
void main(int argc, char** argv)
{
	CvPoint2D32f srcQuad[4], dstQuad[4];
	CvMat* warp_matrix = cvCreateMat(3,3,CV_32FC1);
	float Z=1;
	/*cvNamedWindow("img", CV_WINDOW_AUTOSIZE);
	cvNamedWindow("warp", CV_WINDOW_AUTOSIZE);*/

	dstQuad[0].x = 250; //src Top left
	dstQuad[0].y = 100;
	dstQuad[1].x = 430; //src Top right
	dstQuad[1].y = 115;
	dstQuad[2].x = 50; //src Bottom left
	dstQuad[2].y = 170;
	dstQuad[3].x = 630; //src Bot right
	dstQuad[3].y = 250;

	int lOff = 50, tOff = 150;
	srcQuad[0].x = tOff; //dst Top left
	srcQuad[0].y = lOff;
	srcQuad[1].x = 640-tOff; //dst Top right
	srcQuad[1].y = lOff;
	srcQuad[2].x = tOff; //dst Bottom left
	srcQuad[2].y = 480-lOff;
	srcQuad[3].x = 640-tOff; //dst Bot right
	srcQuad[3].y = 480-lOff;

	cvGetPerspectiveTransform(srcQuad, dstQuad,	warp_matrix);

	int ik=0, ni = 0, niX = 22-1;
	char names[22][25] = {
							"../../Data/6 Dec/009.jpg", 
							"../../Data/6 Dec/011.jpg",
							"../../Data/6 Dec/012.jpg",
							"../../Data/6 Dec/016.jpg",
							"../../Data/6 Dec/018.jpg",
							"../../Data/6 Dec/019.jpg",
							"../../Data/6 Dec/020.jpg",
							"../../Data/6 Dec/022.jpg",
							"../../Data/6 Dec/024.jpg",
							"../../Data/6 Dec/064.jpg",
							"../../Data/6 Dec/065.jpg",
							"../../Data/6 Dec/066.jpg",
							"../../Data/6 Dec/067.jpg",
							"../../Data/6 Dec/068.jpg",
							"../../Data/6 Dec/069.jpg",
							"../../Data/6 Dec/070.jpg",
							"../../Data/6 Dec/071.jpg",
							"../../Data/6 Dec/072.jpg",
							"../../Data/6 Dec/073.jpg",
							"../../Data/6 Dec/074.jpg",
							"../../Data/6 Dec/075.jpg",
							"../../Data/6 Dec/076.jpg"
						};
	int lwSum = 0, nopf = 0;
	//CvCapture *capture = cvCaptureFromCAM(0);
	/*double fps = cvGetCaptureProperty(capture, CV_CAP_PROP_FPS);
	IplImage* image = cvRetrieveFrame(capture);
	CvSize imgSize;
    imgSize.width = image->width;
    imgSize.height = image->height;
	CvVideoWriter *writer = cvCreateVideoWriter("out.avi", CV_FOURCC('M', 'J', 'P', 'G'), fps, imgSize);*/
	while(1)
	{
		//IplImage* img = cvQueryFrame(capture);
		IplImage* img = cvLoadImage( "../../Data/23 Jan/c.jpg", CV_LOAD_IMAGE_COLOR);
		//cvSaveImage(nameGen(ik++), img, 0);
		//cvShowImage("img", img);

		IplImage* warp_img = cvCloneImage(img);
		CV_MAT_ELEM(*warp_matrix, float, 2, 2) = Z;
		cvWarpPerspective(img, warp_img, warp_matrix, CV_INTER_LINEAR | CV_WARP_INVERSE_MAP | CV_WARP_FILL_OUTLIERS);
		//cvReleaseImage(&img);
		//cvWaitKey(0);

		IplImage* grayimg = cvCreateImage(cvGetSize(warp_img),IPL_DEPTH_8U,1);
		cvCvtColor( warp_img, grayimg, CV_RGB2GRAY );
		cvReleaseImage(&warp_img);
		
		cvSmooth(grayimg, grayimg, CV_GAUSSIAN, 3, 3, 0.0, 0.0);
		cvEqualizeHist(grayimg, grayimg);
		cvThreshold(grayimg, grayimg, PercentileThreshold(grayimg, 10.0), 255, CV_THRESH_BINARY);
		
		IplImage* finalimg = cvCreateImage(cvGetSize(grayimg),IPL_DEPTH_8U,3);
		CvMemStorage* line_storage=cvCreateMemStorage(0);

		CvSeq* results =  cvHoughLines2(grayimg,line_storage,CV_HOUGH_PROBABILISTIC,10,CV_PI/180*5,350,100,10);
		cvReleaseImage(&grayimg);

		double angle = 0.0, temp;
		double lengthSqd, wSum=0;
		CvPoint center = cvPoint(0, 0);
		for( int i = 0; i < results->total; i++ )
		{
			CvPoint* line = (CvPoint*)cvGetSeqElem(results,i);
			//lengthSqd = (line[0].x - line[1].x)*(line[0].x - line[1].x) + (line[0].y - line[1].y)*(line[0].y - line[1].y);
			wSum += 1;//lengthSqd;
			if(line[0].y > line[1].y)
				temp = atan((line[0].y - line[1].y + 0.0) / (line[0].x - line[1].x));
			else
				temp = atan((line[1].y - line[0].y + 0.0) / (line[1].x - line[0].x));
			if(temp < 0)
				angle += (90 + 180/3.14*temp)/* * lengthSqd*/;
			else
				angle += (180/3.14*temp - 90)/* * lengthSqd*/;
			center.x += (line[0].x + line[1].x)/2;
			center.y += (line[0].y + line[1].y)/2;
		}
		angle /= wSum;	// Angle Direction: Left == -ve and Right == +ve
						// Angle is calculated w.r.t Vertical
		//angle+=10;	// Angle Offset (Depends on camera's position)
		center.x /= results->total;
		center.y /= results->total;

		double m = (angle != 0) ? tan(CV_PI*(0.5-angle/180)) : 100000;	// 100000 represents a very large slope (near vertical)
		//m=-m;		// Slope Correction
		
		CvPoint leftCenter = cvPoint(0, 0), rightCenter = cvPoint(0, 0);
		double leftSlope = 0, rightSlope = 0, leftCount = 0, rightCount = 0;
		for( int i = 0; i < results->total; i++ )
		{
			CvPoint* line = (CvPoint*)cvGetSeqElem(results,i);
			CvPoint midPoint = cvPoint((line[0].x + line[1].x)/2, (line[0].y + line[1].y)/2);
			double L11 = (0-center.y + m*(0-center.x + 0.0))/m;
			double L22 = (midPoint.y-center.y + m*(midPoint.x-center.x + 0.0))/m;
			if(L11*L22 > 0)
			{
				leftCenter.x += midPoint.x;
				leftCenter.y += midPoint.y;
				leftSlope += -(line[1].y - line[0].y)/(line[1].x - line[0].x+0.0001);
				leftCount++;
			}
			else
			{
				rightCenter.x += midPoint.x;
				rightCenter.y += midPoint.y;
				rightSlope += -(line[1].y - line[0].y)/(line[1].x - line[0].x+0.0001);
				rightCount++;
			}
		}
		cvReleaseMemStorage(&line_storage);
		leftCenter.x /= leftCount;		leftCenter.y /= leftCount;		leftSlope /= leftCount;
		rightCenter.x /= rightCount;	rightCenter.y /= rightCount;	rightSlope /= rightCount;
		
		CvPoint botCenter = cvPoint(finalimg->width/2, finalimg->height);
		int dL = abs(botCenter.y-leftCenter.y + m * (botCenter.x-leftCenter.x)) / sqrt(m*m + 1);
		int dR = abs(botCenter.y-rightCenter.y + m * (botCenter.x-rightCenter.x)) / sqrt(m*m + 1);
		
		int lw = abs((leftCenter.y - rightCenter.y) + m*(leftCenter.x - rightCenter.x)) / sqrt(m*m + 1);
		lwSum += lw;
		nopf++;
		
		if(lw <= SINGLE_LANE_WIDTH)
		{
			double L11 = (0-leftCenter.y + m*(0-leftCenter.x + 0.0))/m;
			double L22 = (botCenter.y-leftCenter.y + m*(botCenter.x-leftCenter.x + 0.0))/m;
			if(L11*L22 < 0)
				dR = lwSum/nopf - dL;	// Only Left Lane is visible
			else
				dL = lwSum/nopf - dR;	// Only Right Lane is visible
		}
		
		//cvSaveImage("test.jpg", finalimg, 0);

		printf("Bot:\t(%d, %d, %.3f)\n", dL, (finalimg->height)/10, 90.0-angle);
		printf("Target:\t(%d, %d, %.3f)\n", (dL+dR)/2, (finalimg->height)*9/10, 90.0);

		location bot, target;
		bot.x = dL;		bot.y = (finalimg->height)/10;		bot.theta = 90.0-angle;
		target.x = (dL+dR)/2;	target.y = (finalimg->height)*9/10;	target.theta = 90.0;

		cvReleaseImage(&finalimg);

		list *ol = NULL, *cl = NULL;
		elem e,vare;
		e.l = bot;	e.g = 0;	e.h = 0;	e.id = UNDEFINED;

		int n = 15;
		elem* np = loadPosData(n);
	
		while(1)
		{
			cl = append(cl, e);
			//printList(cl);
			if(isNear(e.l, target))
				break;
			ol = update(ol, e, target, np, n);
			//printList(ol);
			e = findMin(ol);
			//printf("Min: (%.3f, %.3f, %.3f, %d)\n", e.l.x, e.l.y, e.l.theta, e.id);
			ol = detach(ol, e);
			//printList(ol);
			//getchar();
		}
		free(np);

		vare = e;
		printf("(%.3f, %.3f, %.3f) : %d\n", vare.l.x, vare.l.y, vare.l.theta, vare.id);
		while(!((abs(vare.l.x-bot.x) < 1.25) && (abs(vare.l.y-bot.y) < 1.25)))
		{
			vare=search(cl,vare.parent.x,vare.parent.y);
			if(vare.id != -1)
			{
				printf("(%.3f, %.3f, %.3f) : %d\n", vare.l.x, vare.l.y, vare.l.theta, vare.id);
				e = vare;
			}
		}
		printf("\n(%.3f, %.3f, %.3f) : %d\n", e.l.x, e.l.y, e.l.theta, e.id);
		//navCommand(10-e.id, e.id);

		releaseList(ol);
		releaseList(cl);
		
		getchar();
		int c = cvWaitKey(0);
		if(c == '4')
		{
			if(ni != 0)
				ni--;
		}
		else if(c == '6')
		{
			if(ni != niX)
				ni++;
		}
	}
}
Exemple #17
0
/**
 * Re-estimate the robot's heading drift from straight field lines in `image`.
 *
 * image           3-channel frame (reduced to the field mask via HSV) or an
 *                 already single-channel image used directly.
 * coordAdjustRate 0: just collect detected world-space segments into `lines`
 *                 2: collect, cluster segments by orientation (mod 90 deg)
 *                    and remember the dominant group (mainAngle/mainGroupId)
 *                 1: match new segments against the dominant group and apply
 *                    the measured orientation change to `ori`
 *
 * Returns false when there were not enough usable lines, true otherwise.
 *
 * Fixes over the previous revision:
 *  - src1 was preallocated with cvCreateImage and then overwritten in both
 *    branches, leaking one image per call;
 *  - cvReleaseImage(&src1) also ran when src1 aliased the caller's `image`
 *    (single-channel input), freeing memory the caller still owns. src1 is
 *    now released only when this function allocated it.
 */
bool Robot::adjustWorldCoordinate(IplImage* image, double coordAdjustRate)
{
    IplImage *img;
    IplImage *src1;
    bool ownsSrc1 = false;      // true when src1 was allocated here and must be released
    if(image->nChannels==3)
    {
        IplImage *hsv_img = get_hsv(image);
        img=worldMap.getField(hsv_img);
        cvReleaseImage(&hsv_img);
        src1=img;
        ownsSrc1 = true;        // getField() allocated img (== src1)
    }
    else
    {
        img=image;
        src1=img;
            //cvCvtColor(img, src1, CV_BGR2GRAY);
    }
		if( img != 0 )
		{
			IplImage* dst = cvCreateImage( cvGetSize(img), 8, 1 );
			IplImage* color_dst = cvCreateImage( cvGetSize(img), 8, 3 );
			CvMemStorage* storage = cvCreateMemStorage(0);
			CvSeq* ls = 0;
			int i;
			cvCanny( src1, dst, 50, 200, 3 );

			cvCvtColor( dst, color_dst, CV_GRAY2BGR );

			ls = cvHoughLines2( dst, storage, CV_HOUGH_PROBABILISTIC, 2, CV_PI/90, 20, 5, 30 );
			//ls = cvHoughLines2( dst, storage, CV_HOUGH_PROBABILISTIC, 5, CV_PI/30, 10, 20, 5 );

			// Convert each segment from screen to world coordinates and keep
			// only segments longer than LINE_LENGTH_LBOUND.
			vector<myLine> tmplines;
			for( i = 0; i < ls->total; i++ )
			{
				CvPoint* tmpl = (CvPoint*)cvGetSeqElem(ls,i);
				cvLine( color_dst, tmpl[0], tmpl[1], CV_RGB(255,0,0), 1, 8 );

                cv::Point2f tmpp[2];
                cv::Point2f scrPos(tmpl[0].x,tmpl[0].y);
                cv::Point2f roboPos=worldMap.coord_screen2robot(scrPos,true);
                cv::Point2f worldPos=worldMap.coord_robot2world(roboPos);
                tmpp[0]=worldPos;
                scrPos=cv::Point2f(tmpl[1].x,tmpl[1].y);
                roboPos=worldMap.coord_screen2robot(scrPos,true);
                worldPos=worldMap.coord_robot2world(roboPos);
                tmpp[1]=worldPos;
                myLine templ(tmpp[0],tmpp[1]);
                if(templ.l>LINE_LENGTH_LBOUND)
                    tmplines.push_back(templ);
			}
			cvNamedWindow( "Source", 1 );
			cvShowImage( "Source", img );

			cvNamedWindow( "Hough", 1 );
			cvShowImage( "Hough", color_dst );

			cvWaitKey(10);
			cvReleaseImage(&dst);
			if (ownsSrc1)
				cvReleaseImage(&src1);	// src1 aliases img here; never free the caller's image
			cvReleaseImage(&color_dst);
			cvReleaseMemStorage(&storage);
			if(coordAdjustRate==0)
			{
                for(i=0;i<tmplines.size();++i)
                {
                    lines.push_back(tmplines[i]);
                }
			}
			else if(coordAdjustRate==2)
			{
                for(i=0;i<tmplines.size();++i)
                {
                    lines.push_back(tmplines[i]);
                }
                // Cluster all stored lines into orientation groups (two
                // phases per group, 90 degrees apart); track per-cluster
                // segment counts and total lengths.
                vector<int> lineNums;
                vector<double> lineValues;
                int groupId=0;
			    for(i=0;i<lines.size();++i)
			    {
			        bool classified=false;
			        int j;
			        for(j=0;j<i;++j)
			        {
			            double angle=lines[i].theta-lines[j].theta+CV_PI/4.0;   //to make the process simple, add 45 degree
                                                                                //to turn the cared angles to the middle of a phase
			            if(angle<0)
                            angle+=CV_PI*2.0;
			            int phase=(int)(angle/(CV_PI/2.0));
			            double angle90=angle-CV_PI/2.0*(double)phase;
			            phase%=2;
			            if(abs(angle90-CV_PI/4.0)<CV_PI/60.0)//subtract the added 45 degree
			            {
			                lines[i].clsId=lines[j].clsId/2*2+phase;
			                ++lineNums[lines[i].clsId];
			                lineValues[lines[i].clsId]+=lines[i].l;
			                classified=true;
			                break;
			            }
			        }
			        if(classified==false)
			        {
			            lines[i].clsId=groupId;
                        lineNums.push_back(1);
                        lineNums.push_back(0);
                        lineValues.push_back(lines[i].l);
                        lineValues.push_back(0);
			            groupId+=2;
			        }
			    }
			    // Pick the group with the greatest total line length as the
			    // dominant field-line direction.
			    int maxValueGroup=0;
			    double maxValue=0;
			    for(i=0;i<lineNums.size();i+=2)
			    {
			        if(lineValues[i]+lineValues[i+1]>maxValue)
			        {
			            maxValue=lineValues[i]+lineValues[i+1];
			            maxValueGroup=i;
			        }
			    }
			    maxValueGroup/=2;
			    // Length-weighted mean orientation of the dominant group.
			    double sumAngle=0;
			    double sumL=0;
			    for(i=0;i<lines.size();++i)
			    {
			        if(lines[i].clsId/2==maxValueGroup)
			        {
			            double angle=lines[i].theta+CV_PI/4.0;//similar strategy, add 45 degree
			            if(angle<0)
                            angle+=CV_PI*2.0;
			            double angle90=angle-CV_PI/2.0*(double)((int)(angle/(CV_PI/2.0)));
			            sumAngle+=(angle90-CV_PI/4.0)*lines[i].l;//subtract 45 degree
			            sumL+=lines[i].l;
			        }
			    }
			    if(sumL==0)
			    {
			        return false;
			    }
			    mainAngle=sumAngle/sumL;
			    mainGroupId=maxValueGroup;
			}
			else if(coordAdjustRate==1)
            {
                CvRect bBox=worldMap.getMap_bbox();
                // Assign each new segment (inside the map bounding box) to the
                // dominant group by closest orientation.
                for(i=0;i<tmplines.size();++i)
                {
                    cv::Point2f imgPos=world2image(tmplines[i].p[0]);
                    if(!(imgPos.x>bBox.x-BBOX_DELTA && imgPos.x<bBox.x+bBox.width+BBOX_DELTA && imgPos.y>bBox.y-BBOX_DELTA && imgPos.y<bBox.y+bBox.height+BBOX_DELTA))
                        continue;
			        bool classified=false;
			        double minAngle=CV_PI;
			        int minAnglePhase=0;
			        int bestJ=-1;
			        int j;
			        for(j=0;j<lines.size();++j)
			        {
			            if(lines[j].clsId/2!=mainGroupId)
                            continue;
			            double angle=tmplines[i].theta-lines[j].theta+CV_PI/4.0;   //to make the process simple, add 45 degree
                                                                                //to turn the cared angles to the middle of a phase
			            if(angle<0)
                            angle+=CV_PI*2.0;
			            int phase=(int)(angle/(CV_PI/2.0));
			            double angle90=angle-CV_PI/2.0*(double)phase;
			            phase%=2;
			            if(abs(angle90-CV_PI/4.0)<minAngle)//subtract the added 45 degree
			            {
			                minAngle=abs(angle90-CV_PI/4.0);
			                bestJ=j;
                                minAnglePhase=phase;
			            }
			        }
			        if(bestJ>-1)
			        {
                        tmplines[i].clsId=mainGroupId*2+minAnglePhase;
                        classified=true;
			        }
			    }
			    // Length-weighted mean orientation of the matched segments;
			    // the difference to mainAngle is the heading correction.
			    double sumAngle=0;
			    double sumL=0;
			    for(i=0;i<tmplines.size();++i)
			    {
			        if(tmplines[i].clsId/2==mainGroupId)
			        {
			            double angle=tmplines[i].theta+CV_PI/4.0;//similar strategy, add 45 degree
			            if(angle<0)
                            angle+=CV_PI*2.0;
			            double angle90=angle-CV_PI/2.0*double((int)(angle/(CV_PI/2.0)));
			            sumAngle+=angle90*tmplines[i].l;//use the 45 degree to balance the unwanted lines
			            sumL+=tmplines[i].l;
			        }
			    }
			    if(sumL<LINE_LENGTH_SUM_LBOUND)
			    {
			        return false;
			    }
			    double curAngle=sumAngle/sumL-CV_PI/4.0;//subtract 45 degree
			    ori+=curAngle-mainAngle;
            }
		}

    return true;
}
Exemple #18
0
//--------------------------------HOUGHLINES---------------------------------
/**
 * Detects straight lines in @p img with the standard Hough transform and
 * displays them for one second in the "HoughLinesShow" window.
 *
 * Pipeline: BGR -> gray -> Canny -> dilate -> standard Hough -> draw.
 *
 * @param img  3-channel color input image; not modified.
 */
void sendtoHoughLines(IplImage * img)
{
	/* Grayscale working copy of the input. */
	IplImage* src = cvCreateImage(cvGetSize(img), 8, 1);
	cvCvtColor(img, src, CV_RGB2GRAY);

	/* Edge map produced by the Canny detector. */
	IplImage* dst = cvCreateImage(cvGetSize(src), 8, 1);

	/* Color copy of the edge map so detected lines can be drawn in red. */
	IplImage* color_dst = cvCreateImage(cvGetSize(src), 8, 3);

	/* Storage pool for the line sequence returned by cvHoughLines2. */
	CvMemStorage* storage = cvCreateMemStorage(0);
	CvSeq* lines = 0;
	int i;

	cvCanny(src, dst, 30, 90, 3);

	/* Thicken edges slightly so broken contours still accumulate votes. */
	cvDilate(dst, dst, 0, 1);

	cvCvtColor(dst, color_dst, CV_GRAY2BGR);

	/* Standard Hough transform: each result element is (rho, theta). */
	lines = cvHoughLines2(dst, storage,
			CV_HOUGH_STANDARD, 1, CV_PI/180, 30, 0, 0);
	for (i = 0; i < MIN(lines->total, 100); i++)
	{
		float* line = (float*)cvGetSeqElem(lines, i);
		float rho = line[0];
		float theta = line[1];

		printf("theta = %f", (theta*180/3.142));

		/* Convert (rho, theta) into two far-apart points on the line. */
		CvPoint pt1, pt2;
		double a = cos(theta), b = sin(theta);
		double x0 = a*rho, y0 = b*rho;
		printf("a= %f  b=%f  x0=%f  y0=%f roh=%f\n", a, b, x0, y0, rho);
		pt1.x = cvRound(x0 + 1000*(-b));
		pt1.y = cvRound(y0 + 1000*(a));
		pt2.x = cvRound(x0 - 1000*(-b));
		pt2.y = cvRound(y0 - 1000*(a));
		printf("    x1 = %d, y1 = %d", pt1.x, pt1.y);
		printf("    x2 = %d, y2 = %d\n\n", pt2.x, pt2.y);

		//if((theta*180/3.142) < 100 && (theta*180/3.142) > 79 )
		cvLine(color_dst, pt1, pt2, CV_RGB(255,0,0), 3, 8);
	}
	cvNamedWindow("HoughLinesShow", 1);
	cvShowImage("HoughLinesShow", color_dst);
	cvWaitKey(1000);

	/* Fix: release the intermediate images and the storage pool -- the
	 * original leaked all four allocations on every call. */
	cvReleaseMemStorage(&storage);
	cvReleaseImage(&src);
	cvReleaseImage(&dst);
	cvReleaseImage(&color_dst);
}
Exemple #19
0
// Locates the four outer boundary lines of a puzzle grid in `in` and
// returns the four corner points as a malloc'd array of 4 CvPoint2D32f
// (caller must free) ordered top-left, top-right, bottom-right,
// bottom-left, or NULL when the bounding lines cannot be found.
// *annotated receives a clone of `in` (caller releases) with the
// detected lines and corners drawn on it.
const CvPoint2D32f* locate_puzzle(IplImage *in, IplImage **annotated) {
    IplImage *grid_image = _grid(in);

    *annotated = cvCloneImage(in);

    // find line segments using the probabilistic Hough transform
    CvMemStorage* storage = cvCreateMemStorage(0);
    CvSeq* lines = 0;

    double distance_resolution = 1;
    double angle_resolution    = CV_PI / 60;
    int threshold              = 60;
    int minimum_line_length    = in->width / 2;
    int maximum_join_gap       = in->width / 10;
    lines = cvHoughLines2(grid_image, storage, CV_HOUGH_PROBABILISTIC,  distance_resolution, angle_resolution, threshold, minimum_line_length, maximum_join_gap);

    cvCvtColor(grid_image, *annotated, CV_GRAY2RGB);

    cvReleaseImage(&grid_image);

    // First pass: the smallest |dy/dx| defines the "horizontal" direction.
    double most_horizontal = INFINITY;
    for (int i = 0; i < lines->total; ++i) {
        CvPoint *line = (CvPoint*)cvGetSeqElem(lines,i);
        double dx     = abs(line[1].x - line[0].x);
        double dy     = abs(line[1].y - line[0].y);

        double slope = INFINITY;
        if (dx != 0) {
            slope = dy / dx;
        }
        if (slope != INFINITY && slope < most_horizontal) {
            most_horizontal = slope;
        }
    }

    // Second pass: classify each segment as horizontal-ish or vertical-ish
    // and remember the extreme segment for each side of the puzzle.
    int top    = -1;
    int left   = -1;
    int bottom = -1;
    int right  = -1;
    for (int i = 0; i < lines->total; i++) {
        CvPoint* line = (CvPoint*)cvGetSeqElem(lines,i);
        double dx     = abs(line[1].x - line[0].x);
        double dy     = abs(line[1].y - line[0].y);
        double slope  = INFINITY;
        if (dx) {
            slope = dy / dx;
        }

        cvLine(*annotated, line[0], line[1], CV_RGB(255, 0, 0), 1, 8, 0);
        // Fix: compare with fabs() -- the integer abs() truncated the
        // difference to int and is undefined for INFINITY arguments.
        if (fabs(slope - most_horizontal) <= 1) {
            if ((top == -1) || (line[1].y < ((CvPoint*)cvGetSeqElem(lines,top))[1].y)) {
                top = i;
            }
            if ((bottom == -1) || (line[1].y > ((CvPoint*)cvGetSeqElem(lines,bottom))[1].y)) {
                bottom = i;
            }
        } else {
            if ((left == -1) || (line[1].x < ((CvPoint*)cvGetSeqElem(lines,left))[1].x)) {
                left = i;
            }
            if ((right == -1) || (line[1].x > ((CvPoint*)cvGetSeqElem(lines,right))[1].x)) {
                right = i;
            }
        }
    }
    if ((top == -1) || (left == -1) || (bottom == -1) || (right == -1)) {
        cvReleaseMemStorage(&storage);  // fix: was leaked on this path
        return NULL;
    }

    // Highlight the four chosen boundary lines in distinct colors.
    CvPoint *top_line    = (CvPoint*)cvGetSeqElem(lines,top);
    cvLine(*annotated, top_line[0], top_line[1], CV_RGB(0, 0, 255), 6, 8, 0);

    CvPoint *bottom_line = (CvPoint*)cvGetSeqElem(lines,bottom);
    cvLine(*annotated, bottom_line[0], bottom_line[1], CV_RGB(0, 255, 255), 6, 8, 0);

    CvPoint *left_line   = (CvPoint*)cvGetSeqElem(lines,left);
    cvLine(*annotated, left_line[0], left_line[1], CV_RGB(0, 255, 0), 6, 8, 0);

    CvPoint *right_line  = (CvPoint*)cvGetSeqElem(lines,right);
    cvLine(*annotated, right_line[0], right_line[1], CV_RGB(255, 255, 0), 6, 8, 0);

    // Corner array handed back to the caller (ownership transfers).
    CvPoint2D32f *coordinates = malloc(sizeof(CvPoint2D32f) * 4);
    if (coordinates == NULL) {          // fix: malloc result was unchecked
        cvReleaseMemStorage(&storage);
        return NULL;
    }

    // top left
    intersect(top_line, left_line, &(coordinates[0]));
    cvLine(*annotated, cvPointFrom32f(coordinates[0]), cvPointFrom32f(coordinates[0]), CV_RGB(255, 255, 0), 10, 8, 0);

    // top right
    intersect(top_line, right_line, &(coordinates[1]));
    cvLine(*annotated, cvPointFrom32f(coordinates[1]), cvPointFrom32f(coordinates[1]), CV_RGB(255, 255, 0), 10, 8, 0);

    // bottom right
    intersect(bottom_line, right_line, &(coordinates[2]));
    cvLine(*annotated, cvPointFrom32f(coordinates[2]), cvPointFrom32f(coordinates[2]), CV_RGB(255, 255, 0), 10, 8, 0);

    // bottom left
    intersect(bottom_line, left_line, &(coordinates[3]));
    cvLine(*annotated, cvPointFrom32f(coordinates[3]), cvPointFrom32f(coordinates[3]), CV_RGB(255, 255, 0), 10, 8, 0);

    // fix: release the Hough line storage (all line data has been
    // consumed above), previously leaked on every call.
    cvReleaseMemStorage(&storage);

    return coordinates;
}
Exemple #20
0
/*
 * Goal-line detection demo.
 *
 * Pipeline: flood-fill from the image centre (seeded with the local 5x5
 * average colour), convert to grayscale, threshold, Canny, probabilistic
 * Hough; the detected lines are drawn in green on the input image.
 * Expects the image path as argv[1].
 */
int main( int argc, char** argv ) {

    if (argc>=2){
        //Load image
        IplImage*src = cvLoadImage( argv[1]);
        if (!src){
            //Fix: the original dereferenced a NULL image on load failure
            printf("Could not load image %s\n", argv[1]);
            return -1;
        }

        //Create Windows and Position
        cvNamedWindow("Input",0);
        cvResizeWindow("Input",500,350);
        cvMoveWindow("Input", 0, 0);

        cvNamedWindow("Output",0);
        cvResizeWindow("Output",500,350);
        cvMoveWindow("Output", 0, 600);

        cvNamedWindow( "Hough", 0 );
        cvResizeWindow("Hough",500,350);
        cvMoveWindow("Hough",700,0);

        cvNamedWindow( "FloodFill", 0 );
        cvResizeWindow("FloodFill",500,350);
        cvMoveWindow("FloodFill",700,600);

        //Display Original Image
        cvShowImage( "Input", src );
        IplImage*srcCopy = cvCloneImage(src);

        //Flood Fill from the centre; seed colour is the 5x5 average there
        CvPoint seedPoint= cvPoint((srcCopy->width)/2,(srcCopy->height)/2);
        CvScalar pixColor=avgpixelcolor(srcCopy,seedPoint,5); //Takes avg pixel color value (5x5 grid)
        cvLine( srcCopy, cvPoint(seedPoint.x,srcCopy->height*.9), cvPoint(seedPoint.x,srcCopy->height*.1), pixColor, 3, 8 );
        cvFloodFill(srcCopy,seedPoint,cvScalarAll(255),cvScalarAll(50),cvScalarAll(50),NULL,8|CV_FLOODFILL_FIXED_RANGE, NULL);
        cvShowImage("FloodFill",srcCopy);

        //Convert to Grayscale
        IplImage*dst = cvCreateImage( cvGetSize(src), IPL_DEPTH_8U, 1 );
        cvCvtColor( srcCopy, dst , CV_BGR2GRAY );

        //Display Flood Fill Results
        cvCircle(srcCopy,seedPoint,5,cvScalarAll(0),3,8,0);

        //Threshold
        IplImage*thresh = cvCreateImage(cvGetSize(dst), IPL_DEPTH_8U,1);
        cvThreshold(dst,thresh,
                    230,        //Set Manually during Initialization
                    255,        //Max Pixel Intensity Value (Do not Change)
                    CV_THRESH_TOZERO
                    );

        //Canny Edge Detection
        cvCanny( thresh, dst,
                 0,    //Low Threshold
                 255,   //High Threshold
                 3
                 );

        //Storage for Hough Line Endpoints
        CvMemStorage* storage = cvCreateMemStorage(0);

        //Hough
        CvSeq* lines = cvHoughLines2( dst,storage,CV_HOUGH_PROBABILISTIC,
                                      1,               //rho
                                      1*CV_PI/180,     //theta
                                      150,             //Accumulator threshold
                                      500,             //Min Line Length
                                      200              //Min Colinear Separation
                                      );

        //Draw Vertical Lines on src image
        for(int i = 0; i < lines->total; i++ )
        {
            CvPoint* Point = (CvPoint*)cvGetSeqElem(lines,i);
            cvLine( src, Point[0], Point[1], CV_RGB(0,220,20), 3, 8 );

            //Slope for the planned horizontal-line rejection.
            //Fix: the original used integer division and divided by
            //zero on vertical segments.
            int dx = Point[0].x - Point[1].x;
            float slope = dx ? (float)(Point[0].y - Point[1].y) / (float)dx
                             : INFINITY;
            (void)slope;    //TODO: actually reject horizontal lines
        }

        //Create a Trapazodal Mask

        //Detect Horizontal Lines

        //Isolate 4 points

        //Display Image
        cvShowImage( "Output", src);

        //For Calibration Purposes "what the Hough transform sees"
        cvShowImage( "Hough", dst );

        //Wait for User 10sec
        cvWaitKey(10000);

        //Deallocate Memory (fix: the original leaked srcCopy and
        //storage, and never destroyed the FloodFill window)
        cvReleaseMemStorage( &storage );
        cvReleaseImage( &src );
        cvReleaseImage( &srcCopy );
        cvReleaseImage( &dst );
        cvReleaseImage( &thresh );
        cvDestroyWindow( "Input" );
        cvDestroyWindow( "Output" );
        cvDestroyWindow("Hough");
        cvDestroyWindow("FloodFill");

    }
    else{
        printf("Hough Transform Code Requires an input image path\n");
        return 0;
    }
    return 0;
}
unsigned long Test()
{
	bool visualize = true;
	std::string RangeWindowName = "Range";
	std::string HoughWindowName = "Hough";

	IplImage* visualizationReferenceImage = cvLoadImage("Pictures/building.jpg");
	CvMemStorage* storage = cvCreateMemStorage(0);	/// Line endings storage
	CvSeq* lines = 0;
	int AngleBins = 45;	/// Controls angle resolution of Hough trafo (bins per pi)
	IplImage* Image = cvCreateImage(cvGetSize(visualizationReferenceImage), IPL_DEPTH_8U, 3);	/// Visualization image
	cvCopyImage(visualizationReferenceImage,Image);
	IplImage* GrayImage = cvCreateImage(cvGetSize(Image), IPL_DEPTH_8U, 1);
	cvCvtColor(Image, GrayImage, CV_RGB2GRAY);
	IplImage* CannyImage = cvCreateImage(cvGetSize(Image), IPL_DEPTH_8U, 1);	/// Edge image
	cvCanny(GrayImage, CannyImage, 25, 50);
	CvPoint ROIp1 = cvPoint(100,10);		/// Tablet ROI
	CvPoint ROIp2 = cvPoint(visualizationReferenceImage->width-40+ROIp1.x,visualizationReferenceImage->height-200+ROIp1.y);
	cvSetImageROI(CannyImage,cvRect(ROIp1.x,ROIp1.y,ROIp2.x-ROIp1.x,ROIp2.y-ROIp1.y));
	cvRectangle(Image, ROIp1, ROIp2, CV_RGB(0,255,0));
	//int maxd = cvRound(sqrt(sqrt((double)ROIp2.x-ROIp1.x)+sqrt((double)ROIp2.y-ROIp1.y)));	/// Maximum of possible distance value in Hough space
	int maxd = cvRound(sqrt((double)(((ROIp2.x-ROIp1.x)*(ROIp2.x-ROIp1.x))+((ROIp2.y-ROIp1.y)*(ROIp2.y-ROIp1.y)))));	/// Maximum of possible distance value in Hough space
	IplImage* HoughSpace = cvCreateImage(cvSize(maxd,AngleBins+1),IPL_DEPTH_8U, 1);		/// Hough space image (black=no line, white=there are lines at these bins)
	cvZero(HoughSpace);
	
	/// Hough transformation
	int AccumulatorThreshold = 100;		/// Threshold parameter. A line is returned by the function if the corresponding accumulator value is greater than threshold.
	double MinimumLineLength = 50;		/// For probabilistic Hough transform it is the minimum line length.
	double MaximumGap = 4;				/// For probabilistic Hough transform it is the maximum gap between line segments lieing on the same line to treat them as the single line segment (i.e. to join them).
	lines = cvHoughLines2(CannyImage, storage, CV_HOUGH_PROBABILISTIC, 1, CV_PI/AngleBins, AccumulatorThreshold, MinimumLineLength, MaximumGap);
	
	for(int i = 0; i < lines->total; i++ )
	{
		CvPoint* line = (CvPoint*)cvGetSeqElem(lines,i);
		/// Endings of a line
		CvPoint p0 = cvPoint(line[0].x+ROIp1.x,line[0].y+ROIp1.y);
		CvPoint p1 = cvPoint(line[1].x+ROIp1.x,line[1].y+ROIp1.y);
		cvLine(Image, p0, p1, CV_RGB(255,0,0), 3, 8 );
		
		/// Slope/angle of line
		double phi = CV_PI/2;
		if(p0.x != p1.x) phi = atan((double)(p1.y-p0.y)/(double)(p1.x-p0.x));
		phi += (phi < 0)*CV_PI;

		/// Hessian normal form parameters: d = x*cos(alpha) + y*sin(alpha)
		/// with alpha in [0...pi], d in [0...maxd]
		double alpha = phi+CV_PI/2;
		alpha -= (alpha > CV_PI)*CV_PI;

		double d = p0.x;
		if(p0.x != p1.x)
		{
			double n = p1.y - (p1.y-p0.y)/(p1.x-p0.x) * p1.x;
			d = abs(n * cos(phi));
		}

		/// Write Line into Hough space
		cvLine(HoughSpace, cvPoint(cvRound(d),cvRound(alpha/CV_PI*AngleBins)),cvPoint(cvRound(d),cvRound(alpha/CV_PI*AngleBins)),CV_RGB(255,255,255));
	}
	if(visualize)
	{
		cvNamedWindow(RangeWindowName.c_str());
		cvNamedWindow(HoughWindowName.c_str());
		cvShowImage(RangeWindowName.c_str(), Image);
		cvShowImage(HoughWindowName.c_str(), HoughSpace);
		cvWaitKey(0);
	}
	cvCopyImage(Image,visualizationReferenceImage);

	cvReleaseImage(&GrayImage);
	cvReleaseImage(&CannyImage);

	/*
	IplImage* img1 = cvLoadImage("Cob3.jpg");
	IplImage* img2 = cvCreateImage(cvGetSize(img1),img1->depth,1);
	IplImage* img3 = cvCreateImage(cvGetSize(img1),img1->depth,1);
	IplImage* img4 = cvCreateImage(cvGetSize(img1),img1->depth,1);
	cvNamedWindow("Img1");
	cvNamedWindow("Img2");
	cvNamedWindow("Img3");
	cvCvtColor(img1, img2, CV_RGB2GRAY);
	cvCanny(img2, img3, 100, 200);
	cvShowImage("Img1", img1);
	cvShowImage("Img2", img2);
	cvShowImage("Img3", img3);
	cvWaitKey(0);
	cvReleaseImage(&img1);
	cvReleaseImage(&img2);
	cvReleaseImage(&img3);
	cvDestroyAllWindows();

	/*
	IplImage* Img1;
	IplImage* Img2;
	IplImage* Img3;

	cvNamedWindow("Img");
	while (cvGetWindowHandle("Img"))
	{
		if(cvWaitKey(10)=='q') break;

		/// Uncomment when using <code>GetColorImage</code> instead of <code>GetColorImage2</code>
	    //ColorImage = cvCreateImage(cvSize(1388,1038),IPL_DEPTH_8U,3);
		if (colorCamera->GetColorImage2(&Img1) == libCameraSensors::RET_FAILED)
		//if (colorCamera->GetColorImage(ColorImage, true) == libCameraSensors::RET_FAILED)
		{
			std::cerr << "TestCameraSensors: Color image acquisition failed\n";
			getchar();
			return ipa_utils::RET_FAILED;
		}

		if (colorCamera->GetColorImage2(&Img2) == libCameraSensors::RET_FAILED)
		{
			std::cerr << "TestCameraSensors: Color image acquisition failed\n";
			getchar();
			return ipa_utils::RET_FAILED;
		}

		Img3 = cvCreateImage(cvGetSize(Img1),Img1->depth,Img1->nChannels);
		cvSub(Img1, Img2, Img3);
		cvShowImage("Img", Img3);

		cvReleaseImage(&Img1);
		cvReleaseImage(&Img2);
		cvReleaseImage(&Img3);
	}*/

	return ipa_utils::RET_OK;
}
int main()
{
	// Load the image we'll work on
	IplImage* img = cvLoadImage("C:\\goal_arena.jpg");
	CvSize imgSize = cvGetSize(img);

	// This will hold the white parts of the image
	IplImage* detected = cvCreateImage(imgSize, 8, 1);

	// These hold the three channels of the loaded image
	IplImage* imgBlue = cvCreateImage(imgSize, 8, 1);
	IplImage* imgGreen = cvCreateImage(imgSize, 8, 1);
	IplImage* imgRed = cvCreateImage(imgSize, 8, 1);
	cvSplit(img, imgBlue, imgGreen, imgRed, NULL);

	// Extract white parts into detected
	cvAnd(imgGreen, imgBlue, detected);
	cvAnd(detected, imgRed, detected);

	// Morphological opening
	cvErode(detected, detected);
	cvDilate(detected, detected);

	// Thresholding (I knew you wouldn't catch this one... so i wrote a comment here
	// I mean the command can be so decieving at times)
	cvThreshold(detected, detected, 100, 250, CV_THRESH_BINARY);

	// Do the hough thingy
	CvMat* lines = cvCreateMat(100, 1, CV_32FC2);
	cvHoughLines2(detected, lines, CV_HOUGH_STANDARD, 1, 0.001, 100);
	
	// The two endpoints for each boundary line
	CvPoint left1 = cvPoint(0, 0);
	CvPoint left2 = cvPoint(0, 0);
	CvPoint right1 = cvPoint(0, 0);
	CvPoint right2 = cvPoint(0, 0);
	CvPoint top1 = cvPoint(0, 0);
	CvPoint top2 = cvPoint(0, 0);
	CvPoint bottom1 = cvPoint(0, 0);
	CvPoint bottom2 = cvPoint(0, 0);

	// Some numbers we're interested in
	int numLines = lines->rows;
	int numTop = 0;
	int numBottom = 0;
	int numLeft = 0;
	int numRight = 0;

	// Iterate through each line
	for(int i=0;i<numLines;i++)
	{
		// Get the parameters for the current line
		CvScalar dat = cvGet1D(lines, i);
		double rho = dat.val[0];
		double theta = dat.val[1];
		
		if(theta==0.0)
		{
			// This is an obviously vertical line... and we can't approximate it... NEXT
			continue;
		}

		// Convert from radians to degrees
		double degrees = theta*180/(3.1412);
		
		// Generate two points on this line (one at x=0 and one at x=image's width)
		CvPoint pt1 = cvPoint(0, rho/sin(theta));
		CvPoint pt2 = cvPoint(img->width, (-img->width/tan(theta)) + rho/sin(theta));
		
		if(abs(rho)<50)		// Top + left
		{
			if(degrees>45 && degrees<135)	// Top
			{
				numTop++;

				// The line is horizontal and near the top
				top1.x+=pt1.x;
				top1.y+=pt1.y;
			
				top2.x+=pt2.x;
				top2.y+=pt2.y;
			}
			else	// left
			{
				numLeft++;

				//The line is vertical and near the left
				left1.x+=pt1.x;
				left1.y+=pt1.y;
			
				left2.x+=pt2.x;
				left2.y+=pt2.y;
			}
		}
		else // bottom+right
		{
			if(degrees>45 && degrees<135)	// Bottom
			{
				numBottom++;

				//The line is horizontal and near the bottom
				bottom1.x+=pt1.x;
				bottom1.y+=pt1.y;
			
				bottom2.x+=pt2.x;
				bottom2.y+=pt2.y;
			}
			else	// Right
			{
				numRight++;

				// The line is vertical and near the right
				right1.x+=pt1.x;
				right1.y+=pt1.y;
				
				right2.x+=pt2.x;
				right2.y+=pt2.y;
			}
		}
	}

	// we've done the adding... now the dividing to get the "averaged" point
	left1.x/=numLeft;
	left1.y/=numLeft;
	left2.x/=numLeft;
	left2.y/=numLeft;

	right1.x/=numRight;
	right1.y/=numRight;
	right2.x/=numRight;
	right2.y/=numRight;

	top1.x/=numTop;
	top1.y/=numTop;
	top2.x/=numTop;
	top2.y/=numTop;

	bottom1.x/=numBottom;
	bottom1.y/=numBottom;
	bottom2.x/=numBottom;
	bottom2.y/=numBottom;

	// Render these lines onto the image
	cvLine(img, left1, left2, CV_RGB(255, 0,0), 1);
	cvLine(img, right1, right2, CV_RGB(255, 0,0), 1);
	cvLine(img, top1, top2, CV_RGB(255, 0,0), 1);
	cvLine(img, bottom1, bottom2, CV_RGB(255, 0,0), 1);

	// Next, we need to figure out the four intersection points
	double leftA = left2.y-left1.y;
	double leftB = left1.x-left2.x;
	double leftC = leftA*left1.x + leftB*left1.y;

	double rightA = right2.y-right1.y;
	double rightB = right1.x-right2.x;
	double rightC = rightA*right1.x + rightB*right1.y;

	double topA = top2.y-top1.y;
	double topB = top1.x-top2.x;
	double topC = topA*top1.x + topB*top1.y;

	double bottomA = bottom2.y-bottom1.y;
	double bottomB = bottom1.x-bottom2.x;
	double bottomC = bottomA*bottom1.x + bottomB*bottom1.y;

	// Intersection of left and top
	double detTopLeft = leftA*topB - leftB*topA;
	CvPoint ptTopLeft = cvPoint((topB*leftC - leftB*topC)/detTopLeft, (leftA*topC - topA*leftC)/detTopLeft);

	// Intersection of top and right
	double detTopRight = rightA*topB - rightB*topA;
	CvPoint ptTopRight = cvPoint((topB*rightC-rightB*topC)/detTopRight, (rightA*topC-topA*rightC)/detTopRight);

	// Intersection of right and bottom
	double detBottomRight = rightA*bottomB - rightB*bottomA;
	CvPoint ptBottomRight = cvPoint((bottomB*rightC-rightB*bottomC)/detBottomRight, (rightA*bottomC-bottomA*rightC)/detBottomRight);

	// Intersection of bottom and left
	double detBottomLeft = leftA*bottomB-leftB*bottomA;
	CvPoint ptBottomLeft = cvPoint((bottomB*leftC-leftB*bottomC)/detBottomLeft, (leftA*bottomC-bottomA*leftC)/detBottomLeft);

	// Render the points onto the image
	cvLine(img, ptTopLeft, ptTopLeft, CV_RGB(0,255,0), 5);
	cvLine(img, ptTopRight, ptTopRight, CV_RGB(0,255,0), 5);
	cvLine(img, ptBottomRight, ptBottomRight, CV_RGB(0,255,0), 5);
	cvLine(img, ptBottomLeft, ptBottomLeft, CV_RGB(0,255,0), 5);

	// Initialize a mask
	IplImage* imgMask = cvCreateImage(imgSize, 8, 3);
	cvZero(imgMask);

	// Generate the mask
	CvPoint* pts = new CvPoint[4];
	pts[0] = ptTopLeft;
	pts[1] = ptTopRight;
	pts[2] = ptBottomRight;
	pts[3] = ptBottomLeft;
	cvFillConvexPoly(imgMask, pts, 4, cvScalar(255,255,255));

	// Delete anything thats outside the mask
	cvAnd(img, imgMask, img);

	// Show all images in windows
	cvNamedWindow("Original");
	cvNamedWindow("Detected");

	cvShowImage("Original", img);
	cvShowImage("Detected", detected);

	cvWaitKey(0);

	return 0;
}
Exemple #23
0
int main(int argc, char* argv[])
{
	IplImage* src = 0;
	IplImage* dst = 0;
	IplImage* color_dst = 0;

	char* filename = argc >= 2 ? argv[1] : "Image0.jpg";
	//get gray image
	src = cvLoadImage(filename, CV_LOAD_IMAGE_GRAYSCALE);

	if (!src){
		printf("[!] Error: cant load image: %s \n", filename);
		return -1;
	}

	printf("[i] image: %s\n", filename);


	//array for lines
	CvMemStorage* storage = cvCreateMemStorage(0);
	CvSeq* lines = 0;
	int i, p, i1, i2, x, y;
	double angle1, angle2, phi1, phi2, tetta, b1, b2, k1, k2;
	//phi - starting(worst) result, angle - needed result
	phi1 = 0;
	phi2 = CV_PI / 2;
	angle1 = CV_PI / 2;
	angle2 = 0;

	dst = cvCreateImage(cvGetSize(src), 8, 1);
	color_dst = cvCreateImage(cvGetSize(src), 8, 3);

	//Canny edge detector
	cvCanny(src, dst, 50, 70, 3);

	//convert into color picture
	cvCvtColor(dst, color_dst, CV_GRAY2BGR);

	//finding lines
	lines = cvHoughLines2(dst, storage, CV_HOUGH_PROBABILISTIC, 1, CV_PI / 180, 100, 50, 10);

	//Finding needed lines (by angle between axis 0x and this line). For finding the point it's necessary to find 2 two out of 3 lines
	for (i = 0; i < lines->total; i++)
	{
		CvPoint* line = (CvPoint*)cvGetSeqElem(lines, i);
		//check for correct finding tg
		if (line[0].x >= line[1].x)
		{
			p = line[0].x;
			line[0].x = line[1].x;
			line[1].x = p;
			p = line[0].y;
			line[0].y = line[1].y;
			line[1].y = p;
		}
		tetta = atan((double)(line[1].y - line[0].y) / (double)(line[1].x - line[0].x));
		//first line
		if (abs(angle1 - abs(tetta)) < abs(angle1 - abs(phi1)))
		{
			phi1 = tetta;
			i1 = i;
		}
		//second line
		if (abs(angle2 - abs(tetta)) < abs(angle2 - abs(phi2)))
		{
			//abs(phi2-tetta) <= check threshold between old line and new one
			if ((abs(phi2-tetta)<=0.25) && (i != 0))
			{
				CvPoint* lastline = (CvPoint*)cvGetSeqElem(lines, i2);
				//needed lower of lines with similar angle
				if (lastline[0].y < line[0].y)
				{
					i2 = i;
					phi2 = tetta;
				}
			}
			else
			{
				i2 = i;
				phi2 = tetta;
			}
		}
	}

	CvPoint* line1 = (CvPoint*)cvGetSeqElem(lines, i1);
	CvPoint* line2 = (CvPoint*)cvGetSeqElem(lines, i2);

	//Building lines
	cvLine(color_dst, line1[0], line1[1], CV_RGB(255, 0, 0), 1, CV_AA, 0);
	cvLine(color_dst, line2[0], line2[1], CV_RGB(255, 0, 0), 1, CV_AA, 0);

	//Finding and building point
	k1 = tan(phi1);
	k2 = tan(phi2);
	b1 = line1[0].y - k1*line1[0].x;
	b2 = line2[0].y - k2*line2[0].x;
	x = (b2 - b1) / (k1 - k2);
	y = k1*x + b1;
	CvPoint* point = new CvPoint();
	point[0].x = x;
	point[1].x = x;
	point[0].y = y;
	point[1].y = y;
	cvLine(color_dst, point[0], point[1], CV_RGB(0, 255, 0), 5, CV_AA, 0);

	//Showing
	cvNamedWindow("Source", 1);
	cvShowImage("Source", src);

	cvNamedWindow("Point", 1);
	cvShowImage("Point", color_dst);

	//Waiting for key
	cvWaitKey(0);

	cvSaveImage("test.jpg", color_dst);

	//Releasing resources
	cvReleaseMemStorage(&storage);
	cvReleaseImage(&src);
	cvReleaseImage(&dst);
	cvReleaseImage(&color_dst);
	cvDestroyAllWindows();
	return 0;
}
/**
 * JNI entry point: detects line segments in the shared image buffer
 * `pImage` (a file-scope IplImage* set up elsewhere in this file) and
 * draws them on it in red.
 *
 * Strategy: convert to HSV, split the planes, binarize the hue plane,
 * and run a probabilistic Hough transform on it.
 *
 * NOTE(review): a Canny edge image is computed from the hue plane but
 * the Hough transform below runs on the thresholded `h` plane itself,
 * not on `cannyImage` -- confirm which input was intended.
 * NOTE(review): thresholding at -50 means every 8-bit pixel passes, so
 * the hue plane becomes uniform -- presumably a debugging value; verify.
 */
JNIEXPORT void JNICALL Java_com_stead_alistair_soundcoder_OpenCV_extractHoughFeature(
		JNIEnv* env, jobject thiz) {
	/* This code works perfectly on RGB but does not detect yellow because it assumes its too light
	  
	IplImage* grayImage = cvCreateImage(cvGetSize(pImage), IPL_DEPTH_8U, 1); 
	IplImage* cannyImage = cvCreateImage(cvGetSize(pImage), IPL_DEPTH_8U, 1); 
	cvCvtColor(pImage, grayImage, CV_BGR2GRAY); 
	cvCanny(grayImage, cannyImage, 20, 30);
	
	//Hough bit
	CvMemStorage* storage = cvCreateMemStorage(0);
	CvSeq* lines = 0;
	lines = cvHoughLines2(cannyImage,
			      storage,
			      CV_HOUGH_PROBABILISTIC,
			      1,
			      CV_PI/180,
			      100,
			      100,
			      100);
	int y = 0;
	for(int i = 0; i < MIN(lines->total, 100); i++ )
        {
		y++;
            CvPoint* line = (CvPoint*)cvGetSeqElem(lines,i);
            cvLine( pImage, line[0], line[1], CV_RGB(255,0,0), 3, 8 );
        }
	
	
	
	cvReleaseMemStorage(&storage);
	cvReleaseImage(&grayImage);
	cvReleaseImage(&cannyImage);
	*********************************/
	
	
	// HSV working image and its three separated planes
	IplImage* hsv;
	IplImage* h;
	IplImage* s;
	IplImage* v;
	
	hsv = cvCreateImage( cvGetSize(pImage), 8, 3 ) ;
	h = cvCreateImage( cvGetSize(pImage), 8, 1 );
	s = cvCreateImage( cvGetSize(pImage), 8, 1 );
	v = cvCreateImage( cvGetSize(pImage), 8, 1 ) ;
	
	
	// Convert from BGR to HSV
	cvCvtColor( pImage, hsv, CV_BGR2HSV);
	
	// Get individual planes
	cvSplit( hsv, h, s, v, 0 );
	
	//use canny on h and then hough
	// NOTE(review): cannyImage is computed here but never used below --
	// the Hough call operates on `h` directly (see function comment)
	IplImage* cannyImage = cvCreateImage(cvGetSize(pImage), IPL_DEPTH_8U, 1);
	cvThreshold(h,h,-50,200,CV_THRESH_BINARY);
	cvCanny(h, cannyImage, 20, 100);
	
	
	//cvMerge(cannyImage, s, v, 0, pImage);
	//cvCvtColor(pImage, pImage, CV_BGR2HSV);
	
	
	//Finally, get the Hough Lines
	CvMemStorage* storage = cvCreateMemStorage(0);
	CvSeq* lines = 0;
	lines = cvHoughLines2(h,
			      storage,
			      CV_HOUGH_PROBABILISTIC,
			      1,
			      CV_PI/180,
			      1, //Threshold
			      100, //Minimum Line Length
			      5); //maximum gap between line segments lying on the same line to treat them as a single line segment
	LOGI("Number: %d", MIN(lines->total, 20));
	// Draw at most 20 detected segments onto the shared image in red
	for(int i = 0; i < MIN(lines->total, 20); i++ )
        {
            CvPoint* line = (CvPoint*)cvGetSeqElem(lines,i);
            cvLine( pImage, line[0], line[1], CV_RGB(255,0,0), 3, 8 );
        }
	cvReleaseMemStorage(&storage);
	//End hough
	
	
	// Release all per-call allocations
	cvReleaseImage(&cannyImage);
	cvReleaseImage(&hsv);
	cvReleaseImage(&h);
	cvReleaseImage(&s);
	cvReleaseImage(&v);
	
}
Exemple #25
0
/*
 * Runs the lane-detection pipeline on a single video frame.
 *
 * The frame is halved (cvPyrDown), the top half (above the horizon) is
 * cropped away, and the remaining road region is blurred, edge-detected
 * and fed to a probabilistic Hough transform; the resulting segments are
 * handed to processLanes() for classification and drawing.
 *
 * NOTE(review): CANNY_*_TRESHOLD, HOUGH_*, crop() and processLanes()
 * are defined elsewhere in this project and are not visible here.
 */
static void process_image_common(IplImage *frame)
{
  /* Font initialized for annotation; not used in the visible code path. */
  CvFont font;
  cvInitFont(&font, CV_FONT_VECTOR0, 0.25f, 0.25f);

  CvSize video_size;
#if defined(USE_POSIX_SHARED_MEMORY)
  /* Frame dimensions come from shared memory set up by another process. */
  video_size.height = *shrd_ptr_height;
  video_size.width  = *shrd_ptr_width;
#else
  // XXX These parameters should be set ROS parameters
  video_size.height = frame->height;
  video_size.width  = frame->width;
#endif
  /* Working images cover only the bottom half of the frame. */
  CvSize    frame_size = cvSize(video_size.width, video_size.height/2);
  IplImage *temp_frame = cvCreateImage(frame_size, IPL_DEPTH_8U, 3);
  IplImage *gray       = cvCreateImage(frame_size, IPL_DEPTH_8U, 1);
  IplImage *edges      = cvCreateImage(frame_size, IPL_DEPTH_8U, 1);
  IplImage *half_frame = cvCreateImage(cvSize(video_size.width/2, video_size.height/2), IPL_DEPTH_8U, 3);

  CvMemStorage *houghStorage = cvCreateMemStorage(0);

  cvPyrDown(frame, half_frame, CV_GAUSSIAN_5x5); // Reduce the image by 2

  /* we're intersted only in road below horizont - so crop top image portion off */
  crop(frame, temp_frame, cvRect(0, frame_size.height, frame_size.width, frame_size.height));
  cvCvtColor(temp_frame, gray, CV_BGR2GRAY); // contert to grayscale

  /* Perform a Gaussian blur & detect edges */
  // smoothing image more strong than original program
  cvSmooth(gray, gray, CV_GAUSSIAN, 15, 15);
  cvCanny(gray, edges, CANNY_MIN_TRESHOLD, CANNY_MAX_TRESHOLD);

  /* do Hough transform to find lanes */
  double rho = 1;
  double theta = CV_PI/180;
  CvSeq *lines = cvHoughLines2(edges, houghStorage, CV_HOUGH_PROBABILISTIC,
                               rho, theta, HOUGH_TRESHOLD, HOUGH_MIN_LINE_LENGTH, HOUGH_MAX_LINE_GAP);

  /* Classify/draw the detected segments (project-defined). */
  processLanes(lines, edges, temp_frame, frame);

#ifdef SHOW_DETAIL
  /* show middle line */
  cvLine(temp_frame, cvPoint(frame_size.width/2, 0),
         cvPoint(frame_size.width/2, frame_size.height), CV_RGB(255, 255, 0), 1);

  // cvShowImage("Gray", gray);
  // cvShowImage("Edges", edges);
  // cvShowImage("Color", temp_frame);
  // cvShowImage("temp_frame", temp_frame);
  // cvShowImage("frame", frame);
#endif

#if defined(USE_POSIX_SHARED_MEMORY)
  /* Publish the annotated frame through shared memory. */
  setImage_toSHM(frame);
#endif

#ifdef SHOW_DETAIL
  // cvMoveWindow("Gray", 0, 0);
  // cvMoveWindow("Edges", 0, frame_size.height+25);
  // cvMoveWindow("Color", 0, 2*(frame_size.height+25));
#endif

  /* Release all per-frame allocations. */
  cvReleaseMemStorage(&houghStorage);
  cvReleaseImage(&gray);
  cvReleaseImage(&edges);
  cvReleaseImage(&temp_frame);
  cvReleaseImage(&half_frame);
}
Exemple #26
0
//--------------------------------------------------------------
/*
 * openFrameworks per-frame update: grabs a new video frame (live camera
 * or movie, chosen at compile time), builds several filtered variants
 * (median, gray, HSV value plane, adaptive threshold, Canny), and -- in
 * drawing mode -- extracts Hough lines and contours for testApp::draw()
 * to render.  Contour points are scaled by kx/ky from capture resolution
 * (cw x ch) to window resolution.
 *
 * NOTE(review): colorImg, medianImg, grayImage, hsvImage, satImage,
 * trsImage, cannyImage, the *Value sliders, mode/draw_* flags, and the
 * storages are testApp members declared elsewhere in this project.
 */
void testApp::update(){
	

    bool bNewFrame = false;

	#ifdef _USE_LIVE_VIDEO
       vidGrabber.grabFrame();
	   bNewFrame = vidGrabber.isFrameNew();
    #else
        vidPlayer.idleMovie();
        bNewFrame = vidPlayer.isFrameNew();
	#endif

	if (bNewFrame){

		#ifdef _USE_LIVE_VIDEO
            colorImg.setFromPixels(vidGrabber.getPixels(), cw,ch);
	    #else
            colorImg.setFromPixels(vidPlayer.getPixels(), cw,ch);
        #endif

		
		// Scale factors from capture size to window size
		kx = (float) ofGetWidth()  / cw;
		ky = (float) ofGetHeight() / ch;
		
		// Median-filtered copy (flagImageChanged() tells ofxCvImage the
		// underlying IplImage was modified directly)
		cvSmooth(colorImg.getCvImage(), medianImg.getCvImage(), CV_MEDIAN, medianValue, medianValue);
		medianImg.flagImageChanged();
				
		grayImage = medianImg;
		
		cvCvtColor(colorImg.getCvImage(), hsvImage.getCvImage(), CV_RGB2HSV);
		hsvImage.flagImageChanged();
		
		// Copy the saturation channel (COI=2) out of the HSV image,
		// then reset the channel of interest
		cvSetImageCOI(hsvImage.getCvImage(), 2);
		cvCopy(hsvImage.getCvImage(), satImage.getCvImage());
		satImage.flagImageChanged();
		cvSetImageCOI(hsvImage.getCvImage(), 0);
		
		//cvSmooth(satImage.getCvImage(), satImage.getCvImage(), CV_BLUR, 3, 3, 0, 0);
		
		// Adaptive threshold of the (median-filtered) gray image; used
		// below as the source for filled-contour extraction
		cvAdaptiveThreshold(grayImage.getCvImage(), trsImage.getCvImage(), 255, CV_ADAPTIVE_THRESH_MEAN_C, CV_THRESH_BINARY, adaptiveThreshValue);
		//cvCanny(trsImage.getCvImage(), trsImage.getCvImage(), sb, sb*4, 3);
		trsImage.flagImageChanged();
		
		
//		cvSmooth(satImage.getCvImage(), satImage.getCvImage(), CV_MEDIAN, 7, 7);
		
//		cvSmooth( iplImage, iplImage, CV_BLUR, br, br, 0, 0 );
//		cvSmooth( iplImage, iplImage, CV_MEDIAN, 7, 7);
		// Edge image driving both the Hough transform and edge contours
		cvCanny(  grayImage.getCvImage(), cannyImage.getCvImage(), cannyThresh1Value, cannyThresh2Value, cannyApertureValue);
		cannyImage.flagImageChanged();
			
		//cvPyrMeanShiftFiltering(colorImg.getCvImage(), colorImg.getCvImage(), 20, 40, 2);
		
		if (mode==MODE_DRAWING) {

			if (draw_edges) {

				// Hough variant selected at compile time
				#if PROBABILISTIC_LINE
					lines = cvHoughLines2( cannyImage.getCvImage(), linesStorage, CV_HOUGH_PROBABILISTIC, 1, CV_PI/180, lineThreshValue, lineMinLengthValue, lineMaxGapValue );
				#else
					lines = cvHoughLines2( cannyImage.getCvImage(), linesStorage, CV_HOUGH_STANDARD, 1, CV_PI/180, 100, 0, 0 );
				#endif
			
			}
			
			if (draw_contours || draw_approx) {
				// Edge contours, scaled in place to window coordinates
				cvFindContours(cannyImage.getCvImage(), edgesStorage, &edgeContours);
				
				CvSeq* contour = edgeContours;
				while (contour!=NULL) {
					for (int j = 0; j < contour->total; j++){
						CvPoint* p1 = CV_GET_SEQ_ELEM(CvPoint, contour, j);
						p1->x = p1->x*(float)kx;
						p1->y = p1->y*(float)ky;
					}
					contour = contour->h_next;
				}
				
			}

			if (draw_fills) {
				// Filled-region contours from the adaptive threshold,
				// scaled the same way
				cvFindContours(trsImage.getCvImage(), fillsStorage, &fillContours);
				
				CvSeq* contour = fillContours;
				while (contour!=NULL) {
					for (int j = 0; j < contour->total; j++){
						CvPoint* p1 = CV_GET_SEQ_ELEM(CvPoint, contour, j);
						p1->x = p1->x*(float)kx;
						p1->y = p1->y*(float)ky;
					}
					contour = contour->h_next;
				}
			}
		}

	}
	
	
	// update scope
//	float* rand = new float[50];
//	for(int i=0 ;i<50; i++){
//		rand[i] = ofRandom(-1.0,1);
//		
//	}
//	
//	gui->update(scope, kofxGui_Set_FloatArray, rand, sizeof(float*));
//	
//	// make 3 seconds loop
//	float f = ((ofGetElapsedTimeMillis()%3000) / 3000.0);
//	gui->update(points, kofxGui_Set_Float, &f, sizeof(float));

}
void OrchardDetector::processLaserScan(

const sensor_msgs::LaserScan::ConstPtr& laser_scan) {
	// Detect tree rows beside the robot from a single laser scan:
	// project the scan into a point cloud, rasterize the band of interest
	// into rawData_, mask large blobs, run a standard Hough transform,
	// average the gated candidate lines, and publish row info, an RViz
	// marker and a steering twist command.
	//
	// Fixes vs. previous revision:
	//  - PointArray was malloc'd per contour and never freed (leak per scan)
	//  - the contour CvMemStorage was never released (leak per scan)
	//  - contour loops ran i <= point_cnt, redrawing the first segment twice
	float rthetamean = 0, rrhomean = 0, lthetamean = 0, lrhomean = 0,
			theta = 0, rho = 0;
	double x0 = 0, y0 = 0, a, b;
	int lmc = 0, rmc = 0;

	// Persists across scans; indexes the rolling-mean ring buffers below.
	static int count = 0;

	clearRawImage();

	sensor_msgs::PointCloud cloud;
	projector_.projectLaser(*laser_scan, cloud);

	int size = cloud.points.size();

	// Rasterize points 0.5..1.5 m to the right (y < 0) and -1..3 m ahead
	// into the image: 50 px per metre, origin at pixel (300, 300).  Each
	// point becomes a short horizontal white dash so the Hough transform
	// has some mass to latch onto.
	for (int i = 0; i < size; i++) {
		if (abs(cloud.points[i].y) < 1.5 && abs(cloud.points[i].y) > 0.5 &&cloud.points[i].y < 0 && cloud.points[i].x > -1 && cloud.points[i].x < 3) {
			point1.x = ((int)(cloud.points[i].x * 50) + 300);
			point1.y = ((int)(cloud.points[i].y * 50) + 300);
			point2.x = point1.x - 4;
			point2.y = point1.y;
			cvLine(rawData_, point1, point2, CV_RGB(255,255,255), 2, CV_AA, 0);
		}
	}

	cvCvtColor(rawData_, workingImg_, CV_BGR2GRAY);

	cvThreshold(workingImg_,workingImg_, 100, 255, CV_THRESH_BINARY);

	CvMemStorage* stor = cvCreateMemStorage(0);
	CvSeq* cont = cvCreateSeq(CV_SEQ_ELTYPE_POINT, sizeof(CvSeq),
			sizeof(CvPoint), stor);

	// Find external contours.  cvFindContours replaces `cont` with its own
	// sequence allocated from `stor`.
	cvFindContours(workingImg_, stor, &cont, sizeof(CvContour),
			CV_RETR_EXTERNAL, 2, cvPoint(0, 0));

	for (; cont; cont = cont->h_next) {
		// Number of points in this contour polygon.
		int point_cnt = cont->total;

		CvPoint* PointArray = (CvPoint*) malloc(point_cnt * sizeof(CvPoint));

		// Get contour point set.
		cvCvtSeqToArray(cont, PointArray, CV_WHOLE_SEQ);

		if (point_cnt > 20) {
			// Large blob: paint it out in black (very thick stroke on the
			// working image) so it cannot feed the Hough transform.
			for (int i = 0; i < point_cnt; i++) {
				cvLine(rawData_, PointArray[i], PointArray[(i + 1) % point_cnt], cvScalar(0, 0, 0), 10);
				cvLine(workingImg_, PointArray[i], PointArray[(i + 1) % point_cnt], cvScalar(0, 0, 0), 50);
			}
		} else {
			// Small contour: only visualised in green on the debug image.
			for (int i = 0; i < point_cnt; i++) {
				cvLine(rawData_, PointArray[i], PointArray[(i + 1) % point_cnt], cvScalar(0, 255, 0), 4);
			}
		}

		free(PointArray); // fix: previously leaked on every contour
	}

	cvReleaseMemStorage(&stor); // fix: previously leaked on every scan

	//cvDilate(workingImg_,workingImg_, 0, 3);
	//cvErode(workingImg_,workingImg_, 0, 3);

	//cvShowImage("Wporking", workingImg_);
	cvWaitKey(10);

	lines_ = cvHoughLines2(workingImg_, storage_, CV_HOUGH_STANDARD, 1, CV_PI / 180, 15, 0, 0);

	// Classify the strongest Hough lines by which reference segment they
	// cross: (300,300)-(300,600) selects one side, (300,300)-(300,0) the
	// other, then accumulate rho/theta per side.
	// NOTE(review): the r* accumulators are gated on left_angle_ and the l*
	// accumulators on right_angle_ — this looks swapped but is preserved
	// as-is because only the l* means are consumed below; confirm intent.
	for (int i = 0; i < MIN(lines_->total,15); i++) {

		float* line = (float*) cvGetSeqElem(lines_, i);
		rho = line[0];
		theta = line[1];

		// Convert (rho, theta) to a segment spanning the 600 px image.
		a = cos(theta);
		b = sin(theta);
		x0 = a * rho;
		y0 = b * rho;
		point1.x = cvRound(x0 + 600 * (-b));
		point1.y = cvRound(y0 + 600 * (a));
		point2.x = cvRound(x0 - 600 * (-b));
		point2.y = cvRound(y0 - 600 * (a));
		point3.x = 300, point3.y = 300;
		point4.x = 300, point4.y = 600;
		point5.x = 300, point5.y = 0;

		cvLine(rawData_, point3, point4, CV_RGB(0,0,255), 1, CV_AA, 0);
		cvLine(rawData_, point3, point5, CV_RGB(0,0,255), 1, CV_AA, 0);

		if (intersect(point1, point2, point3, point4)) {
			// Accept only lines whose angle is close to the previous
			// estimate (gates against outlier lines).
			if (abs(left_angle_ - ((theta) - CV_PI / 2)) < 0.2) {
				rrhomean += rho;
				rthetamean += theta;
				rmc++;
				cvLine(workingImg_, point1, point2, CV_RGB(0,0,255), 1, CV_AA, 0);
			}
		} else if (intersect(point1, point2, point3, point5)) {
			if (abs(right_angle_ - ((theta) - CV_PI / 2)) < 0.5) {
				lrhomean += rho;
				lthetamean += theta;
				lmc++;
				cvLine(workingImg_, point1, point2, CV_RGB(255,255,255), 1, CV_AA, 0);
			}
		}
	}
	if(lmc > 5){

		// Enough consistent lines: use their mean as the row line.
		theta = lthetamean / lmc;
		rho = lrhomean / lmc;

		a = cos(theta);
		b = sin(theta);
		x0 = a * rho;
		y0 = b * rho;
		point1.x = cvRound(x0 + 600 * (-b)), point1.y = cvRound(y0 + 600 * (a));
		point2.x = cvRound(x0 - 600 * (-b)), point2.y = cvRound(y0 - 600 * (a));

		cvLine(rawData_, point1, point2, CV_RGB(255,0,0), 3, CV_AA, 0);

		// Probe segment from the robot origin, perpendicular-ish to the row,
		// used to measure the distance to the mean line.
		point4.x = 300;
		point4.y = 300;
		point5.x = point4.x + 200 * sin(CV_PI - (theta - CV_PI / 2));
		point5.y = point4.y + 200 * cos(CV_PI - (theta - CV_PI / 2));

		cvLine(rawData_, point5, point4, CV_RGB(255,255,255), 1, CV_AA, 0);

		rows_.header.stamp = ros::Time::now();
		rows_.leftvalid = false;
		rows_.rightvalid = false;

		if (intersect(point1, point2, point4, point5)) {
			// Pixel distance from origin to the intersection, scaled back
			// to centimetres (50 px/m => *2 cm/px).
			right_distance_ = sqrt(((intersection_.y - 300) * (intersection_.y
					- 300)) + ((intersection_.x - 300) * (intersection_.x - 300)))
					* 2;

			right_angle_ = (theta) - CV_PI / 2;
			count++;

			// Rolling mean over the last 100 angle samples.
			rolling_mean_[count % 100][0] = right_angle_;
			right_angle_ = 0;
			for (int i = 0; i < 100; i++) {
				right_angle_ += rolling_mean_[i][0];
			}
			right_angle_ = right_angle_ / 100;

			// Rolling mean over the last 50 distance samples.
			rolling_mean_[count % 50][1] = right_distance_;
			right_distance_ = 0;
			for (int i = 0; i < 50; i++) {
				right_distance_ += rolling_mean_[i][1];
			}
			right_distance_ = right_distance_ / 50;

			// Confidence counter, saturated at 10 (positive => in-row).
			inrow_cnt++;
			if(inrow_cnt > 10)
				inrow_cnt = 10;

			geometry_msgs::Quaternion pose_quat = tf::createQuaternionMsgFromYaw(
					right_angle_);

			// Publish a cube marker visualizing the detected row.
			marker_r.header.frame_id = "/laser_link";
			marker_r.header.stamp = ros::Time::now();

			marker_r.ns = "basic_shapes";
			marker_r.id = 0;

			marker_r.type = visualization_msgs::Marker::CUBE;
			marker_r.action = visualization_msgs::Marker::ADD;

			// Full 6DOF pose relative to the frame/time in the header.
			marker_r.pose.position.x = 0;
			marker_r.pose.position.y = -((float) right_distance_) / 100;
			marker_r.pose.position.z = 0;
			marker_r.pose.orientation = pose_quat;

			// Scale in metres: long, thin slab along the row.
			marker_r.scale.x = 10.0;
			marker_r.scale.y = 0.1;
			marker_r.scale.z = 0.5;

			// Alpha must be non-zero or the marker is invisible.
			marker_r.color.r = 0.0f;
			marker_r.color.g = 1.0f;
			marker_r.color.b = 0.0f;
			marker_r.color.a = 0.5;

			marker_r.lifetime = ros::Duration(.5);

			marker_r_pub.publish(marker_r);

			rows_.rightvalid = true;
			rows_.rightdistance = ((float) right_distance_) / 100;
			rows_.rightangle = right_angle_;

		} else {

			// Mean line does not cross the probe: decay confidence fast,
			// clamped at -5.
			inrow_cnt -= 2;
			if(inrow_cnt < -5){
				inrow_cnt = -5;
			}
		}

	}else{

		ROS_INFO_THROTTLE(1,"lmc = %d", lmc);

		// Too few line candidates: decay confidence, clamped at -5.
		inrow_cnt -= 2;
		if(inrow_cnt < -5){
			inrow_cnt = -5;
		}
	}
	CvFont font;
	cvInitFont(&font, CV_FONT_HERSHEY_SIMPLEX, 0.5, 0.5, 0, 1, CV_AA);

	// Positive confidence => steer along the row; otherwise headland turn.
	if(inrow_cnt > 0){
		twist_msg_.twist.angular.z = -((right_angle_*1.5) - (((float)right_distance_/100) - 1.2) );
		cvPutText(rawData_, "INROW-NAVIGATION", cvPoint(10, 130), &font, cvScalar(255, 255, 255, 0));
		cvLine(rawData_, cvPoint(10, 140), cvPoint(10 + abs(inrow_cnt * 20),140),CV_RGB(0,255,0), 3, CV_AA, 0);
		rows_.headland = false;
	}else{
		twist_msg_.twist.angular.z = M_PI/4;
		cvPutText(rawData_, "HEADLAND", cvPoint(10, 130), &font, cvScalar(255, 255, 255, 0));
		cvLine(rawData_, cvPoint(10, 140), cvPoint(10 + abs(inrow_cnt * 20), 140 ),CV_RGB(255,0,0), 3, CV_AA, 0);
		rows_.headland = true;
	}
	twist_pub.publish(twist_msg_);

	// Guide lines marking the +-3 m band around the robot.
	cvLine(rawData_, cvPoint(0, 300 + 150), cvPoint(600, 300 + 150),CV_RGB(255,255,255), 1, CV_AA, 0);
	cvLine(rawData_, cvPoint(0, 300 - 150), cvPoint(600, 300 - 150),CV_RGB(255,255,255), 1, CV_AA, 0);
	rows_.header.stamp = ros::Time::now();

	row_pub.publish(rows_);

	cvShowImage("TEST", rawData_);
	cvWaitKey(10);
	//pc_pub.publish(cloud);

}
void BoatDetector::houghline(IplImage* edgeImage, IplImage* image, IplImage* lineImage) {
	// Estimate the sea/horizon limit: draw near-horizontal Hough lines
	// (theta in [1.50, 1.60] rad) into lineImage, then scan both image
	// borders for the lowest drawn line pixel and validate the candidate
	// row by comparing grey values above/below it on a sampling grid.
	// Updates sea_limit on success; returns early when no usable line is
	// found.

	// Validation grid parameters.
	int points = 50; // sample points per row
	int rows = 3; // number of sample rows above/below the candidate
	int ver_dist = 10; // vertical distance between sample rows (px)
	int hor_dist = image->width / points; // horizontal distance between samples (px)
	if (hor_dist <= 0) {
		// fix: for images narrower than `points` pixels, hor_dist was 0 and
		// the validation loop below (j += hor_dist) never terminated.
		hor_dist = 1;
	}

	cvCopy(edgeImage, lineImage);

	CvSeq* hough_lines = 0;

	// Grey value used to draw and later re-detect the candidate lines.
	CvScalar line_color = cvScalar(120);

	hough_lines = cvHoughLines2( edgeImage, hough_storage, CV_HOUGH_STANDARD, 1, CV_PI/180, 100, 0, 0 );


	if(hough_lines->total == 0) {
		return;
	}

	bool find = false;

	CvPoint pt1, pt2;
	float* line;
	float theta;
	float rho;
	double a, b, x0, y0;
	for( int i = 0; i < min(hough_lines->total, 100); i++ )
	{
		line = (float*)cvGetSeqElem(hough_lines, i);
		theta = line[1];
		// Keep only near-horizontal lines (theta ~ pi/2).
		if(theta < 1.50 || theta > 1.60) {
			continue;
		}
		rho = line[0];

		// Convert (rho, theta) into a long segment crossing the image.
		a = cos(theta);
		b = sin(theta);
		x0 = a*rho;
		y0 = b*rho;
		pt1.x = cvRound(x0 + 1000*(-b));
		pt1.y = cvRound(y0 + 1000*(a));
		pt2.x = cvRound(x0 - 1000*(-b));
		pt2.y = cvRound(y0 - 1000*(a));		

		cvLine( lineImage, pt1, pt2, line_color, 2, CV_AA, 0 );
		find = true;
	}
	if(!find) {
		return;
	}

	bool run = true;
	// Never search below the last full sampling grid.
	int search_limit = lineImage->height - (ver_dist * rows);
	int line_step = lineImage->widthStep/sizeof(char);
	int img_step = image->widthStep/sizeof(char);
	int max_left, max_right;
	int tmp_limit;
	double count;
	while(run) {
		// Find the lowest drawn-line pixel near the left (column 3) and
		// right (width-3) borders, above search_limit.
		max_left = 0;
		max_right = 0;
		for(int i = ver_dist * rows; i < search_limit; i++) {
			if(((uchar)lineImage->imageData[i*line_step+3]) == line_color.val[0]) {
				if(i > max_left) {
					max_left = i;
				}
			}
			if(((uchar)lineImage->imageData[i*line_step + (lineImage->width-3)]) == line_color.val[0]) {
				if(i > max_right) {
					max_right = i;
				}
			}		
		}
		if(max_left == 0 || max_right == 0) {
			// No candidate line reaches both borders: give up.
			run = false;
			continue;
		}

		tmp_limit = (max_left + max_right) / 2;

		// Validate the candidate: the line should be roughly level, and the
		// grey values `t*ver_dist` pixels below each sample row should
		// differ from those above (sky vs. sea contrast).
		count = 0;
		if(abs(max_left - max_right) < 10) {

			for(int i = tmp_limit - (ver_dist * rows), k = 0, t = rows*2; k < rows; i+=ver_dist, k++, t-=2) {
				for(int j = hor_dist; j < image->width; j+=hor_dist) {
					if(abs(image->imageData[i*img_step + j] -
						image->imageData[(i+t*ver_dist)*img_step + j] ) > 10 )
					{
						count++;
					}
				}
			}
		}
		if((count / (points * rows)) > 0.9 ) {

			// More than 90 % of the grid confirms the contrast: accept.
			sea_limit = tmp_limit;

			run = false;
		}
		else {
			// Reject this candidate and search strictly above it; the new
			// search_limit is always smaller, so the loop terminates.
			search_limit = max(max_left, max_right);
		}
	}
}