// Returns a sequence of squares detected in the image.
// The sequence is stored in the specified memory storage.
CvSeq* findSquares4(IplImage* imgSrc, CvMemStorage* storage)
{
	CvSeq* contours;
	int i, c, l, N = 11;
	CvSize sz = cvSize(imgSrc->width & -2, imgSrc->height & -2);

	IplImage* imgTmp = cvCloneImage(imgSrc); // make a copy of input image
	IplImage* imgGray = cvCreateImage(sz, 8, 1);
	IplImage* imgPyr = cvCreateImage(cvSize(sz.width / 2, sz.height / 2), 8, 3);
	IplImage* imgGrayTmp;
	CvSeq* result;
	double s, t;

	// create empty sequence that will contain points -
	// 4 points per square (the square's vertices)
	CvSeq* squares = cvCreateSeq(0, sizeof(CvSeq), sizeof(CvPoint), storage);

	// select the maximum ROI in the image
	// with the width and height divisible by 2
	cvSetImageROI(imgTmp, cvRect(0, 0, sz.width, sz.height));

	// down-scale and up-scale the image to filter out the noise
	cvPyrDown(imgTmp, imgPyr, 7);
	cvPyrUp(imgPyr, imgTmp, 7);
	imgGrayTmp = cvCreateImage(sz, 8, 1);

	// find squares in every color plane of the image
	for (c = 0; c < 3; c++)
	{
		// extract the c-th color plane
		cvSetImageCOI(imgTmp, c + 1);
		cvCopy(imgTmp, imgGrayTmp, 0);

		// try several threshold levels
		for (l = 0; l < N; l++)
		{
			// hack: use Canny instead of zero threshold level.
			// Canny helps to catch squares with gradient shading
			if (l == 0)
			{
				// apply Canny. Take the upper threshold from slider
				// and set the lower to 0 (which forces edges merging)
				cvCanny(imgGrayTmp, imgGray, 0, gThresh, 5);

				// dilate canny output to remove potential
				// holes between edge segments
				cvDilate(imgGray, imgGray, 0, 1);
			}
			else
			{
				// apply threshold if l != 0:
				//     tgray(x,y) = gray(x,y) > (l+1)*255/N ? 255 : 0
				cvThreshold(imgGrayTmp, imgGray, (l + 1) * 255 / N, 255, CV_THRESH_BINARY);
			}

			// find contours and store them all as a list
			cvFindContours(imgGray, storage, &contours, sizeof(CvContour),
					CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, cvPoint(0, 0));

			// test each contour
			while (contours)
			{
				// approximate contour with accuracy proportional
				// to the contour perimeter
				result = cvApproxPoly(contours, sizeof(CvContour), storage,
						CV_POLY_APPROX_DP, cvContourPerimeter(contours) * 0.02, 0);

				// square contours should have 4 vertices after approximation
				// relatively large area (to filter out noisy contours)
				// and be convex.
				// Note: absolute value of an area is used because
				// area may be positive or negative - in accordance with the
				// contour orientation
				if (result->total == 4 &&
						fabs(cvContourArea(result, CV_WHOLE_SEQ)) > 1000 &&
						cvCheckContourConvexity(result))
				{
					s = 0;

					for (i = 0; i < 5; i++)
					{
						// find minimum angle between joint edges (maximum of cosine)
						if (i >= 2)
						{
							t = fabs(angle(
									(CvPoint*) cvGetSeqElem(result, i),
									(CvPoint*) cvGetSeqElem(result, i - 2),
									(CvPoint*) cvGetSeqElem(result, i - 1)));
							s = s > t ? s : t;
						}
					}

					// if cosines of all angles are small
					// (all angles are ~90 degrees) then write the quadrangle
					// vertices to the resultant sequence
					if (s < 0.3)
						for (i = 0; i < 4; i++)
							cvSeqPush(squares, (CvPoint*) cvGetSeqElem(result, i));
				}

				// take the next contour
				contours = contours->h_next;
			}
		}
	}

	// release all the temporary images
	cvReleaseImage(&imgGray);
	cvReleaseImage(&imgPyr);
	cvReleaseImage(&imgGrayTmp);
	cvReleaseImage(&imgTmp);

	return squares;
}
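
// Usage sketch (not part of the original example): one way findSquares4() might
// be driven. It assumes the angle() helper and the gThresh slider value used
// inside the function are defined elsewhere, as in the classic OpenCV squares
// sample; the window name is arbitrary.
void findAndDrawSquaresDemo(const char* filename)
{
	IplImage* img = cvLoadImage(filename, 1);
	if (!img)
		return;
	CvMemStorage* storage = cvCreateMemStorage(0);

	CvSeq* squares = findSquares4(img, storage);

	// the sequence stores 4 vertices per square; read and draw them 4 at a time
	CvSeqReader reader;
	cvStartReadSeq(squares, &reader, 0);
	for (int i = 0; i < squares->total; i += 4)
	{
		CvPoint pt[4], *rect = pt;
		int count = 4;
		CV_READ_SEQ_ELEM(pt[0], reader);
		CV_READ_SEQ_ELEM(pt[1], reader);
		CV_READ_SEQ_ELEM(pt[2], reader);
		CV_READ_SEQ_ELEM(pt[3], reader);
		cvPolyLine(img, &rect, &count, 1, 1, CV_RGB(0, 255, 0), 3, CV_AA, 0);
	}

	cvNamedWindow("Squares", 1);
	cvShowImage("Squares", img);
	cvWaitKey(0);

	cvReleaseImage(&img);
	cvReleaseMemStorage(&storage);
}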
Example #2
///////////////////////////////////////////////////////////////////////////////////////////
//void cvconnectedComponents(IplImage *mask, int poly1_hull0, float perimScale, int *num, CvRect *bbs, CvPoint *centers)
// This cleans up the foreground segmentation mask derived from calls to cvbackgroundDiff
//
// mask			A grayscale (8-bit depth) "raw" mask image which will be cleaned up
//
// OPTIONAL PARAMETERS:
// poly1_hull0	If set (DEFAULT), approximate each connected component by a polygon; if 0, use its convex hull
// perimScale 	Len = image (width+height)/perimScale.  If a contour's length < this, delete that contour (DEFAULT: 4)
// num			Maximum number of rectangles and/or centers to return; on return, contains the number actually filled (DEFAULT: NULL)
// bbs			Pointer to a bounding-box rectangle vector of length num (DEFAULT: NULL)
// centers		Pointer to a contour-centers vector of length num (DEFAULT: NULL)
//
void cvconnectedComponents(IplImage *mask, int poly1_hull0, 
                           float perimScale, int *num, 
                           CvRect *bbs, CvPoint *centers)
{
   static CvMemStorage*   mem_storage = NULL;
   static CvSeq*          contours    = NULL;

   //CLEAN UP RAW MASK
   cvMorphologyEx( mask, mask, 0, 0, CV_MOP_OPEN, CVCLOSE_ITR );
   cvMorphologyEx( mask, mask, 0, 0, CV_MOP_CLOSE, CVCLOSE_ITR );

   //FIND CONTOURS AROUND ONLY BIGGER REGIONS
   if( mem_storage==NULL ) mem_storage = cvCreateMemStorage(0);
   else                    cvClearMemStorage(mem_storage);
   
   CvContourScanner scanner = cvStartFindContours( mask, 
                   mem_storage, sizeof(CvContour), 
                   CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
   CvSeq* c;
   int numCont = 0;
   while( (c = cvFindNextContour( scanner )) != NULL ) 
   {
      double len = cvContourPerimeter( c );
      //calculate perimeter len threshold:
      double q = (mask->height + mask->width)/perimScale;
      //Get rid of blob if its perimeter is too small:
      if( len < q ) 
      {
         cvSubstituteContour( scanner, NULL );
      }
      else //Smooth its edges if it's large enough
      {
         CvSeq* c_new;
         if(poly1_hull0) //Polygonal approximation
            c_new = cvApproxPoly( c,
                       sizeof(CvContour), mem_storage,
                       CV_POLY_APPROX_DP,
                       CVCONTOUR_APPROX_LEVEL, 0);
         else            //Convex hull of the segmentation
            c_new = cvConvexHull2( c, mem_storage,
                                   CV_CLOCKWISE, 1);
         //Either way, replace the raw contour with the smoothed one
         cvSubstituteContour( scanner, c_new );
         numCont++;
      }
   }
   contours = cvEndFindContours( &scanner );

// PAINT THE FOUND REGIONS BACK INTO THE IMAGE
   cvZero( mask );
   IplImage *maskTemp;
   //CALC CENTER OF MASS AND OR BOUNDING RECTANGLES
   if(num != NULL) //User wants to collect statistics
   {
      int N = *num, numFilled = 0, i=0;
      CvMoments moments;
      double M00, M01, M10;
      maskTemp = cvCloneImage(mask);
      for(i=0, c=contours; c != NULL; c = c->h_next,i++ ) 
      {
         if(i < N) //Only process up to *num of them
         {   
            cvDrawContours( maskTemp, c, CV_CVX_WHITE, 
                            CV_CVX_WHITE,-1, CV_FILLED, 8);
            //Find the center of each contour 
            if(centers != NULL)
            {
               cvMoments(maskTemp,&moments,1);
               M00 = cvGetSpatialMoment(&moments,0,0);
               M10 = cvGetSpatialMoment(&moments,1,0);
               M01 = cvGetSpatialMoment(&moments,0,1);
               centers[i].x = (int)(M10/M00);
               centers[i].y = (int)(M01/M00);
            }
            //Bounding rectangles around blobs
            if(bbs != NULL)
            {
               bbs[i] = cvBoundingRect(c);
            }
            cvZero(maskTemp);
            numFilled++;
         }
         //Draw filled contours into mask
         cvDrawContours( mask, c, CV_CVX_WHITE, CV_CVX_WHITE, 
                         -1, CV_FILLED, 8); //draw to central mask
      } //end looping over contours   
      *num = numFilled;
      cvReleaseImage( &maskTemp);
   }
   //ELSE JUST DRAW PROCESSED CONTOURS INTO THE MASK
   else //User doesn't want statistics, just draw the contours
   {
      for( c=contours; c != NULL; c = c->h_next ) 
      {
         cvDrawContours( mask, c, CV_CVX_WHITE, CV_CVX_BLACK, 
                         -1,CV_FILLED, 8);
      } 
   }
}
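
// Usage sketch (not part of the original example): a minimal call of the helper
// above on a foreground mask, assuming CVCLOSE_ITR, CVCONTOUR_APPROX_LEVEL,
// CV_CVX_WHITE and CV_CVX_BLACK are defined elsewhere, as in the code this
// function comes from, and that <stdio.h> is included.
void cleanupMaskDemo(IplImage *fgMask /* 8-bit, single-channel raw mask */)
{
   const int MAX_BLOBS = 10;
   int numBlobs = MAX_BLOBS;              // in: capacity, out: number actually filled
   CvRect  boxes[MAX_BLOBS];
   CvPoint centers[MAX_BLOBS];

   //Clean the mask in place and collect statistics for up to MAX_BLOBS blobs
   cvconnectedComponents(fgMask, 1 /*polygon approx*/, 4.0f,
                         &numBlobs, boxes, centers);

   for (int i = 0; i < numBlobs; i++)
      printf("blob %d: center=(%d,%d), bbox=%dx%d\n", i,
             centers[i].x, centers[i].y, boxes[i].width, boxes[i].height);
}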
Example #3
int main(){
  
////////////////////GAME PROPERTIES///////////////////////////////////

//window properties
	sf::RenderWindow pong(sf::VideoMode(RENDERWIDTH, RENDERHEIGHT, 32), "GameName");
	pong.setMouseCursorVisible(false);
	pong.setFramerateLimit(60);

	//music
	sf::Music bgm;
	bgm.openFromFile("musica.wav");
	bgm.setPitch(1.5);
	bgm.setLoop(true);
	bgm.play();

	//sound
	sf::SoundBuffer buffer1;
	buffer1.loadFromFile("/usr/lib/libreoffice/share/gallery/sounds/beam.wav");
	sf::Sound bounce;
	bounce.setBuffer(buffer1);
	sf::SoundBuffer buffer2;
	buffer2.loadFromFile("/usr/lib/libreoffice/share/gallery/sounds/beam2.wav");
	sf::Sound point;
	point.setBuffer(buffer2);	
	sf::SoundBuffer buffer3;
	buffer3.loadFromFile("/usr/lib/libreoffice/share/gallery/sounds/beam.wav"); // perfect hit!
	sf::Sound perfecthit;
	perfecthit.setBuffer(buffer3);

	//player 1 properties
	int p1Len = 80;
	sf::RectangleShape player1(sf::Vector2f(15, p1Len));
	player1.setFillColor(sf::Color(0, 0, 255));
	player1.setPosition(0, RENDERHEIGHT / 2 - player1.getSize().y / 2);
	int player1Score = 0;
	

	//player 2 properties
	int p2Len = 80;
	sf::RectangleShape player2(sf::Vector2f(15, p2Len));
	player2.setFillColor(sf::Color(0, 255, 0));
	player2.setPosition(RENDERWIDTH - player2.getSize().x, RENDERHEIGHT / 2 - player2.getSize().y / 2);
	int player2Score = 0;
	

	//ball properties
	sf::CircleShape ball(7, 25);
	ball.setFillColor(sf::Color(255,255,255));
	ball.setPosition(RENDERWIDTH / 2 - ball.getRadius(), RENDERHEIGHT / 2 - ball.getRadius());
	float BALLSPEED = 2;
	float ballVelX = -BALLSPEED, ballVelY = -BALLSPEED;
	float ballX = RENDERWIDTH / 2 - ball.getRadius(), ballY = RENDERHEIGHT / 2 - ball.getRadius();
	float ballDiameter = ball.getRadius() * 2;
	
	sf::Font font;

	font.loadFromFile("/usr/share/cups/fonts/FreeMonoOblique.ttf");

	font.loadFromFile("/usr/share/fonts/truetype/ttf-liberation/LiberationSerif-Bold.ttf");

	sf::Text score1("0", font, 80);
	score1.setPosition(RENDERWIDTH / 4, 0);
	sf::Text score2("0", font, 80);
	score2.setPosition(3 * RENDERWIDTH / 4, 0);
	
///////////////FINISH PROPERTIES//////////////////////////////////////////////////////



      CvCapture* capture =0;       
      capture = cvCaptureFromCAM(1);
      if(!capture){
printf("Capture failure\n");
return -1;
      }
      
      IplImage* frame=0;
      frame = cvQueryFrame(capture);           
      if(!frame) return -1;
  
     //create a blank image and assign it to 'imgTracking'; it has the same size as the original video
     imgTracking = cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 3);

     cvZero(imgTracking); //convert the image 'imgTracking' to black

     cvNamedWindow("Video");     
     cvNamedWindow("Ball");

      //iterate through each frame of the video
      while(true){

            frame = cvQueryFrame(capture);           
            if(!frame) break;
            frame=cvCloneImage(frame); 
            
           cvSmooth(frame, frame, CV_GAUSSIAN,3,3); //smooth the original image using Gaussian kernel

            IplImage* imgHSV = cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 3);
            cvCvtColor(frame, imgHSV, CV_BGR2HSV); //Change the color format from BGR to HSV

            IplImage* imgThresh = GetThresholdedImage(imgHSV, 94, 169, 127, 143, 251, 229);
            cvSmooth(imgThresh, imgThresh, CV_GAUSSIAN, 3, 3); //smooth the binary image using Gaussian kernel
            player100 = trackObject(imgThresh, 255, 0, 0, 1);

            IplImage* imgHSV2 = cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 3);
            cvCvtColor(frame, imgHSV2, CV_BGR2HSV);

            IplImage* imgThresh2 = GetThresholdedImage(imgHSV2, 22, 64, 152, 50, 134, 256);
            cvSmooth(imgThresh2, imgThresh2, CV_GAUSSIAN, 3, 3); //smooth the binary image using Gaussian kernel
            player200 = trackObject(imgThresh2, 0, 255, 0, 2);

            // Add the tracking image and the frame
            cvAdd(frame, imgTracking, frame);

            cvShowImage("Ball", imgThresh);
            cvShowImage("Ball2", imgThresh2);
            cvShowImage("Video", frame);

            //Clean up used images
            cvReleaseImage(&imgHSV);
            cvReleaseImage(&imgHSV2);
            cvReleaseImage(&imgThresh);
            cvReleaseImage(&imgThresh2);
            cvReleaseImage(&frame);

            //Wait 10mS
            int c = cvWaitKey(10);
            //If 'ESC' is pressed, break the loop
            if((char)c==27 ) break;  


//////////////////////////////Game////////////////////////////////////

//player 1 movement
		
		if(player100 == 1){
			player1.move(0, -10);
		}
		else if(player100 == -1){
			player1.move(0, 10);
		}

		//PLAYER 1 HIT MOVEMENT
		if(player100 == 0){ player1.move(0, 0); }

		if (player1.getPosition().x <= 10){
			if(player100 == 2){
				player1.move(10, 0);
			}
		}

		//player 2 movement

		if(player200 == 1)
			player2.move(0, -10);
		else if(player200 == -1)
			player2.move(0, 10);

		if(player200 == 0){ player2.move(0, 0); }

		//PLAYER 2 HIT MOVEMENT
		if (player2.getPosition().x >= RENDERWIDTH - player2.getSize().x - 10){
			if(player200 == -2){
				player2.move(-10, 0);
			}
		}

		//player 1 and wall collision
		if(player1.getPosition().y <= 0)
			player1.setPosition(0, 0);
		if(player1.getPosition().y >= RENDERHEIGHT - player1.getSize().y)
			player1.setPosition(0, RENDERHEIGHT - player1.getSize().y);

		//PLAYER1 AND WALL BACK COLLISION
		if(player1.getPosition().x != 0)
			player1.move(-1,0);
		
		//PLAYER2 AND WALL BACK COLLISION
		if(player2.getPosition().x != RENDERWIDTH-player2.getSize().x)
			player2.move(1,0);
		
		//player 2 and wall collision
		if(player2.getPosition().y <= 0)
			player2.setPosition(RENDERWIDTH - player2.getSize().x, 0);
		if(player2.getPosition().y >= RENDERHEIGHT - player2.getSize().y)
			player2.setPosition(RENDERWIDTH - player2.getSize().x, RENDERHEIGHT - player2.getSize().y);

		//ball and wall collision
		if(ball.getPosition().y <= 0 || ball.getPosition().y >= RENDERHEIGHT - ballDiameter)
		{
			ballVelY *= -1;
			bounce.play();
		}

		//ball and player 1 collision
		if (ball.getPosition().x <= player1.getPosition().x + player1.getSize().x)
		{
			if ((ball.getPosition().y + ballDiameter >= player1.getPosition().y &&
					ball.getPosition().y + ballDiameter <= player1.getPosition().y + player1.getSize().y) ||
					(ball.getPosition().y <= player1.getPosition().y + player1.getSize().y &&
					ball.getPosition().y >= player1.getPosition().y))
			{
				if (player1.getPosition().x > 14)
				{
					ballVelX = (ballVelX - 2) * -1;
					ball.setFillColor(sf::Color(255, 0, 0));
					perfecthit.play();
				}
				else if (player1.getPosition().x <= 14)
				{
					ballVelX = (ballVelX - 1) * -1;
					ball.setFillColor(sf::Color(0, 0, 255));
					bounce.play();
				}
			}
			else
			{
				ball.setFillColor(sf::Color(255, 255, 255));
				point.play();
				player2Score += 1;  
				ballX = RENDERWIDTH / 2 - ball.getRadius();
				if (BALLSPEED < 8){
					BALLSPEED += 0.2;
					}
				ballVelX = BALLSPEED;
				score2.setString(convertInt(player2Score));
				score2.setPosition(3 * RENDERWIDTH / 4 - score2.getLocalBounds().width, 0);

				if (p2Len > 40)
					p2Len -= 10;
				player2.setSize(sf::Vector2f(15, p2Len));
				if (p1Len < 100)
					p1Len += 10;
				player1.setSize(sf::Vector2f(15, p1Len));
				
			}
		}

		//ball and player 2 collision
		if (ball.getPosition().x + ballDiameter >= player2.getPosition().x)
		{
			if ((ball.getPosition().y + ballDiameter >= player2.getPosition().y &&
					ball.getPosition().y + ballDiameter <= player2.getPosition().y + player2.getSize().y) ||
					(ball.getPosition().y <= player2.getPosition().y + player2.getSize().y &&
					ball.getPosition().y >= player2.getPosition().y))
			{
				if (player2.getPosition().x < (RENDERWIDTH - 9 - player2.getSize().x))
				{
					ballVelX = (ballVelX + 2) * -1;
					ball.setFillColor(sf::Color(255, 0, 0));
					perfecthit.play();
				}
				else if (player2.getPosition().x >= (RENDERWIDTH - 9 - player2.getSize().x))
				{
					ballVelX = (ballVelX + 1) * -1;
					ball.setFillColor(sf::Color(0, 255, 0));
					bounce.play();
				}
			}
			else
			{
				ball.setFillColor(sf::Color(255, 255, 255));
				point.play();
				player1Score += 1;
				ballX = RENDERWIDTH / 2 - ball.getRadius();
				if (BALLSPEED < 8)
					BALLSPEED += 0.5;
				ballVelX = -BALLSPEED;
				score1.setString(convertInt(player1Score));
				if (p1Len > 40)
					p1Len -= 10;
				player1.setSize(sf::Vector2f(15, p1Len));
				if (p2Len < 100)
					p2Len += 10;
				player2.setSize(sf::Vector2f(15, p2Len));
			}
		}

		//ball position update
		ballX += ballVelX;
		ballY += ballVelY;
		ball.setPosition(ballX, ballY);

		//render updates
		pong.clear();
		pong.draw(score1);
		pong.draw(score2);
	
		pong.draw(player1);
		pong.draw(player2);
		pong.draw(ball);
		pong.display();

/////////////////////Finish Game/////////////////////////////////


    
      }

      cvDestroyAllWindows() ;
      cvReleaseImage(&imgTracking);
      cvReleaseCapture(&capture);     

      return 0;
}
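
// Sketch (not part of the original example): GetThresholdedImage() and
// trackObject() are called above but not shown. A typical GetThresholdedImage()
// for this kind of HSV color tracking looks like the function below; treating
// the six numbers as lower H,S,V followed by upper H,S,V is an assumption and
// may not match the original helper.
IplImage* GetThresholdedImage(IplImage* imgHSV,
		int lowerH, int lowerS, int lowerV,
		int upperH, int upperS, int upperV)
{
	// single-channel mask: white where the pixel falls inside the HSV range
	IplImage* imgThresh = cvCreateImage(cvGetSize(imgHSV), IPL_DEPTH_8U, 1);
	cvInRangeS(imgHSV,
			cvScalar(lowerH, lowerS, lowerV),
			cvScalar(upperH, upperS, upperV),
			imgThresh);
	return imgThresh;
}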
#if defined(WINDOWS_SYS)
	UINT WINAPI
	//DWORD WINAPI
#elif defined(POSIX_SYS)
	// using pthread
	void *
#endif
	ChessRecognition::HoughLineThread(
#if defined(WINDOWS_SYS)
	LPVOID
#elif defined(POSIX_SYS)
	void *
#endif
	Param) {
	// Worker thread function (Windows version) that actually runs in the background.
	// The class instance is passed in through the thread parameter.
	ChessRecognition *_TChessRecognition = (ChessRecognition *)Param;
	_TChessRecognition->_HoughLineBased = new HoughLineBased();

	CvSeq *_TLineX, *_TLineY;
	double _TH[] = { -1, -7, -15, 0, 15, 7, 1 };

	CvMat _TDoGX = cvMat(1, 7, CV_64FC1, _TH);
	CvMat* _TDoGY = cvCreateMat(7, 1, CV_64FC1);
	cvTranspose(&_TDoGX, _TDoGY); // transpose(&DoGx) -> DoGy

	double _TMinValX, _TMaxValX, _TMinValY, _TMaxValY, _TMinValT, _TMaxValT;
	int _TKernel = 1;

	// Initialize the images used for the Hough transform.
	IplImage *iplTemp = cvCreateImage(cvSize(_TChessRecognition->_Width, _TChessRecognition->_Height), IPL_DEPTH_32F, 1);                   
	IplImage *iplDoGx = cvCreateImage(cvGetSize(iplTemp), IPL_DEPTH_32F, 1);  
	IplImage *iplDoGy = cvCreateImage(cvGetSize(iplTemp), IPL_DEPTH_32F, 1);  
	IplImage *iplDoGyClone = cvCloneImage(iplDoGy);
	IplImage *iplDoGxClone = cvCloneImage(iplDoGx);
	IplImage *iplEdgeX = cvCreateImage(cvGetSize(iplTemp), 8, 1);
	IplImage *iplEdgeY = cvCreateImage(cvGetSize(iplTemp), 8, 1);

	CvMemStorage* _TStorageX = cvCreateMemStorage(0), *_TStorageY = cvCreateMemStorage(0);

	while (_TChessRecognition->_EnableThread != false) {
		// Grab the image; the mutex (critical section) keeps this in sync with the main loop.
		_TChessRecognition->_ChessBoardDetectionInternalImageProtectMutex.lock();
		//EnterCriticalSection(&(_TChessRecognition->cs));
		cvConvert(_TChessRecognition->_ChessBoardDetectionInternalImage, iplTemp);
		//LeaveCriticalSection(&_TChessRecognition->cs);
		_TChessRecognition->_ChessBoardDetectionInternalImageProtectMutex.unlock();

		// Apply the filters to detect lines along the X and Y axes.
		cvFilter2D(iplTemp, iplDoGx, &_TDoGX); // extract only the line responses.
		cvFilter2D(iplTemp, iplDoGy, _TDoGY);
		cvAbs(iplDoGx, iplDoGx);
		cvAbs(iplDoGy, iplDoGy);

		// Find the minimum and maximum values in each image and normalize.
		cvMinMaxLoc(iplDoGx, &_TMinValX, &_TMaxValX);
		cvMinMaxLoc(iplDoGy, &_TMinValY, &_TMaxValY);
		cvMinMaxLoc(iplTemp, &_TMinValT, &_TMaxValT);
		cvScale(iplDoGx, iplDoGx, 2.0 / _TMaxValX); // normalize.
		cvScale(iplDoGy, iplDoGy, 2.0 / _TMaxValY);
		cvScale(iplTemp, iplTemp, 2.0 / _TMaxValT);

		cvCopy(iplDoGy, iplDoGyClone);
		cvCopy(iplDoGx, iplDoGxClone);

		// Additional processing after non-maximum suppression (NMS).
		_TChessRecognition->_HoughLineBased->NonMaximumSuppression(iplDoGx, iplDoGyClone, _TKernel);
		_TChessRecognition->_HoughLineBased->NonMaximumSuppression(iplDoGy, iplDoGxClone, _TKernel);

		cvConvert(iplDoGx, iplEdgeY); // convert back to IPL_DEPTH_8U.
		cvConvert(iplDoGy, iplEdgeX);

		double rho = 1.0; // distance resolution in pixel-related units.
		double theta = 1.0; // angle resolution in degrees (converted to radians below).
		int threshold = 20;

		if (threshold == 0)
			threshold = 1;

		// Detect Hough lines on the extracted edges.
		_TLineX = cvHoughLines2(iplEdgeX, _TStorageX, CV_HOUGH_STANDARD, 1.0 * rho, CV_PI / 180 * theta, threshold, 0, 0);
		_TLineY = cvHoughLines2(iplEdgeY, _TStorageY, CV_HOUGH_STANDARD, 1.0 * rho, CV_PI / 180 * theta, threshold, 0, 0);

		// Convert the CvSeq results into vectors.
		_TChessRecognition->_Vec_ProtectionMutex.lock();
		_TChessRecognition->_HoughLineBased->CastSequence(_TLineX, _TLineY);
		_TChessRecognition->_Vec_ProtectionMutex.unlock();

		Sleep(2);
	}

	// Release the matrix.
	cvReleaseMat(&_TDoGY);

	// Release the images used for the internal computation.
	cvReleaseImage(&iplTemp);
	cvReleaseImage(&iplDoGx);
	cvReleaseImage(&iplDoGy);
	cvReleaseImage(&iplDoGyClone);
	cvReleaseImage(&iplDoGxClone);
	cvReleaseImage(&iplEdgeX);
	cvReleaseImage(&iplEdgeY);

	// Release the OpenCV memory storage used by cvHoughLines2.
	cvReleaseMemStorage(&_TStorageX);
	cvReleaseMemStorage(&_TStorageY);

	delete _TChessRecognition->_HoughLineBased;
#if defined(WINDOWS_SYS)
	_endthread();
#elif defined(POSIX_SYS)

#endif
	_TChessRecognition->_EndThread = true;
	return 0;
}
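
// Sketch (not part of the original example): one way a thread entry point like
// HoughLineThread() might be launched, under the same WINDOWS_SYS / POSIX_SYS
// macros. It assumes HoughLineThread is a public static member and that
// <process.h> (Windows) or <pthread.h> (POSIX) is included; the local handle
// variables are illustrative only, not members of the real class.
static void StartHoughLineThreadSketch(ChessRecognition *owner) {
#if defined(WINDOWS_SYS)
	// UINT (WINAPI *)(LPVOID) matches the _beginthreadex start-routine type.
	HANDLE threadHandle = (HANDLE)_beginthreadex(
		NULL, 0, ChessRecognition::HoughLineThread, owner, 0, NULL);
	(void)threadHandle;
#elif defined(POSIX_SYS)
	// void *(*)(void *) matches the pthread_create start-routine type.
	pthread_t threadId;
	pthread_create(&threadId, NULL, ChessRecognition::HoughLineThread, owner);
#endif
}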
Example #5
	int main(int argc, char** argv)
	{
		int row,col;
		IplImage* img = NULL;
		char* cha = "./montpellier_mission_images_floues_nettes/"; // base directory
	    DIR* rep = NULL;
	    struct dirent* fichierLu = NULL; /* Declaration of a pointer to the dirent structure. */
	    struct dirent* repLu = NULL; /* Declaration of a pointer to the dirent structure. */
	    char* ch;
	    rep = opendir(cha);
	    if (rep == NULL)
	    {
	        exit(1);
	    }
	    while((repLu = readdir(rep)) != NULL )
	    {
	    	while (strcmp(repLu->d_name, ".") == 0 || strcmp(repLu->d_name, "..") == 0)
	    	{
	    		repLu = readdir(rep);
	    		printf("%s\n",repLu->d_name);
	    	}
	    	DIR* rep2 = NULL;
	    	ch = concat(cha,repLu->d_name);
	    	rep2 = opendir(ch);
	    	char* c = ch;
	    	c = concat(ch,"/");
	    	while ((fichierLu = readdir(rep2)) != NULL )
	    	{
	    		if (strcmp(fichierLu->d_name, ".") != 0 && strcmp(fichierLu->d_name, "..") != 0)
				{
 					printf("Le fichier lu s'appelle %s\n", fichierLu->d_name);
 					
 					ch = concat(c,fichierLu->d_name);
 					img = cvLoadImage( ch , CV_LOAD_IMAGE_COLOR);
 					if (img == NULL)
					{
						printf("Oups j'ai eu un problème.\n");
							return -1;
					}
	
					IplImage* imgG = cvCloneImage(img);
					
					unsigned char *pRowC, *pRowG;
					for(row=0; row<img->height; row++)
					{
						pRowC = (unsigned char*)(img->imageData + row * img->widthStep);
						pRowG = (unsigned char*)(imgG->imageData + row * imgG->widthStep);
						for(col=0; col<img->width; col++)
						{
							// for the B channel of imgG
							*(pRowG + col*imgG->nChannels*sizeof(unsigned char)) = (unsigned char) (*(pRowC + col*img->nChannels*sizeof(unsigned char))* 0.114 + *(pRowC + col*img->nChannels*sizeof(unsigned char)+1)* 0.587 + *(pRowC + col*img->nChannels*sizeof(unsigned char)+2)* 0.299);
							// for the G channel of imgG
							*(pRowG + col*imgG->nChannels*sizeof(unsigned char) + 1) = (unsigned char) (*(pRowC + col*img->nChannels*sizeof(unsigned char))* 0.114 + *(pRowC + col*img->nChannels*sizeof(unsigned char)+1)* 0.587 + *(pRowC + col*img->nChannels*sizeof(unsigned char)+2)* 0.299);
							// for the R channel of imgG
							*(pRowG + col*imgG->nChannels*sizeof(unsigned char) + 2) = (unsigned char) (*(pRowC + col*img->nChannels*sizeof(unsigned char))* 0.114 + *(pRowC + col*img->nChannels*sizeof(unsigned char)+1)* 0.587 + *(pRowC + col*img->nChannels*sizeof(unsigned char)+2)* 0.299);
						}
					}
			
			
			
					IplImage* dst = NULL;
					IplImage* dst2 = NULL;
					dst = cvCreateImage(cvSize( imgG->width / 5, imgG->height / 5 ), imgG->depth,imgG->nChannels );
				    dst2 = cvCreateImage(cvSize( imgG->width / 5, imgG->height / 5 ), imgG->depth,imgG->nChannels );
	
					// processing

					IplImage* imgE = cvCloneImage(img);
					cvSobel(imgG, imgE, 1, 0, 3);
					cvThreshold(imgE,imgG,80,255,3);
					cvErode(imgG,imgE,NULL,1);
					cvThreshold(imgE,imgG,100,255,3);
					cvDilate(imgG,imgE,NULL,5);
	

					//resizing and saving
	
					cvResize(img, dst, CV_INTER_AREA );
					cvResize(imgE, dst2, CV_INTER_AREA );
					//cvSaveImage( NAME , imgE, 0);
					//cvSaveImage("image base.jpg", img, 0);


					// display
					cvShowImage("test2",dst);
					cvNamedWindow("test1", CV_WINDOW_AUTOSIZE);
					cvShowImage("test1",dst2);
	
					cvWaitKey(0);
			
					cvReleaseImage(&imgG);
					cvReleaseImage(&imgE);
					cvReleaseImage(&img);
					cvReleaseImage(&dst);
					cvReleaseImage(&dst2);
					cvDestroyWindow("test1");
					cvDestroyWindow("test2");
				}    	
	    	}
	    }
	    

	    if (closedir(rep) == -1)
	        exit(-1);

	    return 0;
	}
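
	// Sketch (not part of the original example): the concat() helper used above
	// is not shown. A plausible implementation allocates a new buffer and joins
	// the two strings (assuming <stdlib.h> and <string.h>); the real helper may
	// manage memory differently.
	char* concat(const char* s1, const char* s2)
	{
		char* result = (char*)malloc(strlen(s1) + strlen(s2) + 1); // +1 for '\0'
		if (result == NULL)
			return NULL;
		strcpy(result, s1);
		strcat(result, s2);
		return result;
	}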
// Function cvCreateFGDStatModel initializes foreground detection process
// parameters:
//      first_frame - frame from video sequence
//      parameters  - (optional) if NULL default parameters of the algorithm will be used
//      p_model     - pointer to CvFGDStatModel structure
CV_IMPL CvBGStatModel*
cvCreateFGDStatModel( IplImage* first_frame, CvFGDStatModelParams* parameters )
{
    CvFGDStatModel* p_model = 0;

    CV_FUNCNAME( "cvCreateFGDStatModel" );

    __BEGIN__;

    int i, j, k, pixel_count, buf_size;
    CvFGDStatModelParams params;

    if( !CV_IS_IMAGE(first_frame) )
        CV_ERROR( CV_StsBadArg, "Invalid or NULL first_frame parameter" );

    if (first_frame->nChannels != 3)
        CV_ERROR( CV_StsBadArg, "first_frame must have 3 color channels" );

    // Initialize parameters:
    if( parameters == NULL )
    {
        params.Lc      = CV_BGFG_FGD_LC;
        params.N1c     = CV_BGFG_FGD_N1C;
        params.N2c     = CV_BGFG_FGD_N2C;

        params.Lcc     = CV_BGFG_FGD_LCC;
        params.N1cc    = CV_BGFG_FGD_N1CC;
        params.N2cc    = CV_BGFG_FGD_N2CC;

        params.delta   = CV_BGFG_FGD_DELTA;

        params.alpha1  = CV_BGFG_FGD_ALPHA_1;
        params.alpha2  = CV_BGFG_FGD_ALPHA_2;
        params.alpha3  = CV_BGFG_FGD_ALPHA_3;

        params.T       = CV_BGFG_FGD_T;
        params.minArea = CV_BGFG_FGD_MINAREA;

        params.is_obj_without_holes = 1;
        params.perform_morphing     = 1;
    }
    else
    {
        params = *parameters;
    }

    CV_CALL( p_model = (CvFGDStatModel*)cvAlloc( sizeof(*p_model) ));
    memset( p_model, 0, sizeof(*p_model) );
    p_model->type = CV_BG_MODEL_FGD;
    p_model->release = (CvReleaseBGStatModel)icvReleaseFGDStatModel;
    p_model->update = (CvUpdateBGStatModel)icvUpdateFGDStatModel;
    p_model->params = params;

    // Initialize storage pools:
    pixel_count = first_frame->width * first_frame->height;

    buf_size = pixel_count*sizeof(p_model->pixel_stat[0]);
    CV_CALL( p_model->pixel_stat = (CvBGPixelStat*)cvAlloc(buf_size) );
    memset( p_model->pixel_stat, 0, buf_size );

    buf_size = pixel_count*params.N2c*sizeof(p_model->pixel_stat[0].ctable[0]);
    CV_CALL( p_model->pixel_stat[0].ctable = (CvBGPixelCStatTable*)cvAlloc(buf_size) );
    memset( p_model->pixel_stat[0].ctable, 0, buf_size );

    buf_size = pixel_count*params.N2cc*sizeof(p_model->pixel_stat[0].cctable[0]);
    CV_CALL( p_model->pixel_stat[0].cctable = (CvBGPixelCCStatTable*)cvAlloc(buf_size) );
    memset( p_model->pixel_stat[0].cctable, 0, buf_size );

    for(     i = 0, k = 0; i < first_frame->height; i++ ) {
        for( j = 0;        j < first_frame->width;  j++, k++ )
        {
            p_model->pixel_stat[k].ctable = p_model->pixel_stat[0].ctable + k*params.N2c;
            p_model->pixel_stat[k].cctable = p_model->pixel_stat[0].cctable + k*params.N2cc;
        }
    }

    // Init temporary images:
    CV_CALL( p_model->Ftd = cvCreateImage(cvSize(first_frame->width, first_frame->height), IPL_DEPTH_8U, 1));
    CV_CALL( p_model->Fbd = cvCreateImage(cvSize(first_frame->width, first_frame->height), IPL_DEPTH_8U, 1));
    CV_CALL( p_model->foreground = cvCreateImage(cvSize(first_frame->width, first_frame->height), IPL_DEPTH_8U, 1));

    CV_CALL( p_model->background = cvCloneImage(first_frame));
    CV_CALL( p_model->prev_frame = cvCloneImage(first_frame));
    CV_CALL( p_model->storage = cvCreateMemStorage());

    __END__;

    if( cvGetErrStatus() < 0 )
    {
        CvBGStatModel* base_ptr = (CvBGStatModel*)p_model;

        if( p_model && p_model->release )
            p_model->release( &base_ptr );
        else
            cvFree( &p_model );
        p_model = 0;
    }

    return (CvBGStatModel*)p_model;
}
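
// Usage sketch (not part of the original example): how a model created by
// cvCreateFGDStatModel() is typically driven frame by frame with the legacy
// cvUpdateBGStatModel()/cvReleaseBGStatModel() API. Depending on the OpenCV
// version, cvUpdateBGStatModel() may also accept a learning-rate argument.
void fgdDemo( CvCapture* capture )
{
    IplImage* frame = cvQueryFrame( capture );
    if( !frame )
        return;

    // NULL parameters -> the defaults filled in above are used
    CvBGStatModel* bg_model = cvCreateFGDStatModel( frame, NULL );
    cvNamedWindow( "foreground", 1 );

    while( (frame = cvQueryFrame( capture )) != NULL )
    {
        cvUpdateBGStatModel( frame, bg_model );            // update background statistics
        cvShowImage( "foreground", bg_model->foreground ); // 8-bit mask of moving pixels
        if( (char)cvWaitKey(10) == 27 )                    // ESC quits
            break;
    }

    cvReleaseBGStatModel( &bg_model );
}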
Example #7
static void showImage()
{
    IplImage* image_clone = cvCloneImage(image);
    char distance_string[32];
    CvFont dfont;
    float hscale      = 0.7f;
    float vscale      = 0.7f;
    float italicscale = 0.0f;
    int  thickness    = 1;

    std::string objectLabel;
    CvFont      dfont_label;
    float       hscale_label = 0.5f;
    float       vscale_label = 0.5f;
    CvSize      text_size;
    int         baseline     = 0;

    cvInitFont(&dfont_label, CV_FONT_HERSHEY_COMPLEX, hscale_label, vscale_label, italicscale, thickness, CV_AA);
    objectLabel = car_fused_objects.type;
    cvGetTextSize(objectLabel.data(),
                  &dfont_label,
                  &text_size,
                  &baseline);

    /*
     * Plot obstacle frame
     */
    showRects(image_clone,
              car_fused_objects.obj,
              ratio,
              cvScalar(255.0,255.0,0.0));
    showRects(image_clone,
              pedestrian_fused_objects.obj,
              ratio,
              cvScalar(0.0,255.0,0.0));


    /*
     * Plot car distance data on image
     */
    for (unsigned int i = 0; i < car_fused_objects.obj.size(); i++) {
      if(!isNearlyNODATA(car_fused_objects.obj.at(i).range)) {
          int rect_x      = car_fused_objects.obj.at(i).rect.x;
          int rect_y      = car_fused_objects.obj.at(i).rect.y;
          int rect_width  = car_fused_objects.obj.at(i).rect.width;
          int rect_height = car_fused_objects.obj.at(i).rect.height;
          float range     = car_fused_objects.obj.at(i).range;

          /* put label */
          CvPoint labelOrg = cvPoint(rect_x - OBJ_RECT_THICKNESS,
                                     rect_y - baseline - OBJ_RECT_THICKNESS);
          cvRectangle(image_clone,
                      cvPoint(labelOrg.x + 0, labelOrg.y + baseline),
                      cvPoint(labelOrg.x + text_size.width, labelOrg.y - text_size.height),
                      CV_RGB(0, 0, 0), // label background color is black
                      -1, 8, 0
                      );
          cvPutText(image_clone,
                    objectLabel.data(),
                    labelOrg,
                    &dfont_label,
                    CV_RGB(255, 255, 255) // label text color is white
                    );

          /* put distance data */
            cvRectangle(image_clone,
                        cv::Point(rect_x + (rect_width/2) - (((int)log10(range/100)+1) * 5 + 45),
                                  rect_y + rect_height + 5),
                        cv::Point(rect_x + (rect_width/2) + (((int)log10(range/100)+1) * 8 + 38),
                                  rect_y + rect_height + 30),
                        cv::Scalar(255,255,255), -1);
            cvInitFont (&dfont, CV_FONT_HERSHEY_COMPLEX , hscale, vscale, italicscale, thickness, CV_AA);
            sprintf(distance_string, "%.2f m", range / 100); //unit of length is meter
            cvPutText(image_clone,
                      distance_string,
                      cvPoint(rect_x + (rect_width/2) - (((int)log10(range/100)+1) * 5 + 40),
                              rect_y + rect_height + 25),
                      &dfont,
                      CV_RGB(255, 0, 0));
        }
    }

    objectLabel = pedestrian_fused_objects.type;
    cvGetTextSize(objectLabel.data(),
                  &dfont_label,
                  &text_size,
                  &baseline);

    /*
     * Plot pedestrian distance data on image
     */
    for (unsigned int i = 0; i < pedestrian_fused_objects.obj.size(); i++) {
      if(!isNearlyNODATA(pedestrian_fused_objects.obj.at(i).range)) {
          int rect_x      = pedestrian_fused_objects.obj.at(i).rect.x;
          int rect_y      = pedestrian_fused_objects.obj.at(i).rect.y;
          int rect_width  = pedestrian_fused_objects.obj.at(i).rect.width;
          int rect_height = pedestrian_fused_objects.obj.at(i).rect.height;
          float range     = pedestrian_fused_objects.obj.at(i).range;

          /* put label */
          CvPoint labelOrg = cvPoint(rect_x - OBJ_RECT_THICKNESS,
                                     rect_y - baseline - OBJ_RECT_THICKNESS);
          cvRectangle(image_clone,
                      cvPoint(labelOrg.x + 0, labelOrg.y + baseline),
                      cvPoint(labelOrg.x + text_size.width, labelOrg.y - text_size.height),
                      CV_RGB(0, 0, 0), // label background color is black
                      -1, 8, 0
                      );
          cvPutText(image_clone,
                    objectLabel.data(),
                    labelOrg,
                    &dfont_label,
                    CV_RGB(255, 255, 255) // label text color is white
                    );

          /* put distance data */
            cvRectangle(image_clone,
                        cv::Point(rect_x + (rect_width/2) - (((int)log10(range/100)+1) * 5 + 45),
                                  rect_y + rect_height + 5),
                        cv::Point(rect_x + (rect_width/2) + (((int)log10(range/100)+1) * 8 + 38),
                                  rect_y + rect_height + 30),
                        cv::Scalar(255,255,255), -1);
            cvInitFont (&dfont, CV_FONT_HERSHEY_COMPLEX , hscale, vscale, italicscale, thickness, CV_AA);
            sprintf(distance_string, "%.2f m", range / 100); //unit of length is meter
            cvPutText(image_clone,
                      distance_string,
                      cvPoint(rect_x + (rect_width/2) - (((int)log10(range/100)+1) * 5 + 40),
                              rect_y + rect_height + 25),
                      &dfont,
                      CV_RGB(255, 0, 0));
        }
    }

    /*
     * Show image
     */
    if (cvGetWindowHandle(window_name.c_str()) != NULL) // Guard against drawing into a window that was destroyed via its close button
      {
        cvShowImage(window_name.c_str(), image_clone);
        cvWaitKey(2);
      }
    cvReleaseImage(&image_clone);
}
void imageCallback(const sensor_msgs::ImageConstPtr& msg)
{
	//bridge that will transform the ROS image message back into an OpenCV image
  sensor_msgs::CvBridge bridge;
  fprintf(stderr, "\n callback function \n");
  
  //publish data (obstacle waypoints) back to the boat
  ros::NodeHandle n;
  std_msgs::Float32 xWaypoint_msg;         // X coordinate obstacle message object
  std_msgs::Float32 zWaypoint_msg;         // Z coordinate obstacle message object
  //std::stringstream ss;
  //publish the waypoint data             
  ros::Publisher Xwaypoint_info_pub = n.advertise<std_msgs::Float32>("Xwaypoint_info", 0.01);
  ros::Publisher Zwaypoint_info_pub = n.advertise<std_msgs::Float32>("Zwaypoint_info", 0.01);
  
  /***********************************************************************/
    //live image coming streamed straight from the boat's camera
    IplImage* boatFront = bridge.imgMsgToCv(msg, "bgr8");
    //The boat takes flipped images, so you need to flip them back to normal
    cvFlip(boatFront, boatFront, 0);
    IplImage* backUpImage = cvCloneImage(boatFront);
    boatFront->origin = IPL_ORIGIN_TL;   //sets image origin to top left corner
    int X = boatFront->height;
    int Y = boatFront->width;
    //cout << "height " << X << endl;
    //cout << "width " << Y << endl;    
    
    /*********************Image Filtering variables****************************/
    //these images are used for segmenting objects from the overall background 
    //create a one channel image to convert from RGB to GRAY
    IplImage* grayImage = cvCreateImage(cvGetSize(boatFront),IPL_DEPTH_8U,1);
    //convert grayImage to binary (final step after converting from GRAY)
    IplImage* bwImage = cvCreateImage(cvGetSize(grayImage),IPL_DEPTH_8U,1);
    //variables used for the flood fill segmentation
    CvPoint seed_point = cvPoint(boatFront->height/2 + 70,0);       //not sure how this variable works
    CvScalar color = CV_RGB(250,0,0);
    CvMemStorage* grayStorage = NULL;     //memory storage for contour sequence
    CvSeq* contours = 0;
    // get blobs and filter them using their area
    //IplConvKernel* morphKernel = cvCreateStructuringElementEx(5, 5, 1, 1, CV_SHAPE_RECT, NULL);
    //IplImage* original, *originalThr;
    //IplImage* segmentated = cvCreateImage(cvGetSize(boatFront), 8, 1);
    //unsigned int blobNumber = 0;
    //IplImage* labelImg = cvCreateImage(cvGetSize(boatFront), IPL_DEPTH_LABEL, 1);
    CvMoments moment;
    
    /***********************************************************************/
    //boat's edge distance from the camera. This is used for visual calibration
    //to know the distance from the boat to the nearest obstacles.
    //With respect to the mounted camera, distance is 21 inches (0.5334 m) side to side
    //and 15 inches (0.381 m).
    //float boatFrontDistance = 0.381;    //distance in meters
    //float boatSideDistance = 0.5334;    //distance in meters
    // These variables tell the distance from the center bottom of the image
    // (the camera) to the square surrounding the obstacle
    float obstacleDistance = 0.0;            //Euclidean distance to object
    float obstacleHeading = 0.0;
    //distance variables from the camera calibration matrix
    int xPixel = 0;       //pixels from left to right
    int yPixel = 0;       //pixels from bottom to top
    float zObstacleDistance = 0;    //object distance from the camera
    float xObstacleDistance = 0;
    float yObstacleDistance = 0.1143;  //distance in meters from water to camera.
                          //it is going to stay constant, assuming the boat barely moves up and down in the water
    
    int pixelsNumber = 50;  //number of pixels for an n x n matrix and # of neighbors
    const int arraySize = pixelsNumber;
    const int threeArraySize = pixelsNumber;
    //if n gets changed, then the algorithm might have to be
    //recalibrated. Try to keep it constant
    //these variables are used for the k nearest neighbors
    //int accuracy;
    //reponses for each of the classifications
    float responseWaterH, responseWaterS, responseWaterV; 
    float responseGroundH, responseGroundS, responseGroundV;
    float responseSkyH, responseSkyS, responseSkyV;
    float averageHue = 0.0;
    float averageSat = 0.0;
    float averageVal = 0.0;
    CvMat* trainClasses = cvCreateMat( pixelsNumber, 1, CV_32FC1 );
    CvMat* trainClasses2 = cvCreateMat( pixelsNumber, 1, CV_32FC1 );
    //CvMat sample = cvMat( 1, 2, CV_32FC1, _sample );
    //used with the classifier 
    CvMat* trainClassesH = cvCreateMat( pixelsNumber, 1, CV_32FC1 );
    CvMat* trainClassesS = cvCreateMat( pixelsNumber, 1, CV_32FC1 );
    CvMat* trainClassesV = cvCreateMat( pixelsNumber, 1, CV_32FC1 );
    //CvMat* trainClasses2 = cvCreateMat( pixelsNumber, 1, CV_32FC1 );
    //CvMat sample = cvMat( 1, 2, CV_32FC1, _sample );
    //used with the classifier 
    /*CvMat* nearestWaterH = cvCreateMat(1, pixelsNumber, CV_32FC1);
    CvMat* nearestWaterS = cvCreateMat(1, pixelsNumber, CV_32FC1);
    CvMat* nearestWaterV = cvCreateMat(1, pixelsNumber, CV_32FC1);
    CvMat* nearestGroundH = cvCreateMat(1, pixelsNumber, CV_32FC1);
    CvMat* nearestGroundS = cvCreateMat(1, pixelsNumber, CV_32FC1);
    CvMat* nearestGroundV = cvCreateMat(1, pixelsNumber, CV_32FC1);
    CvMat* nearestSkyH = cvCreateMat(1, pixelsNumber, CV_32FC1);
    CvMat* nearestSkyS = cvCreateMat(1, pixelsNumber, CV_32FC1);
    CvMat* nearestSkyV = cvCreateMat(1, pixelsNumber, CV_32FC1);
    //Distance
    CvMat* distanceWaterH = cvCreateMat(1, pixelsNumber, CV_32FC1);
    CvMat* distanceWaterS = cvCreateMat(1, pixelsNumber, CV_32FC1);
    CvMat* distanceWaterV = cvCreateMat(1, pixelsNumber, CV_32FC1);
    CvMat* distanceGroundH = cvCreateMat(1, pixelsNumber, CV_32FC1);
    CvMat* distanceGroundS = cvCreateMat(1, pixelsNumber, CV_32FC1);
    CvMat* distanceGroundV = cvCreateMat(1, pixelsNumber, CV_32FC1);
    CvMat* distanceSkyH = cvCreateMat(1, pixelsNumber, CV_32FC1);
    CvMat* distanceSkyS = cvCreateMat(1, pixelsNumber, CV_32FC1);
    CvMat* distanceSkyV = cvCreateMat(1, pixelsNumber, CV_32FC1); */
    //these variables are used to traverse the picture by blocks of n x n pixels at
    //a time. 
    //Index(0,0) does not exist, so make sure kj and ki start from 1 (in the
    //right way, of course)
    //x and y are the dimensions of the local patch of pixels
    int x = (boatFront->height)/2 + 70;//(boatFront->height)/2.5 + 105; 
    int y = 0;  
    int skyX = 0; 
    int skyY = 0;
    int row1 = 0;
    int column1 = 0;
    //these two variables are used in order to divide the grid in the
    //resample segmentation part
    int xDivisor = 200;
    int yDivisor = 200;
    //ground sample
    //CvMat* groundTrainingHue = cvCreateMat(threeArraySize,arraySize,CV_32FC1);
    //CvMat* groundTrainingSat = cvCreateMat(threeArraySize,arraySize,CV_32FC1);
    //CvMat* groundTrainingVal = cvCreateMat(threeArraySize,arraySize,CV_32FC1);
    //water sample
    CvMat* waterTrainingHue = cvCreateMat(threeArraySize,arraySize,CV_32FC1);
    CvMat* waterTrainingSat = cvCreateMat(threeArraySize,arraySize,CV_32FC1);
    CvMat* waterTrainingVal = cvCreateMat(threeArraySize,arraySize,CV_32FC1);
    //n x n sample patch taken from the picture
    CvMat* sampleHue = cvCreateMat(1,arraySize,CV_32FC1);
    CvMat* sampleSat = cvCreateMat(1,arraySize,CV_32FC1);
    CvMat* sampleVal = cvCreateMat(1,arraySize,CV_32FC1);
    CvMat* resampleHue0 = cvCreateMat(threeArraySize,arraySize,CV_32FC1);
    CvMat* resampleSat0 = cvCreateMat(threeArraySize,arraySize,CV_32FC1);
    CvMat* resampleVal0 = cvCreateMat(threeArraySize,arraySize,CV_32FC1);
    CvMat* resampleHue = cvCreateMat(boatFront->height/xDivisor,boatFront->width/yDivisor,CV_32FC1);
    CvMat* resampleSat = cvCreateMat(boatFront->height/xDivisor,boatFront->width/yDivisor,CV_32FC1);
    CvMat* resampleVal = cvCreateMat(boatFront->height/xDivisor,boatFront->width/yDivisor,CV_32FC1);
    int xDiv = 20;
    int yDiv = 20;
    CvMat* resampleHue2 = cvCreateMat(boatFront->height/xDiv,boatFront->width/yDiv,CV_32FC1);
    CvMat* resampleSat2 = cvCreateMat(boatFront->height/xDiv,boatFront->width/yDiv,CV_32FC1);
    CvMat* resampleVal2 = cvCreateMat(boatFront->height/xDiv,boatFront->width/yDiv,CV_32FC1);
    //sky training sample
    CvMat* skyTrainingHue = cvCreateMat(arraySize,arraySize,CV_32FC1);
    CvMat* skyTrainingSat = cvCreateMat(arraySize,arraySize,CV_32FC1);
    CvMat* skyTrainingVal = cvCreateMat(arraySize,arraySize,CV_32FC1);
    //initialize each matrix element to zero for ease of use
    //cvZero(groundTrainingHue);
    //cvZero(groundTrainingSat);
    //cvZero(groundTrainingVal);
    cvZero(waterTrainingHue);
    cvZero(waterTrainingSat);
    cvZero(waterTrainingVal);
    cvZero(sampleHue);
    cvZero(sampleSat);
    cvZero(sampleVal);
    cvZero(resampleHue0);
    cvZero(resampleSat0);
    cvZero(resampleVal0);
    cvZero(resampleHue);
    cvZero(resampleSat);
    cvZero(resampleVal);
    cvZero(skyTrainingHue);
    cvZero(skyTrainingSat);
    cvZero(skyTrainingVal);    
    //Stores the votes for each channel (whether it belongs to water or not)
    //1 is part of water, 0 not part of water
    //if sum of votes is bigger than 1/2 the number of elements, then it belongs to water
    int votesSum = 0;
    int comparator[3];        //used when only three votes are needed
    //int comparatorTwo [3][3];    //used when six votes are needed
    //initial sum of votes is zero
    //An error occurs if both matrices are initialized inside a single for loop. Not sure why.
    for(int i = 0; i < 3; i++)
    {   
        comparator[i] = 0;
    }
 
    /***********************************************************************/
    //Convert from RGB to HSV to control the brightness of the objects.
    //work with reflection
    /*Sky recognition. Might be useful for detecting reflection on the water. If
      the sky is detected, and the reflection has the same characteristics as
      something below the horizon, that "something" might be water. Assume the sky
      won't go below the horizon
    */
    //convert from RGB to HSV
    cvCvtColor(boatFront, boatFront, CV_BGR2HSV);
    cvCvtColor(backUpImage, backUpImage, CV_BGR2HSV);
    HsvImage I(boatFront);
    HsvImage IBackUp(backUpImage);
    //Sky detection
    /*
    for (int i=0; i<boatFront->height;i++)
    {
        for (int j=0; j<boatFront->width;j++)
        {
        //if something is bright enough, consider it sky and store the
        //value. HSV values go from 0 to 180 ... RGB goes from 0 to 255
            if (((I[i][j].v >= 180) && (I[i][j].s <= 16)))
                // && ((I[i][j].h >=10)))) //&& (I[i][j].h <= 144))))
            {
                //The HSV values vary between 0 and 1
                cvmSet(skyTrainingHue,skyX,skyY,I[i][j].h);
                cvmSet(skyTrainingSat,skyX,skyY,I[i][j].s);
                cvmSet(skyTrainingVal,skyX,skyY,I[i][j].v);
                //I[i][j].h = 0.3*180;       //H (color)
                //I[i][j].s = 0.3*180;          //S (color intensity)
                //I[i][j].v = 0.6*180;          //V (brightness)
                if (skyY == pixelsNumber-1)
                {
                   if (skyX == pixelsNumber-1)
                     skyX = 1;
                   else
                     skyX = skyX + 1;
                   skyY = 1;
                }
                else
                  skyY = skyY + 1;
           }   
        }
    }
    
    /***********************************************************************/
    //offline input pictures. Samples of water properties are taken from these 
    //pictures to get a range of values for H, S, V that will be stored into a 
    //pre-defined classifier
    IplImage* imageSample1 = cvLoadImage("20110805_032255.jpg");
    cvSetImageROI(imageSample1, cvRect(0,0,imageSample1->height/0.5,imageSample1->width/1.83));
    cvCvtColor(imageSample1, imageSample1, CV_BGR2HSV);
    HsvImage I1(imageSample1);
    IplImage* imageSample2 = cvLoadImage("20110805_032257.jpg");
    cvCvtColor(imageSample2, imageSample2, CV_BGR2HSV);
    HsvImage I2(imageSample2);
    IplImage* imageSample3 = cvLoadImage("20110805_032259.jpg");
    cvCvtColor(imageSample3, imageSample3, CV_BGR2HSV);
    HsvImage I3(imageSample3);
    IplImage* imageSample4 = cvLoadImage("20110805_032301.jpg");
    cvCvtColor(imageSample4, imageSample4, CV_BGR2HSV);
    HsvImage I4(imageSample4);
    IplImage* imageSample5 = cvLoadImage("20110805_032303.jpg");
    cvCvtColor(imageSample5, imageSample5, CV_BGR2HSV);
    HsvImage I5(imageSample5);
    IplImage* imageSample6 = cvLoadImage("20110805_032953.jpg");
    cvCvtColor(imageSample6, imageSample6, CV_BGR2HSV);
    HsvImage I6(imageSample6);
    IplImage* imageSample7 = cvLoadImage("20110805_032955.jpg");
    cvCvtColor(imageSample7, imageSample7, CV_BGR2HSV);
    HsvImage I7(imageSample7);
    IplImage* imageSample8 = cvLoadImage("20110805_032957.jpg");
    cvCvtColor(imageSample8, imageSample8, CV_BGR2HSV);
    HsvImage I8(imageSample8);
    IplImage* imageSample9 = cvLoadImage("20110805_032959.jpg");
    cvCvtColor(imageSample9, imageSample9, CV_BGR2HSV);
    HsvImage I9(imageSample9);
    IplImage* imageSample10 = cvLoadImage("20110805_033001.jpg");
    cvCvtColor(imageSample10, imageSample10, CV_BGR2HSV);
    HsvImage I10(imageSample10);
    IplImage* imageSample11 = cvLoadImage("20110805_033009.jpg");
    cvCvtColor(imageSample11, imageSample11, CV_BGR2HSV);
    HsvImage I11(imageSample11);
    IplImage* imageSample12 = cvLoadImage("20110805_033011.jpg");
    cvCvtColor(imageSample12, imageSample12, CV_BGR2HSV);
    HsvImage I12(imageSample12);
    //IplImage* imageSample13 = cvLoadImage("20110812_110924.jpg");
    //cvCvtColor(imageSample13, imageSample13, CV_BGR2HSV);
    //HsvImage I13(imageSample13);
    
    for (int i=0; i < threeArraySize; i++)
    {
        for (int j=0; j < arraySize; j++)
        {
            row1 = ceil(X/1.2866)+ceil(X/5.237)+i+ceil(-X/3.534545455) + ceil(X/4.8);
            column1 = ceil(Y/7.0755)+ceil(Y/21.01622)+j+ceil(X/1.495384615);
            averageHue = (I1[row1][column1].h + I2[row1][column1].h + I3[row1][column1].h + I4[row1][column1].h + I5[row1][column1].h + 	
            I6[row1][column1].h + I7[row1][column1].h + I8[row1][column1].h + I9[row1][column1].h + I10[row1][column1].h + I11[row1][column1].h + I12[row1][column1].h) / 12;
            averageSat = (I1[row1][column1].s + I2[row1][column1].s + I3[row1][column1].s + I4[row1][column1].s + I5[row1][column1].s + 
            I6[row1][column1].s + I7[row1][column1].s + I8[row1][column1].s + I9[row1][column1].s + I10[row1][column1].s + I11[row1][column1].s + I12[row1][column1].s) / 12;
            averageVal = (I1[row1][column1].v + I2[row1][column1].v + I3[row1][column1].v + I4[row1][column1].v + I5[row1][column1].v + 
            I6[row1][column1].v + I7[row1][column1].v + I8[row1][column1].v + I9[row1][column1].v + I10[row1][column1].v + I11[row1][column1].v + I12[row1][column1].v) / 12;
            //water patch sample (n X n matrix)
            cvmSet(waterTrainingHue,i,j,averageHue);
            cvmSet(waterTrainingSat,i,j,averageSat);
            cvmSet(waterTrainingVal,i,j,averageVal);  
             //patch is red (this is for me to know where the ground patch sample is)
            //I[row1][column1].h = 0;
            //I[row1][column1].s = 255;
            //I[row1][column1].v = 255;
        }
    }
    //creating a training sample from an image taken on the fly
    row1 = 0;
    column1 = 0;
    for (int i=0; i<pixelsNumber; i++)
    {
        for (int j=0; j<pixelsNumber; j++)
        {
           row1 = ceil(X/1.2866)+ceil(X/5.237)+i+ceil(-X/3.534545455) + ceil(X/4.8);
           column1 = ceil(Y/7.0755)+ceil(Y/21.01622)+j+ceil(X/1.495384615);
           cvmSet(trainClassesH,i,0,I[row1][column1].h);
           cvmSet(trainClassesS,i,0,I[row1][column1].s);
           cvmSet(trainClassesV,i,0,I[row1][column1].v);
           
        }
    }
    //order the water samples in ascending order in order to know the range
    cvSort(waterTrainingHue, waterTrainingHue, CV_SORT_ASCENDING);
    cvSort(waterTrainingSat, waterTrainingSat, CV_SORT_ASCENDING);
    cvSort(waterTrainingVal, waterTrainingVal, CV_SORT_ASCENDING);
    // find the maximum and minimum values in the array to create a range
    int maxH = cvmGet(waterTrainingHue,0,0);
    int maxS = cvmGet(waterTrainingSat,0,0);
    int maxV = cvmGet(waterTrainingVal,0,0);
    int minH = cvmGet(waterTrainingHue,0,0);
    int minS = cvmGet(waterTrainingSat,0,0);
    int minV = cvmGet(waterTrainingVal,0,0);
    for (int i=0; i < threeArraySize; i++)
    {
        for (int j=0; j < arraySize; j++)
        {
            if (cvmGet(waterTrainingHue,i,j) > maxH)
                maxH = cvmGet(waterTrainingHue,i,j);
            if (cvmGet(waterTrainingSat,i,j) > maxS)
                maxS = cvmGet(waterTrainingSat,i,j);
            if (cvmGet(waterTrainingVal,i,j) > maxV)
                maxV = cvmGet(waterTrainingVal,i,j);
            if (cvmGet(waterTrainingHue,i,j) < minH)
                minH = cvmGet(waterTrainingHue,i,j);
            if (cvmGet(waterTrainingSat,i,j) < minS)
                minS = cvmGet(waterTrainingSat,i,j);
            if (cvmGet(waterTrainingVal,i,j) < minV)
                minV = cvmGet(waterTrainingVal,i,j);
        }
    }
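    // Note: the loops above reduce the offline water samples to simple
    // [min, max] ranges per HSV channel; the same min/max reduction is repeated
    // below for a patch sampled from the live frame, and those live ranges drive
    // the 2-of-3 vote (comparator[] and votesSum) that marks pixels as water.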

    /*********** Main loop. It traverses through the picture**********/
    
    /******************** Live water samples *******************************/
    //learn how "current water" looks like on the fly
    row1 = 0;
    column1 = 0;
    for (int i=0; i<pixelsNumber; i++)
    {
        for (int j=0; j<pixelsNumber; j++)
        {
           //front of boat might appear in the image. Account for that
           row1 = ceil(X/1.2866)+ceil(X/5.237)+i+ceil(-X/3.534545455) + ceil(X/4.8) - 55;
           column1 = ceil(Y/7.0755)+ceil(Y/21.01622)+j+ceil(X/1.495384615);
           cvmSet(resampleHue0,i,j,I[row1][column1].h);
           cvmSet(resampleSat0,i,j,I[row1][column1].s);
           cvmSet(resampleVal0,i,j,I[row1][column1].v);
           //visualize "resample" patch
          // I[row1][column1].h = 0;
           //I[row1][column1].s = 0;
           //I[row1][column1].v = 0;
        }
    }
    //order the water samples in ascending order in order to know the range
    cvSort(resampleHue0, resampleHue0, CV_SORT_ASCENDING);
    cvSort(resampleSat0, resampleSat0, CV_SORT_ASCENDING);
    cvSort(resampleVal0, resampleVal0, CV_SORT_ASCENDING);
    // find the maximum and minimum values in the array to create a range
    int maxH0 = cvmGet(resampleHue0,0,0);
    int maxS0 = cvmGet(resampleSat0,0,0);
    int maxV0 = cvmGet(resampleVal0,0,0);
    int minH0 = cvmGet(resampleHue0,0,0);
    int minS0 = cvmGet(resampleSat0,0,0);
    int minV0 = cvmGet(resampleVal0,0,0);
    for (int i=0; i < threeArraySize; i++)
    {
        for (int j=0; j < arraySize; j++)
        {
            if (cvmGet(resampleHue0,i,j) > maxH0)
                maxH0 = cvmGet(resampleHue0,i,j);
            if (cvmGet(resampleSat0,i,j) > maxS0)
                maxS0 = cvmGet(resampleSat0,i,j);
            if (cvmGet(resampleVal0,i,j) > maxV0)
                maxV0 = cvmGet(resampleVal0,i,j);
            if (cvmGet(resampleHue0,i,j) < minH0)
                minH0 = cvmGet(resampleHue0,i,j);
            if (cvmGet(resampleSat0,i,j) < minS0)
                minS0 = cvmGet(resampleSat0,i,j);
            if (cvmGet(resampleVal0,i,j) < minV0)
                minV0 = cvmGet(resampleVal0,i,j);
        }
    }
    for(int i = 0; i < 3; i++)
    {   
        comparator[i] = 0;
    }
    //int counter = 0;
    column1 = 0;
    row1 = 0;
    x = boatFront->height/2 + 70;
    y = 0;
    while (x < X-1)
    {
        //take a sample from the picture; it must be determined whether
        //it is water or ground
        for (int i = 0; i<6;i++)
        {
            column1 = y+i;
            if (column1 > Y-1)
                column1 = Y-1;
            cvmSet(sampleHue,0,i,I[x][column1].h);
            cvmSet(sampleSat,0,i,I[x][column1].s);
            cvmSet(sampleVal,0,i,I[x][column1].v);
        }
        for (int i=0;i<6;i++)
        {
            for (int j=0;j<6;j++)
            {
                if ((minH0 < cvmGet(sampleHue,0,j)) && (maxH0 > cvmGet(sampleHue,0,j)))
                    //mark water samples as green
                    comparator[0] = 1;
                else
                    comparator[0] = 0;
                if ((minS0 < cvmGet(sampleSat,0,j)) && (maxS0 > cvmGet(sampleSat,0,j)))
                //mark water samples as green
                    comparator[1] = 1;
                else
                    comparator[1] = 0;
                if ((minV0 < cvmGet(sampleVal,0,j)) && (maxV0 > cvmGet(sampleVal,0,j)))
                //mark water samples as red
                    comparator[2] = 1;
                else
                    comparator[2] = 0;
                //count votes
                for (int i3=0; i3 < 3; i3++)
                    votesSum = votesSum + comparator[i3];
                if (votesSum > 1)
                {
                    //use the known water samples as new training data
                    //if((i<boatFront->height/xDivisor) && (j<boatFront->width/yDivisor))
                    //{
                     //   cvmSet(resampleHue,i,j,cvmGet(sampleHue,0,j));
                      //  cvmSet(resampleSat,i,j,cvmGet(sampleSat,0,j));
                       // cvmSet(resampleVal,i,j,cvmGet(sampleVal,0,j));
                    //}
                    //6 used to be equal to pixelsNumber.
                    I[x][y-6+j].h = 0;
                    I[x][y-6+j].s = 255;
                    I[x][y-6+j].v = 255;   
                }
                votesSum = 0;
            }
        }
        if (y < Y-1)
            //5 used to be equal to pixelsNumber-1.
            y = y + 5;
        if (y > Y-1)
            y = Y-1;
        else if (y == Y-1)
        {
            //5 used to be equal to pixelsNumber-1
            x = x + 1;
            y = 0;
        }
        //ix = 0;
    }
    
    /*********************************************************************
    // Use nearest neighbors to increase accuracy
    skyX = 0; 
    skyY = 0;
    while (x < X-1)
    {
        //get a random sample taken from the picture. Must be determined whether
        //it is water or ground
        for (int i = 0; i<6;i++)
        {
            column1 = y+i;
            if (column1 > Y-1)
                column1 = Y-1;
            cvmSet(sampleHue,0,i,I[x][column1].h);
            cvmSet(sampleSat,0,i,I[x][column1].s);
            cvmSet(sampleVal,0,i,I[x][column1].v);
        }
        //Find the shortest distance between a pixel and the neighbors from each of
        //the training samples (sort of inefficient, but might do the job...sometimes)
        //HSV for water sample
        // learn classifier
        //CvKNearest knn(trainData, trainClasses, 0, false, itemsNumber);
        CvKNearest knnWaterHue(waterTrainingHue, trainClassesH, 0, false, pixelsNumber);
        CvKNearest knnWaterSat(waterTrainingSat, trainClassesS, 0, false, pixelsNumber);
        CvKNearest knnWaterVal(waterTrainingVal, trainClassesV, 0, false, pixelsNumber);
        //HSV for ground sample
        //CvKNearest knnGroundHue(groundTrainingHue, trainClasses2, 0, false, pixelsNumber);
        //CvKNearest knnGroundSat(groundTrainingSat, trainClasses2, 0, false, pixelsNumber);
        //CvKNearest knnGroundVal(groundTrainingVal, trainClasses2, 0, false, pixelsNumber);
        //HSV for sky sample
        //if (cvmGet(skyTrainingHue,0,0)!=0.0 && cvmGet(skyTrainingSat,0,0)!=0.0 && cvmGet(skyTrainingVal,0,0)!=0.0)
        //{
        //    CvKNearest knnSkyHue(skyTrainingHue, trainClasses, 0, false, pixelsNumber);
        //    CvKNearest knnSkySat(skyTrainingSat, trainClasses, 0, false, pixelsNumber);
        //    CvKNearest knnSkyVal(skyTrainingVal, trainClasses, 0, false, pixelsNumber);
        //}

        //scan nearest neighbors to each pixel
        responseWaterH = knnWaterHue.find_nearest(sampleHue,pixelsNumber,0,0,nearestWaterH,0);
        responseWaterS = knnWaterSat.find_nearest(sampleSat,pixelsNumber,0,0,nearestWaterS,0);
        responseWaterV = knnWaterVal.find_nearest(sampleVal,pixelsNumber,0,0,nearestWaterV,0);
        //responseGroundH = knnGroundHue.find_nearest(sampleHue,pixelsNumber,0,0,nearestGroundH,0);
        //responseGroundS = knnGroundSat.find_nearest(sampleSat,pixelsNumber,0,0,nearestGroundS,0);
        //responseGroundV = knnGroundVal.find_nearest(sampleVal,pixelsNumber,0,0,nearestGroundV,0);
        //for (int i=0;i<pixelsNumber;i++)
        //{
            for (int j=0;j<pixelsNumber;j++)
            {
                if ((nearestWaterH->data.fl[j] == responseWaterH) )//&& (nearestWaterH->data.fl[j] == responseWaterH + 5))
                        // mark water samples as green
                    comparator[0] = 1;
                else
                    comparator[0] = 0;
                if ((nearestWaterS->data.fl[j] == responseWaterS) )//&& (nearestWaterS->data.fl[j] < responseWaterS + 5))
                    //mark water samples as green
                    comparator[1] = 1;
                else
                    comparator[1] = 0;
                if ((nearestWaterV->data.fl[j] == responseWaterV) )//&& (nearestWaterV->data.fl[j] < responseWaterV + 5))
                //mark water samples as green
                    comparator[2] = 1;
                else
                    comparator[2] = 0;
                // similar sky pixels on the water
                //count votes
                for (int i3=0; i3 < 3; i3++)
                    votesSum = votesSum + comparator[i3]; 
                if (votesSum > 1)
                {
                    I[x][y-6+j].h = 0;
                    I[x][y-6+j].s = 255;
                    I[x][y-6+j].v = 255;
                }
                votesSum = 0;
            }
        }
        if (y < Y-1)
            //5 use to be equal to pixelsNumber-1.
            y = y + 5;
        if (y > Y-1)
            y = Y-1;
        else if (y == Y-1)
        {
            //5 use to be equal to pixelsNumber-1
            x = x + 1;
            y = 0;
        }
       // ix = 0;
    }
    
    /*********************************************************************/
    for(int i = 0; i < 3; i++)
    {   
        comparator[i] = 0;
    }
    //int counter = 0;
    column1 = 0;
    row1 = 0;
    x = boatFront->height/2 + 70;
    y = 0;
    while (x < X-1)
    {
        //take a sample window from the picture; it has to be classified
        //as either water or ground
        for (int i = 0; i<6;i++)
        {
            column1 = y+i;
            if (column1 > Y-1)
                column1 = Y-1;
            cvmSet(sampleHue,0,i,I[x][column1].h);
            cvmSet(sampleSat,0,i,I[x][column1].s);
            cvmSet(sampleVal,0,i,I[x][column1].v);
        }
        for (int i=0;i<6;i++)
        {
            for (int j=0;j<6;j++)
            {
                if ((minH < cvmGet(sampleHue,0,j)) && (maxH > cvmGet(sampleHue,0,j)))
                    //hue falls inside the water training range -> vote water
                    comparator[0] = 1;
                else
                    comparator[0] = 0;
                if ((minS < cvmGet(sampleSat,0,j)) && (maxS > cvmGet(sampleSat,0,j)))
                    //saturation falls inside the water training range -> vote water
                    comparator[1] = 1;
                else
                    comparator[1] = 0;
                if ((minV < cvmGet(sampleVal,0,j)) && (maxV > cvmGet(sampleVal,0,j)))
                    //value falls inside the water training range -> vote water
                    comparator[2] = 1;
                else
                    comparator[2] = 0;
                //count votes
                for (int i3=0; i3 < 3; i3++)
                    votesSum = votesSum + comparator[i3];
                if (votesSum > 1)
                {
                    //use the known water samples as new training data
                    if((i<boatFront->height/xDivisor) && (j<boatFront->width/yDivisor))
                    {
                        cvmSet(resampleHue,i,j,cvmGet(sampleHue,0,j));
                        cvmSet(resampleSat,i,j,cvmGet(sampleSat,0,j));
                        cvmSet(resampleVal,i,j,cvmGet(sampleVal,0,j));
                    }
                    //6 used to be equal to pixelsNumber.
                    //clamp the column so the first window (y == 0) cannot index below zero
                    column1 = y - 6 + j;
                    if (column1 < 0)
                        column1 = 0;
                    //mark the water pixel as red (H=0, S=255, V=255)
                    I[x][column1].h = 0;
                    I[x][column1].s = 255;
                    I[x][column1].v = 255;
                }
                votesSum = 0;
            }
        }
        if (y < Y-1)
            //5 used to be equal to pixelsNumber-1.
            y = y + 5;
        if (y > Y-1)
            y = Y-1;
        else if (y == Y-1)
        {
            //5 used to be equal to pixelsNumber-1
            x = x + 1;
            y = 0;
        }
        //ix = 0;
    }
    
    /***************Deal with reflection*****************/
    for(int i = 0; i < 3; i++)
    {   
        comparator[i] = 0;
    }
    //int counter = 0;
    votesSum = 0;
    column1 = 0;
    row1 = 0;
    x = boatFront->height/2 + 70;
    y = 0;
    while (x < X-1)
    {
        //take a sample window from the picture; it has to be classified
        //as either water or ground
        for (int i = 0; i<6;i++)
        {
            column1 = y+i;
            if (column1 > Y-1)
                column1 = Y-1;
            cvmSet(sampleHue,0,i,I[x][column1].h);
            cvmSet(sampleSat,0,i,I[x][column1].s);
            cvmSet(sampleVal,0,i,I[x][column1].v);
        }
        for (int i=0;i<6;i++)
        {
            for (int j=0;j<6;j++)
            {
                if ((minH < cvmGet(sampleHue,0,j)) && (maxH > cvmGet(sampleHue,0,j)))
                    //hue falls inside the water training range -> vote water
                    comparator[0] = 1;
                else
                    comparator[0] = 0;
                if ((0.8*255 > cvmGet(sampleSat,0,j)))// && (maxS < cvmGet(sampleSat,0,j)))
                    //low saturation suggests a bright reflection on the water -> vote water
                    comparator[1] = 1;
                else
                    comparator[1] = 0;
                if ((0.6*255 < cvmGet(sampleVal,0,j)))// || (maxV < cvmGet(sampleVal,0,j)))
                    //high brightness also suggests a reflection -> vote water
                    comparator[2] = 1;
                else
                    comparator[2] = 0;
                //count votes
                for (int i3=0; i3 < 3; i3++)
                    votesSum = votesSum + comparator[i3]; 
                if (votesSum > 1)
                {
                    //use the known water samples as new training data
                    if((i<boatFront->height/xDivisor) && (j<boatFront->width/yDivisor))
                    {
                        cvmSet(resampleHue,i,j,cvmGet(sampleHue,0,j));
                        cvmSet(resampleSat,i,j,cvmGet(sampleSat,0,j));
                        cvmSet(resampleVal,i,j,cvmGet(sampleVal,0,j));
                    }
                    //6 used to be equal to pixelsNumber.
                    //clamp the column so the first window (y == 0) cannot index below zero
                    column1 = y - 6 + j;
                    if (column1 < 0)
                        column1 = 0;
                    //mark the water pixel as red (H=0, S=255, V=255)
                    I[x][column1].h = 0;
                    I[x][column1].s = 255;
                    I[x][column1].v = 255;
                }
                votesSum = 0;
            }
        }
        if (y < Y-1)
            //5 used to be equal to pixelsNumber-1.
            y = y + 5;
        if (y > Y-1)
            y = Y-1;
        else if (y == Y-1)
        {
            //5 used to be equal to pixelsNumber-1
            x = x + 1;
            y = 0;
        }
        //ix = 0;
    }
    
    /**********Resample the entire patch**********/
    /*********find a new min and max for a new sample range*************/
    for(int i = 0; i < 3; i++)
    {   
        comparator[i] = 0;
    }
    //int counter = 0;
    votesSum = 0;
    column1 = 0;
    row1 = 0;
    x = boatFront->height/2 + 70;
    y = 0;
    maxH = cvmGet(resampleHue,0,0);
    maxS = cvmGet(resampleSat,0,0);
    maxV = cvmGet(resampleVal,0,0);
    minH = cvmGet(resampleHue,0,0);
    minS = cvmGet(resampleSat,0,0);
    minV = cvmGet(resampleVal,0,0);
    for (int i=0; i < boatFront->height/xDivisor; i++)
    {
        for (int j=0; j < boatFront->width/yDivisor; j++)
        {
            if (cvmGet(resampleHue,i,j) > maxH)
                maxH = cvmGet(resampleHue,i,j);
            if (cvmGet(resampleSat,i,j) > maxS)
                maxS = cvmGet(resampleSat,i,j);
            if (cvmGet(resampleVal,i,j) > maxV)
                maxV = cvmGet(resampleVal,i,j);
            if (cvmGet(resampleHue,i,j) < minH)
                minH = cvmGet(resampleHue,i,j);
            if (cvmGet(resampleSat,i,j) < minS)
                minS = cvmGet(resampleSat,i,j);
            if (cvmGet(resampleVal,i,j) < minV)
                minV = cvmGet(resampleVal,i,j);
        }
    }
    while (x < X-1)
    {
        for (int i=0;i<6;i++)
        {
            for (int j=0;j<6;j++)
            {
                //clamp the column so the first window (y == 0) cannot index below zero
                column1 = y - 6 + j;
                if (column1 < 0)
                    column1 = 0;
                if ((minH < I[x][column1].h) && (maxH > I[x][column1].h))
                    //hue in the resampled water range: mark as water (red)
                    I[x][column1].h = 0;
                else
                    comparator[0] = 0;
                if ((minS < I[x][column1].s) && (maxS > I[x][column1].s))
                    //saturation in the resampled water range
                    I[x][column1].s = 255;
                else
                    comparator[1] = 0;
                if ((minV < I[x][column1].v) && (maxV > I[x][column1].v))
                    //value in the resampled water range
                    I[x][column1].v = 255;
            }
        }
        if (y < Y-1)
            //5 used to be equal to pixelsNumber-1.
            y = y + 5;
        if (y > Y-1)
            y = Y-1;
        else if (y == Y-1)
        {
            //5 used to be equal to pixelsNumber-1
            x = x + 1;
            y = 0;
        }
    }
    //cout << "Sample data from current images" << endl;
    //for (int i = 0; i<20;i++)
    //{
      //  cout << "HUE: " << cvmGet(sampleHue,0,i) << endl;
      //  cout << "Saturation: " << cvmGet(sampleSat,0,i) << endl;
      //  cout << "Value: " << cvmGet(sampleVal,0,i) << endl;
    //}
    //traverse through the image one more time, divide it into grid cells of
    //(height/xDivisor) x (width/yDivisor) pixels, and count how many water
    //pixels are in each cell. If most of the pixels in a cell are labeled
    //water, then mark the rest of the cell as water as well
    //int counter = 0;
    votesSum = 0;
    column1 = 0;
    row1 = 0;
    x = boatFront->height/2 + 70;
    y = 0;
    
    /***************Divide the picture in cells for filtering**********/
    while (x < X-1)
    {
        //scan the current grid cell and count how many of its pixels
        //are already labeled as water
        for (int i = 0; i < boatFront->height/xDivisor; i++)
        {
            for(int j = 0; j < boatFront->width/yDivisor; j++)
            {
                cvmSet(resampleHue2,i,j,I[x+i][y+j].h);
                cvmSet(resampleSat2,i,j,I[x+i][y+j].s);
                cvmSet(resampleVal2,i,j,I[x+i][y+j].v);
                if(cvmGet(resampleHue2,i,j)==0 && cvmGet(resampleSat2,i,j)==255 && cvmGet(resampleVal2,i,j)==255)
                {
                    votesSum++;
                }
            }
        }
        if (votesSum > (((boatFront->height/xDivisor)*(boatFront->width/yDivisor))*(4.0/5.0)))
        {   
        // if more than 4/5 of the pixels in the cell are water, label the whole cell as water
        // (4.0/5.0 keeps the ratio in floating point; a stricter fraction such as 5/6 might also work)
            for (int i = 0; i < boatFront->height/xDivisor;i++)
            {
                for (int j = 0; j < boatFront->width/yDivisor; j++)
                {
                    row1 = x + i;
                    if (row1 > X-1)
                        row1 = X-1;
                    column1 = y+j;
                    if (column1 > Y-1)
                        column1 = Y-1;
                    I[row1][column1].h = 0;
                    I[row1][column1].s = 255;
                    I[row1][column1].v = 255;
                }
            }
        }
        else
        {   
        // otherwise, undo the red water markings in this cell and restore
        // the original pixel colors from the backup image
            for (int i = 0; i < boatFront->height/xDivisor;i++)
            {
                for (int j = 0; j < boatFront->width/yDivisor; j++)
                {
                    row1 = x + i;
                    if (row1 > X-1)
                        row1 = X-1;
                    column1 = y+j;
                    if (column1 > Y-1)
                        column1 = Y-1;
                    I[row1][column1].h = IBackUp[row1][column1].h;
                    I[row1][column1].s = IBackUp[row1][column1].s;
                    I[row1][column1].v = IBackUp[row1][column1].v;
                }
            }
        }
        //advance by one cell: width/yDivisor columns per step, height/xDivisor rows per row of cells
        y = y + boatFront->width/yDivisor;
        if (y > Y-1)
        {
            x = x + boatFront->height/xDivisor;
            y = 0;
        }
        votesSum = 0;
    }
    
    /********************Isolate obstacles************************/
    votesSum = 0;
    int paint = 0;
    column1 = 0;
    row1 = 0;
    x = boatFront->height/2 + 40;
    y = 0;
    xDiv = 40;
    yDiv = 40;
    /***************Divide the picture in cells for filtering**********/
    // Small pixel areas (noise) are going to be eliminated from the picture,
    // leaving only the big obstacles
    while (x < X-2)
    {
        //scan the current grid cell and count how many of its pixels
        //are already labeled as water
        for (int i = 0; i < boatFront->height/xDiv; i++)
        {
            for(int j = 0; j < boatFront->width/yDiv; j++)
            {
                row1 = x + i;
                if (row1 > X-2)
                    row1 = X-2;
                column1 = y+j;
                if (column1 > Y-1)
                    column1 = Y-1;
                cvmSet(resampleHue2,i,j,I[row1][column1].h);
                cvmSet(resampleSat2,i,j,I[row1][column1].s);
                cvmSet(resampleVal2,i,j,I[row1][column1].v);
                if(cvmGet(resampleHue2,i,j)==0 && cvmGet(resampleSat2,i,j)==255 && cvmGet(resampleVal2,i,j)==255)
                {
                    votesSum++;
                }
            }
        }
        if (votesSum > (((boatFront->height/xDiv)*(boatFront->width/yDiv))*(4.0/5.0)))
        {   
        // if more than 4/5 of the pixels in the cell are water, label the whole cell as water
        // (4.0/5.0 keeps the ratio in floating point; a stricter fraction such as 5/6 might also work)
            for (int i = 0; i < boatFront->height/xDiv;i++)
            {
                for (int j = 0; j < boatFront->width/yDiv; j++)
                {
                    row1 = x + i;
                    if (row1 > X-2)
                        row1 = X-2;
                    column1 = y+j;
                    if (column1 > Y-1)
                        column1 = Y-1;
                    I[row1][column1].h = 0;
                    I[row1][column1].s = 255;
                    I[row1][column1].v = 255;
                }
            }
        }
        else
        {   
            int count = 0;
        // otherwise, undo the red water markings in this cell and restore
        // the original pixel colors from the backup image
            for (int i = 0; i < boatFront->height/xDiv;i++)
            {
                for (int j = 0; j < boatFront->width/yDiv; j++)
                {
                    row1 = x + i;
                    if (row1 > X-2)
                        row1 = X-2;
                    column1 = y+j;
                    if (column1 > Y-1)
                        column1 = Y-1;
                    I[row1][column1].h = IBackUp[row1][column1].h;
                    I[row1][column1].s = IBackUp[row1][column1].s;
                    I[row1][column1].v = IBackUp[row1][column1].v;
                   // count++;
                }
            }
        }
        y = y + boatFront->width/yDiv;
        if (y > Y-1)
        {
            x = x + boatFront->height/xDiv;
            if (x > X-2)
                x = X-2;
            y = 0;
        }
        votesSum = 0;
    }
    
    /****************Find Obstacles boundaries*********************************/
    if( grayStorage == NULL )
    {
        grayStorage = cvCreateMemStorage(0);
    } 
    else 
    {
        cvClearMemStorage(grayStorage);
    }
    //backUpImage = cvCloneImage(boatFront);
    //IBackUp(backUpImage);
    //Ignore unused parts of the image and convert them to black
    for (int i=0; i<backUpImage->height;i++)
    {
        for (int j=0; j<backUpImage->width;j++)
        {
            if(i < backUpImage->height/2 + 70)
            {
                IBackUp[i][j].h = 0;
                IBackUp[i][j].s = 0;
                IBackUp[i][j].v = 0;
            }
            else
            {
                IBackUp[i][j].h = I[i][j].h;
                IBackUp[i][j].s = I[i][j].s;
                IBackUp[i][j].v = I[i][j].v;
            }
        }
    }
    //convert from HSV to RGB
    cvCvtColor(boatFront, boatFront, CV_HSV2BGR);
    cvCvtColor(backUpImage, backUpImage, CV_HSV2BGR);
    //do flood fill for obstacles
    cvFloodFill( backUpImage, seed_point, color, cvScalarAll(255), cvScalarAll(2), NULL, 8, NULL);
    //convert to gray to do more obstacle segmentation
    cvCvtColor(backUpImage, grayImage, CV_BGR2GRAY);
    //convert to binary
    cvThreshold(grayImage, bwImage, 100, 255, CV_THRESH_BINARY | CV_THRESH_OTSU);
    //eliminate small unnecessary pixel areas
    //bwImage is modified in place, so the return value of bwareaopen_ is not reused
    int findCountours = bwareaopen_(bwImage, 100);
    //find contours of obstacles in image
    cvFindContours(bwImage, grayStorage, &contours);
    cvZero( bwImage );                      //redraw clean contours
    for( CvSeq* c=contours; c!=NULL; c=c->h_next)
    {
        cvDrawContours(bwImage, c, cvScalarAll(255), cvScalarAll(255), 8);
        //skip obstacles/contours with an area smaller than 60 pixels or larger than 100000 pixels
        if ((cvContourArea(c, CV_WHOLE_SEQ) >= 60) && (cvContourArea(c, CV_WHOLE_SEQ) <= 100000))
        {
            cout << "Contour area: " << cvContourArea(c, CV_WHOLE_SEQ) << endl;     //area in pixels                        
            //find the x,y coordinate of the center of a contour
            cvMoments(c, &moment, 0);
            //centroid/moment of the contour/obstacle
            cout << "Contour center in x,y: " << moment.m10/moment.m00 << ", " << moment.m01/moment.m00 << endl;
            //The distance formula, derived from the pinhole camera model, is:
            // Xc/Zc = (Xp - cc(1)) / fc(1)
            // Yc/Zc = (Yp - cc(2)) / fc(2)
            //For boat one Yc = 4.5 inches = 0.0635 meters
            //These formulas only work for 640x480 images
            //x,y coordinates of the obstacle measured from the bottom center of the image
            //Ignore everything closer than 0.3 meters (anything too close to the boat)
            //(a standalone sketch of this back-projection appears after this callback)
            zObstacleDistance = 5*(yObstacleDistance*619.33108)/(X - (moment.m01/moment.m00));
            xObstacleDistance = 5*zObstacleDistance*((moment.m10/moment.m00)-324.36738)/618.62586;
            //copy data to be published
            xWaypoint_msg.data = xObstacleDistance;
            zWaypoint_msg.data = zObstacleDistance;
            //publish data
            Xwaypoint_info_pub.publish(xWaypoint_msg);
            Zwaypoint_info_pub.publish(zWaypoint_msg);
            //try to ignore obstacles that are too close; the robot should tell
            //the operator if a nearby obstacle is a problem
            //obstacle distance
            obstacleDistance = sqrt(pow(xObstacleDistance,2) + pow(yObstacleDistance,2) + pow(zObstacleDistance,2));
            //Just use the 2D angle: atan of z/x, converted from radians to degrees
            obstacleHeading = atan(zObstacleDistance/xObstacleDistance)*180/PI;
            cout << "Obstacle polar coordinates: " << endl;
            cout << "z: " << zObstacleDistance << " x: " << xObstacleDistance << endl;
            cout << "Distance (meters) " << obstacleDistance << endl;
            cout << "Direction (degrees): " << obstacleHeading << endl << endl;
        }
    }
    /**************************************************************************/
    //deal with memory management: how do I get rid of the arrays and pointers I am not using inside the callback function?
    try
    {
      //fprintf(stderr,"\n boatFront\n");
      cvShowImage("Boat Front", backUpImage);
      //cvShowImage("Color Segment", backUpImage);
      //cvShowImage("Obstacles", bwImage);
    }
    catch (sensor_msgs::CvBridgeException& e)
    {
      ROS_ERROR("Could not convert from '%s' to 'bgr8'.", msg->encoding.c_str());
    }
}
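
The distance estimate above inverts the pinhole model quoted in the comments (Xc/Zc = (Xp - cc(1))/fc(1), Yc/Zc = (Yp - cc(2))/fc(2)) using a known camera height above the water. The sketch below isolates that back-projection step in a standalone form; the focal lengths, principal point and camera height are illustrative placeholders rather than the calibration used in the callback, and the callback's extra empirical scale factor is omitted.

// Minimal sketch (not the callback's exact calibration): back-project an image
// point onto the water plane with the pinhole model
//   Xc/Zc = (Xp - cc_x)/fc_x,   Yc/Zc = (Yp - cc_y)/fc_y
#include <cstdio>

struct PlanePoint { double x; double z; };

static PlanePoint backProjectToWaterPlane(double Xp, double Yp,
                                          double fc_x, double fc_y,
                                          double cc_x, double cc_y,
                                          double cameraHeight) // Yc, in meters
{
    PlanePoint p;
    // note: points at or above the principal point (Yp <= cc_y) never hit the water plane
    // Yc/Zc = (Yp - cc_y)/fc_y  =>  Zc = Yc * fc_y / (Yp - cc_y)
    p.z = cameraHeight * fc_y / (Yp - cc_y);
    // Xc/Zc = (Xp - cc_x)/fc_x  =>  Xc = Zc * (Xp - cc_x) / fc_x
    p.x = p.z * (Xp - cc_x) / fc_x;
    return p;
}

int main()
{
    // placeholder 640x480 intrinsics and camera height (not the values calibrated above)
    PlanePoint p = backProjectToWaterPlane(400.0, 300.0,
                                           618.6, 619.3, 324.4, 240.0, 0.5);
    printf("x: %f m  z: %f m\n", p.x, p.z);
    return 0;
}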
Example #9
int icvStartSampleDistortion( const char* imgfilename, int bgcolor, int bgthreshold,
                              CvSampleDistortionData* data )
{
    memset( data, 0, sizeof( *data ) );
    data->src = cvLoadImage( imgfilename, 0 );
    if( data->src != NULL && data->src->nChannels == 1 
        && data->src->depth == IPL_DEPTH_8U )
    {
        int r, c;
        uchar* pmask;
        uchar* psrc;
        uchar* perode;
        uchar* pdilate;
        uchar dd, de;
        
        data->dx = data->src->width / 2;
        data->dy = data->src->height / 2;
        data->bgcolor = bgcolor;

        data->mask = cvCloneImage( data->src );
        data->erode = cvCloneImage( data->src );
        data->dilate = cvCloneImage( data->src );
            
        /* make mask image */
        for( r = 0; r < data->mask->height; r++ )
        {
            for( c = 0; c < data->mask->width; c++ )
            {
                pmask = ( (uchar*) (data->mask->imageData + r * data->mask->widthStep)
                        + c );
                if( bgcolor - bgthreshold <= (int) (*pmask) &&
                    (int) (*pmask) <= bgcolor + bgthreshold )
                {
                    *pmask = (uchar) 0;
                }
                else
                {
                    *pmask = (uchar) 255;
                }
            }
        }
            
        /* extend borders of source image */
        cvErode( data->src, data->erode, 0, 1 );
        cvDilate( data->src, data->dilate, 0, 1 );
        for( r = 0; r < data->mask->height; r++ )
        {
            for( c = 0; c < data->mask->width; c++ )
            {
                pmask = ( (uchar*) (data->mask->imageData + r * data->mask->widthStep)
                        + c );
                if( (*pmask) == 0 )
                {
                    psrc = ( (uchar*) (data->src->imageData + r * data->src->widthStep)
                           + c );
                    perode = 
                        ( (uchar*) (data->erode->imageData + r * data->erode->widthStep)
                                + c );
                    pdilate = 
                        ( (uchar*)(data->dilate->imageData + r * data->dilate->widthStep)
                                + c );
                    de = bgcolor - (*perode);
                    dd = (*pdilate) - bgcolor;
                    if( de >= dd && de > bgthreshold )
                    {
                        (*psrc) = (*perode);
                    }
                    if( dd > de && dd > bgthreshold )
                    {
                        (*psrc) = (*pdilate);
                    }
                }
            }
        }

        data->img = cvCreateImage( cvSize( data->src->width + 2 * data->dx,
                                           data->src->height + 2 * data->dy ),
                                   IPL_DEPTH_8U, 1 );
        data->maskimg = cvCloneImage( data->img );

        return 1;
    }

    return 0;
}
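
A hypothetical caller for icvStartSampleDistortion, shown only to illustrate the contract of the function above: it checks the return value, reads the fields the function fills in, and releases the images it allocated. It assumes the OpenCV C headers, <cstdio>, and the CvSampleDistortionData definition are already included, as in the original source file; the file name, background color and threshold are placeholders.

// Hypothetical usage sketch; "object.png", 255 (background color) and
// 10 (background threshold) are placeholders.
static void releaseSampleDistortionData( CvSampleDistortionData* data )
{
    /* release only what icvStartSampleDistortion allocated */
    if( data->src )     cvReleaseImage( &data->src );
    if( data->mask )    cvReleaseImage( &data->mask );
    if( data->erode )   cvReleaseImage( &data->erode );
    if( data->dilate )  cvReleaseImage( &data->dilate );
    if( data->img )     cvReleaseImage( &data->img );
    if( data->maskimg ) cvReleaseImage( &data->maskimg );
}

int exampleStartSampleDistortion( void )
{
    CvSampleDistortionData data;

    if( icvStartSampleDistortion( "object.png", 255, 10, &data ) )
    {
        /* data.src holds the loaded grayscale sample; data.img and data.maskimg
           are the enlarged working buffers with a data.dx/data.dy border */
        printf( "sample loaded: %dx%d, border %dx%d\n",
                data.src->width, data.src->height, data.dx, data.dy );
        releaseSampleDistortionData( &data );
        return 1;
    }

    printf( "failed to load a single-channel 8-bit sample\n" );
    return 0;
}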
int main(int argc, char* argv[])
{
	// Set up variables
	CvPoint2D32f srcTri[3], dstTri[3];
	CvMat* rot_mat = cvCreateMat(2,3,CV_32FC1);
	CvMat* warp_mat = cvCreateMat(2,3,CV_32FC1);
	IplImage *src, *dst;
	const char* name = "Affine_Transform";

	// Load image
	src=cvLoadImage("airplane.jpg");
	dst = cvCloneImage( src );
	dst->origin = src->origin;
	cvZero( dst );
	cvNamedWindow( name, 1 );

	// Create angle and scale
	double angle = 0.0;
	double scale = 1.0;

	// Create trackbars
	cvCreateTrackbar( "Angle", name, &angle_switch_value, 4, switch_callback_a );
	cvCreateTrackbar( "Scale", name, &scale_switch_value, 4, switch_callback_s );

	// Compute warp matrix
	srcTri[0].x = 0;
	srcTri[0].y = 0;
	srcTri[1].x = src->width - 1;
	srcTri[1].y = 0;
	srcTri[2].x = 0;
	srcTri[2].y = src->height - 1;

	dstTri[0].x = src->width*0.0;
	dstTri[0].y = src->height*0.25;
	dstTri[1].x = src->width*0.90;
	dstTri[1].y = src->height*0.15;
	dstTri[2].x = src->width*0.10;
	dstTri[2].y = src->height*0.75;

	cvGetAffineTransform( srcTri, dstTri, warp_mat );
	cvWarpAffine( src, dst, warp_mat );
	cvCopy ( dst, src );

	while( 1 ) {
		switch( angleInt ){
			case 0:
				angle = 0.0;
				break;
			case 1:
				angle = 20.0;
				break;
			case 2:
				angle = 40.0;
				break;
			case 3:
				angle = 60.0;
				break;
			case 4:
				angle = 90.0;
				break;
		}
		switch( scaleInt ){
			case 0:
				scale = 1.0;
				break;
			case 1:
				scale = 0.8;
				break;
			case 2:
				scale = 0.6;
				break;
			case 3:
				scale = 0.4;
				break;
			case 4:
				scale = 0.2;
				break;
		}

		// Compute rotation matrix
		CvPoint2D32f center = cvPoint2D32f( src->width/2, src->height/2 );
		cv2DRotationMatrix( center, angle, scale, rot_mat );

		// Do the transformation
		cvWarpAffine( src, dst, rot_mat );

		cvShowImage( name, dst );

		if( cvWaitKey( 15 ) == 27 )
			break;
	}

	cvReleaseImage( &dst );
	cvReleaseMat( &rot_mat );
	cvReleaseMat( &warp_mat );

	return 0;
}
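
For reference, a stripped-down, non-interactive variant of the program above: it applies a single rotate-and-scale about the image center and writes the result to disk instead of driving trackbars. The file names, angle and scale are placeholders.

// Minimal sketch: one rotation/scale about the image center, saved to disk.
#include <cv.h>
#include <highgui.h>

int main()
{
	IplImage* src = cvLoadImage( "airplane.jpg" );	// placeholder file name
	if( src == NULL )
		return -1;

	IplImage* dst = cvCloneImage( src );
	CvMat* rot_mat = cvCreateMat( 2, 3, CV_32FC1 );

	// 2x3 rotation matrix about the image center: 30 degrees, 0.8 scale (placeholders)
	CvPoint2D32f center = cvPoint2D32f( src->width/2, src->height/2 );
	cv2DRotationMatrix( center, 30.0, 0.8, rot_mat );
	cvWarpAffine( src, dst, rot_mat );

	cvSaveImage( "airplane_rotated.jpg", dst );	// placeholder output name

	cvReleaseMat( &rot_mat );
	cvReleaseImage( &dst );
	cvReleaseImage( &src );
	return 0;
}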
Example #11
int main(int argc, char** argv)
{
	IplImage* img = NULL;
	IplImage* dst = NULL;
	IplImage* dst2 = NULL;
	IplImage* dst3 = NULL;
	IplImage* dst4 = NULL;
	IplImage* dst5 = NULL;

	char* cha = "./temp/";//the base directory
	DIR* rep = NULL;
	struct dirent* fichierLu = NULL; /* pointer to a dirent structure (file entry) */
	struct dirent* repLu = NULL; /* pointer to a dirent structure (directory entry) */
	char* ch;
	rep = opendir(cha);
	if (rep == NULL)
	{
		exit(1);
	}
	while((repLu = readdir(rep)) != NULL )
	{
		//skip the "." and ".." entries; stop if the directory has no more entries
		while (repLu != NULL && (strcmp(repLu->d_name, ".") == 0 || strcmp(repLu->d_name, "..") == 0))
		{
			repLu = readdir(rep);
		}
		if (repLu == NULL)
			break;
		printf("%s\n", repLu->d_name);
		DIR* rep2 = NULL;
		ch = concat(cha,repLu->d_name);
		rep2 = opendir(ch);
		char* c = ch;
		c = concat(ch,"/");
		while ((fichierLu = readdir(rep2)) != NULL )
		{
			if (strcmp(fichierLu->d_name, ".") != 0 && strcmp(fichierLu->d_name, "..") != 0)
			{
				printf("Le fichier lu s'appelle %s\n", fichierLu->d_name);

				ch = concat(c,fichierLu->d_name);
				img = cvLoadImage( ch , CV_LOAD_IMAGE_COLOR);
				if (img == NULL)
				{
					printf("Oups j'ai eu un problème.\n");
					return -1;
				}

				IplImage *hsv;
				hsv = cvCloneImage(img);
				cvCvtColor(img, hsv, CV_BGR2HSV);
				IplImage *hsv2;
				hsv2 = cvCloneImage(img);
				cvCvtColor(img, hsv2, CV_BGR2HSV);
				IplImage *hsv3;
				hsv3 = cvCloneImage(img);
				cvCvtColor(img, hsv3, CV_BGR2HSV);

				IplImage *maskrouge;
				maskrouge = cvCreateImage(cvGetSize(img), img->depth, 1);
				IplImage *maskbleu;
				maskbleu = cvCreateImage(cvGetSize(img), img->depth, 1);
				IplImage *maskblanc;
				maskblanc = cvCreateImage(cvGetSize(img), img->depth, 1);

				int h1=0, s1=200, v1= 80;
				int h2=106, s2=138;
				int h3=350, s3=83;
				cvInRangeS(hsv, cvScalar(h1 -tolerance, s1 - tolerance, v1 - tolerance, 0.0), cvScalar(h1 + tolerance, s1 + tolerance, v1 + tolerance, 0.0), maskrouge);
				cvInRangeS(hsv2, cvScalar(h2 -tolerance, s2 - tolerance, 0, 0.0), cvScalar(h2 + tolerance, s2 + tolerance, 255, 0.0), maskbleu);
				cvInRangeS(hsv3, cvScalar(h3 -tolerance, s3 - tolerance, 0, 0.0), cvScalar(h3 + tolerance, s3 + tolerance, 255, 0.0), maskblanc);

				//IplConvKernel *kernel;
				//kernel = cvCreateStructuringElementEx(5, 5, 2, 2, CV_SHAPE_ELLIPSE, NULL);

				//cvDilate(maskrouge, maskrouge, kernel, 1);
				//cvDilate(maskbleu, maskbleu, kernel, 1);
				//cvDilate(maskblanc, maskblanc, kernel, 1);cvDilate(maskrouge, maskrouge, kernel, 1);
				//cvDilate(maskbleu, maskbleu, kernel, 1);
				//cvDilate(maskblanc, maskblanc, kernel, 1);
				//cvErode(maskrouge, maskrouge, kernel, 1);

				// display the masks and the resized source image

				dst = cvCreateImage(cvSize( img->width / 6, img->height / 6 ), img->depth,img->nChannels );
				dst2 = cvCreateImage(cvSize( maskrouge->width / 6, maskrouge->height / 6 ), maskrouge->depth,maskrouge->nChannels );
				dst3 = cvCreateImage(cvSize( maskbleu->width / 6, maskbleu->height / 6 ), maskbleu->depth,maskbleu->nChannels );
				dst4 = cvCreateImage(cvSize( maskblanc->width / 6, maskblanc->height / 6 ), maskblanc->depth,maskblanc->nChannels );
				//dst5 = cvCreateImage(cvSize( hsv->width / 2, hsv->height / 2 ), hsv->depth, hsv->nChannels );

				cvResize(img, dst, CV_INTER_AREA );
				cvResize(maskrouge, dst2, CV_INTER_AREA );
				cvResize(maskbleu, dst3, CV_INTER_AREA );
				cvResize(maskblanc, dst4, CV_INTER_AREA );
				//cvResize(hsv, dst5, CV_INTER_AREA );

				cvShowImage("test1",dst);
				cvShowImage("test2",dst2);
				cvShowImage("test3",dst3);
				cvShowImage("test4",dst4);
				//cvShowImage("test5",dst5);
				cvMoveWindow("test1" ,0,0);
				cvMoveWindow("test2" ,dst->width,dst->height+60);
				cvMoveWindow("test3" ,dst->width,0);
				cvMoveWindow("test4" ,0,dst->height+60);
				//cvMoveWindow("test5" , 0, 0);

				cvWaitKey(0);

				cvReleaseImage(&maskrouge);
				cvReleaseImage(&maskbleu);
				cvReleaseImage(&maskblanc);
				cvReleaseImage(&hsv);
				cvReleaseImage(&hsv2);
				cvReleaseImage(&hsv3);
				cvReleaseImage(&img);
				cvReleaseImage(&dst);
				cvReleaseImage(&dst2);
				cvReleaseImage(&dst3);
				cvReleaseImage(&dst4);
				cvReleaseImage(&dst5);
				cvDestroyWindow("test1");
				cvDestroyWindow("test2");
				cvDestroyWindow("test3");
				cvDestroyWindow("test4");
				//cvDestroyWindow("test5");
			}
		}
	}


	if (closedir(rep) == -1)
		exit(-1);

	return 0;
}
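
Since cvInRangeS does not modify its input, the three cvCvtColor calls and the three cloned HSV images in the example above can be collapsed into a single conversion. The sketch below shows one way to do that; the hue/saturation/value centers are copied verbatim from the example, while the file name and the tolerance value are placeholders.

// Sketch: one HSV conversion feeding all three masks (placeholder file name
// and tolerance; threshold centers copied from the example above).
#include <cv.h>
#include <highgui.h>

static void makeColorMasks(IplImage* img, int tolerance,
                           IplImage* maskRouge, IplImage* maskBleu, IplImage* maskBlanc)
{
	IplImage* hsv = cvCreateImage(cvGetSize(img), img->depth, 3);
	cvCvtColor(img, hsv, CV_BGR2HSV);

	// centers copied from the example above (note: OpenCV stores 8-bit hue in 0..180)
	int h1 = 0,   s1 = 200, v1 = 80;
	int h2 = 106, s2 = 138;
	int h3 = 350, s3 = 83;

	cvInRangeS(hsv, cvScalar(h1 - tolerance, s1 - tolerance, v1 - tolerance, 0.0),
	                cvScalar(h1 + tolerance, s1 + tolerance, v1 + tolerance, 0.0), maskRouge);
	cvInRangeS(hsv, cvScalar(h2 - tolerance, s2 - tolerance, 0, 0.0),
	                cvScalar(h2 + tolerance, s2 + tolerance, 255, 0.0), maskBleu);
	cvInRangeS(hsv, cvScalar(h3 - tolerance, s3 - tolerance, 0, 0.0),
	                cvScalar(h3 + tolerance, s3 + tolerance, 255, 0.0), maskBlanc);

	cvReleaseImage(&hsv);
}

int main(int argc, char** argv)
{
	IplImage* img = cvLoadImage("image.jpg", CV_LOAD_IMAGE_COLOR);	// placeholder path
	if (img == NULL)
		return -1;

	IplImage* maskRouge = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 1);
	IplImage* maskBleu  = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 1);
	IplImage* maskBlanc = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 1);

	makeColorMasks(img, 20, maskRouge, maskBleu, maskBlanc);	// 20 = placeholder tolerance

	cvShowImage("rouge", maskRouge);
	cvShowImage("bleu", maskBleu);
	cvShowImage("blanc", maskBlanc);
	cvWaitKey(0);

	cvReleaseImage(&maskRouge);
	cvReleaseImage(&maskBleu);
	cvReleaseImage(&maskBlanc);
	cvReleaseImage(&img);
	return 0;
}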