コード例 #1
0
ファイル: Interpreter.cpp プロジェクト: jammy112/OCR-Math
vector< IplImage* > Interpreter::ExtractAllSymbolBlobs(IplImage* Source)
{
	/*
	* Source: binary image to rip symbol blobs from.
	* Returns one cropped image per connected component; the caller owns
	* the returned IplImage pointers.
	*/

	vector< IplImage* > AllBlobImages;

	// Scratch label image required by cvLabel; released before returning.
	IplImage* LabelImage = cvCreateImage(cvGetSize(Source), IPL_DEPTH_LABEL, 1);

	CvBlobs Blobs;
	cvLabel(Source, LabelImage, Blobs);

	// Crop out all the symbols, yeah
	for (CvBlobs::const_iterator it = Blobs.begin(); it != Blobs.end(); ++it)
	{
		// AllBlobImages.push_back( ConvertToSquare( CropOutBlob(Source, it->second), 300) );
		AllBlobImages.push_back( CropOutBlob(Source, it->second) );
	}

	// Fix: the label image and the blob structures were leaked on every call.
	cvReleaseBlobs(Blobs);
	cvReleaseImage(&LabelImage);

	// This next part is going to be tricky. We have to take out all _important_ blobs. There are a couple ways to do this:
	// 1. By assuming that all symbols are going to be above a certain size - this gets rid of "noise"
	// 2. By keeping all pulled regions, and later discarding any with really low confidence values - possibly slower
	// ...
	// I'll sleep on it.
	return AllBlobImages;
}
コード例 #2
0
// Returns the subset of 'intBlobs' that look roughly circular, judged by
// their normalized central moments: low (n02 - n20) anisotropy and a low
// n11 shear term relative to (n20 + n02).
// The returned map shares CvBlob pointers with the input; no blobs are
// copied or reallocated.
CvBlobs blobsCirculares(CvBlobs intBlobs){
	// Fix: the result map was allocated with `new` and returned by value,
	// leaking the heap object on every call. A plain local suffices.
	CvBlobs OBlobs;

	for (CvBlobs::const_iterator it=intBlobs.begin(); it!=intBlobs.end(); ++it)
	{
		// Fix: removed the no-op self-assignment `*blob = *it->second;`
		// and the unused counter variable.
		CvBlob *blob = it->second;

		// Relative difference of second-order moments: ~0 for a circle.
		double difMom = abs((blob->n02-blob->n20)/((blob->n20)+(blob->n02)));

		if ((difMom < 0.5) && (abs(blob->n11/((blob->n20)+(blob->n02))) < 0.4) )
		{
			OBlobs.insert(CvLabelBlob(blob->label,blob));
		}
	}
	return OBlobs;
}
コード例 #3
0
ファイル: cvblob.cpp プロジェクト: panpeter90/GUI
  // Renders every blob in 'blobs' onto imgDest (reading pixels from
  // imgSource where needed), using the label image for per-pixel masks.
  // mode is a bitmask of CV_BLOB_RENDER_* flags; alpha blends the fill.
  // When CV_BLOB_RENDER_COLOR is set, each blob gets a distinct colour
  // generated by stepping the hue in 77-degree increments.
  void cvRenderBlobs(const IplImage *imgLabel, CvBlobs &blobs, IplImage *imgSource, IplImage *imgDest, unsigned short mode, double alpha)
  {
    CV_FUNCNAME("cvRenderBlobs");
    __CV_BEGIN__;
    {

      CV_ASSERT(imgLabel&&(imgLabel->depth==IPL_DEPTH_LABEL)&&(imgLabel->nChannels==1));
      CV_ASSERT(imgDest&&(imgDest->depth==IPL_DEPTH_8U)&&(imgDest->nChannels==3));

      // Palette mapping label -> render colour; empty unless colour mode.
      Palete pal;
      if (mode&CV_BLOB_RENDER_COLOR)
      {

	unsigned int colorCount = 0;
	for (CvBlobs::const_iterator it=blobs.begin(); it!=blobs.end(); ++it)
	{
	  CvLabel label = (*it).second->label;

	  double r, g, b;

	  // 77 is coprime-ish with 360, spreading hues across blobs.
	  _HSV2RGB_((double)((colorCount*77)%360), .5, 1., r, g, b);
	  colorCount++;

	  pal[label] = CV_RGB(r, g, b);
	}
      }

      // NOTE(review): pal[label] default-inserts a black entry when colour
      // mode is off; cvRenderBlob presumably ignores it in that case.
      for (CvBlobs::iterator it=blobs.begin(); it!=blobs.end(); ++it)
	cvRenderBlob(imgLabel, (*it).second, imgSource, imgDest, mode, pal[(*it).second->label], alpha);
    }
    __CV_END__;
  }
コード例 #4
0
// Detects blobs in the arena image and picks the closest pit-zone box to
// lift, storing its centroid and colour tag in the global 'liftbox'.
// filter: binary input image; ch: colour tag ('y' selects the tighter
// area band). Uses globals: temp_blob, area, x, y, liftbox.
void blobs_arena(IplImage* filter,char ch)  
{
    //temp_blob to store the blobs.
    temp_blob=cvCreateImage(cvGetSize(filter),IPL_DEPTH_LABEL,1);
    //working with the blobs.
    CvBlobs blobs;
    cvLabel(filter, temp_blob, blobs);
    area=0;
    x=0;
    y=0;
    // Sentinel far point: any real centroid will be nearer than this.
    liftbox.centroid.x=10000;
    liftbox.centroid.y=10000;
    // Area band depends on the colour being searched.
    if(ch=='y')            
        cvFilterByArea(blobs, 250,650);
    else
        cvFilterByArea(blobs, 200,500);
    for (CvBlobs::const_iterator it=blobs.begin(); it!=blobs.end(); ++it)
    {
           area= it->second->area;
           x= it->second->centroid.x;
           y= it->second->centroid.y;
           // Fix: label and area are unsigned ints — print with %u (was %d,
           // which is undefined for unsigned arguments).
           printf("\n arenaBlob # %u : Area= %u , Centroid=(%f,%f) \n",it->second->label, it->second->area, it->second->centroid.x, it->second->centroid.y);
           // Keep the blob in the pit zone (-1) nearest to the pit.
           if( zoneDecider(it->second->centroid.x,it->second->centroid.y)==-1)
               if(find_Distance_pit( liftbox.centroid.x,liftbox.centroid.y) >  find_Distance_pit(x,y))
               {                 
                                 liftbox.centroid.x=x;
                                 liftbox.centroid.y=y;
                                 liftbox.color=ch;
               }
     }
    cvReleaseBlobs(blobs);
    cvReleaseImage( &temp_blob );
}
コード例 #5
0
// Placeholder for hand-gesture detection: currently just walks the blob
// set counting them, with no gesture logic yet.
void MultiCursorAppCpp::detectHandGesture(CvBlobs blobs)
{
	INT blobID = 0;
	CvBlobs::const_iterator it = blobs.begin();
	while (it != blobs.end())
	{
		//
		// Not implemented yet
		//
		++blobID;
		++it;
	}
}
コード例 #6
0
  // Builds a binary mask from a label image: output pixels whose label is
  // present in 'blobs' become 0xff, everything else 0x00.
  // imgIn:  label image (IPL_DEPTH_LABEL, single channel)
  // imgOut: 8-bit single-channel destination mask
  // blobs:  set of blobs to keep (typically after area/label filtering)
  void cvFilterLabels(IplImage *imgIn, IplImage *imgOut, const CvBlobs &blobs)
  {
    CV_FUNCNAME("cvFilterLabels");
    __CV_BEGIN__;
    {
      CV_ASSERT(imgIn&&(imgIn->depth==IPL_DEPTH_LABEL)&&(imgIn->nChannels==1));
      CV_ASSERT(imgOut&&(imgOut->depth==IPL_DEPTH_8U)&&(imgOut->nChannels==1));

      // Row strides in *elements*: CvLabel units for imgIn, bytes for imgOut.
      int stepIn = imgIn->widthStep / (imgIn->depth / 8);
      int stepOut = imgOut->widthStep / (imgOut->depth / 8);
      int imgIn_width = imgIn->width;
      int imgIn_height = imgIn->height;
      int imgIn_offset = 0;
      int imgOut_width = imgOut->width;
      int imgOut_height = imgOut->height;
      int imgOut_offset = 0;
      // Honour any ROI set on either image.
      if(imgIn->roi)
      {
	imgIn_width = imgIn->roi->width;
	imgIn_height = imgIn->roi->height;
	imgIn_offset = imgIn->roi->xOffset + (imgIn->roi->yOffset * stepIn);
      }
      if(imgOut->roi)
      {
	imgOut_width = imgOut->roi->width;
	imgOut_height = imgOut->roi->height;
	imgOut_offset = imgOut->roi->xOffset + (imgOut->roi->yOffset * stepOut);
      }

      char *imgDataOut=imgOut->imageData + imgOut_offset;
      CvLabel *imgDataIn=(CvLabel *)imgIn->imageData + imgIn_offset;

      // Walk both images row by row, advancing each pointer by its stride.
      for (unsigned int r=0;r<(unsigned int)imgIn_height;r++,
	  imgDataIn+=stepIn,imgDataOut+=stepOut)
      {
	for (unsigned int c=0;c<(unsigned int)imgIn_width;c++)
	{
	  if (imgDataIn[c])
	  {
	    // Labelled pixel: keep only if its blob survived filtering.
	    if (blobs.find(imgDataIn[c])==blobs.end()) imgDataOut[c]=0x00;
	    else imgDataOut[c]=(char)0xff;
	  }
	  else
	    imgDataOut[c]=0x00;
	}
      }
    }
    __CV_END__;
  }
コード例 #7
0
ファイル: cvblob.cpp プロジェクト: panpeter90/GUI
  // Removes from 'blobs' every blob whose label differs from 'label';
  // the (at most one) matching blob is kept, all others are released.
  void cvFilterByLabel(CvBlobs &blobs, CvLabel label)
  {
    CvBlobs::iterator it=blobs.begin();
    while(it!=blobs.end())
    {
      CvBlob *blob=(*it).second;
      if (blob->label!=label)
      {
	// Fix: use cvReleaseBlob (as cvFilterByArea does) instead of a plain
	// delete, so the blob's internal contour chain codes are freed too.
	cvReleaseBlob(blob);
	CvBlobs::iterator tmp=it;
	++it;
	blobs.erase(tmp);
      }
      else
	++it;
    }
  }
コード例 #8
0
// Locates a given blob within another set of blobs.
// (For now it just returns the NEAREST one; the idea is to make it more
// robust later, e.g. with a scoring system.)
// blobanterior: the blob from the previous frame to re-locate.
// blobs:        candidate blobs in the current frame.
// Returns the nearest candidate if its centroid is within 100 pixels,
// otherwise returns 'blobanterior' (the blob is assumed lost).
CvBlob ubicarBlob(CvBlob blobanterior, CvBlobs blobs){

	// Fix: guard against an empty candidate set — the original indexed
	// the first element unconditionally, which is undefined behaviour.
	if (blobs.empty())
		return blobanterior;

	// Previous centroid, truncated to integer pixels.
	CvPoint centroideanterior;
	centroideanterior.x = blobanterior.centroid.x;
	centroideanterior.y = blobanterior.centroid.y;

	// Closest blob seen so far (seeded with the first candidate).
	// Fix: iterate the map directly instead of copying it into a vector.
	CvBlob actual = *blobs.begin()->second;
	double distancia = Distance2(centroideanterior.x, centroideanterior.y,
	                             actual.centroid.x, actual.centroid.y);

	// Scan every blob and keep the one with the nearest centroid.
	for (CvBlobs::const_iterator it = blobs.begin(); it != blobs.end(); ++it)
	{
		CvPoint centroide;
		centroide.x = (*it->second).centroid.x;
		centroide.y = (*it->second).centroid.y;
		double distanciaNueva = Distance2(centroideanterior.x, centroideanterior.y,
		                                  centroide.x, centroide.y);
		if (distanciaNueva < distancia)
		{
			actual = (*it->second);
			distancia = distanciaNueva;
		}
	}

	// Assume the blob does not move more than 100 pixels between frames.
	// This should later evaluate whether the blob was found or vanished,
	// rather than using distance alone.
	if (distancia < 100)
	{
		return actual;
	}
	return blobanterior;
}
コード例 #9
0
ファイル: cvblob.cpp プロジェクト: walchko/tennis_ball
// Returns the label of the blob with the largest area, or 0 when the
// set is empty. Ties keep the first blob encountered.
CvLabel cvGreaterBlob(const CvBlobs &blobs)
{
    CvLabel best = 0;
    unsigned int bestArea = 0;

    CvBlobs::const_iterator it = blobs.begin();
    for (; it != blobs.end(); ++it)
    {
        //if ((!blob->_parent)&&(blob->area>maxArea))
        const CvBlob *candidate = it->second;
        if (candidate->area > bestArea)
        {
            bestArea = candidate->area;
            best = candidate->label;
        }
    }

    return best;
}
コード例 #10
0
ファイル: cvblob.cpp プロジェクト: panpeter90/GUI
  // Returns the label of the largest-area blob (0 for an empty set).
  CvLabel cvLargestBlob(const CvBlobs &blobs)
  {
    CvLabel winner = 0;
    unsigned int winnerArea = 0;

    CvBlobs::const_iterator it = blobs.begin();
    while (it != blobs.end())
    {
      const CvBlob *b = it->second;
      if (winnerArea < b->area)
      {
		winner = b->label;
		winnerArea = b->area;
      }
      ++it;
    }

    return winner;
  }
コード例 #11
0
ファイル: cvblob.cpp プロジェクト: panpeter90/GUI
  // Removes (and releases) every blob whose area lies outside the
  // inclusive range [minArea, maxArea]; the rest stay in the map.
  void cvFilterByArea(CvBlobs &blobs, unsigned int minArea, unsigned int maxArea)
  {
    for (CvBlobs::iterator it = blobs.begin(); it != blobs.end(); )
    {
      CvBlob *blob = it->second;
      bool keep = (blob->area >= minArea) && (blob->area <= maxArea);
      if (keep)
      {
        ++it;
      }
      else
      {
        cvReleaseBlob(blob);
        blobs.erase(it++);
      }
    }
  }
コード例 #12
0
//general blobs in arena and bot
//Detects blobs in 'filter'; for puzzle colours it records the centroid of
//each blob inside zone 1 in the global 'puzzle' array (index k), tagged
//with colour ch. Uses globals: temp_blob, area, x, y, puzzle, k.
void blobs(IplImage* filter,char ch)  
{
    //temp_blob to store the blobs.
    temp_blob=cvCreateImage(cvGetSize(filter),IPL_DEPTH_LABEL,1);
    //working with the blobs.
    CvBlobs blobs;
    cvLabel(filter, temp_blob, blobs);
    area=0;
    x=0;
    y=0;
    // 'p'/'o' use a larger area band than the other colours.
    if(ch=='p' ||ch== 'o')
    {
        cvFilterByArea(blobs, 400,3000);
        for (CvBlobs::const_iterator it=blobs.begin(); it!=blobs.end(); ++it)
        {
              area= it->second->area;
               x= it->second->centroid.x;
               y= it->second->centroid.y;
        }
     }
    
    else
    {
        cvFilterByArea(blobs, 100,700);
        for (CvBlobs::const_iterator it=blobs.begin(); it!=blobs.end(); ++it)
        {
              area= it->second->area;
               x= it->second->centroid.x;
               y= it->second->centroid.y;
               if(zoneDecider(it->second->centroid.x,it->second->centroid.y)==1)
               {                 
                    puzzle[k].centroid.x=x;
                    puzzle[k].centroid.y=y;
                    puzzle[k].color=ch;
                    k++;
                // Fix: label and area are unsigned ints — print with %u
                // (was %d, undefined for unsigned arguments).
                printf("\n Blob # %u : Area= %u , Centroid=(%f,%f) \n",it->second->label, it->second->area, it->second->centroid.x, it->second->centroid.y);
                }
        }
     }
    cvReleaseBlobs(blobs);
    cvReleaseImage( &temp_blob );
}
int main(){
	Mat frame;
	Mat frame_small;
	Mat frame_hsv;

	VideoCapture cap;
	cap.open(0);

	namedWindow("Captured Frame", CV_WINDOW_AUTOSIZE);
	namedWindow("Detected Blobs", CV_WINDOW_AUTOSIZE);

	while (1){
		cap >> frame;
		resize(frame,frame_small,Size(),1,1,CV_INTER_AREA);
		cvtColor(frame_small, frame_small, CV_BGR2HSV);
		inRange(frame_small,Scalar(YELLOW_HUE_LOWER,50,50),Scalar(YELLOW_HUE_UPPER,255,255),frame_hsv);
		//frame_hsv = 255-frame_hsv;
		//resize(image_hsv,image_hsv,Size(),4,4,CV_INTER_AREA);
		Mat frame_bw = frame_hsv>128;
		IplImage image_bw = frame_bw;
		IplImage image_small = frame_small;
		//cvThreshold(&hsv_1, &hsv_1, 100, 200, CV_THRESH_BINARY);
		IplImage *labelImg = cvCreateImage(cvGetSize(&image_bw), IPL_DEPTH_LABEL, 1);
		CvBlobs blobs;
		unsigned int result = cvLabel(&image_bw, labelImg, blobs);
		for (CvBlobs::const_iterator it=blobs.begin(); it!=blobs.end(); ++it)
		{
  			// cout << "Blob #" << it->second->label << ": Area=" << it->second->area << ", Centroid=(" << it->second->centroid.x << ", " << it->second->centroid.y << ")" << endl;
  			x[it->second->label] = 0.8*x[it->second->label]+0.2*it->second->centroid.x;
  			y[it->second->label] = 0.8*x[it->second->label]+0.2*it->second->centroid.y;
		}
		cvRenderBlobs(labelImg, blobs, &image_small, &image_small);
		cvShowImage("Detected Blobs", &image_small);
		imshow("Captured Frame", frame);
		if(waitKey(10)>=0) break;
		cout << "(x,y) => (" << x[1] << "," << y[1] <<"); (x,y) => (" << x[2] << "," << y[2] << ")\n";
	}
	return 0;
}
コード例 #14
0
ファイル: test.cpp プロジェクト: 553406268/cvblob
// cvblob demo: labels the blobs found in test.png (within an 800x500 ROI),
// renders them, then for each blob prints its mean colour and draws its
// contour, a simplified polygon (blue) and the polygon's convex hull (green).
int main()
{
  IplImage *img = cvLoadImage("test.png", 1);

  // Restrict processing to this region of interest.
  cvSetImageROI(img, cvRect(100, 100, 800, 500));

  // Binarise: greyscale + fixed threshold at 100.
  IplImage *grey = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 1);
  cvCvtColor(img, grey, CV_BGR2GRAY);
  cvThreshold(grey, grey, 100, 255, CV_THRESH_BINARY);

  IplImage *labelImg = cvCreateImage(cvGetSize(grey),IPL_DEPTH_LABEL,1);

  CvBlobs blobs;
  unsigned int result = cvLabel(grey, labelImg, blobs);

  IplImage *imgOut = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 3); cvZero(imgOut);
  cvRenderBlobs(labelImg, blobs, img, imgOut);

  //unsigned int i = 0;

  // Render contours:
  for (CvBlobs::const_iterator it=blobs.begin(); it!=blobs.end(); ++it)
  {
    //cvRenderBlob(labelImg, (*it).second, img, imgOut);
    
    CvScalar meanColor = cvBlobMeanColor((*it).second, labelImg, img);
    cout << "Mean color: r=" << (unsigned int)meanColor.val[0] << ", g=" << (unsigned int)meanColor.val[1] << ", b=" << (unsigned int)meanColor.val[2] << endl;

    // Chain code -> polygon, then simplify and take the convex hull.
    CvContourPolygon *polygon = cvConvertChainCodesToPolygon(&(*it).second->contour);

    CvContourPolygon *sPolygon = cvSimplifyPolygon(polygon, 10.);
    CvContourPolygon *cPolygon = cvPolygonContourConvexHull(sPolygon);

    cvRenderContourChainCode(&(*it).second->contour, imgOut);
    cvRenderContourPolygon(sPolygon, imgOut, CV_RGB(0, 0, 255));
    cvRenderContourPolygon(cPolygon, imgOut, CV_RGB(0, 255, 0));

    // The polygon helpers return heap objects owned by the caller.
    delete cPolygon;
    delete sPolygon;
    delete polygon;

    // Render internal contours:
    for (CvContoursChainCode::const_iterator jt=(*it).second->internalContours.begin(); jt!=(*it).second->internalContours.end(); ++jt)
      cvRenderContourChainCode((*jt), imgOut);

    //stringstream filename;
    //filename << "blob_" << setw(2) << setfill('0') << i++ << ".png";
    //cvSaveImageBlob(filename.str().c_str(), imgOut, (*it).second);
  }

  cvNamedWindow("test", 1);
  cvShowImage("test", imgOut);
  //cvShowImage("grey", grey);
  cvWaitKey(0);
  cvDestroyWindow("test");

  // Release everything created above, blobs included.
  cvReleaseImage(&imgOut);
  cvReleaseImage(&grey);
  cvReleaseImage(&labelImg);
  cvReleaseImage(&img);

  cvReleaseBlobs(blobs);

  return 0;
}
コード例 #15
0
ファイル: obstacle_detection.cpp プロジェクト: p-kar/vision
int main()
{
	//INITIALIZE CAMERA
	HIDS hCam = 1;
	initializeCam(hCam);
	setImgMem(hCam);

	//Windows
	cvNamedWindow("Live",CV_WINDOW_AUTOSIZE);
	cvNamedWindow("Threshed", CV_WINDOW_AUTOSIZE);

	while(1)
		{

			//Image Variables
			IplImage* frame=cvCreateImage(cvSize(752, 480), 8, 3);//fisheye image
			IplImage* img_hsv=cvCreateImage(cvSize(752, 480), 8, 3);//Image in HSV color space
			IplImage* threshy = cvCreateImage(cvSize(752, 480), 8, 1);
			IplImage* labelImg=cvCreateImage(cvSize(752, 480),IPL_DEPTH_LABEL,1);//Image Variable for blobs

			CvBlobs blobs;

			int xu, yu;		//coordinates of undistorted image
			int xd, yd;		//coordinates in distorted image

			//Getting the current frame
			getFrame(hCam, frame);
			//If failed to get break the loop
			if(!frame)
				break;

			cvCvtColor(frame,img_hsv,CV_BGR2HSV);
			//Thresholding the frame for yellow
			cvInRangeS(img_hsv, cvScalar(20, 100, 20), cvScalar(30, 255, 255), threshy);
			// cvInRangeS(img_hsv, cvScalar(0,0,0), cvScalar(16,255,255), threshy);
			// cvInRangeS(img_hsv, cvScalar(0,0,0),cvScalar(16,255,255), threshy);//cvScalar(0, 120, 40), cvScalar(255, 255, 255)
			//Filtering the frame
			cvSmooth(threshy,threshy,CV_MEDIAN,7,7);
			//Finding the blobs
			unsigned int result=cvLabel(threshy,labelImg,blobs);
			//Filtering the blobs
			cvFilterByArea(blobs,100,10000);
			//Rendering the blobs
			cvRenderBlobs(labelImg,blobs,frame,frame);

			for (CvBlobs::const_iterator it=blobs.begin(); it!=blobs.end(); ++it)
			{
				xd =(it->second->maxx + it->second->minx )/2;
				// yd =(it->second->maxy + it->second->miny )/2;
				yd =(it->second->maxy);

				cvCircle(frame,cvPoint(xd,yd),2,CV_RGB(255,0,0),3);
				
				xd = xd - 752/2;
				yd = -yd + 480/2;

				cout<<"\nnon-linear coords: xd="<<xd<<"     yd="<<yd<<endl;

				getLinearCoords(xd, yd, &xu, &yu);
				cout<<"\nlinear coords: x="<<xu<<"     y="<<yu<<endl;			
				// getPt(xu, yu,current.angle,current.focal,current.pix2cmy,current.s_view_compensation);
			}
			//Showing the images
			cvShowImage("Live",frame);
			cvShowImage("Threshed",threshy);

			char c=cvWaitKey(10);

			if(int(c) == 27)
				break;

			cvReleaseImage(&frame);
			cvReleaseImage(&threshy);
			cvReleaseImage(&img_hsv);
			cvReleaseImage(&labelImg);
		}

	//Cleanup
	cvDestroyAllWindows();
	exitCam(hCam);
	return 0;
}
コード例 #16
0
ファイル: cvtrack.cpp プロジェクト: chcbaram/BeagleBone
  // Matches the current frame's blobs against the existing tracks and
  // updates 'tracks' in place:
  //  - blob/track pairs closer than thDistance refresh the track;
  //  - unmatched blobs start new tracks;
  //  - unmatched tracks age, and are deleted once inactive >= thInactive,
  //    or when they go inactive while still younger than thActive.
  // NOTE(review): AB/IB/AT/IT/B/T/C are accessor macros over the 'close'
  // proximity matrix and the blob/track containers, defined elsewhere in
  // cvtrack.cpp — verify their layout against the (nBlobs+2)*(nTracks+2)
  // allocation below.
  void cvUpdateTracks(CvBlobs const &blobs, CvTracks &tracks, const double thDistance, const unsigned int thInactive, const unsigned int thActive)
  {
    CV_FUNCNAME("cvUpdateTracks");
    __CV_BEGIN__;

    unsigned int nBlobs = blobs.size();
    unsigned int nTracks = tracks.size();

    // Proximity matrix:
    // Last row/column is for ID/label.
    // Last-1 "/" is for accumulation.
    CvID *close = new unsigned int[(nBlobs+2)*(nTracks+2)]; // XXX Must be same type than CvLabel.

    try
    {
      // Inicialization:
      unsigned int i=0;
      for (CvBlobs::const_iterator it = blobs.begin(); it!=blobs.end(); ++it, i++)
      {
	AB(i) = 0;
	IB(i) = it->second->label;
      }

      CvID maxTrackID = 0;

      unsigned int j=0;
      for (CvTracks::const_iterator jt = tracks.begin(); jt!=tracks.end(); ++jt, j++)
      {
	AT(j) = 0;
	IT(j) = jt->second->id;
	if (jt->second->id > maxTrackID)
	  maxTrackID = jt->second->id;
      }

      // Proximity matrix calculation and "used blob" list inicialization:
      for (i=0; i<nBlobs; i++)
	for (j=0; j<nTracks; j++)
	  // Intentional assignment inside the condition: C(i,j) records
	  // whether blob i and track j are close enough to be matched.
	  if (C(i, j) = (distantBlobTrack(B(i), T(j)) < thDistance))
	  {
	    AB(i)++;
	    AT(j)++;
	  }

      /////////////////////////////////////////////////////////////////////////////////////////////////////////////////
      // Detect inactive tracks
      for (j=0; j<nTracks; j++)
      {
	unsigned int c = AT(j);

	if (c==0)
	{
	  //cout << "Inactive track: " << j << endl;

	  // Inactive track.
	  CvTrack *track = T(j);
	  track->inactive++;
	  track->label = 0;
	}
      }

      // Detect new tracks
      for (i=0; i<nBlobs; i++)
      {
	unsigned int c = AB(i);

	if (c==0)
	{
	  //cout << "Blob (new track): " << maxTrackID+1 << endl;
	  //cout << *B(i) << endl;

	  // New track.
	  maxTrackID++;
	  CvBlob *blob = B(i);
	  CvTrack *track = new CvTrack;
	  track->id = maxTrackID;
	  track->label = blob->label;
	  track->minx = blob->minx;
	  track->miny = blob->miny;
	  track->maxx = blob->maxx;
	  track->maxy = blob->maxy;
	  track->centroid = blob->centroid;
	  track->lifetime = 0;
	  track->active = 0;
	  track->inactive = 0;
	  tracks.insert(CvIDTrack(maxTrackID, track));
	}
      }

      // Clustering: each connected group of mutually-close blobs/tracks is
      // resolved by pairing its biggest track with its biggest blob.
      for (j=0; j<nTracks; j++)
      {
	unsigned int c = AT(j);

	if (c)
	{
	  list<CvTrack*> tt; tt.push_back(T(j));
	  list<CvBlob*> bb;

	  getClusterForTrack(j, close, nBlobs, nTracks, blobs, tracks, bb, tt);

	  // Select track: the one with the largest bounding-box area.
	  CvTrack *track;
	  unsigned int area = 0;
	  for (list<CvTrack*>::const_iterator it=tt.begin(); it!=tt.end(); ++it)
	  {
	    CvTrack *t = *it;

	    unsigned int a = (t->maxx-t->minx)*(t->maxy-t->miny);
	    if (a>area)
	    {
	      area = a;
	      track = t;
	    }
	  }

	  // Select blob: the one with the largest pixel area.
	  CvBlob *blob;
	  area = 0;
	  //cout << "Matching blobs: ";
	  for (list<CvBlob*>::const_iterator it=bb.begin(); it!=bb.end(); ++it)
	  {
	    CvBlob *b = *it;

	    //cout << b->label << " ";

	    if (b->area>area)
	    {
	      area = b->area;
	      blob = b;
	    }
	  }
	  //cout << endl;

	  // NOTE(review): 'track' and 'blob' rely on tt/bb being non-empty
	  // (tt always holds T(j); bb is presumably filled by
	  // getClusterForTrack) — if bb were empty, 'blob' would be read
	  // uninitialized. Confirm against getClusterForTrack's contract.
	  // Update track
	  //cout << "Matching: track=" << track->id << ", blob=" << blob->label << endl;
	  track->label = blob->label;
	  track->centroid = blob->centroid;
	  track->minx = blob->minx;
	  track->miny = blob->miny;
	  track->maxx = blob->maxx;
	  track->maxy = blob->maxy;
	  if (track->inactive)
	    track->active = 0;
	  track->inactive = 0;

	  // Others to inactive
	  for (list<CvTrack*>::const_iterator it=tt.begin(); it!=tt.end(); ++it)
	  {
	    CvTrack *t = *it;

	    if (t!=track)
	    {
	      //cout << "Inactive: track=" << t->id << endl;
	      t->inactive++;
	      t->label = 0;
	    }
	  }
	}
      }
      /////////////////////////////////////////////////////////////////////////////////////////////////////////////////

      // Age/prune pass: drop tracks that have been inactive too long, or
      // that went inactive before reaching thActive active frames.
      for (CvTracks::iterator jt=tracks.begin(); jt!=tracks.end();)
	if ((jt->second->inactive>=thInactive)||((jt->second->inactive)&&(thActive)&&(jt->second->active<thActive)))
	{
	  delete jt->second;
	  tracks.erase(jt++);
	}
	else
	{
	  jt->second->lifetime++;
	  if (!jt->second->inactive)
	    jt->second->active++;
	  ++jt;
	}
    }
    catch (...)
    {
      delete[] close;
      throw; // TODO: OpenCV style.
    }

    delete[] close;

    __CV_END__;
  }
コード例 #17
0
ファイル: blobtest.cpp プロジェクト: Hefestos/cup2015
int main()
{
	int hmin = 0, hmax = 180;
	int smin = 0, smax = 255;
	int vmin = 0, vmax = 255;
	int amin = 0, amax = 1000;

	//Structure to get feed from CAM
	CvCapture* capture=cvCreateCameraCapture(1);
	//Structure to hold blobs
	CvBlobs blobsBlue;
	CvBlobs blobsYellow;

	//Windows
	
	
	cvNamedWindow("Live");
	cvNamedWindow("Level");
	cvResizeWindow("Level",400,400);
	
	cvCreateTrackbar("Hmin", "Level", &hmin, 180, NULL);
	cvCreateTrackbar("Hmax", "Level", &hmax, 180, NULL);
	cvCreateTrackbar("Smin", "Level", &smin, 255, NULL);
	cvCreateTrackbar("Smax", "Level", &smax, 255, NULL);
	cvCreateTrackbar("Vmin", "Level", &vmin, 255, NULL);
	cvCreateTrackbar("Vmax", "Level", &vmax, 255, NULL);
	cvCreateTrackbar("Amin", "Level", &amin, 1000, NULL);
	cvCreateTrackbar("Amax", "Level", &amax, 1000, NULL);

	
	//Image Variables
	IplImage *frame=cvCreateImage(cvSize(w,h),8,3);   //Original Image
	IplImage *hsvframe=cvCreateImage(cvSize(w,h),8,3);//Image in HSV color space
	IplImage *labelImgYellow=cvCreateImage(cvSize(w,h),IPL_DEPTH_LABEL,1);//Image Variable for blobs
	IplImage *labelImgBlue=cvCreateImage(cvSize(w,h),IPL_DEPTH_LABEL,1);//Image Variable for blobs
	IplImage *threshYellow=cvCreateImage(cvSize(w,h),8,1); //Threshold image of yellow color
	IplImage *threshBlue=cvCreateImage(cvSize(w,h),8,1); //Threshold image of blue color
	//IplImage *threshYellow=cvCreateImage(cvSize(w,h),8,1); //Threshold image of yellow color
 
	//Getting the screen information
	int screenx = 1600;
	int screeny = 900;
 
	while(1)
	{
		//Getting the current frame
		IplImage *fram=cvQueryFrame(capture);
		//If failed to get break the loop
		if(!fram)
			break;
		//Resizing the capture
		cvResize(fram,frame,CV_INTER_LINEAR );
		//Flipping the frame
		cvFlip(frame,frame,1);
		//Changing the color space
		cvCvtColor(frame,hsvframe,CV_BGR2HSV);
		//Thresholding the frame for yellow
		cvInRangeS(hsvframe,cvScalar(hmin,smin,vmin),cvScalar(hmax,smax,vmax),threshBlue);
		cvInRangeS(hsvframe,cvScalar(23,41,133),cvScalar(40,150,255),threshYellow);
		//cvInRangeS(hsvframe,cvScalar(153,102,76),cvScalar(178,255,255),threshYellow);

		//Filtering the frame
		cvSmooth(threshYellow,threshYellow,CV_MEDIAN,7,7);
		cvSmooth(threshBlue,threshBlue,CV_MEDIAN,7,7);



		//Finding the blobs
		unsigned int resultYellow=cvLabel(threshYellow,labelImgYellow,blobsYellow);
		unsigned int resultBlue=cvLabel(threshBlue,labelImgBlue,blobsBlue);
		//Filtering the blobs
		cvFilterByArea(blobsYellow,amin,amax);
		cvFilterByArea(blobsBlue,amin,amax);

		cvNamedWindow("FilterYellow");
		cvShowImage("FilterYellow",threshYellow);
		//cvResizeWindow("FilterYellow",400,400);

		cvNamedWindow("FilterBlue");
		cvShowImage("FilterBlue",threshBlue);
		//cvResizeWindow("FilterBlue",400,400);
		//Rendering the blobs
		//cvRenderBlobs(labelImgYellow,blobsYellow,frame,frame);
	
		for (CvBlobs::const_iterator it=blobsYellow.begin(); it!=blobsYellow.end(); ++it)
		{
			double moment10 = it->second->m10;
			double moment01 = it->second->m01;
			double area = it->second->area;
			//Variable for holding position
			int x1;
			int y1;
			//Calculating the current position
			x1 = moment10/area;
			y1 = moment01/area;
			//Mapping to the screen coordinates
			//int x=(int)(x1*screenx/w);
			//int y=(int)(y1*screeny/h);
			//Printing the position information
			cout<<"Yellow X: "<<x1<<" Yellow Y: "<<y1<<endl;
		}

		for (CvBlobs::const_iterator it=blobsBlue.begin(); it!=blobsBlue.end(); ++it)
		{
			double moment10 = it->second->m10;
			double moment01 = it->second->m01;
			double area = it->second->area;
			//Variable for holding position
			int x1;
			int y1;
			//Calculating the current position
			x1 = moment10/area;
			y1 = moment01/area;
			//Mapping to the screen coordinates
			//int x=(int)(x1*screenx/w);
			//int y=(int)(y1*screeny/h);
			//Printing the position information
			cout<<"Blue X: "<<x1<<" Blue Y: "<<y1<<endl;
		}
		//Showing the images
		cvShowImage("Live",frame);
		//Escape Sequence
		char c=cvWaitKey(33);
		if(c==27)
		break;
	}
	//Cleanup
	cvReleaseCapture(&capture);
	//cvDestroyAllWindows();
 
}
コード例 #18
0
// -------------------------------------------------------------------------------------------------
// ADTF filter stage: subtracts a stored base depth image from the incoming
// depth frame, blob-detects the remaining obstacles, renders them into the
// outgoing video sample, and transmits the detected objects (raw and
// mapped to the aerial view) on the object output pins.
tResult ObjectFilter::processImage(IMediaSample* sample) {
// -------------------------------------------------------------------------------------------------
  // Check if the sample is valid
  RETURN_IF_POINTER_NULL(sample);
  
  // Create the new image sample to transmit at the end
  cObjectPtr<IMediaSample> image_sample;
  RETURN_IF_FAILED(_runtime->CreateInstance(OID_ADTF_MEDIA_SAMPLE, IID_ADTF_MEDIA_SAMPLE, (tVoid**) &image_sample));
  RETURN_IF_FAILED(image_sample->AllocBuffer(output_format_.nSize));
  
  // Initialize the data buffers
  const tVoid* source_buffer;
  tVoid* dest_buffer;
  
  std::vector<Object> objects;
  std::vector<Object> mapped_objects;
  
  if (IS_OK(sample->Lock(&source_buffer))) {
    if (IS_OK(image_sample->WriteLock(&dest_buffer))) {
      int source_width = input_format_.nWidth;
      int source_height = input_format_.nHeight;

      // Create the source image matrix (two 8-bit channels; the depth
      // image lives in channel 1)
      Mat source_image(source_height, source_width, CV_8UC2, (uchar*)source_buffer);
      
      // Retrieve the actual depth image
      Mat source_channels[2];
      split(source_image, source_channels);
      Mat depth_image = source_channels[1];
      
      // Retrieve the base image
      Mat base_image = imread(GetPropertyStr("basePath"), 0);
      
      int base_threshold = GetPropertyInt("diffThreshold");
      int white_threshold = GetPropertyInt("whiteThreshold");
      
      for (int i = 0; i < depth_image.rows; i++) {
        for (int j = 0; j < depth_image.cols; j++) {
          // Merge white and black noise and substract from base  
          if (depth_image.at<uchar>(i,j) >= white_threshold) depth_image.at<uchar>(i,j) = 0;
          
          // Substract the base image from the actual image
          int grey_diff = depth_image.at<uchar>(i,j) - base_image.at<uchar>(i,j);
          if (depth_image.at<uchar>(i,j) == 0 || abs(grey_diff) < base_threshold) {
            depth_image.at<uchar>(i,j) = 0;
          } else if (i >= 113 && i <= 130) {
            // NOTE(review): rows 113-130 are patched with the row above —
            // presumably a known sensor artefact band; confirm.
            depth_image.at<uchar>(i,j) = depth_image.at<uchar>(i-1, j);
          }
        }
      }
      
      // Create objects used for blob detection
      Mat blob_image;
      cvtColor(depth_image, blob_image, COLOR_GRAY2BGR);
      CvBlobs blobs;
      IplImage *blob_label = cvCreateImage(cvSize(depth_image.cols, depth_image.rows), IPL_DEPTH_LABEL, 1);
      IplImage ipl_depth_image = depth_image;
      IplImage ipl_blob_image = blob_image;

      cvLabel(&ipl_depth_image, blob_label, blobs);
      cvFilterByArea(blobs, GetPropertyInt("minBlobSize"), GetPropertyInt("maxBlobSize"));
      
      for (CvBlobs::iterator i = blobs.begin(); i != blobs.end(); i++) {
        // Retrieve the blob data
        int minx = i->second->minx;
        int miny = i->second->miny;
        int width = i->second->maxx - i->second->minx;
        int height = i->second->maxy - i->second->miny;
        
        // Build the raw (unmapped) object; coordinates are doubled because
        // the source sample is half-resolution.
        Object cur(
          2 * minx - GetPropertyInt("OffsetHor"), 2 * miny,
            2 * width * GetPropertyFloat("ScaleWidth"), 2* height * GetPropertyFloat("ScaleHeight"),
              2 * source_width, 2 * source_height);
        objects.push_back(cur);
        
        // Map sufficiently wide objects into the aerial view.
        if (cur.get_relative_height() <= cur.get_relative_width() * 1.2) {
          Vector2 cur_origin(cur.get_absolute_x(), cur.get_absolute_y() + cur.get_absolute_height());
          Vector2 cur_max(cur.get_absolute_x() + cur.get_absolute_width(), cur.get_absolute_y() + cur.get_absolute_height());
          Vector2 cur_source(cur.get_absolute_x() + cur.get_absolute_width(), cur.get_absolute_y() + cur.get_absolute_height());
          cur_origin = PointTransformer::map_to_aerial_view(cur_origin);
          cur_max = PointTransformer::map_to_aerial_view(cur_max);
          cur_source = PointTransformer::map_to_aerial_view(cur_source);
          
          int obj_width = cur_max.get_x() - cur_origin.get_x();
          // Fix: height was computed as cur_max.get_y() - cur_max.get_y(),
          // which is always 0; measure from the origin instead.
          int obj_height = cur_max.get_y() - cur_origin.get_y();
          Object cur_mapped(cur_origin.get_x(), cur_origin.get_y(), obj_width, obj_height, cur_source.get_y(), cur_source.get_x());
          mapped_objects.push_back(cur_mapped);
        }
      }
 
      // Render the blobs into the image to be transmitted
      cvRenderBlobs(blob_label, blobs, &ipl_blob_image, &ipl_blob_image);
      
      // Copy the blobbed image into the destination buffer
      int output_height = output_format_.nHeight;
      int output_width = output_format_.nWidth;
      resize(blob_image, blob_image, Size(output_width, output_height));
      memcpy((uchar*)dest_buffer, (uchar*)blob_image.data, 3 * output_height * output_width);
      
      // Release the images used for blobbing
      cvReleaseImage(&blob_label);
      cvReleaseBlobs(blobs);
      
      image_sample->Unlock(dest_buffer);
    }
    
    image_sample->SetTime(sample->GetTime());
    sample->Unlock(source_buffer);
  }
  
  // Transmit the blobs via the object list output pin
  transmitObjects(objects, object_output_pin_);
  transmitObjects(mapped_objects, mapped_object_output_pin_);
  
  RETURN_IF_FAILED(video_output_pin_.Transmit(image_sample));
  
  RETURN_NOERROR;
}
コード例 #19
0
ファイル: goal_detect.cpp プロジェクト: p-kar/vision
int main()
{

	//INITIALIZE CAMERA
    HIDS hCam = 1;
    initializeCam(hCam);
    setImgMem(hCam);

	while(1)
	{

		IplImage* frame=cvCreateImage(cvSize(752,480), 8, 3);					//Captured Frame
		IplImage* img_hsv=cvCreateImage(cvSize(752, 480), 8, 3);				//Image in HSV color space
		IplImage* threshy = cvCreateImage(cvSize(752, 480), 8, 1);				//Threshed Image
		IplImage* labelImg=cvCreateImage(cvSize(752, 480),IPL_DEPTH_LABEL,1);	//Image Variable for blobs

		int xd1, xd2, yd1, yd2;				//Goalpost Coordinates

		//Getting the current frame
		getFrame(hCam, frame);
		//If failed to get break the loop
		if(!frame)
			break;

		cvCvtColor(frame,img_hsv,CV_BGR2HSV);					
		//Thresholding the frame for yellow
		// cvInRangeS(img_hsv, cvScalar(20, 100, 20), cvScalar(30, 255, 255), threshy);					
		cvInRangeS(img_hsv, cvScalar(0, 120, 100), cvScalar(255, 255, 255), threshy);
		//Filtering the frame - subsampling??
		cvSmooth(threshy,threshy,CV_MEDIAN,7,7);

		CvBlobs blobs;

		//Finding the blobs
		unsigned int result = cvLabel(threshy,labelImg,blobs);
		//Filtering the blobs
		cvFilterByArea(blobs,100,10000);
		//Rendering the blobs
		cvRenderBlobs(labelImg,blobs,frame,frame);

		for (CvBlobs::const_iterator it=blobs.begin(); it!=blobs.end(); ++it)
		{			
			xd1 = it->second->minx;
			yd1 = it->second->maxy;

			xd2 = it->second->maxx;
			yd2 = it->second->maxy;

			cvCircle(frame,cvPoint(xd1,yd1),2,CV_RGB(255,0,0),3);
			cvCircle(frame,cvPoint(xd2,yd2),2,CV_RGB(255,0,0),3);

			xd1 = xd1 - 752/2;
			yd1 = -yd1 + 480/2;

			xd2 = xd2 - 752/2;
			yd2 = -yd2 + 480/2;
			
			cout<<"\nnon-linear coords: xd1="<<xd1<<"     yd1="<<yd1;
			cout<<"\nnon-linear coords: xd2="<<xd2<<"     yd2="<<yd2;
			cout<<"\n\n\n\n";
		}

		//Showing the images
		cvShowImage("Live",frame);
		cvShowImage("Threshed",threshy);

		int c = cvWaitKey(10);

		if(c == 27)
			break;

		cvReleaseImage(&frame);
		cvReleaseImage(&threshy);
		cvReleaseImage(&img_hsv);
		cvReleaseImage(&labelImg);
	}

	//Cleanup
	cvDestroyAllWindows();
	exitCam(hCam);
	return 0;
}
コード例 #20
0
ファイル: viewer.cpp プロジェクト: Diegojnb/JdeRobot
void Viewer::createImageHSV(const colorspaces::Image& imageDepth)
{
	// Build an HSV-filtered copy of imgOrig, find the largest blob inside the
	// current H/S/V trackbar thresholds, and back-project its centroid to 3D
	// using the depth image and the camera calibration.
	float r,g,b;

	imgHSV.create(imgOrig.size(), CV_8UC1);
	imgOrig.copyTo(imgHSV);   // copyTo reallocates, so imgHSV ends up shaped like imgOrig

	IplImage *threshy=cvCreateImage(imgOrig.size(),8,1);

	// Per-pixel HSV test: keep pixels inside the [hmin,hmax]x[smin,smax]x[vmin,vmax]
	// box; zero them out of imgHSV otherwise, and build a binary mask in threshy.
	// NOTE(review): threshy->imageData[i] ignores widthStep row padding -- only
	// correct when the row stride equals the image width; confirm image sizes.
	for (int i=0;i< imgHSV.size().width*imgHSV.size().height; i++)
	{
		r = (float)(unsigned int)(unsigned char) imgOrig.data[i*3];
		g = (float)(unsigned int)(unsigned char) imgOrig.data[i*3+1];
		b = (float)(unsigned int)(unsigned char) imgOrig.data[i*3+2];

		const HSV* hsvData =  RGB2HSV_getHSV (r,g,b);

		if( hmax >= hsvData->H*DEGTORAD && hmin <= hsvData->H*DEGTORAD
				&& smax >= hsvData->S && smin <= hsvData->S
				&& vmax >= hsvData->V && vmin <=  hsvData->V )
		{
			threshy->imageData[i] = 1;   // any non-zero value is foreground for cvLabel
		}
		else
		{
			imgHSV.data[i*3] = imgHSV.data[i*3+1] = imgHSV.data[i*3+2] = 0;
			threshy->imageData[i] = 0;
		}


	}

	//Structure to hold blobs
	CvBlobs blobs;

	IplImage *iplOrig = new IplImage(imgOrig);

	if (mFrameBlob)
		cvReleaseImage(&mFrameBlob);
	mFrameBlob=cvCreateImage(imgOrig.size(),8,3);

	IplImage *labelImg=cvCreateImage(imgOrig.size(),IPL_DEPTH_LABEL,1);

	cvResize(iplOrig,mFrameBlob,CV_INTER_LINEAR );

	//Threshy is a binary image
	cvSmooth(threshy,threshy,CV_MEDIAN,7,7);

	//Finding the blobs.
	// FIX: this call had been commented out (apparently to silence an
	// unused-variable warning on `result`), which left `blobs` empty and
	// labelImg uninitialized, so no blob was ever detected and the code below
	// always reported "Max BLOB: 0". The return value is deliberately ignored.
	(void)cvLabel(threshy,labelImg,blobs);

	// FIX: filter by area BEFORE rendering, so only surviving blobs are drawn.
	cvFilterByArea(blobs,500,5000);

	//Rendering the blobs
	cvRenderBlobs(labelImg,blobs,mFrameBlob,mFrameBlob);

	// Track the largest blob and its centroid (first spatial moments / area).
	double area = 0.0;
	int x=0;
	int y=0;

	for (CvBlobs::const_iterator it=blobs.begin(); it!=blobs.end(); ++it)
	{
		//std::cout << "BLOB found: " << it->second->area  <<std::endl;

		double moment10 = it->second->m10;
		double moment01 = it->second->m01;

		if (it->second->area >= area)
		{
			area = it->second->area;
			x = moment10/area;
			y = moment01/area;
		}

	}

	std::cout << "Max BLOB: " << area << ": " << x << " , " << y  <<std::endl;

	//cvShowImage("Live",mFrameBlob);

	// Back-project the winning centroid into 3D (homogeneous pixel coords).
	if (area != 0)
	{
		Eigen::Vector3d pixel;
		pixel(0) = x;
		pixel(1) = y;
		pixel(2) = 1.0;

		Eigen::Vector4d target;

		mCalibration->BackProjectWithDepth(pixel, imageDepth, target);

	}

	// Release and free memory
	// FIX: also free the CvBlob objects allocated by cvLabel.
	cvReleaseBlobs(blobs);
	delete(iplOrig);
	cvReleaseImage(&threshy);
	cvReleaseImage(&labelImg);

}
コード例 #21
0
// Webcam "virtual mouse" (Windows-only): a yellow marker moves the cursor and
// a skin-coloured marker issues right clicks. Left click, double click and
// long click are synthesized from the timing with which the yellow marker
// disappears and reappears (the timer state machine inside the capture loop).
// NOTE(review): w and h (capture dimensions) are globals defined elsewhere in
// this file -- confirm their values match the camera resolution.
int main()
{


//Structure to get feed from CAM
CvCapture* capture=cvCreateCameraCapture(0);

//Structures to hold blobs (blobs = yellow marker, blobs2 = skin colour)
CvBlobs blobs;
CvBlobs blobs2;

//Windows
cvNamedWindow("Live",CV_WINDOW_AUTOSIZE);
cvNamedWindow("Threshold Filter",CV_WINDOW_AUTOSIZE);
cvNamedWindow("Threshold Filter2",CV_WINDOW_AUTOSIZE);
cvNamedWindow("Control",CV_WINDOW_AUTOSIZE);
cvNamedWindow("Control_RCE",CV_WINDOW_AUTOSIZE);

//Image Variables
IplImage *frame=cvCreateImage(cvSize(w,h),8,3); //Original Image
IplImage *hsvframe=cvCreateImage(cvSize(w,h),8,3);//Image in HSV color space
IplImage *labelImg=cvCreateImage(cvSize(w,h),IPL_DEPTH_LABEL,1);//Image Variable for blobs
IplImage *threshy=cvCreateImage(cvSize(w,h),8,1); //Threshold image of yellow color
IplImage *labelImg2=cvCreateImage(cvSize(w,h),IPL_DEPTH_LABEL,1);//Image Variable for blobs2
IplImage *threshy2=cvCreateImage(cvSize(w,h),8,1); //Threshold image of skin color
IplImage *threshy3=cvCreateImage(cvSize(w,h),8,1); //Threshold image of combine (AND of both masks; computed near the end but never displayed)



//Getting the screen information
int screenx = GetSystemMetrics(SM_CXSCREEN);
int screeny = GetSystemMetrics(SM_CYSCREEN);

//Variables for trackbars (HSV range for the yellow marker)
int h1=23;int s1=229;int v1=8;
int h2=39;int s2=255;int v2=255;

//Variables for trackbars 2 (HSV range for skin colour)
int h3=70;int s3=62;int v3=0;
int h4=98;int s4=110;int v4=61;

//Variables for time counting
// endTime       : timestamp of the last frame in which the yellow marker was seen
// StopTime      : how long the marker has currently been out of view; a gap of
//                 more than 0.3 s fires a left click
// *2 timers     : window in which a second disappearance counts as a double click
// *3 timers     : anchor used to detect a sustained gap (> 1.5 s) => long click
double beginTime=0;
double endTime=0;
double StopTime=0;
double beginTime2=0;
double endTime2=0;
double StopTime2=0;
double beginTime3=0;
double endTime3=0;
double StopTime3=0;

//variable to confirm double click (1 while a click is pending follow-up)
int clc=0;
bool doubleclick = false;

//variable to confirm long click
bool ToEnableLongClick = false;
bool longclick = false;


//Creating the trackbars
cvCreateTrackbar("Hue_1","Control",&h1,255,0);
cvCreateTrackbar("Hue_2","Control",&h2,255,0);
cvCreateTrackbar("Sat_1","Control",&s1,255,0);
cvCreateTrackbar("Sat_2","Control",&s2,255,0);
cvCreateTrackbar("Val_1","Control",&v1,255,0);
cvCreateTrackbar("Val_2","Control",&v2,255,0);

//Creating the trackbars 2
cvCreateTrackbar("Hue_1","Control_RCE",&h3,255,0);
cvCreateTrackbar("Hue_2","Control_RCE",&h4,255,0);
cvCreateTrackbar("Sat_1","Control_RCE",&s3,255,0);
cvCreateTrackbar("Sat_2","Control_RCE",&s4,255,0);
cvCreateTrackbar("Val_1","Control_RCE",&v3,255,0);
cvCreateTrackbar("Val_2","Control_RCE",&v4,255,0);


while(1)
{
	//Getting the current frame
	IplImage *fram=cvQueryFrame(capture);

	//Getting time in seconds
	double timestamp = (double)clock()/CLOCKS_PER_SEC;

	//If failed to get break the loop
	if(!fram)
	break;

	//Resizing the capture
	cvResize(fram,frame,CV_INTER_LINEAR );

	//Flipping the frame (mirror view so cursor motion matches hand motion)
	cvFlip(frame,frame,1);

	//Changing the color space
	cvCvtColor(frame,hsvframe,CV_BGR2HSV);

	//Thresholding the frame for yellow
	cvInRangeS(hsvframe,cvScalar(h1,s1,v1),cvScalar(h2,s2,v2),threshy);

	//Thresholding the fram for skin colour
	cvInRangeS(hsvframe,cvScalar(h3,s3,v3),cvScalar(h4,s4,v4),threshy2);


	//Filtering the frame (median filter removes speckle noise)
	cvSmooth(threshy,threshy,CV_MEDIAN,7,7);

	//Filtering the frame
	cvSmooth(threshy2,threshy2,CV_MEDIAN,7,7);

	//Finding the blobs
	// NOTE(review): result/result2 (cvLabel's return value) are never used.
	unsigned int result=cvLabel(threshy,labelImg,blobs);

	//Finding the blobs 2
	unsigned int result2=cvLabel(threshy2,labelImg2,blobs2);

	//Rendering the blobs
	cvRenderBlobs(labelImg,blobs,frame,frame);

	//Rendering the blobs
	cvRenderBlobs(labelImg2,blobs2,frame,frame);

	
	//Filtering the blobs
	//cvFilterByArea(blobs,60,1000);

	//Filtering the blobs
	//cvFilterByArea(blobs2,60,1000);

	// Start capturing cursor (found cursor on screen)
	// x/y stay (0,0) in frames where the yellow marker is not visible.
	int x = 0;
	int y = 0;


	//time counting to enable click
	beginTime = timestamp;
	beginTime3 = timestamp;
	
	//calculate time span to enable left mouse click
	StopTime = beginTime - endTime;

	// Implement left mouse click event or disable long click
	// (fires when the marker has been hidden for more than 0.3 s)
	if(StopTime > 0.3  && endTime >0 && clc == 0 )
	{
		//reset clc to enable left click again if long click enabled
		if( longclick)
		{
			ToEnableLongClick = false;
			longclick =  false;
			endTime3 = 0;
			//release long mouse click
			mouse_event(MOUSEEVENTF_LEFTUP, x, y, 0, 0);
			cout<<" Left_Up"<<endl;
			
		}

		else
		{
			//MouseClick Event
			mouse_event(MOUSEEVENTF_LEFTDOWN, x, y, 0, 0);
			mouse_event(MOUSEEVENTF_LEFTUP, x, y, 0, 0);
			cout<<" click "<<endl;
			endTime=0;
			clc = 1;
					
			endTime3 = timestamp;
			doubleclick = true;
			ToEnableLongClick = true;
		}

	}

	//stamp time for calculate double click event
	if(doubleclick)
	{
		beginTime2 = timestamp;
	}


	//counting for enable double clicks.
	StopTime2 = beginTime2 - endTime2; 
	
	//Double click event triggered (second disappearance within 0.3..1.5 s)
	if(doubleclick && StopTime2 >0.3 && StopTime2 < 1.5 && endTime2 > 0)
	{
		//Double MouseClick Event
		mouse_event(MOUSEEVENTF_LEFTDOWN, x, y, 0, 0);
		mouse_event(MOUSEEVENTF_LEFTUP, x, y, 0, 0);
		mouse_event(MOUSEEVENTF_LEFTDOWN, x, y, 0, 0);
		mouse_event(MOUSEEVENTF_LEFTUP, x, y, 0, 0);
		cout<<" Double click "<<endl;
		endTime = 0;
		endTime2 = 0;
		endTime3 = 0;
		doubleclick = false;
		ToEnableLongClick = false;
		clc = 0; 
	}

	//disable double click condition (the second gap came too late)
	if(StopTime2 >1.5 && doubleclick && endTime2 > 0)
	{
		endTime = 0;
		endTime2 = 0;
		doubleclick = false;
		clc = 0;		
	}

	//counting for long click condition
	StopTime3 = beginTime3 - endTime3;
	
	//enable long click (marker hidden for more than 1.5 s after a click)
	if(StopTime3 > 1.5 && endTime3 >0 && ToEnableLongClick)
	{
			mouse_event(MOUSEEVENTF_LEFTDOWN, x, y, 0, 0);
			cout<<" long click "<<endl;
			clc = 0;
			endTime2=0;
			doubleclick = false;
			ToEnableLongClick = false;
			longclick = true;
		
	}

	/*disable long click if time more than 4 second
	if(StopTime3 > 4 && longclick )
	{
		mouse_event(MOUSEEVENTF_LEFTUP, x, y, 0, 0);
		cout<<" release long click"<<endl;
		clc = 0;
		endTime3=0;
		longclick = false;
	}*/


	// One iteration per visible yellow blob: remember the time the marker was
	// seen (this is what keeps the click timers from firing) and move the
	// cursor to the blob centroid, mapped to screen coordinates.
	for (CvBlobs::const_iterator it=blobs.begin(); it!=blobs.end(); ++it)
	{
		//record time for first click
		endTime = timestamp;

		//reset clc to enable left click again if long click enabled
		if( ToEnableLongClick)
		{
			ToEnableLongClick = false;
			longclick =  false;
			endTime3 = 0;
			
			
		}

		//enable time for double click if first click is trigger
		if(doubleclick)
		{
			
			endTime2 = timestamp;
		}

		//disable double click if time elaspe more than 1.5 second
		float stopdoubleclick = endTime2 - endTime3;
		if(stopdoubleclick > 1.5 && doubleclick )
		{
			beginTime2 = 0;
			clc = 0;
			doubleclick = false;
		}

						
		// Centroid from first spatial moments divided by area.
		double moment10 = it->second->m10;
		double moment01 = it->second->m01;
		double area = it->second->area;

		//Variable for holding position
		int x1;
		int y1;

		//Calculating the current position
		x1 = moment10/area;
		y1 = moment01/area;

		//Mapping to the screen coordinates
		x=(int)(x1*screenx/w);
		y=(int)(y1*screeny/h);

		//Printing the position information
		//cout<<"X-CursorPosition: "<<x<<" Y-CursorPosition: "<<y<<endl;

		//Moving the mouse pointer
		SetCursorPos(x,y);
		
	}

	
	// Any visible skin-coloured blob triggers a right click.
	// NOTE(review): without MOUSEEVENTF_ABSOLUTE the dx/dy arguments of
	// mouse_event are ignored for button events, so the stale x/y are harmless.
	for (CvBlobs::const_iterator ti=blobs2.begin(); ti!=blobs2.end(); ++ti)
		{
		double moment10_2 = ti->second->m10;
		double moment01_2 = ti->second->m01;
		double area2 = ti->second->area;

		//Variable for holding position
		int x2;
		int y2;

		//Calculating the current position
		x2 = moment10_2/area2;
		y2 = moment01_2/area2;

		//Mapping to the screen coordinates
		int x3=(int)(x2*screenx/w);
		int y3=(int)(y2*screeny/h);

		//trigger right mouse click event
		mouse_event(MOUSEEVENTF_RIGHTDOWN, x, y, 0, 0);
		mouse_event(MOUSEEVENTF_RIGHTUP, x, y, 0, 0);
		cout<<" Right click"<<endl;
		}


	//Showing the images
	cvShowImage("Live",frame);
	cvAnd(threshy,threshy2,threshy3);
	cvShowImage("Threshold Filter",threshy);
	cvShowImage("Threshold Filter2",threshy2);


	//Escape Sequence
	char c=cvWaitKey(33);
	if(c==27)
	break;
}

//Cleanup
// NOTE(review): the IplImages created above are never released; acceptable at
// process exit but worth fixing if this loop is ever factored into a library.
cvReleaseCapture(&capture);
cvDestroyAllWindows();
}
コード例 #22
0
ファイル: main.cpp プロジェクト: EVMakers/ballbot
void process(Mat &img, Mat& out)
{
  // Locate the ball in one frame: HSV threshold, morphological clean-up,
  // cvBlob labelling, then project the largest blob's centroid into
  // ground-plane coordinates (x, y in cm) using the camera geometry.
  // On return, `out` holds the binary mask and `img` has blobs rendered on it.
  print_time("get frame");
  // Convert to HSV
  Size size = img.size();
  Mat hsv(size, CV_8UC3);
  cvtColor(img, hsv, CV_BGR2HSV);

  if (display) imshow("hsv", hsv);

  // Filter by hue (ranges expressed as fractions of full scale, hence *255)
  Mat mask(size, CV_8UC1);
  inRange(hsv, Scalar(0.11*255, 0.3*255, 0.20*255, 0),
	  Scalar(0.15*255, 1.00*255, 1.00*255, 0), mask);
  print_time("convert to hsv & threshold");

  if (display) {
    IplImage maskCopy = mask;
    cvShowImage("mask", &maskCopy);
  }

  // Clean up noise: open removes small specks, close fills holes
  static Mat closeElement = getStructuringElement(MORPH_RECT, Size(21, 21));
  static Mat openElement = getStructuringElement(MORPH_RECT, Size(3, 3));
  open(mask, mask, openElement);
  close(mask, mask, closeElement);
  print_time("morphological ops");

  // Find blobs
  CvBlobs blobs;
  IplImage maskCopy = mask,
    imgCopy = img;
  IplImage *labelImg = cvCreateImage(size, IPL_DEPTH_LABEL, 1);
  cvLabel(&maskCopy, labelImg, blobs);
  cvRenderBlobs(labelImg, blobs, &imgCopy, &imgCopy);
  cvReleaseImage(&labelImg);
  print_time("find blobs");


  CvBlob *largest = NULL;
  // Print blobs (verbose mode) while finding the largest one
  for (CvBlobs::const_iterator it=blobs.begin(); it!=blobs.end(); ++it) {
    if (verbose) {
      cout << "Blob #" << it->second->label;
      cout << ": Area=" << it->second->area;
      cout << ", Centroid=(" << it->second->centroid.x <<
	", " << it->second->centroid.y << ")" << endl;
    }

    if (largest == NULL || it->second->area > largest->area)
      largest = it->second;
  }

  if (largest != NULL) {
    // Distance to target: vertical pixel offset -> elevation angle -> range
    double theta = (double)(largest->centroid.y - frame_height/2)
      * RADIANS_PER_PX,
      y = camera_height / tan(theta + camera_angle);
    // Angle/X offset to target
    double phi = (double)(frame_width/2 - largest->centroid.x)
      * RADIANS_PER_PX,
      x = -y * tan(phi);
    
    printf("Ball at x,y = %.2f, %.2f cm\n", x, y);
  } else {
    printf("No ball found\n");
  }

  // FIX: free the CvBlob objects allocated by cvLabel -- they were leaked on
  // every processed frame. Must run after the last use of `largest`, which
  // points into `blobs`.
  cvReleaseBlobs(blobs);

  out = mask;
}
コード例 #23
0
ファイル: new3.cpp プロジェクト: asachin16/IIT-Kanpur
void Grain_size()
{
	  IplImage* labelImg;
      imgOut=cvCreateImage(cvGetSize(image),IPL_DEPTH_8U,3);
      labelImg=cvCreateImage(cvGetSize(image),IPL_DEPTH_LABEL,1);
      CvBlobs blobs;
      cvLabel(grey, labelImg, blobs);
      cvFilterByArea(blobs,100,1000000);
      cvRenderBlobs(labelImg, blobs, image, imgOut,CV_BLOB_RENDER_COLOR);
      final_binary();
      cvShowImage("blobs",imgOut);
      cvShowImage("Binary_image",grey);
      cvWaitKey(0);  
	char win[] = "source";
    int i;
    CvRect rect = { 0, 0, image->width,image->height };
    CvMemStorage* storage;
    CvSubdiv2D* subdiv;
    IplImage* img;
    CvScalar active_facet_color, delaunay_color, voronoi_color, bkgnd_color;

    active_facet_color = CV_RGB( 255, 0, 0 );
    delaunay_color  = CV_RGB( 0,0,0);
    voronoi_color = CV_RGB(0, 180, 0);
    bkgnd_color = CV_RGB(255,255,255);

    img = cvCreateImage( cvGetSize(image), 8, 3 );
    cvSet( img, bkgnd_color, 0 );

    cvNamedWindow( win, 1 );

    storage = cvCreateMemStorage(0);
    subdiv = init_delaunay( storage, rect );
     int area;
     int x,y;
     //int i=0;
    
       for(CvBlobs::const_iterator it=blobs.begin();it!=blobs.end();++it)
    {
         area=it->second->area;
         //printf("%d\n",area);
		 x=(int)it->second->centroid.x;
		 y=(int)it->second->centroid.y; 
        // printf("%d \t %d \n",x[i],y[i]);
		 fprintf(fb,"%d \t %d \n",x,y);  
		CvPoint2D32f fp = cvPoint2D32f( x,y);

        locate_point( subdiv, fp, img, active_facet_color );
        cvShowImage( win, img );
        if( cvWaitKey( 100 ) >= 0 )
            break;

        cvSubdivDelaunay2DInsert( subdiv, fp );
        cvCalcSubdivVoronoi2D( subdiv );
        cvSet( img, bkgnd_color, 0 );
        draw_subdiv( img, subdiv, delaunay_color, voronoi_color );
        cvShowImage( win, img );

        if( cvWaitKey( 100 ) >= 0 )
            break;                    
    }  
    cvSet( img, bkgnd_color, 0 );
    paint_voronoi( subdiv, img );
    cvShowImage( win, img );
    cvSaveImage ("123.jpg",img);

    cvWaitKey(0);

    cvReleaseMemStorage( &storage );
}
コード例 #24
0
ファイル: calculate_alpha.cpp プロジェクト: p-kar/vision
// Calibration sweep for the lens-distortion constant `ax` (a file-level
// global): for each calibration image listed in filenames_objdis.txt (three
// blobs assumed 40 cm apart), search for the focal length that makes the
// projected blob spacing most uniform, derive pix2cmy and a side-view
// compensation, then score the current `ax` against a 250 cm reference target
// and keep the best one.
// NOTE(review): relies on globals/helpers defined elsewhere in this file:
// ax, XU, YU, Y, diff, entry, getLinearCoords(), getPt() -- confirm signatures.
int main()
{
	//Image Variables
	IplImage* img_hsv = cvCreateImage(cvSize(320, 240), 8, 3);					//Image in HSV color space
	IplImage* threshy = cvCreateImage(cvSize(320, 240), 8, 1);					//Threshed Image
	IplImage* labelImg = cvCreateImage(cvSize(320, 240), IPL_DEPTH_LABEL, 1);	//Image Variable for blobs
	IplImage* debug_console = cvCreateImage(cvSize(50,50), 8, 1);				//Display values across code

	CvBlobs blobs;
	int xu, yu;													//coordinates of undistorted image
	int xd, yd;													//coordinates in distorted image
	int detected_blobs;
	ifstream filenames ("filenames_objdis.txt");
	// ofstream constants ("acyut_constants_objdis.dat",ios::binary);
	double optimal_ax;
	double min_ax = 1000.0;
	// Outer sweep over candidate ax values; also consumes one filename per
	// iteration, so the sweep ends when either the range or the file runs out.
	for (ax = -4.0e-06; ax > -10.0e-06 ; ax-=0.1e-06)
	{
		if(filenames.eof())
			break;

		char filename[20];		
		// getline(filenames,filename);
		filenames>>filename;
		char motor_loc[3];
		int j=0;   // NOTE(review): unused

		// NOTE(review): motor_loc is 3 bytes but this loop copies every
		// character up to the '.' -- a filename stem longer than 3 chars
		// overflows the buffer. It is also never null-terminated; the code
		// below only ever reads motor_loc[0..2], which is why this "works".
		for(int i=0;;++i)										//get motor location from filename
		{
			if(filename[i]=='.')
				break;
			motor_loc[i]=filename[i];
		}			

		// string optimum_focal;
		// string pix2cmyf;
		// string angle_s;
		float optimum_f=0;
		int min=1000;       // best (smallest) spacing deviation found so far
		float pix2cmy;
		int pixel_difference=0;
		float angle;
		int motor;

		IplImage* frame = cvLoadImage(filename);
		// cvAddS(frame, cvScalar(70,70,70), frame);
		cvCvtColor(frame,img_hsv,CV_BGR2HSV);				
		//Thresholding the frame for yellow
		//cvInRangeS(img_hsv, cvScalar(20, 100, 20), cvScalar(30, 255, 255), threshy);				
		cvInRangeS(img_hsv, cvScalar(0, 60, 100), cvScalar(255, 255, 255), threshy);
		//Filtering the frame - subsampling??
		cvSmooth(threshy,threshy,CV_MEDIAN,7,7);
		int counter = 0;

		// Runs at most once (counter!=1); the breaks below bail out early
		// when the image does not contain exactly 3 calibration blobs.
		while(counter!=1)
		{
			//Finding the blobs
			unsigned int result = cvLabel(threshy,labelImg,blobs);
			//Filtering the blobs
			cvFilterByArea(blobs,100,10000);
			//Rendering the blobs
			cvRenderBlobs(labelImg,blobs,frame,frame);
			cvShowImage("frame",frame);
			cvWaitKey(5);

			// Undistort each blob's bounding-box centre into (XU, YU).
			int i=0;
			for (CvBlobs::const_iterator it=blobs.begin(); it!=blobs.end(); ++it)
			{			
				xd = (it->second->maxx+it->second->minx)/2;
				yd = (it->second->maxy+it->second->miny)/2;
				xd = xd - 320/2;
				yd = -yd + 240/2;
				// cout<<"non-linear coords: xd="<<xd<<"     yd="<<yd<<endl;
				getLinearCoords(xd, yd, &xu, &yu);
				XU[i]=xu;
				YU[i]=yu;
				++i;
			}
			detected_blobs=i;

			if ( detected_blobs!=3 )							//if the number of blobs detected will be not equal to 3
				break;											//then the program will not consider to that instance
			// Decode the motor position from the (up to 3 digit) filename stem.
			int motor_pos=0;
			for(int k=0;k<3;++k)								//calculate motor_pos from motor_loc
			{
				int x;
				x=int(motor_loc[k]-'0');
				motor_pos=(motor_pos*10)+x;
			}
			motor=motor_pos;
			angle=((-512.0+motor_pos)/1024.0)*300.0;			//can depend on how you have attached the camera to the motor
																//basically required to calculate the angle at which IPM needs
			cout<<"\ndetected blobs :"<<detected_blobs;			//to be calculated
			cout<<"\nmotor_pos :"<<motor_pos;			
			cout<<"\n"<<"angle :"<<angle;					

			for(float f=10.0;f<1500.0;f+=0.01)					//calculate constants and optimal focal length
			{													//it scans for f from 10 - 1500 to check where
				for (int j=0;j<detected_blobs;++j)				//the deviation in pixel difference is minimum
					getPt(XU[j],YU[j],f,j,angle);				//for that value pix2cmy and side view compensation
																//constant is evaluated (assuming that the first blob
				// sort_Y(Y,3);									//is at a distance of 40cm from the camera)			
				for (int z=0;z<2;++z)
					diff[z]= abs(Y[z+1]-Y[z]);
				// sort_diff(diff);
				if (min>=abs(diff[1] - diff[0]))
				{
					min=abs(diff[1] - diff[0]);
					optimum_f=f;
					pixel_difference= abs(diff[1]);
				}
			}
			++counter;
			if(min==1000)
			break;
		}
		pix2cmy=40.0/pixel_difference;							//calculated based on the ASSUMPTION that individual blobs
																//are placed at a distance of 40 cm
		for(int h=0;h<detected_blobs;++h)
			getPt(XU[h],YU[h],optimum_f,h,angle);

		for(int l=0;l<3;++l)
			Y[l]=Y[l]*pix2cmy;
		
		// cout<<"\nOptimum Focal Length :"<<optimum_f;
		// cout<<"\nmin deviation :"<<min;
		// cout<<"\npix2cmy :"<<pix2cmy;
		// cout<<"\nDistance :";

		// for(int l=0;l<3;++l)
		// 	cout<<Y[l]<<"\t\t";

		// cout<<"\nDifference :";
		
		// for(int l=0;l<2;++l)
		// 	cout<<Y[l+1]-Y[l]<<"\t\t";
		
		// cout<<"\n\n\n";

		// Record this image's calibration entry (written to file in the
		// commented-out code below).
		entry.motor_pos=motor;
		entry.angle=angle;
		entry.focal=optimum_f;
		entry.pix2cmy=pix2cmy;
		entry.s_view_compensation=-(Y[2]-40.0);

  //       if(prev<optimum_f)										//ASSUMPTION that overall the net f values are increasing
  //       {
  //       	constants.write((char*)&entry,sizeof(entry));		//writing to file
		// 	prev=optimum_f;
		// }
		// Second pass: re-detect with the yellow threshold and score the
		// current ax against the 250 cm reference distance.
		// NOTE(review): reassigning `frame` here leaks the image loaded at the
		// top of the loop; neither image is ever cvReleaseImage'd.
		cvZero(frame);
		cvZero(img_hsv);
		cvZero(threshy);
		cvZero(debug_console);
		frame = cvLoadImage(filename);
		// cvAddS(frame, cvScalar(70,70,70), frame);
		cvCvtColor(frame,img_hsv,CV_BGR2HSV);				
		//Thresholding the frame for yellow
		cvInRangeS(img_hsv, cvScalar(20, 100, 20), cvScalar(30, 255, 255), threshy);				
		// cvInRangeS(img_hsv, cvScalar(0, 60, 100), cvScalar(255, 255, 255), threshy);
		//Filtering the frame - subsampling??
		cvSmooth(threshy,threshy,CV_MEDIAN,7,7);
		//Finding the blobs
		unsigned int result = cvLabel(threshy,labelImg,blobs);
		//Filtering the blobs
		cvFilterByArea(blobs,100,10000);
		//Rendering the blobs
		cvRenderBlobs(labelImg,blobs,frame,frame);
		for (CvBlobs::const_iterator it=blobs.begin(); it!=blobs.end(); ++it)
		{			
			xd = (it->second->maxx+it->second->minx)/2;
			yd = (it->second->maxy+it->second->miny)/2;
			xd = xd - 320/2;
			yd = -yd + 240/2;
			// cout<<"non-linear coords: xd="<<xd<<"     yd="<<yd<<endl;
			getLinearCoords(xd, yd, &xu, &yu);
			getPt(xu,yu,entry.focal,0,entry.angle);
			Y[0] = Y[0]*entry.pix2cmy + entry.s_view_compensation;
			if(abs(Y[0] - 250.0) < min_ax)
			{
				optimal_ax = ax;
				min_ax = abs(Y[0] - 250.0);
			}
		}
		// Dump the current sweep state onto the small debug image.
		CvFont font;
		cvInitFont(&font, CV_FONT_HERSHEY_SIMPLEX, 0.5, 0.5);
		char A[500];
		sprintf(A,"CURRENT ax : %lf\nOPTIMAL ax : %lf\nMIN_AX : %lf\n\nOPTIMAL FOCAL : %f\nANGLE : %f\nMOTOR POS : %d\nPIX2CMY : %f\n",
				ax,optimal_ax,min_ax,entry.focal,entry.angle,entry.motor_pos,entry.pix2cmy);
		cvPutText(debug_console, A, cvPoint(10,10), &font, cvScalar(255));
		cvShowImage("debug_console",debug_console);
		cvWaitKey(5);
	}
	cvWaitKey();
	// constants.close();
	filenames.close();
	// converttotext();	
	return 0;
}
コード例 #25
0
// Track moving objects in a video file: MOG2 background subtraction,
// morphological clean-up, cvBlob connected-component labelling, then a
// Kalman-filter based CTracker that draws each track's trace and id.
// NOTE(review): frame, fgMaskMOG2, pMOG2 and keyboard are globals defined
// elsewhere in this file.
void processVideo(char* videoFilename) {
    //create the capture object
    // NOTE(review): labelImg stays uninitialized until the first frame; if the
    // loop never runs (keyboard pre-set to 'q'/ESC), cvReleaseImage below is
    // called on an indeterminate pointer.
    IplImage *labelImg;//foreground
    // CTracker(dt, accel noise, distance threshold, max skipped frames,
    // max trace length) -- presumed parameter meaning; confirm in CTracker.h.
    CTracker openTracker((float)0.033, (float)0.6, (double)20.0, 10, 3000);
    CvTracks tracks;
    VideoCapture capture(videoFilename);
    if(!capture.isOpened()){
        //error in opening the video input
        cerr << "Unable to open video file: " << videoFilename << endl;
        exit(EXIT_FAILURE);
    }
    bool bInitialized = false;
    //read input data. ESC or 'q' for quitting
    while( (char)keyboard != 'q' && (char)keyboard != 27 ){
    //read the current frame
    if(!capture.read(frame)) {
        cerr << "Unable to read next frame." << endl;
        cerr << "Exiting..." << endl;
        exit(EXIT_FAILURE);
    }

    // Lazily allocate the label image once the frame size is known.
    if(bInitialized==false)
    {
        cv::Size frameSize(static_cast<int>(frame.cols), static_cast<int>(frame.rows)); 					
        labelImg = cvCreateImage(frameSize, IPL_DEPTH_LABEL, 1);         
        bInitialized = true;
    }
    //update the background model
    pMOG2.operator ()(frame,fgMaskMOG2);

    //open operator. (erode removes specks, dilate x4 grows the remaining blobs)
    cv::erode(fgMaskMOG2,fgMaskMOG2,cv::Mat(),cv::Point(-1,-1),1);
    cv::dilate(fgMaskMOG2,fgMaskMOG2,cv::Mat(),cv::Point(-1,-1),4);

    // step 2::blob analysis
    // NOTE(review): &(IplImage)x takes the address of a temporary -- compiles
    // only as an MSVC extension; last frame's CvBlob objects also leak at exit
    // (cvLabel frees the previous contents of `blobs` on each call).
    CvBlobs blobs;          
    unsigned int result = cvLabel(&(IplImage)fgMaskMOG2, labelImg, blobs);          
    cvFilterByArea(blobs, 125, 10000);
    cvRenderBlobs(labelImg, blobs, &(IplImage)frame, &(IplImage)frame, CV_BLOB_RENDER_BOUNDING_BOX);
    //cvUpdateTracks(blobs, tracks, 200, 5);
    //cvRenderTracks(tracks, &(IplImage)frame, &(IplImage)frame, CV_TRACK_RENDER_ID|CV_TRACK_RENDER_BOUNDING_BOX|CV_TRACK_RENDER_TO_LOG);
   
    //convert the blobs into detection structure;
    // NOTE(review): the Detection objects are new'd here and never deleted in
    // this function -- presumably CTracker::Update takes ownership; confirm,
    // otherwise this leaks one Detection per blob per frame.
    vector<Detection*> detections;
    for (CvBlobs::const_iterator it=blobs.begin();it!=blobs.end();++it)
    {
      CvBlob *blob=(*it).second;
      Detection *_detection = new Detection;
      _detection->centroid.x= blob->centroid.x;
      _detection->centroid.y= blob->centroid.y;
      _detection->brect.x  = blob->minx;
      _detection->brect.y  = blob->miny;
      _detection->brect.height = blob->maxy - blob->miny;
      _detection->brect.width  = blob->maxx - blob->minx;
      detections.push_back(_detection);
    }

    //Step 3 : give the list of all centroids of all detected contours to tracker. Track return the trace of the track, whose values are Kalman-filtered
    if(blobs.size() > 0)
    {			
        openTracker.Update(detections);
        int i, j;
        for(i=0; i < openTracker.tracks.size(); i++)
        {
        //add a threshold to de-noise, if the contour just appeared, maybe noise. set a threshold
        if(openTracker.tracks[i]->trace.size() > 10)
        {
            // Draw the track's bounding box and its Kalman-filtered trace.
            for(j = 0; j < (openTracker.tracks[i]->trace.size() - 2); j++)
            {
            cv::rectangle(frame, openTracker.tracks[i]->brect, Scalar(255,0,0));
            //line(fore, openTracker.tracks[i]->trace[j], openTracker.tracks[i]->trace[j+1], Colors[openTracker.tracks[i]->track_id % 9], 1, CV_AA);							
            line(frame, openTracker.tracks[i]->trace[j], openTracker.tracks[i]->trace[j+1], Scalar(255,0,0), 1, CV_AA);
            }

        // Label the track with its id at the end of the trace.
        stringstream ss;
        ss << openTracker.tracks[i]->track_id;
        string str = ss.str();
		
        putText(frame, str, openTracker.tracks[i]->trace[j], FONT_HERSHEY_SIMPLEX, 0.5, Scalar(255,0,0), 1);
      }
        }
    }
    //get the frame number and write it on the current frame
    stringstream ss;
    rectangle(frame, cv::Point(10, 2), cv::Point(100,20),cv::Scalar(255,255,255), -1);
    //show the current frame and the fg masks
    imshow("Frame", frame);
    imshow("FG Mask MOG 2", fgMaskMOG2);
    //get the input from the keyboard
    keyboard = waitKey( 30 );
    }
    //delete capture object
    capture.release();
    cvReleaseImage(&labelImg); 

}
コード例 #26
0
int main(int argc, char** argv)
{
  // Track dark objects in the video given as argv[1]: segment pixels whose
  // r, g and b are all below 15% of full scale, label the blobs with cvBlob,
  // and draw each blob's centroid and simplified contour polygon. Steps one
  // frame per keypress ('q'/ESC quits, 's' saves each blob as a PNG).
  CvTracks tracks;
  
  /*drawing*/
  CvPoint pt3;
  /*colors*/
  CvScalar green = CV_RGB(0,255,0);

  cvNamedWindow("red_object_tracking", CV_WINDOW_AUTOSIZE);

  CvCapture* capture;
  capture = cvCreateFileCapture (argv[1]);
  assert( capture != NULL );
  IplImage *img = cvQueryFrame( capture );

  CvSize imgSize = cvGetSize(img);

  IplImage *frame = cvCreateImage(cvGetSize(img), img->depth, img->nChannels);

  IplConvKernel* morphKernel = cvCreateStructuringElementEx(3, 3, 1, 1, CV_SHAPE_RECT, NULL);

  //unsigned int frameNumber = 0;
  unsigned int blobNumber = 0;

  bool quit = false;
  while (!quit)
  {
    IplImage *img = cvQueryFrame( capture );

    cvConvertScale(img, frame, 1, 0);

    IplImage *segmentated = cvCreateImage(imgSize, 8, 1);
    
    // Detecting dark pixels (r, g and b all below 0.15):
    // (This is very slow, use direct access better...)
    for (unsigned int j=0; j<imgSize.height; j++)
      for (unsigned int i=0; i<imgSize.width; i++)
      {
		CvScalar c = cvGet2D(frame, j, i);

		double b = ((double)c.val[0])/255.;
		double g = ((double)c.val[1])/255.;
		double r = ((double)c.val[2])/255.;
		unsigned char f = 255*((r<0.15)&&(b<0.15)&&(g<0.15));
		//unsigned char f = 255*((r>0.5+g)&&(r>0.5+b));

		cvSet2D(segmentated, j, i, CV_RGB(f, f, f));
      }

    // Morphological open removes isolated noise pixels before labelling.
    cvMorphologyEx(segmentated, segmentated, NULL, morphKernel, CV_MOP_OPEN, 1);

    cvShowImage("segmentated", segmentated);

    IplImage *labelImg = cvCreateImage(cvGetSize(frame), IPL_DEPTH_LABEL, 1);

    CvBlobs blobs;
    unsigned int result = cvLabel(segmentated, labelImg, blobs);
    cvFilterByArea(blobs, 50, (frame->width*frame->height)/30);
    //cvRenderBlobs(labelImg, blobs, frame, frame, CV_BLOB_RENDER_BOUNDING_BOX);
    //cvUpdateTracks(blobs, tracks, 200., 5);
    //cvRenderTracks(tracks, frame, frame, CV_TRACK_RENDER_ID|CV_TRACK_RENDER_BOUNDING_BOX);

    int l = 0;
    for (CvBlobs::const_iterator it=blobs.begin(); it!=blobs.end(); ++it, l++)
	{
	//define marker corners
	pt3 = cvPoint(it->second->centroid.x,it->second->centroid.y);
	// draw small corner circle
	cvCircle(frame, pt3, 3, green, 1, 8, 0);
	
	CvContourPolygon *polygon = cvConvertChainCodesToPolygon(&(*it).second->contour);

    CvContourPolygon *sPolygon = cvSimplifyPolygon(polygon, 5. );
    CvContourPolygon *cPolygon = cvPolygonContourConvexHull(sPolygon);
  
	if (1)  
//    if (sPolygon->size()==4)
	{
		// Mark each vertex of the simplified polygon...
		for (CvContourPolygon::const_iterator itp=sPolygon->begin(); itp!=sPolygon->end(); ++itp)
		{

			int x = itp->x;
			int y = itp->y;
			CvPoint pt1 = cvPoint(x,y);
			cvCircle(frame, pt1, 3, CV_RGB(255, 128, 50), 1, 8, 0);
		}
		// ...and render the polygon once.
		// FIX: the render call used to sit inside the vertex loop, redrawing
		// the same polygon once per vertex.
		cvRenderContourPolygon(sPolygon, frame, CV_RGB(0, 0, 255));
	}

    //cvRenderContourChainCode(&(*it).second->contour, frame);
    //cvRenderContourPolygon(cPolygon, frame, CV_RGB(0, 255, 0));

	// FIX: the polygon helpers return heap-allocated objects that were never
	// freed -- this leaked three polygons per blob per frame.
	delete cPolygon;
	delete sPolygon;
	delete polygon;
	}

	cvNamedWindow("red_object_tracking", CV_WINDOW_NORMAL) ;
    cvShowImage("red_object_tracking", frame);

    /*std::stringstream filename;
    filename << "redobject_" << std::setw(5) << std::setfill('0') << frameNumber << ".png";
    cvSaveImage(filename.str().c_str(), frame);*/

    cvReleaseImage(&labelImg);
    cvReleaseImage(&segmentated);

    // Block until a keypress: q/ESC quits, 's' dumps every blob as a PNG.
    char k = cvWaitKey(0)&0xff;
    switch (k)
    {
      case 27:
      case 'q':
      case 'Q':
        quit = true;
        break;
      case 's':
      case 'S':
        for (CvBlobs::const_iterator it=blobs.begin(); it!=blobs.end(); ++it)
        {
          std::stringstream filename;
          filename << "redobject_blob_" << std::setw(5) << std::setfill('0') << blobNumber << ".png";
          cvSaveImageBlob(filename.str().c_str(), img, it->second);
          blobNumber++;

          std::cout << filename.str() << " saved!" << std::endl;
        }
        break;
    }
    
    for (CvBlobs::const_iterator it=blobs.begin(); it!=blobs.end(); ++it)
	{
  		std::cout << "Blob #" << it->second->label << ": Area=" << it->second->area << ", Centroid=(" << it->second->centroid.x << ", " << it->second->centroid.y << ")" << std::endl;
	}

    cvReleaseBlobs(blobs);

    //frameNumber++;
  }

  cvReleaseStructuringElement(&morphKernel);
  cvReleaseImage(&frame);

  cvDestroyWindow("red_object_tracking");

  return 0;
}
コード例 #27
0
  // Label the connected components of a binary image.
  //
  // img    : input binary image (IPL_DEPTH_8U, single channel); non-zero pixels are foreground.
  // imgOut : output label image (IPL_DEPTH_LABEL, single channel); receives one CvLabel per pixel.
  // blobs  : output map label -> CvBlob*; any previous contents are released first.
  //
  // Returns the number of foreground pixels that were assigned a label.
  //
  // Implementation: a contour-tracing labeling pass. External contours are traced with the
  // movesE table, internal (hole) contours with movesI; both tables are defined elsewhere in
  // this file. Raw geometric moments (m00..m02) are accumulated per blob while scanning, and
  // central/normalized moments plus the invariants p1/p2 are derived in a final loop.
  // CV_BLOB_MAX_LABEL is written into imgOut as a temporary "visited background" marker so
  // background pixels adjacent to a traced contour are not revisited.
  unsigned int cvLabel (IplImage const *img, IplImage *imgOut, CvBlobs &blobs)
  {
    CV_FUNCNAME("cvLabel");
    __CV_BEGIN__;
    {
      CV_ASSERT(img&&(img->depth==IPL_DEPTH_8U)&&(img->nChannels==1));
      CV_ASSERT(imgOut&&(imgOut->depth==IPL_DEPTH_LABEL)&&(imgOut->nChannels==1));

      unsigned int numPixels=0;

      cvSetZero(imgOut);

      CvLabel label=0;
      cvReleaseBlobs(blobs);  // drop any blobs left from a previous call

      // Step (in pixels, not bytes) of a row in each image.
      unsigned int stepIn = img->widthStep / (img->depth / 8);
      unsigned int stepOut = imgOut->widthStep / (imgOut->depth / 8);
      unsigned int imgIn_width = img->width;
      unsigned int imgIn_height = img->height;
      unsigned int imgIn_offset = 0;
      unsigned int imgOut_width = imgOut->width;
      unsigned int imgOut_height = imgOut->height;
      unsigned int imgOut_offset = 0;
      // Honor a ROI if one is set on either image.
      if(img->roi)
      {
	imgIn_width = img->roi->width;
	imgIn_height = img->roi->height;
	imgIn_offset = img->roi->xOffset + (img->roi->yOffset * stepIn);
      }
      if(imgOut->roi)
      {
	imgOut_width = imgOut->roi->width;
	imgOut_height = imgOut->roi->height;
	imgOut_offset = imgOut->roi->xOffset + (imgOut->roi->yOffset * stepOut);
      }

      unsigned char *imgDataIn = (unsigned char *)img->imageData + imgIn_offset;
      CvLabel *imgDataOut = (CvLabel *)imgOut->imageData + imgOut_offset;

// Pixel accessors relative to the (possibly ROI-shifted) origin.
#define imageIn(X, Y) imgDataIn[(X) + (Y)*stepIn]
#define imageOut(X, Y) imgDataOut[(X) + (Y)*stepOut]

      // One-entry cache of the last blob looked up, to avoid a map find per pixel.
      CvLabel lastLabel = 0;
      CvBlob *lastBlob = NULL;

      for (unsigned int y=0; y<imgIn_height; y++)
      {
	for (unsigned int x=0; x<imgIn_width; x++)
	{
	  if (imageIn(x, y))
	  {
	    bool labeled = imageOut(x, y);

	    // An unlabeled foreground pixel whose upper neighbor is background starts a
	    // new external contour (and therefore a new blob).
	    if ((!imageOut(x, y))&&((y==0)||(!imageIn(x, y-1))))
	    {
	      labeled = true;

	      // Label contour.
	      label++;
	      CV_ASSERT(label!=CV_BLOB_MAX_LABEL);

	      imageOut(x, y) = label;
	      numPixels++;

	      // Mark the background pixel above as visited.
	      if (y>0)
		imageOut(x, y-1) = CV_BLOB_MAX_LABEL;

	      // Seed the new blob with this pixel's moments.
	      CvBlob *blob = new CvBlob;
	      blob->label = label;
	      blob->area = 1;
	      blob->minx = x; blob->maxx = x;
	      blob->miny = y; blob->maxy = y;
	      blob->m10=x; blob->m01=y;
	      blob->m11=x*y;
	      blob->m20=x*x; blob->m02=y*y;
	      blob->internalContours.clear();
	      blobs.insert(CvLabelBlob(label,blob));

              lastLabel = label;
	      lastBlob = blob;

	      blob->contour.startingPoint = cvPoint(x, y);

	      // Trace the external contour clockwise starting downwards-right (direction 1).
	      unsigned char direction=1;
	      unsigned int xx = x;
	      unsigned int yy = y;


	      bool contourEnd = false;

	      do
	      {
		for (unsigned int numAttempts=0; numAttempts<3; numAttempts++)
		{
		  bool found = false;

		  for (unsigned char i=0; i<3; i++)
		  {
		    int nx = xx+movesE[direction][i][0];
		    int ny = yy+movesE[direction][i][1];
		    // NOTE(review): nx/ny are int compared against unsigned widths; a negative
		    // nx wraps to a huge unsigned value, so (nx<imgIn_width) already rejects
		    // it before (nx>=0) is evaluated — correct, but only by implicit conversion.
		    if ((nx<imgIn_width)&&(nx>=0)&&(ny<imgIn_height)&&(ny>=0))
		    {
		      if (imageIn(nx, ny))
		      {
			found = true;

			// Record the chain-code step and advance along the contour.
			blob->contour.chainCode.push_back(movesE[direction][i][3]);

			xx=nx;
			yy=ny;

			direction=movesE[direction][i][2];
			break;
		      }
		      else
		      {
			// Background neighbor touched by the contour: mark as visited.
			imageOut(nx, ny) = CV_BLOB_MAX_LABEL;
		      }
		    }
		  }

		  if (!found)
		    direction=(direction+1)%4;  // rotate and retry
		  else
		  {
		    // Label the contour pixel (once) and fold it into the blob statistics.
		    if (imageOut(xx, yy) != label)
		    {
		      imageOut(xx, yy) = label;
		      numPixels++;

		      if (xx<blob->minx) blob->minx = xx;
		      else if (xx>blob->maxx) blob->maxx = xx;
		      if (yy<blob->miny) blob->miny = yy;
		      else if (yy>blob->maxy) blob->maxy = yy;

		      blob->area++;
		      blob->m10+=xx; blob->m01+=yy;
		      blob->m11+=xx*yy;
		      blob->m20+=xx*xx; blob->m02+=yy*yy;
		    }

		    break;
		  }
		  
		  // FIXME(review): assignment inside the condition is intentional here
		  // (back at the start pixel with the start direction ends the contour),
		  // but deserves parentheses/comment to silence compiler warnings.
		  if (contourEnd = ((xx==x) && (yy==y) && (direction==1)))
		    break;
		}
	      }
	      while (!contourEnd);

	    }

	    // A foreground pixel whose lower neighbor is unvisited background starts an
	    // internal (hole) contour of an existing blob.
	    if ((y+1<imgIn_height)&&(!imageIn(x, y+1))&&(!imageOut(x, y+1)))
	    {
	      labeled = true;

	      // Label internal contour
	      CvLabel l;
	      CvBlob *blob = NULL;

	      if (!imageOut(x, y))
	      {
		// Inherit the label from the left neighbor (guaranteed labeled here).
		l = imageOut(x-1, y);

		imageOut(x, y) = l;
		numPixels++;

                if (l==lastLabel)
                  blob = lastBlob;
                else
                {
                  blob = blobs.find(l)->second;
                  lastLabel = l;
                  lastBlob = blob;
                }
		blob->area++;
		blob->m10+=x; blob->m01+=y;
		blob->m11+=x*y;
		blob->m20+=x*x; blob->m02+=y*y;
	      }
	      else
	      {
		l = imageOut(x, y);

                if (l==lastLabel)
                  blob = lastBlob;
                else
                {
                  blob = blobs.find(l)->second;
                  lastLabel = l;
                  lastBlob = blob;
                }
	      }

	     // Mark the hole pixel below as visited so it won't re-trigger this branch.
	     imageOut(x, y+1) = CV_BLOB_MAX_LABEL;

	      CvContourChainCode *contour = new CvContourChainCode;
	      contour->startingPoint = cvPoint(x, y);

	      // Trace the internal contour (direction 3 = leftwards start).
	      unsigned char direction = 3;
	      unsigned int xx = x;
	      unsigned int yy = y;

	      do
	      {
		for (unsigned int numAttempts=0; numAttempts<3; numAttempts++)
		{
		  bool found = false;

		  for (unsigned char i=0; i<3; i++)
		  {
		    // NOTE(review): unlike the external trace, there is no bounds check here;
		    // presumably an internal contour is always enclosed by foreground, so the
		    // neighbors can never leave the image — confirm against movesI.
		    int nx = xx+movesI[direction][i][0];
		    int ny = yy+movesI[direction][i][1];
		    if (imageIn(nx, ny))
		    {
		      found = true;

		      contour->chainCode.push_back(movesI[direction][i][3]);

		      xx=nx;
		      yy=ny;

		      direction=movesI[direction][i][2];
		      break;
		    }
		    else
		    {
		      imageOut(nx, ny) = CV_BLOB_MAX_LABEL;
		    }
		  }

		  if (!found)
		    direction=(direction+1)%4;
		  else
		  {
		    if (!imageOut(xx, yy))
		    {
		      imageOut(xx, yy) = l;
		      numPixels++;

		      blob->area++;
		      blob->m10+=xx; blob->m01+=yy;
		      blob->m11+=xx*yy;
		      blob->m20+=xx*xx; blob->m02+=yy*yy;
		    }

		    break;
		  }
		}
	      }
	      while (!(xx==x && yy==y));

	      blob->internalContours.push_back(contour);
	    }
	    // Interior pixel (neither contour case fired): inherit the left neighbor's label.
	    if (!labeled)
	    {
	      CvLabel l = imageOut(x-1, y);

	      imageOut(x, y) = l;
	      numPixels++;

	      CvBlob *blob = NULL;
              if (l==lastLabel)
                blob = lastBlob;
              else
              {
                blob = blobs.find(l)->second;
                lastLabel = l;
                lastBlob = blob;
              }
	      blob->area++;
	      blob->m10+=x; blob->m01+=y;
	      blob->m11+=x*y;
	      blob->m20+=x*x; blob->m02+=y*y;
	    }
	  }
	}
      }

      // Final pass: derive centroid, central moments (u), normalized central moments (n)
      // and the two second-order invariants p1/p2 from the raw moments.
      for (CvBlobs::iterator it=blobs.begin(); it!=blobs.end(); ++it)
      {
	cvCentroid((*it).second);

        (*it).second->u11 = (*it).second->m11 - ((*it).second->m10*(*it).second->m01)/(*it).second->m00;
        (*it).second->u20 = (*it).second->m20 - ((*it).second->m10*(*it).second->m10)/(*it).second->m00;
        (*it).second->u02 = (*it).second->m02 - ((*it).second->m01*(*it).second->m01)/(*it).second->m00;

        // Normalization by m00^2 is correct for second-order moments (p+q = 2).
        double m00_2 = (*it).second->m00 * (*it).second->m00;

        (*it).second->n11 = (*it).second->u11 / m00_2;
        (*it).second->n20 = (*it).second->u20 / m00_2;
        (*it).second->n02 = (*it).second->u02 / m00_2;

        (*it).second->p1 = (*it).second->n20 + (*it).second->n02;

        double nn = (*it).second->n20 - (*it).second->n02;
        (*it).second->p2 = nn*nn + 4.*((*it).second->n11*(*it).second->n11);
      }

      return numPixels;

    }
    __CV_END__;
  }
Code example #28
0
File: cvtrack.cpp  Project: Pablohn26/gaudii
// Match the blobs of the current frame against the existing tracks.
//
// b          : blobs detected in the current frame.
// t          : track set, updated in place (tracks may be added, updated or deleted).
// thDistance : maximum blob-track distance (per distantBlobTrack) to count as "near".
// thInactive : a track unmatched for this many consecutive frames is deleted.
//
// A (nBlobs+2) x (nTracks+2) proximity matrix is built: C(i,j) is 1 when blob i and
// track j are within thDistance. The two extra rows/columns hold per-blob and
// per-track match counts (AB/AT) and the blob labels / track ids (IB/IT).
void cvUpdateTracks(CvBlobs &b, CvTracks &t, const double thDistance, const unsigned int thInactive)
{
  CV_FUNCNAME("cvUpdateTracks");
  __BEGIN__;

  unsigned int nBlobs = b.size();
  unsigned int nTracks = t.size();

  // Proximity matrix:
  // Last row/column is for ID/label.
  // Last-1 row/column is for accumulation.
  CvID *close = new unsigned int[(nBlobs+2)*(nTracks+2)]; // XXX Must be same type as CvLabel.

  // Access to matrix
#define C(blob, track) close[((blob) + (track)*(nBlobs+2))]
  // Access to accumulators
#define AB(label) C((label), (nTracks))
#define AT(id) C((nBlobs), (id))
  // Access to identifications
#define IB(label) C((label), (nTracks)+1)
#define IT(id) C((nBlobs)+1, (id))
  // Access to registers
#define B(label) b[IB(label)]
#define T(id) t[IT(id)]

  try
  {
    // Initialization: zero the accumulators and record blob labels / track ids.
    unsigned int i=0;
    for (CvBlobs::const_iterator it = b.begin(); it!=b.end(); ++it, i++)
    {
      AB(i) = 0;
      IB(i) = it->second->label;
    }

    CvID maxTrackID = 0;

    unsigned int j=0;
    for (CvTracks::const_iterator jt = t.begin(); jt!=t.end(); ++jt, j++)
    {
      AT(j) = 0;
      IT(j) = jt->second->id;
      if (jt->second->id > maxTrackID)
	maxTrackID = jt->second->id;
    }

    // Proximity matrix calculation (assignment in the condition is intentional).
    for (i=0; i<nBlobs; i++)
      for (j=0; j<nTracks; j++)
      {
	if (C(i, j) = (distantBlobTrack(B(i), T(j)) < thDistance))
	{
	  AB(i)++;
	  AT(j)++;
	}
      }

    // Run over tracks:
    for (j=0; j<nTracks; j++)
    {
      unsigned int c = AT(j);

      if (c == 1)
      {
	// Exactly one near blob: simple track-blob match.

	// Search for the blob
	for (i=0; (i<nBlobs)&&(!C(i, j)); i++) {}

	// Update track
	CvBlob *blob = B(i);
	CvTrack *track = T(j);
	track->label = blob->label;
	track->centroid = blob->centroid;
	track->minx = blob->minx;
	track->miny = blob->miny;
	track->maxx = blob->maxx;
	track->maxy = blob->maxy;
	track->inactive = 0;
      }
      else if (c > 1)
      {
	// Track divides into several blobs: the old track goes inactive and a
	// fresh track is created for every near blob.
	CvTrack *track = T(j);
	track->inactive++;
	track->label=0;

	// Create new tracks
	for (i=0; i<nBlobs; i++)
	{
	  if (C(i, j))
	  {
	    maxTrackID++;
	    CvBlob *blob = B(i);
	    CvTrack *track = new CvTrack;
	    track->id = maxTrackID;
	    track->label = blob->label;
	    track->minx = blob->minx;
	    track->miny = blob->miny;
	    track->maxx = blob->maxx;
	    track->maxy = blob->maxy;
	    track->centroid = blob->centroid;
	    track->inactive = 0;
	    t.insert(CvIDTrack(maxTrackID, track));
	  }
	}
      }
      else // if (c == 0)
      {
	// No near blob: the track is inactive this frame.
	CvTrack *track = T(j);
	track->inactive++;
	track->label = 0;
      }
    }

    // Run over blobs:
    for (i=0; i<nBlobs; i++)
    {
      unsigned int c = AB(i);

      if (c == 0)
      {
	// Blob matches no track: start a new track.
	maxTrackID++;
	CvBlob *blob = B(i);
	CvTrack *track = new CvTrack;
	track->id = maxTrackID;
	track->label = blob->label;
	track->minx = blob->minx;
	track->miny = blob->miny;
	track->maxx = blob->maxx;
	track->maxy = blob->maxy;
	track->centroid = blob->centroid;
	track->inactive = 0;
	t.insert(CvIDTrack(maxTrackID, track));
      }
      else if (c > 1)
      {
	// Several tracks join into this blob: create one new track for the merged
	// blob and retire the tracks that joined.

	// New track
	maxTrackID++;
	CvBlob *blob = B(i);
	CvTrack *track = new CvTrack;
	track->id = maxTrackID;
	track->label = blob->label;
	track->minx = blob->minx;
	track->miny = blob->miny;
	track->maxx = blob->maxx;
	track->maxy = blob->maxy;
	track->centroid = blob->centroid;
	track->inactive = 0;
	t.insert(CvIDTrack(maxTrackID, track));

	// Other tracks go inactive.
	// FIX: only the tracks actually near this blob (C(i, j) set) are retired;
	// previously the loop unconditionally marked EVERY track inactive whenever
	// any merge occurred, corrupting unrelated tracks.
	for (j=0; j<nTracks; j++)
	{
	  if (C(i, j))
	  {
	    T(j)->inactive++;
	    T(j)->label = 0;
	  }
	}
      }
    }

    // Delete tracks that have been inactive for too long.
    for (CvTracks::iterator jt=t.begin(); jt!=t.end();)
      if (jt->second->inactive>=thInactive)
      {
	delete jt->second;
	t.erase(jt++);
      }
      else
	++jt;
  }
  catch (...)
  {
    delete[] close;
    throw; // TODO: OpenCV style.
  }

  delete[] close;

  __END__;
}
Code example #29
0
int main()
{
  CvTracks tracks;

  cvNamedWindow("red_object_tracking", CV_WINDOW_AUTOSIZE);

  CvCapture *capture = cvCaptureFromCAM(0);
  cvGrabFrame(capture);
  IplImage *img = cvRetrieveFrame(capture);

  CvSize imgSize = cvGetSize(img);

  IplImage *frame = cvCreateImage(imgSize, img->depth, img->nChannels);

  IplConvKernel* morphKernel = cvCreateStructuringElementEx(5, 5, 1, 1, CV_SHAPE_RECT, NULL);

  //unsigned int frameNumber = 0;
  unsigned int blobNumber = 0;

  bool quit = false;
  while (!quit&&cvGrabFrame(capture))
  {
    IplImage *img = cvRetrieveFrame(capture);

    cvConvertScale(img, frame, 1, 0);

    IplImage *segmentated = cvCreateImage(imgSize, 8, 1);
    
    // Detecting red pixels:
    // (This is very slow, use direct access better...)
    for (unsigned int j=0; j<imgSize.height; j++)
      for (unsigned int i=0; i<imgSize.width; i++)
      {
	CvScalar c = cvGet2D(frame, j, i);

	double b = ((double)c.val[0])/255.;
	double g = ((double)c.val[1])/255.;
	double r = ((double)c.val[2])/255.;
	unsigned char f = 255*((r>0.2+g)&&(r>0.2+b));

	cvSet2D(segmentated, j, i, CV_RGB(f, f, f));
      }

    cvMorphologyEx(segmentated, segmentated, NULL, morphKernel, CV_MOP_OPEN, 1);

    //cvShowImage("segmentated", segmentated);

    IplImage *labelImg = cvCreateImage(cvGetSize(frame), IPL_DEPTH_LABEL, 1);

    CvBlobs blobs;
    unsigned int result = cvLabel(segmentated, labelImg, blobs);
    cvFilterByArea(blobs, 500, 1000000);
    cvRenderBlobs(labelImg, blobs, frame, frame, CV_BLOB_RENDER_BOUNDING_BOX);
    cvUpdateTracks(blobs, tracks, 200., 5);
    cvRenderTracks(tracks, frame, frame, CV_TRACK_RENDER_ID|CV_TRACK_RENDER_BOUNDING_BOX);

    cvShowImage("red_object_tracking", frame);

    /*std::stringstream filename;
    filename << "redobject_" << std::setw(5) << std::setfill('0') << frameNumber << ".png";
    cvSaveImage(filename.str().c_str(), frame);*/

    cvReleaseImage(&labelImg);
    cvReleaseImage(&segmentated);

    char k = cvWaitKey(10)&0xff;
    switch (k)
    {
      case 27:
      case 'q':
      case 'Q':
        quit = true;
        break;
      case 's':
      case 'S':
        for (CvBlobs::const_iterator it=blobs.begin(); it!=blobs.end(); ++it)
        {
          std::stringstream filename;
          filename << "redobject_blob_" << std::setw(5) << std::setfill('0') << blobNumber << ".png";
          cvSaveImageBlob(filename.str().c_str(), img, it->second);
          blobNumber++;

          std::cout << filename.str() << " saved!" << std::endl;
        }
        break;
    }

    cvReleaseBlobs(blobs);

    //frameNumber++;
  }

  cvReleaseStructuringElement(&morphKernel);
  cvReleaseImage(&frame);

  cvDestroyWindow("red_object_tracking");

  return 0;
}
Code example #30
0
File: main.cpp  Project: shazzadhub/G-MAC
// G-MAC gesture-mouse main loop: captures frames, segments either red objects
// or skin color into a binary mask, labels/tracks the blobs (cvBlob), and maps
// the dominant blob centroid to screen coordinates to drive the X11 mouse via
// Dis_handler. Relies on file-scope globals: w/h (capture size), mode flags
// (red, mouse, motiongesture, interactive, drag, click, clk_double,
// application, default_skin_values), gesture state (frameout, t_holder,
// currentGest), cursor state (renderPoint, holdX/holdY, movX/movY, oldX/oldY,
// fpflag, reinsertcount), status strings (gesav, doubleclk) and the keyboard()
// handler.
int main(int argc, char** argv) {
    Mat mainframes;     /** HOLDS THE MAIN CAPTURE */
    Mat proxy;          /** SAVES A COPY OF MAIN CAPTURE */
    Mat skinContainer;  /** Will HOLD THE NOISELESS SKIN THRESHOLD */
    Mat skinContour;    /** HOLDS THE CONTOURS DRAWN-SKIN THRESHOLD */
    Mat skin_morphClose;


    Mat se_mop = getStructuringElement(MORPH_RECT, Size(5, 5), Point(1, 1));
    Mat se_mop2_1 = getStructuringElement(MORPH_RECT, Size(5, 5), Point(3, 3));
    IplConvKernel* se_mop2 = cvCreateStructuringElementEx(5, 5, 3, 3, CV_SHAPE_RECT, NULL);


    VideoCapture capture(0);
    if(capture.isOpened() == false){
        cout << "camera not opened" << endl;
        exit(0);
    }
    cout << "camera width = " << capture.get(CV_CAP_PROP_FRAME_WIDTH) << "  ,  "  << "camera hight = " << capture.get(CV_CAP_PROP_FRAME_HEIGHT) <<endl;
    capture.set(CV_CAP_PROP_FRAME_WIDTH, w);      // setting the camera resolution to w x h
    capture.set(CV_CAP_PROP_FRAME_HEIGHT, h);


    SkinColor my_skin;
    Dis_handler del;
    Red redtrack;

    CvFont *myFont = new CvFont;
    CvFont *hudFont = new CvFont;
    cvInitFont(myFont, CV_FONT_HERSHEY_COMPLEX, 0.5, 0.5, 1, 2);
    cvInitFont(hudFont, CV_FONT_HERSHEY_COMPLEX, 0.8, 0.8, 1, 2);

    CvBlobs blobs;
    CvTracks tracks;
    IplImage *labelImg = cvCreateImage(cvSize(w,h), IPL_DEPTH_LABEL, 1);

    //namedWindow("Original feed", CV_WINDOW_OPENGL);
    //namedWindow("Skin Threshold", CV_WINDOW_OPENGL);
    namedWindow("Skin-Conture Threshold", CV_WINDOW_OPENGL);
    namedWindow("Rendering with Blobls", CV_GUI_EXPANDED);
    cvLoadWindowParameters("Rendering with Blobls");
    moveWindow("Rendering with Blobls", 1600, 1000);
    //cvResizeWindow("Rendering with Blobls", 1024, 786);

    createTrackbar("Ymin", "Original feed", &my_skin.Y_MIN, 256, 0);

    // FIX: was "while(true || capture.isOpened())" — the "true ||" made the
    // condition dead, so a closed camera was never detected.
    while(capture.isOpened()) {
        // Acquire a frame and back it up into proxy.
        capture >> mainframes;
        mainframes.copyTo(proxy);

        if(red == true){                                        // Red-Object Detection Mode
            // FIX: the old code cvCreateImage'd a buffer and immediately
            // overwrote the pointer with sampleRed()'s result, leaking one
            // image per frame. sampleRed() supplies the segmented image
            // directly (ownership assumed to stay with redtrack — confirm).
            IplImage *seg = redtrack.sampleRed(proxy);
            cvMorphologyEx(seg, seg, NULL, se_mop2, CV_MOP_OPEN, 1);
            destroyWindow("Skin_Threshold_Morphology_CLOSE");
            my_skin.destroyYCbCr();
            //cvShowImage("Skin Threshold", seg);
            cvLabel(seg, labelImg, blobs);
        }
        // Skin-color detection mode (the else belongs to "if(red == true)").
        else{
            if(default_skin_values) my_skin.setDefault();
            skinContainer = my_skin.sampleSkin(proxy);
            // Median blur + morphological close to smooth the skin mask.
            medianBlur(skinContainer, skinContainer, 7);
            morphologyEx(skinContainer, skinContainer, MORPH_CLOSE, se_mop2_1, Point(-1, -1), 1);

            skinContour = my_skin.binContours(skinContainer);
            moveWindow("Skin-Conture Threshold", 1600, 300);    // fix the window position
            imshow("Skin-Conture Threshold", skinContour);

            // Wrap the Mat in an IplImage header for the cvBlob C API.
            IplImage skinContainer_mask = skinContainer;
            IplImage* skinContainer_ipl = &skinContainer_mask;

            cvLabel(skinContainer_ipl, labelImg, blobs);
        }

        // Label rendering + tracking on the display frame.
        IplImage frame_mask = proxy;                // IplImage header over the Mat for the cvBlob API
        IplImage* blobFrame = &frame_mask;
        cvFilterByArea(blobs, 800, 1000000);        // reject small blobs (noise)
        cvRenderBlobs(labelImg, blobs, blobFrame, blobFrame, CV_BLOB_RENDER_BOUNDING_BOX|CV_BLOB_RENDER_CENTROID|CV_BLOB_RENDER_ANGLE|CV_BLOB_RENDER_COLOR);
        cvUpdateTracks(blobs, tracks, 200.0, 5);    // track blobs frame to frame
        cvRenderTracks(tracks, blobFrame, blobFrame, CV_TRACK_RENDER_ID|CV_TRACK_RENDER_BOUNDING_BOX);

        // Draw simplified polygon + convex hull of every blob contour.
        for(CvBlobs::const_iterator it=blobs.begin(); it!=blobs.end(); ++it){

          CvContourPolygon *polygon = cvConvertChainCodesToPolygon(&(*it).second->contour);

          CvContourPolygon *sPolygon = cvSimplifyPolygon(polygon, 5.0);
          CvContourPolygon *cPolygon = cvPolygonContourConvexHull(sPolygon);

          cvRenderContourChainCode(&(*it).second->contour, blobFrame, CV_RGB(255, 0 ,0));
          cvRenderContourPolygon(sPolygon, blobFrame, CV_RGB(0, 0, 255));
          cvRenderContourPolygon(cPolygon, blobFrame, CV_RGB(0, 255, 0));

          delete cPolygon;
          delete sPolygon;
          delete polygon;

          renderPoint = getshPoint();
          // FIXME(review): the original had the no-op statements
          // "renderPoint.x - 2.0;" / "renderPoint.y - 2.0;" here — probably
          // "-=" was intended. Left without effect to preserve behavior.
        }

        cvCircle(blobFrame, renderPoint, 3, CV_RGB(0.0, 255.0, 255.0), 4, CV_AA, 0);
        cvShowImage("Rendering with Blobls",blobFrame);

// ***********************************************************  MOUSE  *********************************************************** //
/** Map each blob's centroid from the camera frame (w x h) to the 1920x1080
   screen and hand the point to the display handler (mouse move / click / drag). **/
        if(mouse){
            if(blobs.empty()){                  // hand left the camera frame
                frameout = true;                // flag re-entry handling
                t_holder = 0;                   // reset the gesture clock
                currentGest.clear();            // clear the gesture buffer
            }

            for (CvBlobs::const_iterator it=blobs.begin(); it!=blobs.end(); ++it){
                // Centroid of the blob in camera-frame pixels (m10/m00, m01/m00).
                double moment10 = it->second->m10;
                double moment01 = it->second->m01;
                double area = it->second->area;

                int x1 = (int)moment10 / area;
                int y1 = (int)moment01 / area;

//.........................................................GESTURE RELATED.................................................................//
                if(motiongesture){
                    time_t t_current = time(NULL);              // seconds since epoch
                    if(t_holder == 0){
                        t_holder = (int)t_current;
                    }

                    int currentsec = (int)t_current - t_holder; // time since hand entered the frame

                    if(currentsec <= 4){        // 4-second window to record the gesture path
                        currentGest.push_back(Point (x1, y1));
                    }
                    else if(currentsec <= 8){   // next 4 seconds: visualize the recorded path
                         Mat currgest = Mat::zeros(640, 480, CV_8UC3);
                         namedWindow("current Gesture", CV_WINDOW_AUTOSIZE);

                         // FIX: size_t counter avoids the signed/unsigned
                         // comparison against currentGest.size().
                         for(size_t g = 0; g < currentGest.size(); g++){
                             if(g != currentGest.size() - 1)
                                line(currgest, currentGest.at(g), currentGest.at(g + 1), CV_RGB(255.0, 255.0, 255.0), CV_AA);

                             cout << currentGest.at(g) << endl;
                         }
                         imshow("current Gesture", currgest);
                    }
                }
                else{
                    t_holder = 0;
                    currentGest.clear();
                    destroyWindow("current Gesture");
                }
//.........................................................GESTURE RELATED.................................................................//

                // Map the centroid to our screen resolution.
                if(x1 > 0 && y1 > 0){
                    int x = (int)(x1 * 1920 / w);   // map width onto 1920
                    int y = (int)(y1 * 1080 / h);   // map height onto 1080

                    holdX = x;
                    holdY = y;
                    stringstream mx;
                    mx << "Position : " << "[" << x << " , " << y << "]";
                    cvPutText(blobFrame, mx.str().c_str(), cvPoint(0, 20), hudFont, CV_RGB(255.0, 0.0, 0.0));     // HUD coordinate readout

                    cvShowImage("Rendering with Blobls",blobFrame);

                    if(interactive){
                        // Relative ("touchpad") navigation: move by deltas.
                        if(fpflag){
                            movX = 1920 / 2;        // first appearance: center the mouse
                            movY = 1080 / 2;
                            oldX = x;
                            oldY = y;
                            fpflag = false;
                            frameout = false;
                        }
                        else{
                            if(frameout){       // hand re-entered the frame: re-anchor
                                oldX = x;
                                oldY = y;
                                reinsertcount++;
                                cout << "Re-inserted .." << reinsertcount << endl;
                                frameout = false;
                            }
                            else{
                                int dx = x - oldX;      // delta since last frame
                                int dy = y - oldY;

                                movX += dx;         // advance from the frozen position
                                movY += dy;
                                if((movX <= 0) || (movY <= 0)){
                                    movX = 0;
                                    movY = 0;
                                }

                                oldX = x; oldY = y;
                            }
                        }
                    }
                    else{       // absolute ("old") mouse navigation
                        movX = x;
                        movY = y;
                    }

                    int finger = blobs.size();

                    stringstream blobnumPrint;      // blob count for the status bar
                    blobnumPrint << "current blob count = " << finger;
                    cvDisplayStatusBar("Rendering with Blobls", blobnumPrint.str().c_str(), 1000);
                    if(motiongesture)
                        cvDisplayStatusBar("Rendering with Blobls", gesav.c_str(), 100);
                    if(clk_double)
                        cvDisplayStatusBar("Rendering with Blobls", doubleclk.c_str(), 3000);

                    // Dispatch mouse action by finger (blob) count.
                    // (Earlier experimental mappings removed; see VCS history.)
                    switch(finger){
                        case 1:
                            if(drag){
                                del.mouseSingleClick(Button1);
                                del.mouseMove(movX, movY);
                            }
                            else if(click){
                                del.mouseClickandRelease(Button1);
                            }
                            else if(clk_double){
                                del.mouseClickandRelease(Button1);
                                usleep(10);
                                del.mouseClickandRelease(Button1);
                            }
                            else del.mouseMove(movX, movY);
                        break;

                        case 2:
                            if(application){
                                cout << " ### Opening Pencil ###" << endl;
                                system("pencil");
                                application = !application;
                            }
                        break;
                    }
                }
            } // blob-detection for
        } // mouse switch

        int key = waitKey(1);
        keyboard(key);
        cvReleaseBlobs(blobs);
        // FIX: se_mop2 used to be released HERE, inside the loop — freed on the
        // first iteration and then used again by cvMorphologyEx on the next
        // (use-after-free). It is now released once, after the loop.
    }

    cvReleaseStructuringElement(&se_mop2);
    cvReleaseImage(&labelImg);
    delete myFont;
    delete hudFont;
    return EXIT_SUCCESS;
}