int main( int argc, char** argv )
{
	int frameNum = 0;
	TrackerInfo tracker;
	DescInfo hogInfo;
	DescInfo hofInfo;
	DescInfo mbhInfo;

	char* video = argv[1];
	arg_parse(argc, argv);
	Video capture(video);

//	std::cerr << "start_frame: " << start_frame << " end_frame: " << end_frame << " track_length: " << track_length << std::endl;
//	std::cerr << "min_distance: " << min_distance << " patch_size: " << patch_size << " nxy_cell: " << nxy_cell << " nt_cell: " << nt_cell << std::endl;

	InitTrackerInfo(&tracker, track_length, init_gap);
	InitDescInfo(&hogInfo, 8, 0, 1, patch_size, nxy_cell, nt_cell);
	InitDescInfo(&hofInfo, 9, 1, 1, patch_size, nxy_cell, nt_cell);
	InitDescInfo(&mbhInfo, 8, 0, 1, patch_size, nxy_cell, nt_cell);

	if( show_track == 1 ) {
		cvNamedWindow( "DenseTrack", 0 );
		cvNamedWindow( "Original", 0 );
	}


	std::vector<std::list<Track> > xyScaleTracks;
	int init_counter = 0; // indicate when to detect new feature points
	while( true ) {
		IplImageWrapper frame = 0;
		int i, j, c;

		// get a new frame
		frame = capture.getFrame();
		frameNum = capture.getFrameIndex();
		if( !frame ) {
			printf("break\n");
			break;
		}
		if( frameNum >= start_frame && frameNum <= end_frame ) {
		if( !image ) {
			// initialize all the buffers
			image = IplImageWrapper( cvGetSize(frame), 8, 3 );
			image->origin = frame->origin;
			prev_image= IplImageWrapper( cvGetSize(frame), 8, 3 );
			prev_image->origin = frame->origin;
			grey = IplImageWrapper( cvGetSize(frame), 8, 1 );
			grey_pyramid = IplImagePyramid( cvGetSize(frame), 8, 1, scale_stride );
			prev_grey = IplImageWrapper( cvGetSize(frame), 8, 1 );
			prev_grey_pyramid = IplImagePyramid( cvGetSize(frame), 8, 1, scale_stride );
			eig_pyramid = IplImagePyramid( cvGetSize(frame), 32, 1, scale_stride );

			cvCopy( frame, image, 0 );
			cvCvtColor( image, grey, CV_BGR2GRAY );
			grey_pyramid.rebuild( grey );

			// how many scales we can have
			scale_num = std::min<std::size_t>(scale_num, grey_pyramid.numOfLevels());
			fscales = (float*)cvAlloc(scale_num*sizeof(float));
			xyScaleTracks.resize(scale_num);

			for( int ixyScale = 0; ixyScale < scale_num; ++ixyScale ) {
				std::list<Track>& tracks = xyScaleTracks[ixyScale];
				fscales[ixyScale] = pow(scale_stride, ixyScale);

				// find good features at each scale separately
				IplImage *grey_temp = 0, *eig_temp = 0;
				std::size_t temp_level = (std::size_t)ixyScale;
				grey_temp = cvCloneImage(grey_pyramid.getImage(temp_level));
				eig_temp = cvCloneImage(eig_pyramid.getImage(temp_level));
				std::vector<CvPoint2D32f> points(0);
				cvDenseSample(grey_temp, eig_temp, points, quality, min_distance);

				// save the feature points
				for( i = 0; i < points.size(); i++ ) {
					Track track(tracker.trackLength);
					PointDesc point(hogInfo, hofInfo, mbhInfo, points[i]);
					track.addPointDesc(point);
					tracks.push_back(track);
				}

				cvReleaseImage( &grey_temp );
				cvReleaseImage( &eig_temp );
			}
		}

		// build the image pyramid for the current frame
		cvCopy( frame, image, 0 );
		cvCvtColor( image, grey, CV_BGR2GRAY );
		grey_pyramid.rebuild(grey);

		if( frameNum > 0 ) {
		init_counter++;
		for( int ixyScale = 0; ixyScale < scale_num; ++ixyScale ) {
			// track feature points in each scale separately
			std::vector<CvPoint2D32f> points_in(0);
			std::list<Track>& tracks = xyScaleTracks[ixyScale];
			for (std::list<Track>::iterator iTrack = tracks.begin(); iTrack != tracks.end(); ++iTrack) {
				CvPoint2D32f point = iTrack->pointDescs.back().point;
				points_in.push_back(point); // collect all the feature points
			}
			int count = points_in.size();
			IplImage *prev_grey_temp = 0, *grey_temp = 0;
			std::size_t temp_level = ixyScale;
			prev_grey_temp = cvCloneImage(prev_grey_pyramid.getImage(temp_level));
			grey_temp = cvCloneImage(grey_pyramid.getImage(temp_level));

			cv::Mat prev_grey_mat = cv::cvarrToMat(prev_grey_temp);
			cv::Mat grey_mat = cv::cvarrToMat(grey_temp);

			std::vector<int> status(count);
			std::vector<CvPoint2D32f> points_out(count);

			// compute the optical flow
			IplImage* flow = cvCreateImage(cvGetSize(grey_temp), IPL_DEPTH_32F, 2);
			cv::Mat flow_mat = cv::cvarrToMat(flow);
			cv::calcOpticalFlowFarneback( prev_grey_mat, grey_mat, flow_mat,
							sqrt(2)/2.0, 5, 10, 2, 7, 1.5, cv::OPTFLOW_FARNEBACK_GAUSSIAN );
			// track feature points by median filtering
			OpticalFlowTracker(flow, points_in, points_out, status);

			int width = grey_temp->width;
			int height = grey_temp->height;
			// compute the integral histograms
			DescMat* hogMat = InitDescMat(height, width, hogInfo.nBins);
			HogComp(prev_grey_temp, hogMat, hogInfo);

			DescMat* hofMat = InitDescMat(height, width, hofInfo.nBins);
			HofComp(flow, hofMat, hofInfo);

			DescMat* mbhMatX = InitDescMat(height, width, mbhInfo.nBins);
			DescMat* mbhMatY = InitDescMat(height, width, mbhInfo.nBins);
			MbhComp(flow, mbhMatX, mbhMatY, mbhInfo);

			i = 0;
			for (std::list<Track>::iterator iTrack = tracks.begin(); iTrack != tracks.end(); ++i) {
			if( status[i] == 1 ) { // if the feature point is successfully tracked
				PointDesc& pointDesc = iTrack->pointDescs.back();
				CvPoint2D32f prev_point = points_in[i];
				// get the descriptors for the feature point
				CvScalar rect = getRect(prev_point, cvSize(width, height), hogInfo);
				pointDesc.hog = getDesc(hogMat, rect, hogInfo);
				pointDesc.hof = getDesc(hofMat, rect, hofInfo);
				pointDesc.mbhX = getDesc(mbhMatX, rect, mbhInfo);
				pointDesc.mbhY = getDesc(mbhMatY, rect, mbhInfo);

				PointDesc point(hogInfo, hofInfo, mbhInfo, points_out[i]);
				iTrack->addPointDesc(point);

				// draw this track
				if( show_track == 1 ) {
					std::list<PointDesc>& descs = iTrack->pointDescs;
					std::list<PointDesc>::iterator iDesc = descs.begin();
					float length = descs.size();
					CvPoint2D32f point0 = iDesc->point;
					point0.x *= fscales[ixyScale]; // map the point to first scale
					point0.y *= fscales[ixyScale];

					float j = 0;
					for (iDesc++; iDesc != descs.end(); ++iDesc, ++j) {
						CvPoint2D32f point1 = iDesc->point;
						point1.x *= fscales[ixyScale];
						point1.y *= fscales[ixyScale];

						cvLine(image, cvPointFrom32f(point0), cvPointFrom32f(point1),
							   CV_RGB(0,cvFloor(255.0*(j+1.0)/length),0), 2, 8,0);
						point0 = point1;
					}
					cvCircle(image, cvPointFrom32f(point0), 2, CV_RGB(255,0,0), -1, 8,0);
				}
				++iTrack;
			}
			else // remove the track, if we lose feature point
				iTrack = tracks.erase(iTrack);
			}
			ReleDescMat(hogMat);
			ReleDescMat(hofMat);
			ReleDescMat(mbhMatX);
			ReleDescMat(mbhMatY);
			cvReleaseImage( &prev_grey_temp );
			cvReleaseImage( &grey_temp );
			cvReleaseImage( &flow );
		}

		for( int ixyScale = 0; ixyScale < scale_num; ++ixyScale ) {
		std::list<Track>& tracks = xyScaleTracks[ixyScale]; // output the features for each scale
		for( std::list<Track>::iterator iTrack = tracks.begin(); iTrack != tracks.end(); ) {
			if( iTrack->pointDescs.size() >= tracker.trackLength+1 ) { // if the trajectory achieves the length we want
				std::vector<CvPoint2D32f> trajectory(tracker.trackLength+1);
				std::list<PointDesc>& descs = iTrack->pointDescs;
				std::list<PointDesc>::iterator iDesc = descs.begin();

				for (int count = 0; count <= tracker.trackLength; ++iDesc, ++count) {
					trajectory[count].x = iDesc->point.x*fscales[ixyScale];
					trajectory[count].y = iDesc->point.y*fscales[ixyScale];
				}
				float mean_x(0), mean_y(0), var_x(0), var_y(0), length(0);
				if( isValid(trajectory, mean_x, mean_y, var_x, var_y, length) == 1 ) {
					printf("%d\t", frameNum);
					printf("%f\t%f\t", mean_x, mean_y);
					printf("%f\t%f\t", var_x, var_y);
					printf("%f\t", length);
					printf("%f\t", fscales[ixyScale]);

					for (int count = 0; count < tracker.trackLength; ++count)
						printf("%f\t%f\t", trajectory[count].x,trajectory[count].y );

					iDesc = descs.begin();
					int t_stride = cvFloor(tracker.trackLength/hogInfo.ntCells);
					for( int n = 0; n < hogInfo.ntCells; n++ ) {
						std::vector<float> vec(hogInfo.dim);
						for( int t = 0; t < t_stride; t++, iDesc++ )
							for( int m = 0; m < hogInfo.dim; m++ )
								vec[m] += iDesc->hog[m];
						for( int m = 0; m < hogInfo.dim; m++ )
							printf("%f\t", vec[m]/float(t_stride));
					}

					iDesc = descs.begin();
					t_stride = cvFloor(tracker.trackLength/hofInfo.ntCells);
					for( int n = 0; n < hofInfo.ntCells; n++ ) {
						std::vector<float> vec(hofInfo.dim);
						for( int t = 0; t < t_stride; t++, iDesc++ )
							for( int m = 0; m < hofInfo.dim; m++ )
								vec[m] += iDesc->hof[m];
						for( int m = 0; m < hofInfo.dim; m++ )
							printf("%f\t", vec[m]/float(t_stride));
					}

					iDesc = descs.begin();
					t_stride = cvFloor(tracker.trackLength/mbhInfo.ntCells);
					for( int n = 0; n < mbhInfo.ntCells; n++ ) {
						std::vector<float> vec(mbhInfo.dim);
						for( int t = 0; t < t_stride; t++, iDesc++ )
							for( int m = 0; m < mbhInfo.dim; m++ )
								vec[m] += iDesc->mbhX[m];
						for( int m = 0; m < mbhInfo.dim; m++ )
							printf("%f\t", vec[m]/float(t_stride));
					}

					iDesc = descs.begin();
					t_stride = cvFloor(tracker.trackLength/mbhInfo.ntCells);
					for( int n = 0; n < mbhInfo.ntCells; n++ ) {
						std::vector<float> vec(mbhInfo.dim);
						for( int t = 0; t < t_stride; t++, iDesc++ )
							for( int m = 0; m < mbhInfo.dim; m++ )
								vec[m] += iDesc->mbhY[m];
						for( int m = 0; m < mbhInfo.dim; m++ )
							printf("%f\t", vec[m]/float(t_stride));
					}

					printf("\n");
				}
				iTrack = tracks.erase(iTrack);
			}
			else
				iTrack++;
		}
		}

		if( init_counter == tracker.initGap ) { // detect new feature points every initGap frames
		init_counter = 0;
		for (int ixyScale = 0; ixyScale < scale_num; ++ixyScale) {
			std::list<Track>& tracks = xyScaleTracks[ixyScale];
			std::vector<CvPoint2D32f> points_in(0);
			std::vector<CvPoint2D32f> points_out(0);
			for(std::list<Track>::iterator iTrack = tracks.begin(); iTrack != tracks.end(); iTrack++) {
				std::list<PointDesc>& descs = iTrack->pointDescs;
				CvPoint2D32f point = descs.back().point; // the last point in the track
				points_in.push_back(point);
			}

			IplImage *grey_temp = 0, *eig_temp = 0;
			std::size_t temp_level = (std::size_t)ixyScale;
			grey_temp = cvCloneImage(grey_pyramid.getImage(temp_level));
			eig_temp = cvCloneImage(eig_pyramid.getImage(temp_level));

			cvDenseSample(grey_temp, eig_temp, points_in, points_out, quality, min_distance);
			// save the new feature points
			for( i = 0; i < points_out.size(); i++) {
				Track track(tracker.trackLength);
				PointDesc point(hogInfo, hofInfo, mbhInfo, points_out[i]);
				track.addPointDesc(point);
				tracks.push_back(track);
			}
			cvReleaseImage( &grey_temp );
			cvReleaseImage( &eig_temp );
		}
		}
		}

		cvCopy( frame, prev_image, 0 );
		cvCvtColor( prev_image, prev_grey, CV_BGR2GRAY );
		prev_grey_pyramid.rebuild(prev_grey);
		}

		if( show_track == 1 ) {
			cvShowImage( "DenseTrack", image );
			cvShowImage( "Original", frame );
			c = cvWaitKey(3);
			if( (char)c == 27 ) break;
		}
		// get the next frame
		if (!capture.nextFrame())
			break;
	}

	if( show_track == 1 ) {
		cvDestroyWindow("DenseTrack");
		cvDestroyWindow("Original");
	}

	return 0;
}
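For reference, a minimal standalone sketch (not part of the original tracker) of the same Farneback optical-flow step used in the loop above, with identical parameters; the input file names are hypothetical placeholders:

#include <opencv2/opencv.hpp>
#include <cstdio>
#include <cmath>

int main()
{
	// hypothetical inputs: any two consecutive grayscale frames
	cv::Mat prev = cv::imread("prev.png", cv::IMREAD_GRAYSCALE);
	cv::Mat curr = cv::imread("curr.png", cv::IMREAD_GRAYSCALE);
	if (prev.empty() || curr.empty())
		return -1;

	cv::Mat flow; // CV_32FC2, per-pixel displacement (dx, dy)
	// same parameters as above: pyr_scale=sqrt(2)/2, 5 levels, window 10,
	// 2 iterations, poly_n=7, poly_sigma=1.5, Gaussian weighting
	cv::calcOpticalFlowFarneback(prev, curr, flow,
				     std::sqrt(2.0)/2.0, 5, 10, 2, 7, 1.5,
				     cv::OPTFLOW_FARNEBACK_GAUSSIAN);

	// displace one point by the flow at its location, which is what
	// OpticalFlowTracker does for every track (minus the median filtering)
	cv::Point p(100, 100);
	cv::Point2f d = flow.at<cv::Point2f>(p);
	printf("(%d,%d) -> (%.1f,%.1f)\n", p.x, p.y, p.x + d.x, p.y + d.y);
	return 0;
}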
int susan_corner_detect(char* filename)
{
	int cornerCount = max_corners;
	CvPoint2D32f corners[max_corners];
	IplImage *srcImage = 0, *grayImage = 0, *corners1 = 0, *corners2 = 0;
	int i;
	CvScalar color = CV_RGB(255,0,0);
	//char* filename = argc == 2 ? argv[1] : (char*)"lena.jpg"; 
	//cvNamedWindow( "input", 1 ); // create HighGUI window with name "image"  

	//Load the image to be processed  
	srcImage = cvLoadImage(filename,1);  
	grayImage = cvCreateImage(cvGetSize(srcImage), IPL_DEPTH_8U, 1);  

	//copy the source image to copy image after converting the format  
	cvCvtColor(srcImage, grayImage, CV_BGR2GRAY);  

	//create empty images of same size as the copied images  
	corners1= cvCreateImage(cvGetSize(srcImage), IPL_DEPTH_32F, 1);  
	corners2= cvCreateImage(cvGetSize(srcImage),IPL_DEPTH_32F, 1);  

	cvGoodFeaturesToTrack (grayImage, corners1, corners2, corners, &cornerCount, 0.05, 5, 0);
	printf("num corners found: %d\n", cornerCount);

	// draw a circle at each corner location found in the image
	if(cornerCount>0)   
	{  
		for (i=0; i<cornerCount;i++)       
		{  
			cvCircle(srcImage, cvPoint((int)(corners[i].x), (int)(corners[i].y)), 6, color, 1, CV_AA, 0);  
		}  
	}  

	//cvShowImage( "input", srcImage );  


	// scale the image
	cvNamedWindow("scale_dst", 1);
	CvSize dst_size;
	double scale = 0.5;
	IplImage* scale_dst = 0;
	dst_size.width = (int)(srcImage->width * scale);
	dst_size.height = (int)(srcImage->height * scale);	// size of the scaled image
	scale_dst = cvCreateImage(dst_size, srcImage->depth, srcImage->nChannels);	// create the image header
	cvResize(srcImage, scale_dst, CV_INTER_LINEAR);		// shrink the image with bilinear interpolation
	cvShowImage("scale_dst", scale_dst);
	cvSaveImage("samples/scale_dst.JPG", scale_dst);

	cvWaitKey(0);

	cvDestroyAllWindows();

	cvReleaseImage(&srcImage);  
	cvReleaseImage(&grayImage);  
	cvReleaseImage(&corners1);  
	cvReleaseImage(&corners2);  

	cvReleaseImage(&scale_dst);

	return 0;  
}  
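The legacy cvGoodFeaturesToTrack call above has a C++ counterpart; a minimal sketch with the same quality level (0.05) and minimum distance (5), assuming the commented-out default input lena.jpg:

#include <opencv2/opencv.hpp>

int main()
{
	cv::Mat src = cv::imread("lena.jpg");  // hypothetical input, as in the commented-out default above
	if (src.empty())
		return -1;
	cv::Mat gray;
	cv::cvtColor(src, gray, CV_BGR2GRAY);

	std::vector<cv::Point2f> corners;
	// Shi-Tomasi corners: up to 100 corners, quality 0.05, min distance 5
	cv::goodFeaturesToTrack(gray, corners, 100, 0.05, 5);

	for (size_t i = 0; i < corners.size(); i++)
		cv::circle(src, corners[i], 6, CV_RGB(255,0,0), 1, CV_AA);

	cv::imshow("corners", src);
	cv::waitKey(0);
	return 0;
}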
Example #3
int hue_circles_detector(const sensor_msgs::ImageConstPtr& InputImage,
			 std::vector<Circle>& myCircles)
{
  IplImage *myImage = NULL;
  cv::Mat inputImage;
  cv::Mat hsvImage;
  cv::Mat hueImage;
  cv::Mat satImage;
  cv::Mat binaryImage;
  cv::Mat outputImage;
  sensor_msgs::CvBridge bridge;
  try
    {
      myImage = bridge.imgMsgToCv(InputImage, "bgr8");
    }
  catch (sensor_msgs::CvBridgeException& e)
    {
      ROS_ERROR("Could not convert from '%s' to 'bgr8'.", InputImage->encoding.c_str());
      return -1;
    }
  
  cvNamedWindow("Input to hue circles detector");
  cvNamedWindow("Circles");
  cvNamedWindow("Working image");
  cvStartWindowThread();
  
  cvShowImage("Input to hue circles detector", myImage);
  IplImage* CirclesImage = cvCloneImage(myImage);  // just used to display our found circles
  // create memory storage that will contain all the dynamic data
  CvMemStorage* storage = cvCreateMemStorage(0);

  // Convert IplImage to cv::Mat
  inputImage = cv::Mat(myImage).clone();


    // output = input
    outputImage = inputImage.clone ();

    // Convert Input image from BGR to HSV
    cv::cvtColor (inputImage, hsvImage, CV_BGR2HSV);

    // Zero Matrices
    hueImage = cv::Mat::zeros(hsvImage.rows, hsvImage.cols, CV_8U);
    satImage = cv::Mat::zeros(hsvImage.rows, hsvImage.cols, CV_8U);
    binaryImage = cv::Mat::zeros(hsvImage.rows, hsvImage.cols, CV_8U);

    // HSV Channel 0 -> hueImage & HSV Channel 1 -> satImage
    int from_to[] = { 0,0, 1,1};
    cv::Mat img_split[] = { hueImage, satImage};
    cv::mixChannels(&hsvImage, 1, img_split, 2, from_to, 2);


    // ****************************************************
    // NOTE that i is ROWS and j is COLS
    // This is not x,y, it is rows and cols
    // 0,0 is upper left;  0, max is upper right;  max, 0 is lower left; max,max is lower right
    // ****************************************************
   
    for(int i = 0; i < outputImage.rows; i++)
    {
      for(int j = 0; j < outputImage.cols; j++)
      {
        // The output pixel is white if the input pixel
        // hue is green and saturation is reasonable
	// in low light the tennis ball has a hue around 160 - 180, saturations around 30 to 40
	// in normal light it has a hue around 20 - 40, saturations around 80 - 150
	// in very high light, the tennis ball center goes full white, so saturation drops to <10
	// in general, the background doesn't seem to come up too much with a very loose constraint
	// on saturation, so it works to allow mostly any saturation value.

        if( (hueImage.at<uchar>(i,j) > 20 && hueImage.at<uchar>(i,j) < 40 && satImage.at<uchar>(i,j) > 5))
	   // || (hueImage.at<uchar>(i,j) > 160 && hueImage.at<uchar>(i,j) < 180 && satImage.at<uchar>(i,j) > 20 ))
          binaryImage.at<uchar>(i,j) = 255;
  
        else {
          binaryImage.at<uchar>(i,j) = 0;
          // clear all three BGR channels of the output pixel
          outputImage.at<cv::Vec3b>(i,j) = cv::Vec3b(0,0,0);
        }
      }
    }

    cv::Size strel_size;
    strel_size.width = 3;
    strel_size.height = 3;
    cv::Mat strel = cv::getStructuringElement(cv::MORPH_ELLIPSE,strel_size);
    cv::morphologyEx(binaryImage, binaryImage,cv::MORPH_OPEN,strel,cv::Point(-1, -1),3);

   // Convert White on Black to Black on White by inverting the image
    cv::bitwise_not(binaryImage,binaryImage);
    // Blur the image to improve detection
    cv::GaussianBlur(binaryImage, binaryImage, cv::Size(7, 7), 2, 2 );



    cv::imshow ("Input to hue circles detector", inputImage);
    // Display Binary Image
    cv::imshow ("Working image", binaryImage);
    // Display segmented image
    //cv::imshow ("Circles", outputImage);


    // start drawing all the circles
    IplImage InputToHough = binaryImage;
    CvSeq* circles = cvHoughCircles(&InputToHough, storage, CV_HOUGH_GRADIENT,
                                    1, 70, 140, 15, 20, 400);
    //cvHoughCircles( &InputToHough, storage, CV_HOUGH_GRADIENT, 2, InputToHough.height/50, MIN_RADIUS, MAX_RADIUS );
    // output all the circles detected
    int NumCircles = circles->total;
    //cout << "\n\nFound " << NumCircles << " circles" << endl;

    if (NumCircles > MAX_CIRCLES) NumCircles = MAX_CIRCLES;  // so we don't process too many
    
      
    for (int i = 0; i < NumCircles; i++) {
        float* p = (float*)cvGetSeqElem(circles, i);

        //cout << "x = " << p[0] << ", y = " << p[1]
        //     << ", radius = " << p[2] << endl;

        cvCircle(CirclesImage,
                 cvPoint(cvRound(p[0]), cvRound(p[1])),
                 cvRound(p[2]),
                 CV_RGB(0,255,0), 2, 8, 0);

        Circle myLocalCircle;
        int myColor = 0, actualThreshold = 100;
        myLocalCircle.setValues(cvRound(p[0]), cvRound(p[1]), cvRound(p[2]),
                                myColor, actualThreshold);
        myCircles.push_back(myLocalCircle);
    } // ends for NumCircles

	  cvShowImage("Circles", CirclesImage);

	return NumCircles;
  }
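A minimal sketch of the same circle detection through the C++ interface; it assumes a binary image prepared as above and returns the raw (x, y, radius) triples instead of drawing them:

#include <opencv2/opencv.hpp>

// not the author's code: the same Hough parameters as the cvHoughCircles call
// above (dp=1, minDist=70, Canny high threshold 140, accumulator threshold 15,
// radius range 20..400)
static std::vector<cv::Vec3f> findCircles(const cv::Mat& binaryImage)
{
	std::vector<cv::Vec3f> circles;  // each element is (x, y, radius)
	cv::HoughCircles(binaryImage, circles, CV_HOUGH_GRADIENT,
			 1, 70, 140, 15, 20, 400);
	return circles;
}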
Example #4
public:bool analizarMhi( IplImage* img, IplImage* dst, int diff_threshold, CvRect rect ) {
        double timestamp = (double)clock()/CLOCKS_PER_SEC; // get current time in seconds
        int i, idx1 = last, idx2;
        IplImage* silh;
        CvSeq* seq;
        CvRect comp_rect;
        cv::Rect result;
        double count;
        double angle;
        CvPoint center;
        double magnitude;
        CvScalar color;

        cvCvtColor( img, buf[last], CV_BGR2GRAY ); // convert frame to grayscale

        idx2 = (last + 1) % N; // index of (last - (N-1))th frame
        last = idx2;

        silh = buf[idx2];
        cvAbsDiff( buf[idx1], buf[idx2], silh ); // get difference between frames

        cvThreshold( silh, silh, diff_threshold, 1, CV_THRESH_BINARY ); // and threshold it
        cvUpdateMotionHistory( silh, mhi, timestamp, MHI_DURATION ); // update MHI

        // convert MHI to blue 8u image
        cvCvtScale( mhi, mask, 255./MHI_DURATION,
                    (MHI_DURATION - timestamp)*255./MHI_DURATION );
        cvZero( dst );
        cvCvtPlaneToPix( mask, 0, 0, 0, dst );

        // calculate motion gradient orientation and valid orientation mask
        cvCalcMotionGradient( mhi, mask, orient, MAX_TIME_DELTA, MIN_TIME_DELTA, 3 );

        if( !storage )
            storage = cvCreateMemStorage(0);
        else
            cvClearMemStorage(storage);

        // segment motion: get sequence of motion components
        // segmask is marked motion components map. It is not used further
        seq = cvSegmentMotion( mhi, segmask, storage, timestamp, MAX_TIME_DELTA );

        // iterate through the motion components,
        // One more iteration (i == -1) corresponds to the whole image (global motion)
    //    for( i = -1; i < seq->total; i++ ) {
        i = 1;
        comp_rect = cvRect( 0, 0, img->width, img->height );
        color = CV_RGB(255,255,255);
        magnitude = 100;
        while (result.area() < 10 && i < seq->total) {

                comp_rect = ((CvConnectedComp*)cvGetSeqElem( seq, i ))->rect;
                if( comp_rect.width + comp_rect.height < 100 ) {// reject very small components
                    i++;
                    continue;
                }
                color = CV_RGB(255,0,0);
                magnitude = 30;

            // select component ROI
            cvSetImageROI( silh, comp_rect );
            cvSetImageROI( mhi, comp_rect );
            cvSetImageROI( orient, comp_rect );
            cvSetImageROI( mask, comp_rect );

            // calculate orientation
            angle = cvCalcGlobalOrientation( orient, mask, mhi, timestamp, MHI_DURATION);
            angle = 360.0 - angle;  // adjust for images with top-left origin

            count = cvNorm( silh, 0, CV_L1, 0 ); // calculate number of points within silhouette ROI

            cvResetImageROI( mhi );
            cvResetImageROI( orient );
            cvResetImageROI( mask );
            cvResetImageROI( silh );

            center = cvPoint( (comp_rect.x + comp_rect.width/2),
                              (comp_rect.y + comp_rect.height/2) );

            cvCircle( dst, center, cvRound(magnitude*1.2), color, 3, CV_AA, 0 );
            cvLine( dst, center, cvPoint( cvRound( center.x + magnitude*cos(angle*CV_PI/180)),
                    cvRound( center.y - magnitude*sin(angle*CV_PI/180))), color, 3, CV_AA, 0 );

            result = (cv::Rect)comp_rect & (cv::Rect)rect;
            i++;
        }

        if (result.area() > 10) {
            return true;
        } else {
            return false;
        }
    }
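The heart of analizarMhi is the motion-history update; a minimal sketch of that cycle in isolation (not the original class method), assuming pre-allocated single-channel images of matching size (silh 8-bit, mhi 32-bit float):

#include <opencv2/opencv.hpp>
#include <ctime>

// difference two grayscale frames, binarize, and fold the result into the
// decaying motion history image, as the method above does each frame
void updateMotionHistorySketch(IplImage* prevGray, IplImage* currGray,
			       IplImage* silh, IplImage* mhi,
			       int diffThreshold, double mhiDuration)
{
	double timestamp = (double)clock() / CLOCKS_PER_SEC;	// current time in seconds
	cvAbsDiff(prevGray, currGray, silh);			// frame difference
	cvThreshold(silh, silh, diffThreshold, 1, CV_THRESH_BINARY);	// motion mask
	cvUpdateMotionHistory(silh, mhi, timestamp, mhiDuration);	// decaying history
}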
Example #5
public:void detect_and_draw( IplImage* img, IplImage* imgAnterior ) {
        static CvScalar colors[] = {
            {{0,0,255}},
            {{0,128,255}},
            {{0,255,255}},
            {{0,255,0}},
            {{255,128,0}},
            {{255,255,0}},
            {{255,0,0}},
            {{255,0,255}}
        };

        double scale = 1.3;
        IplImage* gray = cvCreateImage( cvSize(img->width,img->height), 8, 1 );
        IplImage* small_img = cvCreateImage( cvSize( cvRound (img->width/scale),
                             cvRound (img->height/scale)),
                         8, 1 );
        int i;

        IplImage* onlyhaart = cvCreateImage(cvGetSize(img), img->depth, img->nChannels);
        cvCopy(img, onlyhaart);

        cvCvtColor( img, gray, CV_BGR2GRAY );
        cvResize( gray, small_img, CV_INTER_LINEAR );
        cvEqualizeHist( small_img, small_img );
        cvClearMemStorage( storageHaart );

        if(cascade) {
            double t = (double)cvGetTickCount();
            CvSeq* faces = cvHaarDetectObjects(small_img, cascade, storageHaart,
                                                1.1, 2, 0/*CV_HAAR_DO_CANNY_PRUNING*/,
                                                cvSize(30, 30) );
            t = (double)cvGetTickCount() - t;
            for(i = 0; i < (faces ? faces->total : 0); i++ ) {
                CvRect* r = (CvRect*)cvGetSeqElem( faces, i );

                CvRect rect = cvRect(r->x, r->y, r->width, r->height);

                if ((rect.height < (img->height + 1)) && (rect.width < (img->width + 1))
                        && analizarMhi(img, imgAnterior, 30, rect)) {
                    printf( "detection time = %gms\n", t/((double)cvGetTickFrequency()*1000.) );
                    CvPoint center;
                    int radius;
                    center.x = cvRound((rect.x + rect.width*0.5)*scale);
                    center.y = cvRound((rect.y + rect.height*0.5)*scale);
                    radius = cvRound((rect.width + rect.height)*0.25*scale);
                    cvCircle( img, center, radius, colors[i%8], 3, 8, 0 );
                }
                CvPoint center;
                int radius;
                center.x = cvRound((rect.x + rect.width*0.5)*scale);
                center.y = cvRound((rect.y + rect.height*0.5)*scale);
                radius = cvRound((rect.width + rect.height)*0.25*scale);
                cvCircle( onlyhaart, center, radius, colors[i%8], 3, 8, 0 );
            }
        }
        cvShowImage( "Detecta", img );
        cvShowImage( "onlyhaart", onlyhaart);
        cvShowImage("bluedetect", imgAnterior);
        cvReleaseImage( &gray );
        cvReleaseImage( &small_img );
    }
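A minimal sketch of the tick-count timing idiom used in detect_and_draw above; note the divisor cvGetTickFrequency()*1000 is what converts ticks to milliseconds:

#include <opencv2/opencv.hpp>
#include <cstdio>

void timedWork()
{
	double t = (double)cvGetTickCount();
	// ... the work to be timed goes here ...
	t = (double)cvGetTickCount() - t;
	// cvGetTickFrequency() returns ticks per microsecond, so *1000 gives ms
	printf("elapsed = %gms\n", t / ((double)cvGetTickFrequency() * 1000.0));
}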
Example #6
int main(int argc, char* argv[]) {


	CvSize size640x480 = cvSize(640, 480);			// use a 640 x 480 size for all windows, also make sure your webcam is set to 640x480 !!

	CvCapture* p_capWebcam;						// we will assign our web cam video stream to this later . . .

	IplImage* p_imgOriginal;			// pointer to an image structure, this will be the input image from webcam
	IplImage* p_imgProcessed;			// pointer to an image structure, this will be the processed image
										/* IPL is short for Intel Image Processing Library, this is the structure used in OpenCV 1.x to work with images */

	CvMemStorage* p_strStorage;			// necessary storage variable to pass into cvHoughCircles()

	CvSeq* p_seqCircles;				// pointer to an OpenCV sequence, will be returned by cvHough Circles() and will contain all circles
										// call cvGetSeqElem(p_seqCircles, i) will return a 3 element array of the ith circle (see next variable)
	
	float* p_fltXYRadius;				// pointer to a 3 element array of floats
										// [0] => x position of detected object
										// [1] => y position of detected object
										// [2] => radius of detected object

	int i;								// loop counter
	char charCheckForEscKey;			// char for checking key press (Esc exits program)

	p_capWebcam = cvCreateCameraCapture(-1);	// -1 => use the first available webcam; pass an explicit index if you have multiple cameras

	if(p_capWebcam == NULL) {			// if capture was not successful . . .
		printf("error: capture is NULL \n");	// error message to standard out . . .
		getchar();					
		// getchar() to pause for user see message . . .
		return(-1);								// exit program
	}

											// declare 2 windows
	cvNamedWindow("Original", CV_WINDOW_AUTOSIZE);		// original image from webcam
	cvNamedWindow("Processed", CV_WINDOW_AUTOSIZE);		// the processed image we will use for detecting circles

	p_imgProcessed = cvCreateImage(size640x480,			// 640 x 480 pixels (CvSize struct from earlier)
								   IPL_DEPTH_8U,		// 8-bit color depth
								   1);					// 1 channel (grayscale), if this was a color image, use 3

	while(1) {								// for each frame . . .
		p_imgOriginal = cvQueryFrame(p_capWebcam);		// get frame from webcam
		
		if(p_imgOriginal == NULL) {					// if frame was not captured successfully . . .
			printf("error: frame is NULL \n");		// error message to std out
			getchar();
			break;
		}

		cvInRangeS(p_imgOriginal,				// function input
				   CV_RGB(175,  0,  0),			// min filtering value (if color is greater than or equal to this)
				   CV_RGB(256,100,100),			// max filtering value (if color is less than this)
				   p_imgProcessed);				// function output

		p_strStorage = cvCreateMemStorage(0);	// allocate necessary memory storage variable to pass into cvHoughCircles()

										// smooth the processed image, this will make it easier for the next function to pick out the circles
		cvSmooth(p_imgProcessed,		// function input
				 p_imgProcessed,		// function output
				 CV_GAUSSIAN,			// use Gaussian filter (average nearby pixels, with closest pixels weighted more)
				 9,						// smoothing filter window width
				 9);					// smoothing filter window height

													// fill sequential structure with all circles in processed image
		p_seqCircles = cvHoughCircles(p_imgProcessed,		// input image, note that this has to be grayscale (no color)
									  p_strStorage,			// provide function with memory storage, makes function return a pointer to a CvSeq
									  CV_HOUGH_GRADIENT,	// two-pass algorithm for detecting circles, this is the only choice available
									  2,					// size of image / 2 = "accumulator resolution", i.e. accum = res = size of image / 2
									  p_imgProcessed->height / 4,	// min distance in pixels between the centers of the detected circles
									  100,						// high threshold of Canny edge detector, called by cvHoughCircles
									  50,						// low threshold of Canny edge detector, called by cvHoughCircles
									  10,						// min circle radius, in pixels
									  400);						// max circle radius, in pixels

		for(i=0; i < p_seqCircles->total; i++) {		// for each element in sequential circles structure (i.e. for each object detected)

			p_fltXYRadius = (float*)cvGetSeqElem(p_seqCircles, i);	// from the sequential structure, read the ith value into a pointer to a float

			printf("ball %d position x = %f, y = %f, r = %f \n",i,					//naming e ball
															  p_fltXYRadius[0],		// x position of center point of circle
															  p_fltXYRadius[1],		// y position of center point of circle
															  p_fltXYRadius[2]);	// radius of circle

										// draw a small green circle at center of detected object
			cvCircle(p_imgOriginal,										// draw on the original image
					 cvPoint(cvRound(p_fltXYRadius[0]), cvRound(p_fltXYRadius[1])),		// center point of circle
					 3,													// 3 pixel radius of circle
					 CV_RGB(0,255,0),									// draw pure green
					 CV_FILLED);										// thickness, fill in the circle
			
										// draw a red circle around the detected object
			cvCircle(p_imgOriginal,										// draw on the original image
					 cvPoint(cvRound(p_fltXYRadius[0]), cvRound(p_fltXYRadius[1])),		// center point of circle
					 cvRound(p_fltXYRadius[2]),							// radius of circle in pixels
					 CV_RGB(255,0,0),									// draw pure red
					 3);												// thickness of circle in pixels
		}	// end for

		cvShowImage("Original", p_imgOriginal);			// original image with detectec ball overlay
		cvShowImage("Processed", p_imgProcessed);		// image after processing

		cvReleaseMemStorage(&p_strStorage);				// deallocate necessary storage variable to pass into cvHoughCircles

		charCheckForEscKey = cvWaitKey(1);				// delay (in ms), and get key press, if any
		if(charCheckForEscKey == 27) break;				// if Esc key (ASCII 27) was pressed, jump out of while loop
	}	// end while

	cvReleaseCapture(&p_capWebcam);					// release memory as applicable
	cvReleaseImage(&p_imgProcessed);
	cvReleaseImage(&p_imgOriginal);
	cvDestroyWindow("Original");
	cvDestroyWindow("Processed");

	return(0);
}
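The color gate above (cvInRangeS with CV_RGB bounds) can also be written with the C++ API. A minimal sketch, not the original program, keeping the same red range and 9x9 Gaussian smoothing; remember CV_RGB(r,g,b) builds a BGR-ordered scalar, which is why the bounds below read (0,0,175) and (100,100,256):

#include <opencv2/opencv.hpp>

// threshold a BGR frame for red and smooth the mask, mirroring the
// cvInRangeS + cvSmooth steps in the loop above
cv::Mat filterRed(const cv::Mat& bgrFrame)
{
	cv::Mat mask;
	// same bounds as CV_RGB(175,0,0) .. CV_RGB(256,100,100) above
	cv::inRange(bgrFrame, cv::Scalar(0, 0, 175), cv::Scalar(100, 100, 256), mask);
	cv::GaussianBlur(mask, mask, cv::Size(9, 9), 0);  // 9x9 Gaussian, sigma derived from kernel size
	return mask;
}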
Example #7
void detect_and_draw( IplImage* img )
{
    static CvScalar colors[] = 
    {
        {{0,0,255}},
        {{0,128,255}},
        {{0,255,255}},
        {{0,255,0}},
        {{255,128,0}},
        {{255,255,0}},
        {{255,0,0}},
        {{255,0,255}}
    };

    IplImage *gray, *small_img;
    int i, j;

    gray = cvCreateImage( cvSize(img->width,img->height), 8, 1 );
    small_img = cvCreateImage( cvSize( cvRound (img->width/scale),
                         cvRound (img->height/scale)), 8, 1 );

    cvCvtColor( img, gray, CV_BGR2GRAY );
    cvResize( gray, small_img, CV_INTER_LINEAR );
    cvEqualizeHist( small_img, small_img );
    cvClearMemStorage( storage );

    if( cascade )
    {
        double t = (double)cvGetTickCount();
        CvSeq* faces = cvHaarDetectObjects( small_img, cascade, storage,
                                            1.1, 2, 0
                                            //|CV_HAAR_FIND_BIGGEST_OBJECT
                                            //|CV_HAAR_DO_ROUGH_SEARCH
                                            |CV_HAAR_DO_CANNY_PRUNING
                                            //|CV_HAAR_SCALE_IMAGE
                                            ,
                                            cvSize(30, 30) );
        t = (double)cvGetTickCount() - t;
        printf( "detection time = %gms\n", t/((double)cvGetTickFrequency()*1000.) );
        for( i = 0; i < (faces ? faces->total : 0); i++ )
        {
            CvRect* r = (CvRect*)cvGetSeqElem( faces, i );
            CvMat small_img_roi;
            CvSeq* nested_objects;
            CvPoint center;
            CvScalar color = colors[i%8];
            int radius;
            center.x = cvRound((r->x + r->width*0.5)*scale);
            center.y = cvRound((r->y + r->height*0.5)*scale);
            radius = cvRound((r->width + r->height)*0.25*scale);
            cvCircle( img, center, radius, color, 3, 8, 0 );
            if( !nested_cascade )
                continue;
            cvGetSubRect( small_img, &small_img_roi, *r );
            nested_objects = cvHaarDetectObjects( &small_img_roi, nested_cascade, storage,
                                        1.1, 2, 0
                                        //|CV_HAAR_FIND_BIGGEST_OBJECT
                                        //|CV_HAAR_DO_ROUGH_SEARCH
                                        //|CV_HAAR_DO_CANNY_PRUNING
                                        //|CV_HAAR_SCALE_IMAGE
                                        ,
                                        cvSize(0, 0) );
            for( j = 0; j < (nested_objects ? nested_objects->total : 0); j++ )
            {
                CvRect* nr = (CvRect*)cvGetSeqElem( nested_objects, j );
                center.x = cvRound((r->x + nr->x + nr->width*0.5)*scale);
                center.y = cvRound((r->y + nr->y + nr->height*0.5)*scale);
                radius = cvRound((nr->width + nr->height)*0.25*scale);
                cvCircle( img, center, radius, color, 3, 8, 0 );
            }
        }
    }

    cvShowImage( "result", img );
    cvReleaseImage( &gray );
    cvReleaseImage( &small_img );
}
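The same cascade detection is available through cv::CascadeClassifier; a minimal sketch (not the original program) with the parameters used above: scale factor 1.1, 2 neighbors, Canny pruning, 30x30 minimum. The cascade file name is a hypothetical placeholder:

#include <opencv2/opencv.hpp>

std::vector<cv::Rect> detectFaces(const cv::Mat& img)
{
	// hypothetical cascade path; substitute whatever XML the program loads
	static cv::CascadeClassifier cascade("haarcascade_frontalface_alt.xml");
	std::vector<cv::Rect> faces;
	if (cascade.empty())
		return faces;

	cv::Mat gray;
	cv::cvtColor(img, gray, CV_BGR2GRAY);
	cv::equalizeHist(gray, gray);
	cascade.detectMultiScale(gray, faces, 1.1, 2,
				 CV_HAAR_DO_CANNY_PRUNING, cv::Size(30, 30));
	return faces;
}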
Example #8
 int main() {
  CvPoint pt1,pt2;
  CvRect regt;
  CvPoint cir_center;
  CvPoint frame_center;
  CvPoint A,B,C,D;
  CvPoint temp;
  double angle,spinsize;
  int cir_radius=1; 
  int frame_width=160, frame_height=120;
  unsigned char sendBuf;
  int serial = -1;
  // try /dev/ttyACM0 .. /dev/ttyACM8 until one of them opens
  for (int port = 0; port <= 8 && serial == -1; port++) {
    char device[32];
    sprintf(device, "/dev/ttyACM%d", port);
    serial = openSerial(device);
  }
  if (serial == -1) {
    return -1;
  }
   //CvCapture* capture = cvCaptureFromCAM( CV_CAP_ANY );
   CvCapture* capture = cvCaptureFromCAM( 0 );
   if ( !capture ) {
     fprintf(stderr, "ERROR: capture is NULL \n" );
     getchar();
     return -1;
   }
  cvSetCaptureProperty(capture,CV_CAP_PROP_FRAME_WIDTH,frame_width);// 160x120
  cvSetCaptureProperty(capture,CV_CAP_PROP_FRAME_HEIGHT,frame_height);
  cvSetCaptureProperty(capture, CV_CAP_PROP_FPS,10);
//  cvSetCaptureProperty(capture,CV_CAP_PROP_POS_FRAMES,5);  
 // Create a window in which the captured images will be presented
   cvNamedWindow( "mywindow", CV_WINDOW_AUTOSIZE );
   // Show the image captured from the camera in the window and repeat
   while ( 1 ) {
     // Get one frame
     IplImage* frame = cvQueryFrame( capture );
     if ( !frame ) {
       fprintf( stderr, "ERROR: frame is null...\n" );
       getchar();
       break;
     }
     int modfheight, modfwidth;

     modfheight = frame->height;
     modfwidth = frame->width;
     // create modified frame with 1/4th the original size
     IplImage* modframe = cvCreateImage(cvSize((int)(modfwidth/4),(int)(modfheight/4)),frame->depth,frame->nChannels); //cvCreateImage(size of frame, depth, noofchannels)
     cvResize(frame, modframe,CV_INTER_LINEAR);
     // create HSV(Hue, Saturation, Value) frame
     IplImage* hsvframe = cvCreateImage(cvGetSize(modframe),8, 3);
     cvCvtColor(modframe, hsvframe, CV_BGR2HSV); //cvCvtColor(input frame,outputframe,method)
     // create a frame within threshold.
     IplImage* threshframe = cvCreateImage(cvGetSize(hsvframe),8,1);
     cvInRangeS(hsvframe,cvScalar(10, 180, 130),cvScalar(40, 240, 245),threshframe); //cvInRangeS(input frame, cvScalar(min range),cvScalar(max range),output frame)
     // created dilated image
     IplImage* dilframe = cvCreateImage(cvGetSize(threshframe),8,1);
     cvDilate(threshframe,dilframe,NULL,2); //cvDilate(input frame, output frame, mask, number of times to dilate)

     CBlobResult blobs;
     blobs = CBlobResult(dilframe,NULL,0); // CBlobresult(inputframe, mask, threshold) Will filter all white parts of image
     blobs.Filter(blobs,B_EXCLUDE,CBlobGetArea(),B_LESS,50);//blobs.Filter(input, cond, criteria, cond, const) Filter all images whose area is less than 50 pixels
     CBlob biggestblob;
     blobs.GetNthBlob(CBlobGetArea(),0,biggestblob); //GetNthBlob(criteria, number, output) Get only the largest blob based on CblobGetArea()
     // get 4 points to define the rectangle
     pt1.x = biggestblob.MinX()*4;
     pt1.y = biggestblob.MinY()*4;
     pt2.x = biggestblob.MaxX()*4;
     pt2.y = biggestblob.MaxY()*4;
     cir_center.x=(pt1.x+pt2.x)/2;
     cir_center.y=(pt1.y+pt2.y)/2;
     frame_center.x=frame_width/2;
     frame_center.y=frame_height/2;
     A.x=frame_center.x-4;
     A.y=frame_center.y;
     B.x=frame_center.x+4;
     B.y=frame_center.y;
     C.y=frame_center.y-4;
     C.x=frame_center.x;
     D.y=frame_center.y+4;
     D.x=frame_center.x;
     cvRectangle(frame,pt1,pt2,cvScalar(255,0,0),1,8,0); // draw rectangle around the biggest blob
     cvCircle( frame, cir_center, cir_radius, cvScalar(0,255,255), 1, 8, 0 ); // center point of the rectangle
     cvLine(frame, A, B,cvScalar(255,0,255),2,8,0);
     cvLine(frame, C, D,cvScalar(255,0,255),2,8,0);
     if (cir_center.x!=0&&cir_center.y!=0){
     spinsize=sqrt((cir_center.x-frame_center.x)*(cir_center.x-frame_center.x) +(cir_center.y-frame_center.y)*(cir_center.y-frame_center.y));
     angle = atan2((double)cir_center.y-frame_center.y,(double)cir_center.x-frame_center.x);
     //printf("%f, %f \n",angle*180/3.1416,spinsize/10);
     temp.x=(int)(frame_center.x+spinsize/5*cos(angle+CV_PI/4));
     temp.y=(int)(frame_center.y+spinsize/5*sin(angle+CV_PI/4));
     cvLine(frame, temp, frame_center,cvScalar(0,255,0),1,8,0);

     temp.x=(int)(frame_center.x+spinsize/5*cos(angle-CV_PI/4));
     temp.y=(int)(frame_center.y+spinsize/5*sin(angle-CV_PI/4));
     cvLine(frame, temp, frame_center,cvScalar(0,255,0),1,8,0);
	
     cvLine(frame, cir_center, frame_center,cvScalar(0,255,0),1,8,0);
     sendBuf=88;
     write(serial, &sendBuf,1);
     sendBuf=cir_center.x;
     write(serial, &sendBuf,1);
     sendBuf=89;
     write(serial, &sendBuf,1);
     sendBuf=cir_center.y;
     write(serial, &sendBuf,1);
    
    //printf("%d %d %f\n",cir_center.x,cir_center.y, angle*180/3.1415);
     //sendvalue(serial, angle*180/3.1416);
     //cvCircle( frame, frame_center, cir_radius, cvScalar(0,255,255), 2, 8, 0 );
}

     cvShowImage( "mywindow", frame); // show output image
     // Do not release the frame!
     //If ESC key pressed, Key=0x10001B under OpenCV 0.9.7(linux version),
     //remove higher bits using AND operator
     if ( (cvWaitKey(10) & 255) == 27 ) break;
   }
   // Release the capture device housekeeping
   cvReleaseCapture( &capture );
   cvDestroyWindow( "mywindow" );
   return 0;
 }
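The blob center above comes from the CBlob library; the same centroid can be obtained from image moments alone. A minimal sketch (not the original program) operating on the dilated binary mask:

#include <opencv2/opencv.hpp>

// centroid of the white pixels in a binary mask
cv::Point blobCenter(const cv::Mat& binaryMask)
{
	cv::Moments m = cv::moments(binaryMask, true);  // true: treat input as binary
	if (m.m00 == 0)
		return cv::Point(0, 0);  // no blob found
	// centroid = (m10/m00, m01/m00)
	return cv::Point(cvRound(m.m10 / m.m00), cvRound(m.m01 / m.m00));
}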
Example #9
unsigned short int* find2 (IplImage* frame2)
{
	//-------------------------------- algorithm region ---------------------------------------------------
	// static, so the pointer returned below remains valid after the function exits
	static unsigned short int xy[6];
	for (int k = 0; k < 6; k++) xy[k] = 0;

	int up = brightness_spot (frame2, (frame2->width/2)-block_size/2, delta_edge);
	int right = brightness_spot (frame2, frame2->width-block_size-delta_edge, (frame2->height/2)-block_size/2);
	int down = brightness_spot (frame2, (frame2->width/2)-block_size/2, frame2->height-block_size-delta_edge);
	int left = brightness_spot (frame2, delta_edge, (frame2->height/2)-block_size/2);

	if ( up>=getW-delta_brightness_max || up<=getB+delta_brightness_max)
	{
		xy[0]++;
		xy[1]=1;
		CvPoint pt = cvPoint( cvRound( frame2->width/2 ), cvRound( delta_edge*3 ) );
		cvCircle(frame2, pt, cvRound( Radius ), CV_RGB(255,0,0), 10 );

		if ( up>=getW-delta_brightness_max )
		{
			cvCircle(frame2, pt, cvRound( Radius/2 ), CV_RGB(255,255,255), 5 );
		}
		else
		{
			cvCircle(frame2, pt, cvRound( Radius/2 ), CV_RGB(0,0,0), 5 );
		}
	}
	//if ( abs(right-w)<=delta_brightness_max || abs(right-b)<=delta_brightness_max)
	if ( right>=getW-delta_brightness_max || right<=getB+delta_brightness_max)
	{
		xy[0]++;
		xy[2]=1;
		CvPoint pt = cvPoint( cvRound(frame2->width - delta_edge*3  ), cvRound( frame2->height/2 ) );
		cvCircle(frame2, pt, cvRound( Radius ), CV_RGB(255,0,0), 10 );

		if ( right>=getW-delta_brightness_max)
		{
			cvCircle(frame2, pt, cvRound( Radius/2 ), CV_RGB(255,255,255), 5 );
		}
		else
		{
			cvCircle(frame2, pt, cvRound( Radius/2 ), CV_RGB(0,0,0), 5 );
		}
	}
	//if ( abs(down-w)<=delta_brightness_max || abs(down-b)<=delta_brightness_max)
	if ( down>=getW-delta_brightness_max || down<=getB+delta_brightness_max)
	{
		xy[0]++;
		xy[3]=1;
		CvPoint pt = cvPoint( cvRound( frame2->width/2 ), cvRound(frame2->height - delta_edge*3 ) );
		cvCircle(frame2, pt, cvRound( Radius ), CV_RGB(255,0,0), 10 );

		if ( down>=getW-delta_brightness_max)
		{
			cvCircle(frame2, pt, cvRound( Radius/2 ), CV_RGB(255,255,255), 5 );
		}
		else
		{
			cvCircle(frame2, pt, cvRound( Radius/2 ), CV_RGB(0,0,0), 5 );
		}

	}
	//if ( abs(left-w)<=delta_brightness_max || abs(left-b)<=delta_brightness_max)
	if ( left>=getW-delta_brightness_max || left<=getB+delta_brightness_max)
	{
		xy[0]++;
		xy[4]=1;
		CvPoint pt = cvPoint( cvRound( delta_edge*3 ), cvRound(frame2->height/2 ) );
		cvCircle(frame2, pt, cvRound( Radius ), CV_RGB(255,0,0), 10 );
		if (left>=getW-delta_brightness_max)
		{
			cvCircle(frame2, pt, cvRound( Radius/2 ), CV_RGB(255,255,255), 5 );
		}
		else
		{
			cvCircle(frame2, pt, cvRound( Radius/2 ), CV_RGB(0,0,0), 5 );
		}
	}

	// if(xy[0]>0) // search for perpendicular lines
	// {
		class Vect	// hard limit of 1000 detected lines!!! May break if it is ever exceeded.
{
public: int x[1000], y[1000];
		Vect()
		{
			for (int i=0; i<1000; i++)
			{
				x[i]=0; 
				y[i]=0;
			}
		}
} Vec;

		IplImage* dst=0;
		// memory storage for the detected lines
		CvMemStorage* storage = cvCreateMemStorage(0);
		CvSeq* lines = 0;
		int i = 0;
		dst = cvCreateImage( cvGetSize(frame2), 8, 1 );
		// edge detection
		cvCanny( frame2, dst, 50, 100, 3 );
		// convert to a color image
		//cvCvtColor( dst, color_dst, CV_GRAY2BGR );

		// find the lines
		lines = cvHoughLines2( dst, storage, CV_HOUGH_PROBABILISTIC, 1, CV_PI/180, 50, 200, 50 );

		std::list<short unsigned int> MXX,MYY;
		// Draw the detected lines and compute the direction vector of each one.
		for( i = 0; i < lines->total; i++ ){
			CvPoint* line = (CvPoint*)cvGetSeqElem(lines,i);
			cvLine( frame2, line[0], line[1], CV_RGB(255,0,0), 3, CV_AA, 0 );
			Vec.x[i]=line[1].x-line[0].x;	// for each segment store the vector (x_end-x_start, y_end-y_start)
			Vec.y[i]=line[1].y-line[0].y;

			MXX.push_back(line[0].x);
			MXX.push_back(line[1].x);
			MYY.push_back(line[0].y);
			MYY.push_back(line[1].y);
		}

		int countPer=0;

		const short int Os=100;
		for (int i=0; i<lines->total; i++)
		{
			CvPoint* line = (CvPoint*)cvGetSeqElem(lines,i);
			/////for (std::list<short unsigned int>::iterator x2=VX.begin(),std::list<short unsigned int>::iterator y2=VY.begin();x2!=VX.end(),y2!=VY.end();x2++,y2++)
			for (int j=i+1; j<lines->total; j++)
			{
				CvPoint* line2 = (CvPoint*)cvGetSeqElem(lines,j);
				if  ( ((Vec.x[i])*(Vec.x[j])+(Vec.y[i])*(Vec.y[j]))>=-Os && ((Vec.x[i])*(Vec.x[j])+(Vec.y[i])*(Vec.y[j]))<=Os)	// perpendicularity: dot product near zero
				{
					countPer++;
				}
			}
		}

		if (lines->total!=0 && countPer>4)
		{
			int MX=0,MY=0;
			for(std::list<short unsigned int>::iterator i=MXX.begin();i!=MXX.end();i++)
			{
				MX+=(*i);
			}
			for(std::list<short unsigned int>::iterator i=MYY.begin();i!=MYY.end();i++)
			{
				MY+=(*i);
			}
			MX/=(lines->total*2); MY/=(lines->total*2);
			//const short int k2=10;

			// draw a cross marker
			/*CvPoint* line=new CvPoint();
			line[0].x=MX-k2;
			line[0].y=MY;
			line[1].x=MX+k2;
			line[1].y=MY;
			cvLine( frame2, line[0], line[1], CV_RGB(0,255,0), 3, CV_AA, 0 );
			line[0].x=MX;
			line[0].y=MY-k2;
			line[1].x=MX;
			line[1].y=MY+k2;
			cvLine( frame2, line[0], line[1], CV_RGB(0,255,0), 3, CV_AA, 0 );
			*/
			
			xy[5]=1;
		}

		// release resources
		cvReleaseMemStorage(&storage);
		cvReleaseImage(&dst);
		MXX.clear();
		MYY.clear();
	// }
	//----------------------------------------------------------------------------------------------------

	return xy;
}
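The perpendicularity test inside find2 is a dot-product threshold on the two segment direction vectors; isolated, it looks like the sketch below (with the same tolerance Os = 100 as in the code):

#include <opencv2/opencv.hpp>

// two segments are treated as roughly perpendicular when the dot product of
// their direction vectors is near zero
static bool roughlyPerpendicular(const CvPoint* a, const CvPoint* b, int tolerance)
{
	int ax = a[1].x - a[0].x, ay = a[1].y - a[0].y;  // direction of segment a
	int bx = b[1].x - b[0].x, by = b[1].y - b[0].y;  // direction of segment b
	int dot = ax*bx + ay*by;
	return dot >= -tolerance && dot <= tolerance;
}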
Example #10
void LegsDetector::update(const std::vector< laser_t >& laserBuffer)
{
   // first remove high peaks due to absorving materials
   laser_t laser[BUFFERLENGTH];
   for (int i = 0; i < _bufferLength; i++)
   {
      laser[i].range = DBL_MAX;
      double angle = laser[i].angle = laserBuffer[i].angle;
	  for (int k = max(0, i-_delta); k <= min( _bufferLength-1, i+_delta); k++)
      {
         double range;
         if (laserBuffer[k].range < laser[i].range)
         {
            range = laser[i].range = laserBuffer[k].range;
            laser[i].x = range * cos(angle);
            laser[i].y = range * sin(angle);
         }
      }
   }
   //                       (0)
   //                        |
   //                        |
   //                        |
   // (+90)------------------|-------------------(-90)
   // reading from right to left i.e. from -90 to +90
   //
   // start extracting all the vertical edges of interest
   // remembering the scan goes from right (-PI/2) to left (+PI/2)
   // left and right edges correspond to the robot's point of view
   //
   //                 -(p1)             (p1)-
   //                   |    (p1)-(p1)   |
   //                   |     |    |     |
   //                   |     |   l|     |r
   //                   |     |    |     |
   //                  L|     |R  (p2)--(p2)
   //                   |     |
   //                   |     |
   //                  (p2)--(p2)
   //
   vector< edge_t<point_t> > vEdge;
   double prevRange = laser[0].range;
   for (int id = 1; id < _bufferLength; id++)
   {
      double range = laser[id].range;

      //if ( range == MAXIMUM_RANGE  || prevRange == MAXIMUM_RANGE ) ;
	  if ((prevRange - range) > MIN_LONG_EDGE)      // possible left long edge
      {
		  edge_t<point_t> e = {Point(laser[id-1].x, laser[id-1].y, laser[id-1].range, laser[id-1].angle),
                              Point(laser[id].x, laser[id].y, laser[id].range, laser[id].angle), 'R'};
         vEdge.push_back(e);
      }
      else if ((range - prevRange) > MIN_LONG_EDGE) // possible right long edge
      {
         edge_t<point_t> e = {Point(laser[id].x, laser[id].y, laser[id].range, laser[id].angle),
                              Point(laser[id-1].x, laser[id-1].y, laser[id-1].range, laser[id-1].angle), 'L'};
         vEdge.push_back(e);
      }
      else if ((prevRange - range) > MIN_SHORT_EDGE) // possible left short edge
      {
         edge_t<point_t> e = {Point(laser[id-1].x, laser[id-1].y, laser[id-1].range, laser[id-1].angle),
                              Point(laser[id].x, laser[id].y, laser[id].range, laser[id].angle), 'r'};
         vEdge.push_back(e);
      }
      else if ((range - prevRange) > MIN_SHORT_EDGE) // possible right short edge
      {
         edge_t<point_t> e = {Point(laser[id].x, laser[id].y, laser[id].range, laser[id].angle),
                              Point(laser[id-1].x, laser[id-1].y, laser[id-1].range, laser[id-1].angle), 'l'};
         vEdge.push_back(e);
      }

      prevRange = range;
   }
   // remove edges too close to each other
   if ( vEdge.empty() ) return;
   vector<edge_t<point_t> >::iterator first = vEdge.begin();
   vector<edge_t<point_t> >::iterator second = first + 1;
   double d1, d2;
   char t1, t2;
   while (second < vEdge.end())
   {
      t1 = toupper(first->type);
      t2 = toupper(second->type);
      d1 = getDistance(second->p1, first->p2);
      d2 = getDistance(first->p1, second->p2);
      if ( t1 == 'R' && t2 == 'R' && d1 < MIN_EDGE_DIST )
      {
         first->p2 = second->p2;
         first->type = 'R';
         second = vEdge.erase(second);
      }
      else if ( t1 == 'L' && t2 == 'L' && d2 < MIN_EDGE_DIST )
      {
         first->p1 = second->p1;
         first->type = 'L';
         second = vEdge.erase(second);
      }
      else
      {
         first++;
         second++;
      }
   }
   if ( vEdge.empty() ) return;
   // draw some stuff for debugging... (must be done now, before vEdge is modified)
   if (_debug)
   {
      CvPoint start;
	  cvSet(_tmpImg, cvScalar(255,255,255));

	  start = cvPoint(DEBUG_WINDOW_WIDTH/2, 0);
	  cvCircle(_tmpImg, start, 1*DEBUG_WINDOW_WIDTH/80, cvScalar(255,0,0));
	  cvCircle(_tmpImg, start, 1*DEBUG_WINDOW_WIDTH/16, cvScalar(255,0,0));
	  cvCircle(_tmpImg, start, 2*DEBUG_WINDOW_WIDTH/16, cvScalar(255,0,0));
	  cvCircle(_tmpImg, start, 3*DEBUG_WINDOW_WIDTH/16, cvScalar(255,0,0));
	  cvCircle(_tmpImg, start, 4*DEBUG_WINDOW_WIDTH/16, cvScalar(255,0,0));
	  cvCircle(_tmpImg, start, 5*DEBUG_WINDOW_WIDTH/16, cvScalar(255,0,0));
	  cvCircle(_tmpImg, start, 6*DEBUG_WINDOW_WIDTH/16, cvScalar(255,0,0));
	  cvCircle(_tmpImg, start, 7*DEBUG_WINDOW_WIDTH/16, cvScalar(255,0,0));
	  cvCircle(_tmpImg, start, 8*DEBUG_WINDOW_WIDTH/16, cvScalar(255,0,0));

      start = cvPoint(METER2PIXEL(laser[0].y) + DEBUG_WINDOW_WIDTH/2,
                              METER2PIXEL(laser[0].x));
      // draw the laser data
      for (int i = 1; i < _bufferLength; i++)
      {
         CvPoint end = cvPoint(METER2PIXEL(laser[i].y) + DEBUG_WINDOW_WIDTH/2,
                               METER2PIXEL(laser[i].x));

		 if (laser[i].range == MAXIMUM_RANGE && laser[i-1].range == MAXIMUM_RANGE)
			 cvLine(_tmpImg, start, end, cvScalar(0,0,0));
		 if (laser[i].range <  MAXIMUM_RANGE && laser[i-1].range <  MAXIMUM_RANGE)
			 cvLine(_tmpImg, start, end, cvScalar(0,0,0));

		 start = end;
      }
      // draw the extremes
      for (unsigned int i = 0; i < vEdge.size(); i++)
      {
         CvScalar color;
		 switch (vEdge[i].type)
         {
            case 'R':
               color = cvScalar(0,0,255); // red
               break;
            case 'L':
               color = cvScalar(255,0,0); // blue
               break;
            case 'r':
               color = cvScalar(0,196,255);  // yellow
               break;
            case 'l':
               color = cvScalar(64,255,0);  // green
               break;
         }
		 // draw min extremes
		 CvPoint center = cvPoint(METER2PIXEL(vEdge[i].p1.y) + DEBUG_WINDOW_WIDTH/2,
                                  METER2PIXEL(vEdge[i].p1.x));
         cvCircle(_tmpImg, center, 2, color);
         // draw max extremes
         CvPoint c1 = cvPoint(METER2PIXEL(vEdge[i].p2.y) - 3 + DEBUG_WINDOW_WIDTH/2,
                              METER2PIXEL(vEdge[i].p2.x) - 3);
         CvPoint c2 = cvPoint(METER2PIXEL(vEdge[i].p2.y) + 3 + DEBUG_WINDOW_WIDTH/2,
                              METER2PIXEL(vEdge[i].p2.x) + 3);
         cvRectangle(_tmpImg, c1, c2, color);
      }
   }

   // extract the horizontal lines of interest
   vector< edge_t<point_t> > hEdge;
   int temp = 1;
   while ( temp > 0 ) { temp = getUpattern(vEdge, hEdge); }
   temp = 1;
   while ( _selectivity < 2 && temp > 0 ) { temp = getPpattern(vEdge, hEdge);}
   temp = 1;
   while ( _selectivity < 1 && temp > 0 ) { temp = getOpattern(vEdge, hEdge);}

   // finally calculate distance and direction of each horizontal line
   _target.clear();
   vector< edge_t<point_t> >::iterator itend = hEdge.end();
   for (vector< edge_t<point_t> >::iterator it = hEdge.begin(); it < itend; it++)
   {
      target_t t;
      // the distance is an average between the two points
      double xm = ((it->p1).x + (it->p2).x) / 2;
      double ym = ((it->p1).y + (it->p2).y) / 2;
      t.distance = sqrt(sqr(xm) + sqr(ym));
      // left PI/2, right -PI/2
      t.bearing = atan2(ym, xm);
      // no height information of course...
      t.pattern = it->type;
      _target.push_back(t);
   }
   // final number of detected people
   _howMany = _target.size();
   // draw the last things for debugging
   if (_debug)
   {
      // draw horizontal edges
      for (unsigned int i = 0; i < hEdge.size(); i++)
      {
         CvPoint p1 = cvPoint(METER2PIXEL(hEdge[i].p1.y) + DEBUG_WINDOW_WIDTH/2,
                              METER2PIXEL(hEdge[i].p1.x));
         CvPoint p2 = cvPoint(METER2PIXEL(hEdge[i].p2.y) + DEBUG_WINDOW_WIDTH/2,
                              METER2PIXEL(hEdge[i].p2.x));
//          cvLine(_tmpImg, p1, p2, cvScalar(0,128,255), 2);
         CvPoint pm = cvPoint((p1.x + p2.x) / 2, (p1.y + p2.y) / 2);
         int thick;
         if (hEdge[i].type == 'U')
            thick = 3;
         else if (hEdge[i].type == 'P')
            thick = 2;
         else
            thick = 1;
         cvLine(_tmpImg, cvPoint(DEBUG_WINDOW_WIDTH/2, 0), pm, cvScalar(0,128,255), thick);
      }

      cvFlip(_tmpImg, NULL, -1);
      cvResize(_tmpImg, _debugImage, CV_INTER_NN);
      cvShowImage("Legs detector", _debugImage);
      cvWaitKey(20);
      //if (_delay)
      //   cvWaitKey(_delay);  // handles event processing of HIGHGUI library
   }
   return;
}
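The final loop converts each horizontal edge into a range-bearing target; a minimal sketch of that computation (not the original class code), assuming sensor-frame coordinates with x forward and y to the left, so left is +PI/2 and right is -PI/2 as in the comment above:

#include <cmath>

struct TargetEstimate { double distance, bearing; };

TargetEstimate estimateTarget(double x1, double y1, double x2, double y2)
{
	double xm = (x1 + x2) / 2.0;  // midpoint of the horizontal edge
	double ym = (y1 + y2) / 2.0;
	TargetEstimate t;
	t.distance = std::sqrt(xm*xm + ym*ym);
	t.bearing  = std::atan2(ym, xm);  // left +PI/2, right -PI/2
	return t;
}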
int main( int argc, char **argv ){ 
	int key;							//	key input variable
	CvCapture *capture = 0;				//	camera capture structure
	IplImage *frameImage;				//	IplImage for the captured frame
	IplImage *frameImage2;				//	second IplImage for the captured frame

	//	create the working images
	IplImage *backgroundImage = cvCreateImage( cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 1 );		// background image
	IplImage *grayImage = cvCreateImage( cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 1 );				// grayscale image
	IplImage *differenceImage = cvCreateImage( cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 1 );		// difference image

	IplImage *hsvImage = cvCreateImage( cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 3 );				// HSV image
	IplImage *hueImage = cvCreateImage( cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 1 );				// hue (H) plane
	IplImage *saturationImage = cvCreateImage( cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 1 );		// saturation (S) plane
	IplImage *valueImage = cvCreateImage( cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 1 );				// value (V) plane
	IplImage *thresholdImage1 = cvCreateImage( cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 1 );		// region where value exceeds THRESH_BOTTOM
//	IplImage *thresholdImage2 = cvCreateImage( cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 1 );		// region where value is at most THRESH_TOP
//	IplImage *thresholdImage3 = cvCreateImage( cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 1 );		// AND of thresholdImage1 and thresholdImage2
	IplImage *lightImage = cvCreateImage( cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 1 );				// extracted bright (glowing) region
	
	char windowNameCapture[] = "Capture"; 			// window showing the captured image
	char windowNameLight[] = "Light";				// window showing the glowing region
	char windowNameCapture2[] = "Capture2"; 		// window showing the second captured image
	char windowNameThreshold[] = "Threshold";		// window showing thresholdImage1

	CvMoments moment;
	double m_00;
	double m_10;
	double m_01;
	int gravityX;
	int gravityY;
	unsigned char h,s,v;
    
	
	
	int x, y;
	int m,d,ss;
	uchar h0,s0,v0,h1,s1,v1,h2,s2,v2,h3,s3,v3,vv;
	int rr,gg,bb;




	//	initialize the camera
	if ( ( capture = cvCreateCameraCapture( 0 ) ) == NULL ) {
		//	no camera was found
		printf( "Camera not found\n" );
		return -1;
	}

	cvSetCaptureProperty (capture, CV_CAP_PROP_FRAME_WIDTH, WIDTH);
	cvSetCaptureProperty (capture, CV_CAP_PROP_FRAME_HEIGHT, HEIGHT);

	//	create the windows
	cvNamedWindow( windowNameCapture, CV_WINDOW_AUTOSIZE );
	cvNamedWindow( windowNameLight, CV_WINDOW_AUTOSIZE );
  	cvNamedWindow( windowNameCapture2, CV_WINDOW_AUTOSIZE );
	cvNamedWindow( windowNameThreshold, CV_WINDOW_AUTOSIZE );

  	//	grab one frame from the camera to initialize the background
	frameImage = cvQueryFrame( capture );
	//	grayscale frameImage and use it as the background image
	cvCvtColor( frameImage, backgroundImage, CV_BGR2GRAY );
	frameImage2 = cvCreateImage( cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 3 );
	cvSet (frameImage2, CV_RGB(0,0,0));  // fill with black
	
	rr=0;
	gg=150;
	bb=0;
	v=0;
	m=0;
	d=0;
	ss=0;
	//	main loop
	while( 1 ) {
		frameImage = cvQueryFrame( capture );
		cvCvtColor( frameImage, hsvImage, CV_BGR2HSV );

      /* access the pixel values directly */
		x = 160;
		y = 120;

		h0 = hsvImage->imageData[hsvImage->widthStep * y + x * 3];        // H
		s0 = hsvImage->imageData[hsvImage->widthStep * y + x * 3 + 1];    // S
		v0 = hsvImage->imageData[hsvImage->widthStep * y + x * 3 + 2];    // V

		// mark each sampled pixel in gray
		frameImage->imageData[frameImage->widthStep * y + x * 3] = 200;
		frameImage->imageData[frameImage->widthStep * y + x * 3 + 1] = 200;
		frameImage->imageData[frameImage->widthStep * y + x * 3 + 2] = 200;

		x = 161;
		y = 120;
		h1 = hsvImage->imageData[hsvImage->widthStep * y + x * 3];        // H
		s1 = hsvImage->imageData[hsvImage->widthStep * y + x * 3 + 1];    // S
		v1 = hsvImage->imageData[hsvImage->widthStep * y + x * 3 + 2];    // V

		frameImage->imageData[frameImage->widthStep * y + x * 3]     = 200;
		frameImage->imageData[frameImage->widthStep * y + x * 3 + 1] = 200;
		frameImage->imageData[frameImage->widthStep * y + x * 3 + 2] = 200;

		x = 160;
		y = 121;

		h2 = hsvImage->imageData[hsvImage->widthStep * y + x * 3];        // H
		s2 = hsvImage->imageData[hsvImage->widthStep * y + x * 3 + 1];    // S
		v2 = hsvImage->imageData[hsvImage->widthStep * y + x * 3 + 2];    // V

		frameImage->imageData[frameImage->widthStep * y + x * 3]     = 200;
		frameImage->imageData[frameImage->widthStep * y + x * 3 + 1] = 200;
		frameImage->imageData[frameImage->widthStep * y + x * 3 + 2] = 200;


		x = 161;
		y = 121;
		h3 = hsvImage->imageData[hsvImage->widthStep * y + x * 3];        // H
		s3 = hsvImage->imageData[hsvImage->widthStep * y + x * 3 + 1];    // S
		v3 = hsvImage->imageData[hsvImage->widthStep * y + x * 3 + 2];    // V

		frameImage->imageData[frameImage->widthStep * y + x * 3]     = 200;
		frameImage->imageData[frameImage->widthStep * y + x * 3 + 1] = 200;
		frameImage->imageData[frameImage->widthStep * y + x * 3 + 2] = 200;

		// average the V (brightness) channel over the 2x2 block sampled above
		vv = (v0 + v1 + v2 + v3) / 4;

		if (vv < 200) {
			// dark sample: treat as a "0" bit
			if (m == 0)
				ss = 1;					// the first dark bit starts the decoder
			if (ss)
				m = m + 1;
			printf("0 m= %d,d=%02X  \n", m, d);
		}
		else {
			// bright sample: treat as a "1" bit
//			printf("%d \n",m);
			if (ss) {
				d = d + (1 << (m - 1));	// set bit (m-1): bits arrive LSB first
				m = m + 1;
			}
			printf("1 m= %d,d=%02X  \n", m, d);
		}

		// after more than 8 samples, interpret the accumulated bits as a character code
		if (m > 8) {
			printf("code d= %c   \n", d);

			if (d == 97) {				// 'a': switch the circle color to blue
				rr = 0;
				gg = 0;
				bb = 150;
			}

			if (d == 98) {				// 'b': switch the circle color to red
				rr = 150;
				gg = 0;
				bb = 0;
			}

			// reset the decoder for the next code
			m = 0;
			d = 0;
			ss = 0;
		}

		//	(the frame was already grabbed at the top of the loop)
//		frameImage = cvQueryFrame( capture );
		//	convert frameImage to grayscale into grayImage
		cvCvtColor( frameImage, grayImage, CV_BGR2GRAY );
		//	take the absolute difference between grayImage and the background image
		cvAbsDiff( grayImage, backgroundImage, differenceImage );

		//	(frameImage was already converted from BGR to HSV above)
//		cvCvtColor( frameImage, hsvImage, CV_BGR2HSV );
		//	split the HSV image into H, S and V planes
		cvSplit( hsvImage, hueImage, saturationImage, valueImage, NULL );
		//	extract the bright pixels (V above THRESH_BOTTOM)
		cvThreshold( valueImage, thresholdImage1, THRESH_BOTTOM, THRESHOLD_MAX_VALUE, CV_THRESH_BINARY );
//		cvThreshold( hueImage, thresholdImage2, THRESH_TOP, THRESHOLD_MAX_VALUE, CV_THRESH_BINARY_INV );
//		cvAnd( thresholdImage1, thresholdImage2, thresholdImage3, NULL );

		//	AND the background difference with the bright region
		cvAnd( differenceImage, thresholdImage1, lightImage, NULL );
		
		//	compute the centroid of the lit region: x = M10/M00, y = M01/M00
		cvMoments( lightImage, &moment, 0 );
		m_00 = cvGetSpatialMoment( &moment, 0, 0 );
		m_10 = cvGetSpatialMoment( &moment, 1, 0 );
		m_01 = cvGetSpatialMoment( &moment, 0, 1 );
		//	guard against division by zero when no lit pixels were found
		gravityX = ( m_00 != 0 ) ? (int)( m_10 / m_00 ) : 0;
		gravityY = ( m_00 != 0 ) ? (int)( m_01 / m_00 ) : 0;

		if ( 0 < gravityX ) {
			h = hsvImage->imageData[hsvImage->widthStep * gravityY + gravityX * 3];        // H
			s = hsvImage->imageData[hsvImage->widthStep * gravityY + gravityX * 3 + 1];    // S
			v = hsvImage->imageData[hsvImage->widthStep * gravityY + gravityX * 3 + 2];    // V

//			printf ("x= %d ,y= %d v= %d,s= %d,h= %d  \n" ,gravityX,gravityY,v,s,h);

			//	draw a circle on the canvas when the centroid pixel is bright enough
			if ( v > 200 ) {
				cvCircle( frameImage2, cvPoint( gravityX, gravityY ), CIRCLE_RADIUS,
						CV_RGB( rr, gg, bb ), LINE_THICKNESS, LINE_TYPE, 0 );
			}
		}

		//	display the images
		cvShowImage( windowNameCapture, frameImage );
		cvShowImage( windowNameLight, lightImage );
		cvShowImage( windowNameCapture2, frameImage2 );
		cvShowImage( windowNameThreshold, thresholdImage1 );

		//	handle key input
		key = cvWaitKey( 10 );
		if( key == 'q' )
			//	quit the loop when 'q' is pressed
			break;
		else if( key == 'b' ) {
			//	'b': take the current frame as the new background image
			frameImage = cvQueryFrame( capture );
			cvCvtColor( frameImage, backgroundImage, CV_BGR2GRAY );
		}
		else if( key == 'c' ) {
			//	'c': save the current images
			cvSaveImage( "image/frame.bmp", frameImage );
			cvSaveImage( "image/light.bmp", lightImage );
		}
	}


	//	release the capture
	cvReleaseCapture( &capture );
	//	release the images (frameImage belongs to the capture and must not be released)
	cvReleaseImage( &frameImage2 );
	cvReleaseImage( &backgroundImage );
	cvReleaseImage( &grayImage );
	cvReleaseImage( &differenceImage );
	cvReleaseImage( &hsvImage );
	cvReleaseImage( &hueImage );
	cvReleaseImage( &saturationImage );
	cvReleaseImage( &valueImage );
	cvReleaseImage( &thresholdImage1 );
//	cvReleaseImage( &thresholdImage2 );
//	cvReleaseImage( &thresholdImage3 );
	cvReleaseImage( &lightImage );
	//	destroy the windows
	cvDestroyWindow( windowNameCapture );
	cvDestroyWindow( windowNameLight );
	cvDestroyWindow( windowNameThreshold );
	cvDestroyWindow( windowNameCapture2 );

	return 0;
} 
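The bit-twiddling in the main loop above is easier to see in isolation. Below is a minimal sketch of the same blink-code decoder, factored into a reusable function; the names hsv_at, BlinkDecoder, and blink_decoder_feed are hypothetical and not part of the original program.

/* Hypothetical helper: read one HSV channel (0=H, 1=S, 2=V) at (x, y). */
static unsigned char hsv_at( const IplImage* img, int x, int y, int c )
{
	return (unsigned char)img->imageData[img->widthStep * y + x * 3 + c];
}

/* Decoder state: samples below the threshold count as 0 bits, the rest as
   1 bits; bits are assembled LSB first, starting at the first dark sample. */
typedef struct {
	int m;		/* number of bits sampled so far */
	int d;		/* byte assembled so far, LSB first */
	int ss;		/* 1 once the first dark bit has started a code */
} BlinkDecoder;

/* Feed one brightness sample; returns the decoded byte, or -1 if incomplete. */
static int blink_decoder_feed( BlinkDecoder* bd, unsigned char vv, unsigned char thresh )
{
	if ( vv < thresh ) {				/* dark sample: a 0 bit */
		if ( bd->m == 0 )
			bd->ss = 1;
		if ( bd->ss )
			bd->m++;
	}
	else if ( bd->ss ) {				/* bright sample: a 1 bit */
		bd->d += 1 << (bd->m - 1);
		bd->m++;
	}
	if ( bd->m > 8 ) {					/* a full code has arrived */
		int code = bd->d;
		bd->m = bd->d = bd->ss = 0;
		return code;
	}
	return -1;
}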
Example #12
void CvCalibFilter::DrawPoints( CvMat** dstarr )
{
    int i, j;

    if( !dstarr )
    {
        assert(0);
        return;
    }

    if( latestCounts )
    {
        for( i = 0; i < cameraCount; i++ )
        {
            if( dstarr[i] && latestCounts[i] )
            {
                CvMat dst_stub, *dst;
                int count = 0;
                bool found = false;
                CvPoint2D32f* pts = 0;

                GetLatestPoints( i, &pts, &count, &found );

                dst = cvGetMat( dstarr[i], &dst_stub );

                static const CvScalar line_colors[] =
                {
                    {{0,0,255}},
                    {{0,128,255}},
                    {{0,200,200}},
                    {{0,255,0}},
                    {{200,200,0}},
                    {{255,0,0}},
                    {{255,0,255}}
                };

                const int colorCount = sizeof(line_colors)/sizeof(line_colors[0]);
                const int r = 4;
                CvScalar color = line_colors[0];
                CvPoint prev_pt = { 0, 0};

                for( j = 0; j < count; j++ )
                {
                    CvPoint pt;
                    pt.x = cvRound(pts[j].x);
                    pt.y = cvRound(pts[j].y);

                    if( found )
                    {
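                        // chessboard etalon: color each row of corners differently;
                        // etalonParams[0] appears to hold the number of corners per row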
                        if( etalonType == CV_CALIB_ETALON_CHESSBOARD )
                            color = line_colors[(j/cvRound(etalonParams[0]))%colorCount];
                        else
                            color = CV_RGB(0,255,0);

                        if( j != 0 )
                            cvLine( dst, prev_pt, pt, color, 1, CV_AA );
                    }

                    cvLine( dst, cvPoint( pt.x - r, pt.y - r ),
                            cvPoint( pt.x + r, pt.y + r ), color, 1, CV_AA );

                    cvLine( dst, cvPoint( pt.x - r, pt.y + r),
                            cvPoint( pt.x + r, pt.y - r), color, 1, CV_AA );

                    cvCircle( dst, pt, r+1, color, 1, CV_AA );

                    prev_pt = pt;
                }
            }
        }
    }
}
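The per-point marker drawn by the loop above (an X-shaped cross plus a surrounding circle) can be factored into a small helper. A minimal sketch using only cvLine and cvCircle; the name drawPointMarker is hypothetical and not part of CvCalibFilter:

static void drawPointMarker( CvMat* dst, CvPoint pt, int r, CvScalar color )
{
    /* two diagonal strokes of half-size r form the X */
    cvLine( dst, cvPoint( pt.x - r, pt.y - r ),
            cvPoint( pt.x + r, pt.y + r ), color, 1, CV_AA );
    cvLine( dst, cvPoint( pt.x - r, pt.y + r ),
            cvPoint( pt.x + r, pt.y - r ), color, 1, CV_AA );

    /* circle drawn just outside the cross */
    cvCircle( dst, pt, r + 1, color, 1, CV_AA );
}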