Example #1
void ObjectTracker::processImage( IplImage* frame, IplImage** output )
{
    // TODO: this is only an object tracker; it should not return an illustrated image.
    //  Instead, it should return a structure of information about the tracked objects.
    //  An Illustrator component still needs to be written (a sketch of such a
    //  structure follows this function).
    //
    imgSize_ = cvSize(frame->width, frame->height);
    if (first_) {
        temp_ = cvCreateImage(imgSize_, IPL_DEPTH_8U, 3);
        mask3C_ = cvCreateImage(imgSize_, IPL_DEPTH_8U, 3);
        mask_ = cvCreateImage(imgSize_, IPL_DEPTH_8U, 1);
        first_ = false;
        numStat_ = 100;
        rectsStat_ = new CvRect[numStat_];
        centersStat_ = new CvPoint[numStat_];
    }

    if (*output == NULL) {
        *output = cvCreateImage(imgSize_, IPL_DEPTH_8U, 3);
    }
    cvCopy(frame, *output);
    cvCopy(frame, temp_);
    cvSmooth(temp_, temp_, CV_GAUSSIAN);
    mog_->process((cv::Mat)temp_, (cv::Mat)mask3C_);
    cvCvtColor(mask3C_, mask_, CV_RGB2GRAY);

    cvMorphologyEx(mask_, mask_, 0, 0, CV_MOP_OPEN, openIteration_);
    cvMorphologyEx(mask_, mask_, 0, 0, CV_MOP_CLOSE, closeIteration_);

    // Collect statistics
    if (!rectsStat_)
        rectsStat_ = new CvRect[numStat_];
    if (!centersStat_)
        centersStat_ = new CvPoint[numStat_];

    count_ = numStat_;

    findConnectedComponents(mask_, 0, perimScaleThrhold_, &count_, rectsStat_, centersStat_);
    cvCvtColor(mask_, mask3C_, CV_GRAY2RGB);

    matchObjects(centersStat_, count_);
    for (size_t i = 0; i < currObjs_.size(); i++) {
        drawText(mask3C_, currObjs_[i].label_.c_str(),
                 currObjs_[i].positionHistory_.back(),
                 cvScalar(255,255,0));
        drawCross(mask3C_,
                  currObjs_[i].positionHistory_.back(),
                  5, cvScalar(255,0,0));
        drawCross(mask3C_,
                  currObjs_[i].predictedPositionHistory_.back(),
                  5, cvScalar(0,255,0));
        drawCross(mask3C_,
                  currObjs_[i].correctedPositionHistory_.back(),
                  5, cvScalar(0,0,255));
    }

    cvNamedWindow("4", CV_WINDOW_NORMAL);
    cvShowImage("4", mask3C_);
}
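
The TODO at the top of processImage() asks for a data structure instead of a drawn image. A minimal sketch of what that might look like; TrackedObject and its field names are hypothetical, not part of the original code:

// Hypothetical sketch only: a result record that processImage() could fill
// in instead of painting labels and crosses onto mask3C_.
#include <string>
#include <vector>

struct TrackedObject
{
    std::string label;              // from currObjs_[i].label_
    CvPoint position;               // last entry of positionHistory_
    CvPoint predictedPosition;      // last entry of predictedPositionHistory_
    CvPoint correctedPosition;      // last entry of correctedPositionHistory_
};

// A separate Illustrator class could then render a std::vector<TrackedObject>,
// keeping the tracker free of any drawing or cvShowImage() calls.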
static void
find_connected_components (IplImage * mask, int poly1_hull0, float perimScale,
    CvMemStorage * mem_storage, CvSeq * contours)
{
  CvContourScanner scanner;
  CvSeq *c;
  int numCont = 0;
  /* Just some convenience variables */
  const CvScalar CVX_WHITE = CV_RGB (0xff, 0xff, 0xff);
  const CvScalar CVX_BLACK = CV_RGB (0x00, 0x00, 0x00);

  /* CLEAN UP RAW MASK */
  cvMorphologyEx (mask, mask, 0, 0, CV_MOP_OPEN, CVCLOSE_ITR);
  cvMorphologyEx (mask, mask, 0, 0, CV_MOP_CLOSE, CVCLOSE_ITR);
  /* FIND CONTOURS AROUND ONLY BIGGER REGIONS */
  if (mem_storage == NULL) {
    mem_storage = cvCreateMemStorage (0);
  } else {
    cvClearMemStorage (mem_storage);
  }

  scanner = cvStartFindContours (mask, mem_storage, sizeof (CvContour),
      CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, cvPoint (0, 0));

  while ((c = cvFindNextContour (scanner)) != NULL) {
    double len = cvContourPerimeter (c);   /* was cvContourArea; the threshold below is a perimeter threshold */
    /* calculate perimeter len threshold: */
    double q = (mask->height + mask->width) / perimScale;
    /* Get rid of blob if its perimeter is too small: */
    if (len < q) {
      cvSubstituteContour (scanner, NULL);
    } else {
      /* Smooth its edges if its large enough */
      CvSeq *c_new;
      if (poly1_hull0) {
        /* Polygonal approximation */
        c_new =
            cvApproxPoly (c, sizeof (CvContour), mem_storage, CV_POLY_APPROX_DP,
            CVCONTOUR_APPROX_LEVEL, 0);
      } else {
        /* Convex Hull of the segmentation */
        c_new = cvConvexHull2 (c, mem_storage, CV_CLOCKWISE, 1);
      }
      cvSubstituteContour (scanner, c_new);
      numCont++;
    }
  }
  contours = cvEndFindContours (&scanner);

  /* PAINT THE FOUND REGIONS BACK INTO THE IMAGE */
  cvZero (mask);
  /* DRAW PROCESSED CONTOURS INTO THE MASK */
  for (c = contours; c != NULL; c = c->h_next)
    cvDrawContours (mask, c, CVX_WHITE, CVX_BLACK, -1, CV_FILLED, 8, cvPoint (0,
            0));
}
void find_red_points( IplImage* source, IplImage* result, IplImage* temp )
{
	// TO DO:  Write code to select all the red road sign points.  You may need to clean up the result
	//        using mathematical morphology.  The result should be a binary image with the selected red
	//        points as white points.  The temp image passed may be used in your processing.
	
	 IplImage* temp_hsv= cvCloneImage(source);
	 cvCvtColor(source,temp_hsv,CV_BGR2HSV);

	 int width_step=source->widthStep;
	 int pixel_step=source->widthStep/source->width;
	 int number_channels=source->nChannels;
	 unsigned char white_pixel[4] = {255,255,255,0};
	 cvZero( result );
	 int row=0,col=0;


	 for (row=0; row < result->height; row++)
	 {
		  for (col=0; col < result->width; col++)
		  {
			   unsigned char* curr_point = GETPIXELPTRMACRO( temp_hsv, col, row, width_step, pixel_step );
			   //determine whether the channel values fall within the ranges that represent the red parts of the sign
			   if ( ( ((curr_point[CHAN_H] >= THRESHOLD_H_RANGE1_LOW) && (curr_point[CHAN_H] <= THRESHOLD_H_RANGE1_HIGH))
			          || ((curr_point[CHAN_H] >= THRESHOLD_H_RANGE2_LOW) && (curr_point[CHAN_H] < THRESHOLD_H_RANGE2_HIGH)) )
			        && (curr_point[CHAN_S] > THRESHOLD_S_LOW)
			        && (curr_point[CHAN_V] > THRESHOLD_V_LOW) )
			   {
					PUTPIXELMACRO( result, col, row, white_pixel, width_step, pixel_step, number_channels );
			   }
		  }
	 }

	 //clean up the result image
	 cvMorphologyEx( result, result, NULL, NULL, CV_MOP_OPEN, 1 );
	 cvMorphologyEx( result, result, NULL, NULL, CV_MOP_CLOSE, 1 );

	 cvReleaseImage( &temp_hsv );  // temp_hsv was cloned above and would otherwise leak
}
void determine_moving_points_using_running_gaussian_averages( IplImage *current_frame, IplImage *averages_image, IplImage *stan_devs_image, IplImage *moving_mask_image )
{


	//Get layout data for the 8-bit image and the float images
	int width_step_cur=current_frame->widthStep;
	int pixel_step_cur=current_frame->widthStep/current_frame->width;
	int number_channels_cur=current_frame->nChannels;
	
	int width_step_avg=averages_image->widthStep;
	int pixel_step_avg=averages_image->widthStep/averages_image->width;

	int row=0,col=0;

	//Clear mask image
	cvZero(moving_mask_image);

	//Scan every pixel of images
	for (row=0; row < current_frame->height; row++)
		for (col=0; col < current_frame->width; col++)
		{
			//Get current point of current_frame
			unsigned char* curr_point_current = GETPIXELPTRMACRO( current_frame, col, row, width_step_cur, pixel_step_cur );

			//Get current points of averages_image and stan_devs_image, casting the char* returned by GETPIXELPTRMACRO to float*
			float* curr_point_avg = reinterpret_cast<float*>( GETPIXELPTRMACRO( averages_image, col, row, width_step_avg, pixel_step_avg ) );
			float* curr_point_stdev = reinterpret_cast<float*>( GETPIXELPTRMACRO( stan_devs_image, col, row, width_step_avg, pixel_step_avg ) );

			float abs_diff[3];
			float stdev_mul[3];

			unsigned char white_pixel[4] = {255,255,255};

			//Check every channel
			for (int i=0; i<3; i++){
				//Calc the absolute difference
				abs_diff[i]=(float)fabs(((double)curr_point_current[i])-((double)curr_point_avg[i]));
				//Calc k*standard deviation
				stdev_mul[i]=curr_point_stdev[i]*k;

			}

			//Check if (abs_diff > k*std_dev) in at least one channel
			if((abs_diff[0]>stdev_mul[0])||(abs_diff[1]>stdev_mul[1])||(abs_diff[2]>stdev_mul[2])){
				//Put a white pixel in moving_mask_image; the mask is binary, so opening and closing can clean it up afterwards
				PUTPIXELMACRO( moving_mask_image, col, row, white_pixel, width_step_cur, pixel_step_cur, number_channels_cur );
			}

		}

		// Apply morphological opening and closing operations to clean up the image
		cvMorphologyEx( moving_mask_image, moving_mask_image, NULL, NULL, CV_MOP_OPEN, 1 );
		cvMorphologyEx( moving_mask_image, moving_mask_image, NULL, NULL, CV_MOP_CLOSE, 2 );
		
}
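
determine_moving_points_using_running_gaussian_averages() only classifies pixels; it assumes averages_image and stan_devs_image are maintained elsewhere. A minimal sketch of the standard running-Gaussian-average update that would accompany it; the function name and the learning rate alpha are assumptions, not from the original code:

// Hypothetical sketch: per-channel running update of the background model.
// alpha is an assumed learning rate (e.g. 0.01); requires <math.h> for sqrtf.
void update_running_gaussian_averages( IplImage *current_frame,
		IplImage *averages_image, IplImage *stan_devs_image, float alpha )
{
	int width_step_cur = current_frame->widthStep;
	int pixel_step_cur = current_frame->widthStep/current_frame->width;
	int width_step_avg = averages_image->widthStep;
	int pixel_step_avg = averages_image->widthStep/averages_image->width;

	for (int row = 0; row < current_frame->height; row++)
		for (int col = 0; col < current_frame->width; col++)
		{
			unsigned char* cur = GETPIXELPTRMACRO( current_frame, col, row, width_step_cur, pixel_step_cur );
			float* avg   = reinterpret_cast<float*>( GETPIXELPTRMACRO( averages_image, col, row, width_step_avg, pixel_step_avg ) );
			float* stdev = reinterpret_cast<float*>( GETPIXELPTRMACRO( stan_devs_image, col, row, width_step_avg, pixel_step_avg ) );

			for (int i = 0; i < 3; i++)
			{
				float diff = (float)cur[i] - avg[i];
				avg[i] += alpha*diff;   // mu <- (1-alpha)*mu + alpha*I
				float var = (1.0f - alpha)*stdev[i]*stdev[i] + alpha*diff*diff; // sigma^2 update
				stdev[i] = sqrtf(var);
			}
		}
}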
CV_IMPL CvSeq*
cvSegmentFGMask( CvArr* _mask, int poly1Hull0, float perimScale,
                 CvMemStorage* storage, CvPoint offset )
{
    CvMat mstub, *mask = cvGetMat( _mask, &mstub );
    CvMemStorage* tempStorage = storage ? storage : cvCreateMemStorage();
    CvSeq *contours, *c;
    int nContours = 0;
    CvContourScanner scanner;
    
    // clean up raw mask
    cvMorphologyEx( mask, mask, 0, 0, CV_MOP_OPEN, 1 );
    cvMorphologyEx( mask, mask, 0, 0, CV_MOP_CLOSE, 1 );

    // find contours around only bigger regions
    scanner = cvStartFindContours( mask, tempStorage,
        sizeof(CvContour), CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE, offset );
    
    while( (c = cvFindNextContour( scanner )) != 0 ) 
    {
        double len = cvContourPerimeter( c );
        double q = (mask->rows + mask->cols)/perimScale; // calculate perimeter len threshold
        if( len < q ) //Get rid of blob if it's perimeter is too small
            cvSubstituteContour( scanner, 0 );
        else //Smooth it's edges if it's large enough
        {
            CvSeq* newC;
            if( poly1Hull0 ) //Polygonal approximation of the segmentation 
                newC = cvApproxPoly( c, sizeof(CvContour), tempStorage, CV_POLY_APPROX_DP, 2, 0 ); 
            else //Convex Hull of the segmentation
                newC = cvConvexHull2( c, tempStorage, CV_CLOCKWISE, 1 );
            cvSubstituteContour( scanner, newC );
            nContours++;
        }
    }
    contours = cvEndFindContours( &scanner );

    // paint the found regions back into the image
    cvZero( mask );
    for( c=contours; c != 0; c = c->h_next ) 
        cvDrawContours( mask, c, cvScalarAll(255), cvScalarAll(0), -1, CV_FILLED, 8,
            cvPoint(-offset.x,-offset.y));

    if( tempStorage != storage )
    {
        cvReleaseMemStorage( &tempStorage );
        contours = 0;
    }

    return contours;
}
Example #6
/**
 * Applies circle detection and filtering to locate the position of the dot
 * within an image, then draws a red rectangle around it in the original image.
 * @param img: pointer to the input image
 * @return PointImage: pointer to a newly created data structure holding the image,
 * the center position of the dot within the image, and its radius.
 */
PointImage* getPoint(IplImage* img) {
	IplImage* clone = cvCreateImage(cvSize(img->width, img->height), img->depth, 1);
	cvCvtColor(img, clone, CV_RGB2GRAY);
	cvThreshold(clone, clone, 127, 255, CV_THRESH_BINARY_INV);
	cvSmooth(clone, clone, CV_GAUSSIAN, 31, 31);
	cvMorphologyEx(clone, clone, NULL, NULL, CV_MOP_CLOSE, 6);
	CvMemStorage* storage = cvCreateMemStorage();
	CvSeq* circles = cvHoughCircles(clone, storage, CV_HOUGH_GRADIENT, ((double) (clone->width * clone->height * 5)) / ((double) 964 * 726),
			50, 200, 100, 0, 35);
	float* p;
	PointImage* out = NULL; // initialize so a miss returns NULL instead of garbage
	for (int i = circles->total - 1; i >= 0; i--) { // valid sequence indices are 0 .. total-1
		p = (float*) cvGetSeqElem(circles, i);
		if (*I(cvRound(p[0]), cvRound(p[1]), clone) > 185)
			if (abs(cvRound(p[0]) - (clone->width / 2) * (clone->width / clone->height)) <= 100) {
				cvRectangle(img, cvPoint(cvRound(p[0] - p[2]), cvRound(p[1] - p[2])), cvPoint(cvRound(p[0] + p[2]), cvRound(p[1] + p[2])),
						CV_RGB(255,0,0), 3);
				out = new PointImage(clone, cvPoint(cvRound(p[0]), cvRound(p[1])), cvRound(p[2]));
				break;
			}
	}
	cvNamedWindow("Circle Detection", CV_WINDOW_AUTOSIZE);
	cvShowImage("Circle Detection", img);
	cvReleaseMemStorage(&storage);
	return out;
}
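
A possible call site for getPoint(), assuming PointImage exposes members named center and radius (the real member names are not shown in the original):

// Hypothetical usage sketch; member names of PointImage are assumed.
IplImage* img = cvLoadImage("dot.png");            // assumed input file
PointImage* pi = getPoint(img);
if (pi != NULL)
	printf("dot at (%d, %d), radius %d\n", pi->center.x, pi->center.y, pi->radius);
cvWaitKey(0);
cvReleaseImage(&img);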
//
// function "noiseRemoval":
// applies morphological opening to the color-segmentation image
//
IplImage* noiseRemoval(IplImage* inputImage)
{
	int iWidth = inputImage->width;
	int iHeight = inputImage->height;

	IplImage* imageNoiseRem = cvCreateImage(cvSize(iWidth,iHeight),IPL_DEPTH_8U,1);
	if(!imageNoiseRem)
		exit(EXIT_FAILURE);

	IplConvKernel* structureEle1 =cvCreateStructuringElementEx(
		3,
		3,
		1,
		1,
		CV_SHAPE_ELLIPSE,
		0);

	int operationType[2] = {
		CV_MOP_OPEN,
		CV_MOP_CLOSE
	};

	//Note: open/close (and likewise erode/dilate) appear to behave reversed here.
	cvMorphologyEx(inputImage,imageNoiseRem,NULL,structureEle1,operationType[0],1);
	//cvMorphologyEx(inputImage,imageNoiseRem,NULL,structureEle1,operationType[1],1);

	//to reconnect regions broken by the morphology operation above
	//cvErode(imageNoiseRem,imageNoiseRem,structureEle1,1);
	
	cvReleaseStructuringElement(&structureEle1);

	return imageNoiseRem;
}
Example #8
void filter_and_threshold(struct ctx *ctx)
{

	/* Soften image */
	cvSmooth(ctx->image, ctx->temp_image3, CV_GAUSSIAN, 11, 11, 0, 0);
	/* Remove some impulsive noise */
	cvSmooth(ctx->temp_image3, ctx->temp_image3, CV_MEDIAN, 11, 11, 0, 0);
	cvCvtColor(ctx->temp_image3, ctx->temp_image3, CV_BGR2HSV);

	/*
	 * Apply threshold on HSV values to detect skin color
	 */
/*	cvInRangeS(ctx->temp_image3,
		   cvScalar(0, 55, 90, 255), // cvScalar( (b), (g), (r), 0 )
		   cvScalar(28, 175, 230, 255),
		   ctx->thr_image);
*/
	cvInRangeS(ctx->temp_image3,
		   cvScalar(100, 200, 200, 0), // lower bound in HSV: cvScalar(H, S, V, 0)
		   cvScalar(200, 220, 255, 0),
		   ctx->thr_image);


	/* Apply morphological opening */
	cvMorphologyEx(ctx->thr_image, ctx->thr_image, NULL, ctx->kernel,
		       CV_MOP_OPEN, 1);  // 1 iteration of opening
	cvSmooth(ctx->thr_image, ctx->thr_image, CV_GAUSSIAN, 3, 3, 0, 0);
}
Example #9
/*!
 * @function close
 * @discussion Perform image closing with a custom kernel.
 * @updated 2011-4-13
 */
char* close(IplImage* frameImage)
{
    //Select based on the capture dimensions.
    switch(captureSize)
    {
        case(SMALL_BACK):
        case(SMALL_FRONT):
            convertedImage = cvCreateImage(cvSize(192, 144), IPL_DEPTH_8U, 4);
            break;
        case(MEDIUM_BACK):
        case(LARGE_FRONT):
        case(MEDIUM_FRONT):
            convertedImage = cvCreateImage(cvSize(640, 480), IPL_DEPTH_8U, 4);
            break;
        case(LARGE_BACK):
            convertedImage = cvCreateImage(cvSize(1280, 720), IPL_DEPTH_8U, 4);
            break;
    }
    
    cvCopy(frameImage, convertedImage, 0);
    
    IplConvKernel* closeKernel = cvCreateStructuringElementEx(7, 7, 3, 3, CV_SHAPE_RECT, NULL);
    
    //Default number of iterations is 1. We'll do a few iterations to make the effect more pronounced.
    cvMorphologyEx(convertedImage, convertedImage, NULL, (IplConvKernel *)closeKernel, CV_MOP_CLOSE, 3);
    
    return convertedImage->imageDataOrigin;
}
/*	The function will return the connected components in 'comp', 
	as well as the number of connected components 'nc'.
	At this point, we have to determine whether the components are eye pair or not.
	We'll use experimentally derived heuristics for this, based on the width, 
	height, vertical distance, and horizontal distance of the components
	(a sketch of such a test follows this function).
	To make things simple, we only proceed if the number of the connected components is 2.*/
int get_connected_components(IplImage* img, IplImage* prev, CvRect window, CvSeq** comp)
{
		IplImage* _diff;
 
		cvZero(diff);
 
    /* apply search window to images */
		cvSetImageROI(img, window);
		cvSetImageROI(prev, window);
		cvSetImageROI(diff, window);
 
    /* motion analysis */
		cvSub(img, prev, diff, NULL);
		cvThreshold(diff, diff, 5, 255, CV_THRESH_BINARY);
		cvMorphologyEx(diff, diff, NULL, kernel, CV_MOP_OPEN, 1);
 
    /* reset search window */
		cvResetImageROI(img);
		cvResetImageROI(prev);
		cvResetImageROI(diff);
 
		_diff = cvCloneImage(diff);
 
    /* get connected components */
		int nc = cvFindContours(_diff, storage, comp, sizeof(CvContour),
                            CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0));
 
		cvClearMemStorage(storage);		
		cvReleaseImage(&_diff);
	
		return nc;
}
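
The comment above mentions width/height/distance heuristics, but the test itself is not shown. A sketch of what such a check might look like; every threshold below is an illustrative assumption, not a value from the original:

/* Hypothetical sketch: decide whether two connected components look like an
   eye pair. All thresholds are illustrative assumptions; requires <stdlib.h>. */
int is_eye_pair(CvRect a, CvRect b)
{
	/* similar width and height */
	if (abs(a.width - b.width) > 5 || abs(a.height - b.height) > 5)
		return 0;
	/* roughly level vertically */
	if (abs(a.y - b.y) > 5)
		return 0;
	/* plausible horizontal separation relative to component width */
	int dx = abs((a.x + a.width/2) - (b.x + b.width/2));
	if (dx < a.width || dx > a.width * 5)
		return 0;
	return 1;
}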
Example #11
void BlobTracking::process(const cv::Mat &img_input, const cv::Mat &img_mask, cv::Mat &img_output)
{
  if(img_input.empty() || img_mask.empty())
    return;

  loadConfig();

  if(firstTime)
    saveConfig();

  IplImage* frame = new IplImage(img_input);
  cvConvertScale(frame, frame, 1, 0);

  IplImage* segmentated = new IplImage(img_mask);
  
  IplConvKernel* morphKernel = cvCreateStructuringElementEx(5, 5, 1, 1, CV_SHAPE_RECT, NULL);
  cvMorphologyEx(segmentated, segmentated, NULL, morphKernel, CV_MOP_OPEN, 1);

  if(showBlobMask)
    cvShowImage("Blob Mask", segmentated);

  IplImage* labelImg = cvCreateImage(cvGetSize(frame), IPL_DEPTH_LABEL, 1);

  cvb::CvBlobs blobs;
  unsigned int result = cvb::cvLabel(segmentated, labelImg, blobs);
  
  //cvb::cvFilterByArea(blobs, 500, 1000000);
  cvb::cvFilterByArea(blobs, minArea, maxArea);
  
  //cvb::cvRenderBlobs(labelImg, blobs, frame, frame, CV_BLOB_RENDER_BOUNDING_BOX);
  if(debugBlob)
    cvb::cvRenderBlobs(labelImg, blobs, frame, frame, CV_BLOB_RENDER_BOUNDING_BOX|CV_BLOB_RENDER_CENTROID|CV_BLOB_RENDER_ANGLE|CV_BLOB_RENDER_TO_STD);
  else
    cvb::cvRenderBlobs(labelImg, blobs, frame, frame, CV_BLOB_RENDER_BOUNDING_BOX|CV_BLOB_RENDER_CENTROID|CV_BLOB_RENDER_ANGLE);

  cvb::cvUpdateTracks(blobs, tracks, 200., 5);
  
  if(debugTrack)
    cvb::cvRenderTracks(tracks, frame, frame, CV_TRACK_RENDER_ID|CV_TRACK_RENDER_BOUNDING_BOX|CV_TRACK_RENDER_TO_STD);
  else
    cvb::cvRenderTracks(tracks, frame, frame, CV_TRACK_RENDER_ID|CV_TRACK_RENDER_BOUNDING_BOX);
  
  //std::map<CvID, CvTrack *> CvTracks

  if(showOutput)
    cvShowImage("Blob Tracking", frame);

  cv::Mat img_result(frame);
  img_result.copyTo(img_output);

  //cvReleaseImage(&frame);
  //cvReleaseImage(&segmentated);
  cvReleaseImage(&labelImg);
  delete frame;
  delete segmentated;
  cvReleaseBlobs(blobs);
  cvReleaseStructuringElement(&morphKernel);

  firstTime = false;
}
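
The stray comment above notes that tracks is a std::map<CvID, CvTrack*>. If the raw track data is wanted instead of the rendered frame, iterating it might look like this; a sketch only, and the CvTrack fields used (centroid, inactive) follow cvblob's public headers as far as known, so verify against your cvblob version:

// Sketch: reading track positions directly from cvb::CvTracks. Requires <iostream>.
for (cvb::CvTracks::const_iterator it = tracks.begin(); it != tracks.end(); ++it)
{
  const cvb::CvTrack *track = it->second;
  if (track->inactive == 0)   // only tracks seen in the current frame
    std::cout << "track " << it->first << " at ("
              << track->centroid.x << ", " << track->centroid.y << ")\n";
}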
Example #12
IplImage *contoursGetOutlineMorh(IplImage *src, IplImage *temp, int mask)
{
    int radius = 3;
    int cols = radius * 2 + 1;
    int rows = cols;
    IplImage *res;
    IplImage *bin  = cvCreateImage(cvGetSize(src), src->depth, 1);

    cvAdaptiveThreshold(src, bin, 255, CV_ADAPTIVE_THRESH_GAUSSIAN_C, CV_THRESH_BINARY, 7, 1);

    if (mask == 1) {
        IplImage *otsu_mask = cvCreateImage(cvGetSize(src), src->depth, 1); // renamed to avoid shadowing the "mask" parameter
        res = cvCreateImage(cvGetSize(src), src->depth, 1);
        cvThreshold(src, otsu_mask, 0, 255, CV_THRESH_BINARY_INV + CV_THRESH_OTSU);
        cvOr(bin, otsu_mask, res, NULL);

        cvReleaseImage(&otsu_mask);
        cvReleaseImage(&bin); // bin is no longer needed on this path
    } else {
        res = bin;
    }

    IplConvKernel *element = cvCreateStructuringElementEx(cols, rows, radius, radius, CV_SHAPE_ELLIPSE, NULL);

    cvMorphologyEx(res, res, temp, element, CV_MOP_OPEN, 1);
    cvReleaseStructuringElement(&element);

    radius = 9;
    cols = radius * 2 + 1;
    rows = cols;
    element = cvCreateStructuringElementEx(cols, rows, radius, radius, CV_SHAPE_ELLIPSE, NULL);
    cvMorphologyEx(res, res, temp, element, CV_MOP_CLOSE, 1);
    cvReleaseStructuringElement(&element);

    radius = 7;
    cols = radius * 2 + 1;
    rows = cols;
    element = cvCreateStructuringElementEx(cols, rows, radius, radius, CV_SHAPE_ELLIPSE, NULL);
    cvErode(res, res, element, 1);
    cvDilate(res, res, element, 1);

    contoursDrawBorder(res);

    cvReleaseStructuringElement(&element);
    cvReleaseImage(&temp);

    return res;
}
Example #13
void TamatarVision::update() {
    vidGrabber.grabFrame();
    if (vidGrabber.isFrameNew()) {
        // load image from videograbber
        colorImg.setFromPixels(vidGrabber.getPixels(), camWidth, camHeight);
        // convert to grayscale
        cvCvtColor( colorImg.getCvImage(), grayImg.getCvImage(), CV_RGB2GRAY );
        grayImg.flagImageChanged();
        
        // equalize histogram
        if (doHistEqualize) {
            cvEqualizeHist(grayImg.getCvImage(), grayImg.getCvImage() );
        }
        
        // morphological opening
        if (doMorphEx) {
            int anchor = morphExRadius / 2;
            structure = cvCreateStructuringElementEx(morphExRadius, morphExRadius, anchor, anchor, CV_SHAPE_ELLIPSE);
            cvCopy(grayImg.getCvImage(), grayImg2.getCvImage());
            cvMorphologyEx(grayImg2.getCvImage(), grayImg.getCvImage(), NULL, structure, CV_MOP_OPEN);
            cvReleaseStructuringElement(&structure); // avoid leaking one element per frame
        }
        
        if (doSmoothing) {
            //grayImg2 = grayImg;
            //smoothSigmaColor=20;
            //smoothSigmaSpatial=20;
            //cvSmooth(grayImg2.getCvImage(), grayImg.getCvImage(), CV_BILATERAL, 9, 9, smoothSigmaColor, smoothSigmaSpatial);
            cvSmooth(grayImg.getCvImage(), grayImg.getCvImage(), CV_GAUSSIAN, 3, 3, 2, 2);
        }
        
        //grayImg.threshold(120);
        
        // threshold
        if (doThreshold) {
            //            grayImg.threshold(threshold);
            grayImg2 = grayImg;
            cvThreshold(grayImg2.getCvImage(), grayImg.getCvImage(), threshold, thresholdMax, CV_THRESH_TOZERO);
            //   cvAdaptiveThreshold(grayImg2.getCvImage(), grayImg.getCvImage(), threshold, CV_ADAPTIVE_THRESH_MEAN_C, CV_THRESH_, 3, 5);
        }
        
        if (doCanny) {
            cvCanny(grayImg.getCvImage(), grayImg.getCvImage(), cannyThres1, cannyThres2, 3);
        }
        
        //cvCanny(grayImg.getCvImage(), grayImg.getCvImage(), 120, 180, 3);
        //cvSobel(grayImg.getCvImage(), grayImg.getCvImage(), 1, 1, 3);
        if (doCircles) {
            CvMemStorage* storage = cvCreateMemStorage(0);
            circles = cvHoughCircles(grayImg.getCvImage(), storage, CV_HOUGH_GRADIENT, 2, grayImg.getHeight()/4, circleEdgeThres, circleAccThres, circleMinRadius, circleMaxRadius);
        }
        
        if (doContours) {
            contourFinder.findContours(grayImg, 10, (camWidth*camHeight)/2, 20, false, true);
        }
    }        
}
Example #14
gboolean get_frame_difference( IplImage* in, IplImage* inprev, IplImage* output)
{
  cvSmooth(in,     in,     CV_GAUSSIAN, 5);
  cvSmooth(inprev, inprev, CV_GAUSSIAN, 5);

  cvAbsDiff( in, inprev, output);
  cvThreshold( output, output, 5, 255, CV_THRESH_BINARY);
  cvMorphologyEx( output, output, 0, 0, CV_MOP_CLOSE, 1 );
  return(TRUE);
}
int catcierge_haar_matcher_find_prey(catcierge_haar_matcher_t *ctx,
									IplImage *img, IplImage *thr_img,
									match_result_t *result, int save_steps)
{
	catcierge_haar_matcher_args_t *args = ctx->args;
	IplImage *thr_img2 = NULL;
	CvSeq *contours = NULL;
	size_t contour_count = 0;
	assert(ctx);
	assert(img);
	assert(ctx->args);

	// thr_img is modified by FindContours so we clone it first.
	thr_img2 = cvCloneImage(thr_img);

	cvFindContours(thr_img, ctx->storage, &contours,
		sizeof(CvContour), CV_RETR_LIST, CV_CHAIN_APPROX_NONE, cvPoint(0, 0));

	// If we get more than 1 contour we count it as a prey. At least something
	// is intersecting the white area to split up the image.
	contour_count = catcierge_haar_matcher_count_contours(ctx, contours);

	// If we didn't find any prey, retry on an eroded and opened image.
	if ((args->prey_steps >= 2) && (contour_count == 1))
	{
		IplImage *erod_img = NULL;
		IplImage *open_img = NULL;
		CvSeq *contours2 = NULL;

		erod_img = cvCreateImage(cvGetSize(thr_img2), 8, 1);
		cvErode(thr_img2, erod_img, ctx->kernel3x3, 3);
		if (ctx->super.debug) cvShowImage("haar eroded img", erod_img);

		open_img = cvCreateImage(cvGetSize(thr_img2), 8, 1);
		cvMorphologyEx(erod_img, open_img, NULL, ctx->kernel5x1, CV_MOP_OPEN, 1);
		if (ctx->super.debug) cvShowImage("haar opened img", erod_img);

		cvFindContours(erod_img, ctx->storage, &contours2,
			sizeof(CvContour), CV_RETR_LIST, CV_CHAIN_APPROX_NONE, cvPoint(0, 0));
		cvReleaseImage(&erod_img);
		cvReleaseImage(&open_img);

		contour_count = catcierge_haar_matcher_count_contours(ctx, contours2);
	}

	if (ctx->super.debug)
	{
		cvDrawContours(img, contours, cvScalarAll(0), cvScalarAll(0), 1, 1, 8, cvPoint(0, 0));
		cvShowImage("Haar Contours", img);
	}

	cvReleaseImage(&thr_img2);

	return (contour_count > 1);
}
Example #16
void work(int)
{
	cvReleaseImage(&dst);
	if(isColor)
	{
		dst = cvCloneImage(img);
		if(times != 0)
			cvMorphologyEx(img, dst, 0, 0, CV_MOP_CLOSE, times);
		
	}//end if
	else
	{
		IplImage *gray = cvCreateImage(cvGetSize(img), img->depth, 1);
		cvCvtColor(img, gray, CV_BGR2GRAY);
		dst = cvCloneImage(gray);

		if(times != 0)
			cvMorphologyEx(gray, dst, 0, 0, CV_MOP_CLOSE, times);
		cvReleaseImage(&gray);
	}
	cvShowImage(windowName, dst);
}//end work
// Locate the red pixels in the source image and return the percentage of red points found.
int find_spoons( IplImage* source, IplImage* result, IplImage* temp )
{
    int red_point_count = 0;
    int width_step=source->widthStep;
    int pixel_step=source->widthStep/source->width;
    int number_channels=source->nChannels;
    cvZero( result );
    unsigned char white_pixel[4] = {255,255,255,0};
    int row=0,col=0;
    // Find all red points in the image
    for (row=0; row < result->height; row++)
        for (col=0; col < result->width; col++)
        {
            unsigned char* curr_point = GETPIXELPTRMACRO( source, col, row, width_step, pixel_step );
            if ((curr_point[RED_CH] >= THRESHOLD) && ((curr_point[BLUE_CH] < THRESHOLD) || (curr_point[GREEN_CH] < THRESHOLD)))
            {
                PUTPIXELMACRO( result, col, row, white_pixel, width_step, pixel_step, number_channels );
            }
        }

    // Apply morphological opening and closing operations to clean up the image
    cvMorphologyEx( result, temp, NULL, NULL, CV_MOP_OPEN, 3 );
    cvMorphologyEx( temp, result, NULL, NULL, CV_MOP_CLOSE, 3 );

    // Count the red points remaining
    for (row=0; row < result->height; row++)
        for (col=0; col < result->width; col++)
        {
            unsigned char* curr_point = GETPIXELPTRMACRO( result, col, row, width_step, pixel_step );
            if (curr_point[RED_CH] == 255)
            {
                red_point_count++;
            }
        }

    return (red_point_count*100) / (result->height*result->width);
}
int main(int argc, char** argv) {
	cvNamedWindow("image");
	IplImage * src = cvLoadImage("../cvtest/7newsample.jpg", 0);

	IplImage * temp = cvCreateImage(cvGetSize(src), 8,1);
	IplImage * img=cvCreateImage(cvGetSize(src), 8, 1);
	cvCopyImage(src,temp);
	cvCopyImage(src, img);
	cvSmooth(img,img);
	IplConvKernel *element = 0; // pointer to the morphological structuring element
	element = cvCreateStructuringElementEx(3,3, 1, 1, CV_SHAPE_ELLIPSE, 0);//3,5,7
	cvErode( src, src, element);
	//morphological gradient
	cvMorphologyEx(
		src,
		img,
		temp,   // temp buffer must differ from dst for CV_MOP_GRADIENT, so reuse "temp" rather than passing img twice
		element,//NULL, //default 3*3
		CV_MOP_GRADIENT,
		1);

	cvShowImage("image", img);
	cvWaitKey(0);
	IplImage* image=img; //= cvLoadImage( 		"../cvtest/7newsample.jpg",		CV_LOAD_IMAGE_GRAYSCALE		);
	IplImage* src2 =img;//= cvLoadImage( "../cvtest/7newsample.jpg"); //Changed for prettier show in color
	CvMemStorage* storage = cvCreateMemStorage(0);
	cvSmooth(image, image, CV_GAUSSIAN, 5, 5 );
	CvSeq* results = cvHoughCircles( 
		image, 
		storage, 
		CV_HOUGH_GRADIENT, 
		4, 
		image->width/10 
		); 
	for( int i = 0; i < results->total; i++ ) {
		float* p = (float*) cvGetSeqElem( results, i );
		CvPoint pt = cvPoint( cvRound( p[0] ), cvRound( p[1] ) );
		cvCircle( 
			src2,
			pt, 
			cvRound( p[2] ),
			CV_RGB(0xff,0,0) 
			);
	}
	cvNamedWindow( "cvHoughCircles", 1 );
	cvShowImage( "cvHoughCircles", src2);
	cvWaitKey(0);
}
void test_opencv_min_max(){

	IplImage * rimage = cvCreateImage(cvGetSize(image),image->depth,image->nChannels); 
	IplImage * timage = cvCreateImage(cvGetSize(image),image->depth,image->nChannels); 
	IplConvKernel * kernel = cvCreateStructuringElementEx(3,3,1,1,CV_SHAPE_RECT);
	//cvErode(image,min_image,kernel);
	//cvDilate(image,max_image,kernel);
	//display_image("dilate",min_image);
	//display_image("erode",max_image);
	tester->start_timer();
	cvMorphologyEx(image,rimage,timage,kernel,CV_MOP_GRADIENT);
	tester->stop_timer();
	display_image("edges",rimage);
	cvSaveImage("edges.png",rimage);

}
int main( int argc, char* argv[] ) {

    // Load an image of a scene and convert it to grayscale.
    IplImage* src = cvLoadImage( argv[1], CV_LOAD_IMAGE_GRAYSCALE );

    // Run the morphological Top Hat operation on your image and display the
    // results.
    IplImage* dst_a = cvCloneImage(src);
    cvMorphologyEx(src, dst_a, NULL, NULL, CV_MOP_TOPHAT, 1);

    // Convert the resulting image into an 8-bit mask.
    // TODO: understand this!
    IplImage *dst_b = cvCreateImage( cvSize(dst_a->width, dst_a->height), IPL_DEPTH_8U, 1 );
    cvCopy( dst_a, dst_b );
    cvThreshold( dst_b, dst_b, 25, 255, CV_THRESH_BINARY );

    // Copy a grayscale value into the Top Hat pieces and display the results.
    // TODO: understand this!
    IplImage *img_black = cvCreateImage( cvSize(src->width, src->height), IPL_DEPTH_8U, 1 );
    IplImage *dst_c     = cvCreateImage( cvSize(src->width, src->height), IPL_DEPTH_8U, 1 );
    cvZero( img_black );
    cvSet( dst_c, cvScalarAll(200) );
    cvCopy( img_black, dst_c, dst_b );

    cvNamedWindow( "Original", CV_WINDOW_AUTOSIZE );
    cvNamedWindow( "Top Hat", CV_WINDOW_AUTOSIZE );
    cvNamedWindow( "8-bit mask", CV_WINDOW_AUTOSIZE );
    cvNamedWindow( "Grayscale value copied into Top Hat pieces", CV_WINDOW_AUTOSIZE );
    cvShowImage( "Original", src );
    cvShowImage( "Top Hat", dst_a );
    cvShowImage( "8-bit mask", dst_b );
    cvShowImage( "Grayscale value copied into Top Hat pieces", dst_c );

    cvWaitKey(0);

    cvReleaseImage( &src );
    cvReleaseImage( &dst_a );
    cvReleaseImage( &dst_b );
    cvDestroyWindow( "Original" );
    cvDestroyWindow( "Top Hat" );
    cvDestroyWindow( "8-bit mask" );
    cvDestroyWindow( "Grayscale value copied into Top Hat pieces" );

    return 0;

}
int main(){
    
    //initialize
    int erosions = 1;
    
    IplImage *img=0;
    IplImage *noise=0;
    IplImage *dst;
    
    //load image
    img = cvLoadImage("/Users/ihong-gyu/MyProject/OpenCVTest/Lena.jpeg",0);
    dst = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 1);
    
    //create a window
    cvNamedWindow("Original Image",CV_WINDOW_AUTOSIZE);
    cvNamedWindow("Impulse Noisy Image",CV_WINDOW_AUTOSIZE);
    cvNamedWindow("Dilated Image",CV_WINDOW_AUTOSIZE);
    
    
    //show the image
    cvShowImage("Original Image", img);
    
    //call ImpulseNoise function
    IplConvKernel *element = cvCreateStructuringElementEx(3,3,1,1,CV_SHAPE_RECT);
    
    noise=ImpulseNoise(img);
    
    cvShowImage("Impulse Noisy Image", noise);
    
    //remove noise by the closing operation
    cvMorphologyEx(noise,dst,NULL,element,CV_MOP_CLOSE,1);
    
    cvShowImage("Dilated Image", dst);
    
    //wait for a key
    cvWaitKey(0);
    
    
    //release the images and the structuring element
    cvReleaseImage(&img);
    cvReleaseImage(&noise);
    cvReleaseImage(&dst);
    cvReleaseStructuringElement(&element);
    
    
    
    return 0;
}
Example #22
bool _stdcall opencvProcess(LPWSTR csInputPath, LPWSTR csOutputPath)
{
	char inputPath[SIZE] = "";
	WideCharToMultiByte(950, 0, csInputPath, -1, inputPath, SIZE, NULL, NULL);//wchar_t* to char*
	char outputPath[SIZE] = "";
	WideCharToMultiByte(950, 0, csOutputPath, -1, outputPath, SIZE, NULL, NULL);//wchar_t * to char *

	//load image
	IplImage *img = cvLoadImage(inputPath, 0);

	if(!img)
		return false;
	else
	{
		cvMorphologyEx(img, img, 0, 0, CV_MOP_OPEN, 1);
		cvSaveImage(outputPath, img);
		cvReleaseImage(&img);
		return true;
	}//end else
	return false;
}//end opencvProcess
DMZ_INTERNAL void prepare_image_for_cat(IplImage *image, IplImage *as_float, CharacterRectListIterator rect) {
  // Input image: IPL_DEPTH_8U [0 - 255]
  // Data for models: IPL_DEPTH_32F [0.0 - 1.0]
  
  cvSetImageROI(image, cvRect(rect->left, rect->top, kTrimmedCharacterImageWidth, kTrimmedCharacterImageHeight));
  
  // TODO: optimize this a lot!
  
  // Gradient
  IplImage *filtered_image = cvCreateImage(cvSize(kTrimmedCharacterImageWidth, kTrimmedCharacterImageHeight), IPL_DEPTH_8U, 1);
  //llcv_morph_grad3_2d_cross_u8(image, filtered_image);
  IplConvKernel *kernel = cvCreateStructuringElementEx(3, 3, 1, 1, CV_SHAPE_CROSS, NULL);
  cvMorphologyEx(image, filtered_image, NULL, kernel, CV_MOP_GRADIENT, 1);
  cvReleaseStructuringElement(&kernel);
  
  // Equalize
  llcv_equalize_hist(filtered_image, filtered_image);
  
  // Bilateral filter
  int aperture = 3;
  double space_sigma = (aperture / 2.0 - 1) * 0.3 + 0.8;
  double color_sigma = (aperture - 1) / 3.0;
  IplImage *smoothed_image = cvCreateImage(cvSize(kTrimmedCharacterImageWidth, kTrimmedCharacterImageHeight), IPL_DEPTH_8U, 1);
  cvSmooth(filtered_image, smoothed_image, CV_BILATERAL, aperture, aperture, space_sigma, color_sigma);
  
  // Convert to float
  cvConvertScale(smoothed_image, as_float, 1.0f / 255.0f, 0);
  
  cvReleaseImage(&smoothed_image);
  cvReleaseImage(&filtered_image);
  
  cvResetImageROI(image);

#if DEBUG_EXPIRY_CATEGORIZATION_PERFORMANCE
  dmz_debug_timer_print("prepare image", 2);
#endif
}
int catcierge_haar_matcher_find_prey_adaptive(catcierge_haar_matcher_t *ctx,
											IplImage *img, IplImage *inv_thr_img,
											match_result_t *result, int save_steps)
{
	IplImage *inv_adpthr_img = NULL;
	IplImage *inv_combined = NULL;
	IplImage *open_combined = NULL;
	IplImage *dilate_combined = NULL;
	CvSeq *contours = NULL;
	size_t contour_count = 0;
	CvSize img_size;
	assert(ctx);
	assert(img);
	assert(ctx->args);

	img_size = cvGetSize(img);

	// We expect to be given an inverted global thresholded image (inv_thr_img)
	// that contains the rough cat profile.

	// Do an inverted adaptive threshold of the original image as well.
	// This brings out small details such as a mouse tail that fades
	// into the background during a global threshold.
	inv_adpthr_img = cvCreateImage(img_size, 8, 1);
	cvAdaptiveThreshold(img, inv_adpthr_img, 255,
		CV_ADAPTIVE_THRESH_GAUSSIAN_C, CV_THRESH_BINARY_INV, 11, 5);
	catcierge_haar_matcher_save_step_image(ctx,
		inv_adpthr_img, result, "adp_thresh", "Inverted adaptive threshold", save_steps);

	// Now we can combine the two thresholded images into one.
	inv_combined = cvCreateImage(img_size, 8, 1);
	cvAdd(inv_thr_img, inv_adpthr_img, inv_combined, NULL);
	catcierge_haar_matcher_save_step_image(ctx,
		inv_combined, result, "inv_combined", "Combined global and adaptive threshold", save_steps);

	// Get rid of noise from the adaptive threshold.
	open_combined = cvCreateImage(img_size, 8, 1);
	cvMorphologyEx(inv_combined, open_combined, NULL, ctx->kernel2x2, CV_MOP_OPEN, 2);
	catcierge_haar_matcher_save_step_image(ctx,
		open_combined, result, "opened", "Opened image", save_steps);

	dilate_combined = cvCreateImage(img_size, 8, 1);
	cvDilate(open_combined, dilate_combined, ctx->kernel3x3, 3);
	catcierge_haar_matcher_save_step_image(ctx,
		dilate_combined, result, "dilated", "Dilated image", save_steps);

	// Invert back the result so the background is white again.
	cvNot(dilate_combined, dilate_combined);
	catcierge_haar_matcher_save_step_image(ctx,
		dilate_combined, result, "combined", "Combined binary image", save_steps);

	cvFindContours(dilate_combined, ctx->storage, &contours,
		sizeof(CvContour), CV_RETR_LIST, CV_CHAIN_APPROX_NONE, cvPoint(0, 0));

	// If we get more than 1 contour we count it as a prey.
	contour_count = catcierge_haar_matcher_count_contours(ctx, contours);

	if (save_steps)
	{
		IplImage *img_contour = cvCloneImage(img);
		IplImage *img_final_color = NULL;
		CvScalar color;

		cvDrawContours(img_contour, contours, cvScalarAll(255), cvScalarAll(0), 1, 1, 8, cvPoint(0, 0));
		catcierge_haar_matcher_save_step_image(ctx,
			img_contour, result, "contours", "Background contours", save_steps);

		// Draw a final color combined image with the Haar detection + contour.
		cvResetImageROI(img_contour);

		img_final_color =  cvCreateImage(cvGetSize(img_contour), 8, 3);

		cvCvtColor(img_contour, img_final_color, CV_GRAY2BGR);
		color = (contour_count > 1) ? CV_RGB(255, 0, 0) : CV_RGB(0, 255, 0);
		cvRectangleR(img_final_color, result->match_rects[0], color, 2, 8, 0);

		catcierge_haar_matcher_save_step_image(ctx,
			img_final_color, result, "final", "Final image", save_steps);

		cvReleaseImage(&img_contour);
		cvReleaseImage(&img_final_color);
	}

	cvReleaseImage(&inv_adpthr_img);
	cvReleaseImage(&inv_combined);
	cvReleaseImage(&open_combined);
	cvReleaseImage(&dilate_combined);

	return (contour_count > 1);
}
Example #25
IplImage * find_macbeth( const char *img )
{
    IplImage * macbeth_img = cvLoadImage( img,
        CV_LOAD_IMAGE_ANYCOLOR|CV_LOAD_IMAGE_ANYDEPTH );
    if( !macbeth_img )
        return NULL;    // bail out before dereferencing; the original null check came after first use

    IplImage * macbeth_original = cvCreateImage( cvSize(macbeth_img->width, macbeth_img->height), macbeth_img->depth, macbeth_img->nChannels );
    cvCopy(macbeth_img, macbeth_original);
        
    IplImage * macbeth_split[3];
    IplImage * macbeth_split_thresh[3];
    
    for(int i = 0; i < 3; i++) {
        macbeth_split[i] = cvCreateImage( cvSize(macbeth_img->width, macbeth_img->height), macbeth_img->depth, 1 );
        macbeth_split_thresh[i] = cvCreateImage( cvSize(macbeth_img->width, macbeth_img->height), macbeth_img->depth, 1 );
    }
    
    cvSplit(macbeth_img, macbeth_split[0], macbeth_split[1], macbeth_split[2], NULL);
    
    if( macbeth_img )
    {
        int adaptive_method = CV_ADAPTIVE_THRESH_MEAN_C;
        int threshold_type = CV_THRESH_BINARY_INV;
        int block_size = cvRound(
            MIN(macbeth_img->width,macbeth_img->height)*0.02)|1;
        fprintf(stderr,"Using %d as block size\n", block_size);
        
        double offset = 6;
        
        // do an adaptive threshold on each channel
        for(int i = 0; i < 3; i++) {
            cvAdaptiveThreshold(macbeth_split[i], macbeth_split_thresh[i], 255, adaptive_method, threshold_type, block_size, offset);
        }
        
        IplImage * adaptive = cvCreateImage( cvSize(macbeth_img->width, macbeth_img->height), IPL_DEPTH_8U, 1 );
        
        // OR the binary threshold results together
        cvOr(macbeth_split_thresh[0],macbeth_split_thresh[1],adaptive);
        cvOr(macbeth_split_thresh[2],adaptive,adaptive);
        
        for(int i = 0; i < 3; i++) {
            cvReleaseImage( &(macbeth_split[i]) );
            cvReleaseImage( &(macbeth_split_thresh[i]) );
        }
                
        int element_size = (block_size/10)+2;
        fprintf(stderr,"Using %d as element size\n", element_size);
        
        // do an opening on the threshold image
        IplConvKernel * element = cvCreateStructuringElementEx(element_size,element_size,element_size/2,element_size/2,CV_SHAPE_RECT);
        cvMorphologyEx(adaptive,adaptive,NULL,element,CV_MOP_OPEN);
        cvReleaseStructuringElement(&element);
        
        CvMemStorage* storage = cvCreateMemStorage(0);
        
        CvSeq* initial_quads = cvCreateSeq( 0, sizeof(*initial_quads), sizeof(void*), storage );
        CvSeq* initial_boxes = cvCreateSeq( 0, sizeof(*initial_boxes), sizeof(CvBox2D), storage );
        
        // find contours in the threshold image
        CvSeq * contours = NULL;
        cvFindContours(adaptive,storage,&contours);
        
        int min_size = (macbeth_img->width*macbeth_img->height)/
            (MACBETH_SQUARES*100);
        
        if(contours) {
            int count = 0;
            
            for( CvSeq* c = contours; c != NULL; c = c->h_next) {
                CvRect rect = ((CvContour*)c)->rect;
                // only interested in contours with these restrictions
                if(CV_IS_SEQ_HOLE(c) && rect.width*rect.height >= min_size) {
                    // only interested in quad-like contours
                    CvSeq * quad_contour = find_quad(c, storage, min_size);
                    if(quad_contour) {
                        cvSeqPush( initial_quads, &quad_contour );
                        count++;
                        rect = ((CvContour*)quad_contour)->rect;
                        
                        CvScalar average = contour_average((CvContour*)quad_contour, macbeth_img);
                        
                        CvBox2D box = cvMinAreaRect2(quad_contour,storage);
                        cvSeqPush( initial_boxes, &box );
                        
                        // fprintf(stderr,"Center: %f %f\n", box.center.x, box.center.y);
                        
                        double min_distance = MAX_RGB_DISTANCE;
                        CvPoint closest_color_idx = cvPoint(-1,-1);
                        for(int y = 0; y < MACBETH_HEIGHT; y++) {
                            for(int x = 0; x < MACBETH_WIDTH; x++) {
                                double distance = euclidean_distance_lab(average,colorchecker_srgb[y][x]);
                                if(distance < min_distance) {
                                    closest_color_idx.x = x;
                                    closest_color_idx.y = y;
                                    min_distance = distance;
                                }
                            }
                        }
                        
                        CvScalar closest_color = colorchecker_srgb[closest_color_idx.y][closest_color_idx.x];
                        // fprintf(stderr,"Closest color: %f %f %f (%d %d)\n",
                        //     closest_color.val[2],
                        //     closest_color.val[1],
                        //     closest_color.val[0],
                        //     closest_color_idx.x,
                        //     closest_color_idx.y
                        // );
                        
                        // cvDrawContours(
                        //     macbeth_img,
                        //     quad_contour,
                        //     cvScalar(255,0,0),
                        //     cvScalar(0,0,255),
                        //     0,
                        //     element_size
                        // );
                        // cvCircle(
                        //     macbeth_img,
                        //     cvPointFrom32f(box.center),
                        //     element_size*6,
                        //     cvScalarAll(255),
                        //     -1
                        // );
                        // cvCircle(
                        //     macbeth_img,
                        //     cvPointFrom32f(box.center),
                        //     element_size*6,
                        //     closest_color,
                        //     -1
                        // );
                        // cvCircle(
                        //     macbeth_img,
                        //     cvPointFrom32f(box.center),
                        //     element_size*4,
                        //     average,
                        //     -1
                        // );
                        // CvRect rect = contained_rectangle(box);
                        // cvRectangle(
                        //     macbeth_img,
                        //     cvPoint(rect.x,rect.y),
                        //     cvPoint(rect.x+rect.width, rect.y+rect.height),
                        //     cvScalarAll(0),
                        //     element_size
                        // );
                    }
                }
            }
            
            ColorChecker found_colorchecker;

            fprintf(stderr,"%d initial quads found", initial_quads->total);
            if(count > MACBETH_SQUARES) {
                fprintf(stderr," (probably a Passport)\n");
                
                CvMat* points = cvCreateMat( initial_quads->total , 1, CV_32FC2 );
                CvMat* clusters = cvCreateMat( initial_quads->total , 1, CV_32SC1 );
                
                CvSeq* partitioned_quads[2];
                CvSeq* partitioned_boxes[2];
                for(int i = 0; i < 2; i++) {
                    partitioned_quads[i] = cvCreateSeq( 0, sizeof(**partitioned_quads), sizeof(void*), storage );
                    partitioned_boxes[i] = cvCreateSeq( 0, sizeof(**partitioned_boxes), sizeof(CvBox2D), storage );
                }
                
                // set up the points sequence for cvKMeans2, using the box centers
                for(int i = 0; i < initial_quads->total; i++) {
                    CvBox2D box = (*(CvBox2D*)cvGetSeqElem(initial_boxes, i));
                    
                    cvSet1D(points, i, cvScalar(box.center.x,box.center.y));
                }
                
                // partition into two clusters: passport and colorchecker
                cvKMeans2( points, 2, clusters, 
                           cvTermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER,
                                           10, 1.0 ) );
        
                for(int i = 0; i < initial_quads->total; i++) {
                    CvPoint2D32f pt = ((CvPoint2D32f*)points->data.fl)[i];
                    int cluster_idx = clusters->data.i[i];
                    
                    cvSeqPush( partitioned_quads[cluster_idx],
                               cvGetSeqElem(initial_quads, i) );
                    cvSeqPush( partitioned_boxes[cluster_idx],
                               cvGetSeqElem(initial_boxes, i) );

                    // cvCircle(
                    //     macbeth_img,
                    //     cvPointFrom32f(pt),
                    //     element_size*2,
                    //     cvScalar(255*cluster_idx,0,255-(255*cluster_idx)),
                    //     -1
                    // );
                }
                
                ColorChecker partitioned_checkers[2];
                
                // check each of the two partitioned sets for the best colorchecker
                for(int i = 0; i < 2; i++) {
                    partitioned_checkers[i] =
                        find_colorchecker(partitioned_quads[i], partitioned_boxes[i],
                                      storage, macbeth_img, macbeth_original);
                }
                
                // use the colorchecker with the lowest error
                found_colorchecker = partitioned_checkers[0].error < partitioned_checkers[1].error ?
                    partitioned_checkers[0] : partitioned_checkers[1];
                
                cvReleaseMat( &points );
                cvReleaseMat( &clusters );
            }
            else { // just one colorchecker to test
                fprintf(stderr,"\n");
                found_colorchecker = find_colorchecker(initial_quads, initial_boxes,
                                  storage, macbeth_img, macbeth_original);
            }
            
            // render the found colorchecker
            draw_colorchecker(found_colorchecker.values,found_colorchecker.points,macbeth_img,found_colorchecker.size);
            
            // print out the colorchecker info
            for(int y = 0; y < MACBETH_HEIGHT; y++) {            
                for(int x = 0; x < MACBETH_WIDTH; x++) {
                    CvScalar this_value = cvGet2D(found_colorchecker.values,y,x);
                    CvScalar this_point = cvGet2D(found_colorchecker.points,y,x);
                    
                    printf("%.0f,%.0f,%.0f,%.0f,%.0f\n",
                        this_point.val[0],this_point.val[1],
                        this_value.val[2],this_value.val[1],this_value.val[0]);
                }
            }
            printf("%0.f\n%f\n",found_colorchecker.size,found_colorchecker.error);
            
        }
                
        cvReleaseMemStorage( &storage );
        
        if( macbeth_original ) cvReleaseImage( &macbeth_original );
        if( adaptive ) cvReleaseImage( &adaptive );
        
        return macbeth_img;
    }

    if( macbeth_img ) cvReleaseImage( &macbeth_img );

    return NULL;
}
void connectComponent(IplImage* src, const int poly_hull0, const float perimScale, int *num,
		vector<CvRect> &rects, vector<CvPoint> &centers) {

	/*
	 * Pre : "src"        :is the input image
	 *       "poly_hull0" :is usually set to 1
	 *       "perimScale" :defines how big connected component will be retained, bigger
	 *                     the number, more components are retained (100)
	 *
	 * Post: "num"        :defines how many connected component was found
	 *       "rects"      :the bounding box of each connected component
	 *       "centers"    :the center of each bounding box
	 */

	rects.clear();
	centers.clear();

	CvMemStorage* mem_storage = NULL;
	CvSeq* contours = NULL;

	// Clean up
	cvMorphologyEx(src, src, 0, 0, CV_MOP_OPEN, 1);
	cvMorphologyEx(src, src, 0, 0, CV_MOP_CLOSE, 1);

	// Find contours around only bigger regions
	mem_storage = cvCreateMemStorage(0);

	CvContourScanner scanner = cvStartFindContours(src, mem_storage, sizeof(CvContour),
			CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
	CvSeq* c;
	int numCont = 0;

	while ((c = cvFindNextContour(scanner)) != NULL) {

		double len = cvContourPerimeter(c);

		// calculate perimeter len threshold
		double q = (double) (src->height + src->width) / perimScale;

		// get rid of blob if its perimeter is too small
		if (len < q) {

			cvSubstituteContour(scanner, NULL);

		} else {

			// smooth its edge if its large enough
			CvSeq* c_new;
			if (poly_hull0) {

				// polygonal approximation
				c_new = cvApproxPoly(c, sizeof(CvContour), mem_storage, CV_POLY_APPROX_DP, 2, 0);

			} else {

				// convex hull of the segmentation
				c_new = cvConvexHull2(c, mem_storage, CV_CLOCKWISE, 1);

			}

			cvSubstituteContour(scanner, c_new);

			numCont++;
		}
	}

	contours = cvEndFindContours(&scanner);

	// Calc center of mass and/or bounding rectangles
	if (num != NULL) {

		// user wants to collect statistics
		int numFilled = 0, i = 0;

		for (i = 0, c = contours; c != NULL; c = c->h_next, i++) {

			if (i < *num) {

				// bounding rectangles around blobs

				rects.push_back(cvBoundingRect(c));

				CvPoint center = cvPoint(rects[i].x + rects[i].width / 2, rects[i].y
						+ rects[i].height / 2);
				centers.push_back(center);

				numFilled++;
			}
		}

		*num = numFilled;

	}

	cvReleaseMemStorage(&mem_storage);

}
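
A possible call site for connectComponent(), matching the Pre/Post contract documented above (the binary mask source is an assumption):

// Usage sketch: "mask" is assumed to be a single-channel binary foreground image.
std::vector<CvRect> rects;
std::vector<CvPoint> centers;
int num = 100;   // in: capacity to collect; out: number of components found
connectComponent(mask, 1, 100.0f, &num, rects, centers);
for (int i = 0; i < num; i++)
	cvRectangle(mask, cvPoint(rects[i].x, rects[i].y),
			cvPoint(rects[i].x + rects[i].width, rects[i].y + rects[i].height),
			cvScalarAll(255), 1, 8, 0);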
Example #27
//---------------------------------------------------------
bool COpenCV_Morphology::On_Execute(void)
{
	int			Type, Shape, Radius, Iterations;
	CSG_Grid	*pInput, *pOutput;

	pInput		= Parameters("INPUT")		->asGrid();
	pOutput		= Parameters("OUTPUT")		->asGrid();
	Type		= Parameters("TYPE")		->asInt();
	Shape		= Parameters("SHAPE")		->asInt();
	Radius		= Parameters("RADIUS")		->asInt();
	Iterations	= Parameters("ITERATIONS")	->asInt();

	//-----------------------------------------------------
	switch( Shape )
	{
	default:
	case 0:	Shape	= CV_SHAPE_ELLIPSE;	break;
	case 1:	Shape	= CV_SHAPE_RECT;	break;
	case 2:	Shape	= CV_SHAPE_CROSS;	break;
	}

	//-----------------------------------------------------
	IplImage	*cv_pInput	= Get_CVImage(pInput);
	IplImage	*cv_pOutput	= Get_CVImage(Get_NX(), Get_NY(), pInput->Get_Type());
	IplImage	*cv_pTmp	= NULL;

	//-----------------------------------------------------
	IplConvKernel	*cv_pElement	= cvCreateStructuringElementEx(Radius * 2 + 1, Radius * 2 + 1, Radius, Radius, Shape, 0);

	switch( Type )
	{
	case 0:	// dilation
		cvDilate		(cv_pInput, cv_pOutput, cv_pElement, Iterations);
		break;

	case 1:	// erosion
		cvErode			(cv_pInput, cv_pOutput, cv_pElement, Iterations);
		break;

	case 2:	// opening
		cvMorphologyEx	(cv_pInput, cv_pOutput, cv_pTmp,
			cv_pElement, CV_MOP_OPEN    , Iterations
		);
		break;

	case 3:	// closing
		cvMorphologyEx	(cv_pInput, cv_pOutput, cv_pTmp,
			cv_pElement, CV_MOP_CLOSE   , Iterations
		);
		break;

	case 4:	// morphological gradient
		cvMorphologyEx	(cv_pInput, cv_pOutput, cv_pTmp	= Get_CVImage(Get_NX(), Get_NY(), pInput->Get_Type()),
			cv_pElement, CV_MOP_GRADIENT, Iterations
		);
		break;

	case 5:	// top hat
		cvMorphologyEx	(cv_pInput, cv_pOutput, cv_pTmp	= Get_CVImage(Get_NX(), Get_NY(), pInput->Get_Type()),
			cv_pElement, CV_MOP_TOPHAT  , Iterations
		);
		break;

	case 6:	// black hat
		cvMorphologyEx	(cv_pInput, cv_pOutput, cv_pTmp	= Get_CVImage(Get_NX(), Get_NY(), pInput->Get_Type()),
			cv_pElement, CV_MOP_BLACKHAT, Iterations
		);
		break;
	}

	cvReleaseStructuringElement(&cv_pElement);

	//-----------------------------------------------------
	Copy_CVImage_To_Grid(pOutput, cv_pOutput);

    cvReleaseImage(&cv_pInput);
    cvReleaseImage(&cv_pOutput);

	if( cv_pTmp )
	{
		cvReleaseImage(&cv_pTmp);
	}

	pOutput->Set_Name(CSG_String::Format(SG_T("%s [%s]"), pInput->Get_Name(), Get_Name().c_str()));

	return( true );
}
Example #28
IplImage *preImg(std::string caminho) {

    int quadratico = 200;
    CvSize tamanho = cvSize(quadratico, quadratico);

    IplImage *in = cvLoadImage(caminho.c_str(), CV_LOAD_IMAGE_GRAYSCALE);

    IplImage *src = cvCreateImage(tamanho, in->depth, in->nChannels);
    IplImage *dst = cvCreateImage(tamanho, in->depth, in->nChannels);
    IplImage *fn = cvCreateImage(cvSize(mh, mw), in->depth, in->nChannels);

    cvResize(in, src);

    cvThreshold(src, src, 220, 255, CV_THRESH_BINARY);

    cvShowImage("tresh", src);

    cvCanny(src, src, 100, 120, 3);

    //cvShowImage("canny", src);

    IplConvKernel *dilateElement = cvCreateStructuringElementEx(4, 4, 0, 0, CV_SHAPE_RECT, 0);
    cvMorphologyEx(src, src, 0, dilateElement, CV_MOP_DILATE, 1); // CV_MOP_DILATE == cv::MORPH_DILATE
    cvReleaseStructuringElement(&dilateElement); // the element was previously created inline and leaked

    //cvShowImage("Dilatacao", src);

    std::vector<CvPoint> pontos;

    for (int y = 0; y < src->height; y++) {
        for (int x = 0; x < src->width; x++) {

            if (cvGet2D(src, x, y).val[0] == 255) {

                //axes are swapped here: cvGet2D(src, x, y) above reads row x, column y, so the matching point is (y, x)
                pontos.push_back(cvPoint(y, x));
            }

        }
    }

    std::sort(pontos.begin(), pontos.end(), sortPontos);

    CvPoint interpol = getInterpolado(pontos[0], pontos[pontos.size() - 1]);

//	CvScalar color = cvScalar(255, 255, 255);
//	int radius = 6;
//	int thickness = 2;
//
//	cvCircle(src, pontos[0], radius, color, thickness);
//
//	cvCircle(src, pontos[pontos.size() - 1], radius, color, thickness);

//cvCircle(src, interpol, radius, color, thickness);

//	std::cout << cvGetReal2D(src, pontos.begin()->x, pontos.begin()->y)
//			<< std::endl;

//	cvShowImage("teste", src);

//-----------------------------

    cvLogPolar(src, dst, cvPoint2D32f(interpol.x, interpol.y), 40,
               CV_INTER_LINEAR + CV_WARP_FILL_OUTLIERS);

    //cvNamedWindow("log-polar", 1);

    //cvShowImage("log-polar", dst);

    //cvShowImage("LogPolar",dst);

    cvResize(dst, fn);

    //cvShowImage("teste saida", fn);

    return fn;

}
// --------------------------------------------------------------------------
// main(Number of arguments, Argument values)
// Description  : This is the entry point of the program.
// Return value : SUCCESS:0  ERROR:-1
// --------------------------------------------------------------------------
int main(int argc, char **argv)
{
    // AR.Drone class
    ARDrone ardrone;

    // Initialize
    if (!ardrone.open()) {
        printf("Failed to initialize.\n");
        return -1;
    }

    // Kalman filter
    CvKalman *kalman = cvCreateKalman(4, 2);

    // Setup
    cvSetIdentity(kalman->measurement_matrix, cvRealScalar(1.0));
    cvSetIdentity(kalman->process_noise_cov, cvRealScalar(1e-5));
    cvSetIdentity(kalman->measurement_noise_cov, cvRealScalar(0.1));
    cvSetIdentity(kalman->error_cov_post, cvRealScalar(1.0));

    // Linear system
    kalman->DynamMatr[0]  = 1.0; kalman->DynamMatr[1]  = 0.0; kalman->DynamMatr[2]  = 1.0; kalman->DynamMatr[3]  = 0.0; 
    kalman->DynamMatr[4]  = 0.0; kalman->DynamMatr[5]  = 1.0; kalman->DynamMatr[6]  = 0.0; kalman->DynamMatr[7]  = 1.0; 
    kalman->DynamMatr[8]  = 0.0; kalman->DynamMatr[9]  = 0.0; kalman->DynamMatr[10] = 1.0; kalman->DynamMatr[11] = 0.0; 
    kalman->DynamMatr[12] = 0.0; kalman->DynamMatr[13] = 0.0; kalman->DynamMatr[14] = 0.0; kalman->DynamMatr[15] = 1.0; 

    // Thresholds
    int minH = 0, maxH = 255;
    int minS = 0, maxS = 255;
    int minV = 0, maxV = 255;

    // Create a window
    cvNamedWindow("binalized");
    cvCreateTrackbar("H max", "binalized", &maxH, 255);
    cvCreateTrackbar("H min", "binalized", &minH, 255);
    cvCreateTrackbar("S max", "binalized", &maxS, 255);
    cvCreateTrackbar("S min", "binalized", &minS, 255);
    cvCreateTrackbar("V max", "binalized", &maxV, 255);
    cvCreateTrackbar("V min", "binalized", &minV, 255);
    cvResizeWindow("binalized", 0, 0);

    // Main loop
    while (1) {
        // Key input
        int key = cvWaitKey(1);
        if (key == 0x1b) break;

        // Update
        if (!ardrone.update()) break;

        // Get an image
        IplImage *image = ardrone.getImage();

        // HSV image
        IplImage *hsv = cvCloneImage(image);
        cvCvtColor(image, hsv, CV_RGB2HSV_FULL);

        // Binarized image
        IplImage *binalized = cvCreateImage(cvGetSize(image), IPL_DEPTH_8U, 1);

        // Binarize: threshold the HSV image (the original passed the RGB "image", leaving "hsv" unused)
        CvScalar lower = cvScalar(minH, minS, minV);
        CvScalar upper = cvScalar(maxH, maxS, maxV);
        cvInRangeS(hsv, lower, upper, binalized);

        // Show result
        cvShowImage("binalized", binalized);

        // De-noising
        cvMorphologyEx(binalized, binalized, NULL, NULL, CV_MOP_CLOSE);
 
        // Detect contours
        CvSeq *contour = NULL, *maxContour = NULL;
        CvMemStorage *contourStorage = cvCreateMemStorage();
        cvFindContours(binalized, contourStorage, &contour, sizeof(CvContour), CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);

        // Find largest contour
        double max_area = 0.0;
        while (contour) {
            double area = fabs(cvContourArea(contour));
            if ( area > max_area) {
                maxContour = contour;
                max_area = area;
            }
            contour = contour->h_next;
        }

        // Object detected
        if (maxContour) {
            // Draw a contour
            cvZero(binalized);
            cvDrawContours(binalized, maxContour, cvScalarAll(255), cvScalarAll(255), 0, CV_FILLED);

            // Calculate the moments
            CvMoments moments;
            cvMoments(binalized, &moments, 1);
            int my = (int)(moments.m01/moments.m00);
            int mx = (int)(moments.m10/moments.m00);
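            // Blob centroid from the raw moments: mx = m10/m00, my = m01/m00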

            // Measurements
            float m[] = {(float)mx, (float)my};
            CvMat measurement = cvMat(2, 1, CV_32FC1, m);

            // Correct phase
            cvKalmanCorrect(kalman, &measurement);  // updates kalman->state_post
        }

        // Prediction phase
        const CvMat *prediction = cvKalmanPredict(kalman);

        // Display the image
        cvCircle(image, cvPointFrom32f(cvPoint2D32f(prediction->data.fl[0], prediction->data.fl[1])), 10, CV_RGB(0,255,0));
        cvShowImage("camera", image);

        // Release the memories
        cvReleaseImage(&hsv);
        cvReleaseImage(&binalized);
        cvReleaseMemStorage(&contourStorage);
    }

    // Release the kalman filter
    cvReleaseKalman(&kalman);

    // See you
    ardrone.close();

    return 0;
}
Exemple #30
0
// Function icvUpdateFGDStatModel updates the statistical model and returns the number of foreground regions
// parameters:
//      curr_frame  - current frame from video sequence
//      model       - pointer to CvFGDStatModel structure
static int CV_CDECL
icvUpdateFGDStatModel( IplImage* curr_frame, CvFGDStatModel*  model, double )
{
    int            mask_step = model->Ftd->widthStep;
    CvSeq         *first_seq = NULL, *prev_seq = NULL, *seq = NULL;
    IplImage*      prev_frame = model->prev_frame;
    int            region_count = 0;
    int            FG_pixels_count = 0;
    int            deltaC  = cvRound(model->params.delta * 256 / model->params.Lc);
    int            deltaCC = cvRound(model->params.delta * 256 / model->params.Lcc);
    int            i, j, k, l;

    //clear storages
    cvClearMemStorage(model->storage);
    cvZero(model->foreground);

    // Form foreground pixel candidates using image differencing
    // with adaptive thresholding. The algorithm is from:
    //
    //    Thresholding for Change Detection
    //    Paul L. Rosin 1998 6p
    //    http://www.cis.temple.edu/~latecki/Courses/CIS750-03/Papers/thresh-iccv.pdf
    //
    cvChangeDetection( prev_frame, curr_frame, model->Ftd );
    cvChangeDetection( model->background, curr_frame, model->Fbd );
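
    // Ftd holds the frame-to-frame change mask, Fbd the background-vs-frame
    // change mask; a pixel is re-examined below if either test fired.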

    for( i = 0; i < model->Ftd->height; i++ )
    {
        for( j = 0; j < model->Ftd->width; j++ )
        {
            if( ((uchar*)model->Fbd->imageData)[i*mask_step+j] || ((uchar*)model->Ftd->imageData)[i*mask_step+j] )
            {
                float Pb  = 0;
                float Pv  = 0;
                float Pvb = 0;

                CvBGPixelStat* stat = model->pixel_stat + i * model->Ftd->width + j;

                CvBGPixelCStatTable*   ctable = stat->ctable;
                CvBGPixelCCStatTable* cctable = stat->cctable;
    
                uchar* curr_data = (uchar*)(curr_frame->imageData) + i*curr_frame->widthStep + j*3;
                uchar* prev_data = (uchar*)(prev_frame->imageData) + i*prev_frame->widthStep + j*3;

                int val = 0;

                // Is it a motion pixel?
                if( ((uchar*)model->Ftd->imageData)[i*mask_step+j] )
                {
                    if( !stat->is_trained_dyn_model ) {

                        val = 1;

                    } else {

                        // Compare with stored CCt vectors:
                        for( k = 0; k < model->params.N1cc && PV_CC(k) > model->params.alpha2; k++ )
                        {
                            if ( abs( V_CC(k,0) - prev_data[0]) <=  deltaCC &&
                                 abs( V_CC(k,1) - prev_data[1]) <=  deltaCC &&
                                 abs( V_CC(k,2) - prev_data[2]) <=  deltaCC &&
                                 abs( V_CC(k,3) - curr_data[0]) <=  deltaCC &&
                                 abs( V_CC(k,4) - curr_data[1]) <=  deltaCC &&
                                 abs( V_CC(k,5) - curr_data[2]) <=  deltaCC)
                            {
                                Pv += PV_CC(k);
                                Pvb += PVB_CC(k);
                            }
                        }
                        Pb = stat->Pbcc;
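                        // Bayes decision: P(bg | v) = Pb*Pvb/Pv; the pixel is
                        // marked foreground when that posterior is <= 0.5: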
                        if( 2 * Pvb * Pb <= Pv ) val = 1;
                    }
                }
                else if( stat->is_trained_st_model )
                {
                    // Compare with stored Ct vectors:
                    for( k = 0; k < model->params.N1c && PV_C(k) > model->params.alpha2; k++ )
                    {
                        if ( abs( V_C(k,0) - curr_data[0]) <=  deltaC &&
                             abs( V_C(k,1) - curr_data[1]) <=  deltaC &&
                             abs( V_C(k,2) - curr_data[2]) <=  deltaC )
                        {
                            Pv += PV_C(k);
                            Pvb += PVB_C(k);
                        }
                    }
                    Pb = stat->Pbc;
                    if( 2 * Pvb * Pb <= Pv ) val = 1;
                }

                // Update foreground:
                ((uchar*)model->foreground->imageData)[i*mask_step+j] = (uchar)(val*255);
                FG_pixels_count += val;

            }		// end if( change detection...
        }		// for j...
    }			// for i...
    //end BG/FG classification

    // Foreground segmentation.
    // Smooth foreground map:
    if( model->params.perform_morphing ){
        cvMorphologyEx( model->foreground, model->foreground, 0, 0, CV_MOP_OPEN,  model->params.perform_morphing );
        cvMorphologyEx( model->foreground, model->foreground, 0, 0, CV_MOP_CLOSE, model->params.perform_morphing );
    }
   
   
    if( model->params.minArea > 0 || model->params.is_obj_without_holes ){

        // Discard under-size foreground regions:
        cvFindContours( model->foreground, model->storage, &first_seq, sizeof(CvContour), CV_RETR_LIST );
        for( seq = first_seq; seq; seq = seq->h_next )
        {
            CvContour* cnt = (CvContour*)seq;
            if( cnt->rect.width * cnt->rect.height < model->params.minArea || 
                (model->params.is_obj_without_holes && CV_IS_SEQ_HOLE(seq)) )
            {
                // Delete under-size contour:
                prev_seq = seq->h_prev;
                if( prev_seq )
                {
                    prev_seq->h_next = seq->h_next;
                    if( seq->h_next ) seq->h_next->h_prev = prev_seq;
                }
                else
                {
                    first_seq = seq->h_next;
                    if( seq->h_next ) seq->h_next->h_prev = NULL;
                }
            }
            else
            {
                region_count++;
            }
        }        
        model->foreground_regions = first_seq;
        cvZero(model->foreground);
        cvDrawContours(model->foreground, first_seq, CV_RGB(0, 0, 255), CV_RGB(0, 0, 255), 10, -1);

    } else {

        model->foreground_regions = NULL;
    }

    // Check ALL BG update condition:
    if( ((float)FG_pixels_count/(model->Ftd->width*model->Ftd->height)) > CV_BGFG_FGD_BG_UPDATE_TRESH )
    {
         for( i = 0; i < model->Ftd->height; i++ )
             for( j = 0; j < model->Ftd->width; j++ )
             {
                 CvBGPixelStat* stat = model->pixel_stat + i * model->Ftd->width + j;
                 stat->is_trained_st_model = stat->is_trained_dyn_model = 1;
             }
    }


    // Update background model:
    for( i = 0; i < model->Ftd->height; i++ )
    {
        for( j = 0; j < model->Ftd->width; j++ )
        {
            CvBGPixelStat* stat = model->pixel_stat + i * model->Ftd->width + j;
            CvBGPixelCStatTable* ctable = stat->ctable;
            CvBGPixelCCStatTable* cctable = stat->cctable;

            uchar *curr_data = (uchar*)(curr_frame->imageData)+i*curr_frame->widthStep+j*3;
            uchar *prev_data = (uchar*)(prev_frame->imageData)+i*prev_frame->widthStep+j*3;

            if( ((uchar*)model->Ftd->imageData)[i*mask_step+j] || !stat->is_trained_dyn_model )
            {
                float alpha = stat->is_trained_dyn_model ? model->params.alpha2 : model->params.alpha3;
                float diff = 0;
                int dist, min_dist = 2147483647, indx = -1;

                //update Pb
                stat->Pbcc *= (1.f-alpha);
                if( !((uchar*)model->foreground->imageData)[i*mask_step+j] )
                {
                    stat->Pbcc += alpha;
                }

                // Find best Vi match:
                for( k = 0; k < model->params.N2cc && PV_CC(k); k++ )
                {
                    // Exponential decay of memory
                    PV_CC(k)  *= (1-alpha);
                    PVB_CC(k) *= (1-alpha);
                    if( PV_CC(k) < MIN_PV )
                    {
                        PV_CC(k) = 0;
                        PVB_CC(k) = 0;
                        continue;
                    }

                    dist = 0;
                    for( l = 0; l < 3; l++ )
                    {
                        int val = abs( V_CC(k,l) - prev_data[l] );
                        if( val > deltaCC ) break;
                        dist += val;
                        val = abs( V_CC(k,l+3) - curr_data[l] );
                        if( val > deltaCC) break;
                        dist += val;
                    }
                    if( l == 3 && dist < min_dist )
                    {
                        min_dist = dist;
                        indx = k;
                    }
                }
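
                // indx now points at the stored feature closest (in L1 distance
                // over the six prev/curr color components) to the observation,
                // or -1 if none matched within deltaCC per component.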


                if( indx < 0 )
                {   // Replace N2th elem in the table by new feature:
                    indx = model->params.N2cc - 1;
                    PV_CC(indx) = alpha;
                    PVB_CC(indx) = alpha;
                    //update Vt
                    for( l = 0; l < 3; l++ )
                    {
                        V_CC(indx,l) = prev_data[l];
                        V_CC(indx,l+3) = curr_data[l];
                    }
                }
                else
                {   // Update:
                    PV_CC(indx) += alpha;
                    if( !((uchar*)model->foreground->imageData)[i*mask_step+j] )
                    {
                        PVB_CC(indx) += alpha;
                    }
                }

                //re-sort CCt table by Pv
                for( k = 0; k < indx; k++ )
                {
                    if( PV_CC(k) <= PV_CC(indx) )
                    {
                        //shift elements
                        CvBGPixelCCStatTable tmp1, tmp2 = cctable[indx];
                        for( l = k; l <= indx; l++ )
                        {
                            tmp1 = cctable[l];
                            cctable[l] = tmp2;
                            tmp2 = tmp1;
                        }
                        break;
                    }
                }
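
                // The shift above performs one insertion step, keeping the CCt
                // table sorted by decreasing Pv.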


                float sum1=0, sum2=0;
                //check "once-off" changes
                for( k = 0; k < model->params.N1cc && PV_CC(k); k++ )
                {
                    sum1 += PV_CC(k);
                    sum2 += PVB_CC(k);
                }
                if( sum1 > model->params.T ) stat->is_trained_dyn_model = 1;
                
                diff = sum1 - stat->Pbcc * sum2;
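                // diff > T indicates a "once-off" change: the accumulated
                // features are no longer explained by the background statistics.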
                // Update stat table:
                if( diff >  model->params.T )
                {
                    //printf("once off change at motion mode\n");
                    //new BG features are discovered
                    for( k = 0; k < model->params.N1cc && PV_CC(k); k++ )
                    {
                        PVB_CC(k) =
                            (PV_CC(k)-stat->Pbcc*PVB_CC(k))/(1-stat->Pbcc);
                    }
                    assert(stat->Pbcc<=1 && stat->Pbcc>=0);
                }
            }

            // Handle "stationary" pixel:
            if( !((uchar*)model->Ftd->imageData)[i*mask_step+j] )
            {
                float alpha = stat->is_trained_st_model ? model->params.alpha2 : model->params.alpha3;
                float diff = 0;
                int dist, min_dist = 2147483647, indx = -1;

                //update Pb
                stat->Pbc *= (1.f-alpha);
                if( !((uchar*)model->foreground->imageData)[i*mask_step+j] )
                {
                    stat->Pbc += alpha;
                }

                //find best Vi match
                for( k = 0; k < model->params.N2c; k++ )
                {
                    // Exponential decay of memory
                    PV_C(k) *= (1-alpha);
                    PVB_C(k) *= (1-alpha);
                    if( PV_C(k) < MIN_PV )
                    {
                        PV_C(k) = 0;
                        PVB_C(k) = 0;
                        continue;
                    }
                    
                    dist = 0;
                    for( l = 0; l < 3; l++ )
                    {
                        int val = abs( V_C(k,l) - curr_data[l] );
                        if( val > deltaC ) break;
                        dist += val;
                    }
                    if( l == 3 && dist < min_dist )
                    {
                        min_dist = dist;
                        indx = k;
                    }
                }

                if( indx < 0 )
                {   // Replace the N2th elem in the table by a new feature:
                    indx = model->params.N2c - 1;
                    PV_C(indx) = alpha;
                    PVB_C(indx) = alpha;
                    //update Vt
                    for( l = 0; l < 3; l++ )
                    {
                        V_C(indx,l) = curr_data[l];
                    }
                }
                else
                {   // Update:
                    PV_C(indx) += alpha;
                    if( !((uchar*)model->foreground->imageData)[i*mask_step+j] )
                    {
                        PVB_C(indx) += alpha;
                    }
                }

                //re-sort Ct table by Pv
                for( k = 0; k < indx; k++ )
                {
                    if( PV_C(k) <= PV_C(indx) )
                    {
                        //shift elements
                        CvBGPixelCStatTable tmp1, tmp2 = ctable[indx];
                        for( l = k; l <= indx; l++ )
                        {
                            tmp1 = ctable[l];
                            ctable[l] = tmp2;
                            tmp2 = tmp1;
                        }
                        break;
                    }
                }

                // Check "once-off" changes:
                float sum1=0, sum2=0;
                for( k = 0; k < model->params.N1c && PV_C(k); k++ )
                {
                    sum1 += PV_C(k);
                    sum2 += PVB_C(k);
                }
                diff = sum1 - stat->Pbc * sum2;
                if( sum1 > model->params.T ) stat->is_trained_st_model = 1;

                // Update stat table:
                if( diff >  model->params.T )
                {
                    //printf("once off change at stat mode\n");
                    //new BG features are discovered
                    for( k = 0; k < model->params.N1c && PV_C(k); k++ )
                    {
                        PVB_C(k) = (PV_C(k)-stat->Pbc*PVB_C(k))/(1-stat->Pbc);
                    }
                    stat->Pbc = 1 - stat->Pbc;
                }
            }		// if !(change detection) at pixel (i,j)

            // Update the reference BG image:
            if( !((uchar*)model->foreground->imageData)[i*mask_step+j])
            {
                uchar* ptr = ((uchar*)model->background->imageData) + i*model->background->widthStep+j*3;
                
                if( !((uchar*)model->Ftd->imageData)[i*mask_step+j] &&
                    !((uchar*)model->Fbd->imageData)[i*mask_step+j] )
                {
                    // Apply IIR filter:
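                    //   b <- (1 - alpha1) * b + alpha1 * c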
                    for( l = 0; l < 3; l++ )
                    {
                        int a = cvRound(ptr[l]*(1 - model->params.alpha1) + model->params.alpha1*curr_data[l]);
                        ptr[l] = (uchar)a;
                    }
                }
                else
                {
                    // Background change detected:
                    for( l = 0; l < 3; l++ )
                    {
                        ptr[l] = curr_data[l];
                    }
                }
            }
        }		// j
    }			// i

    // Keep previous frame:
    cvCopy( curr_frame, model->prev_frame );

    return region_count;
}
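
A minimal usage sketch for this model, assuming the OpenCV "legacy" C API that
wraps this function (cvCreateFGDStatModel, cvUpdateBGStatModel and
cvReleaseBGStatModel are the public entry points; the header location varies
across OpenCV 2.x versions, and the camera source here is illustrative):

#include <opencv2/legacy/legacy.hpp>
#include <opencv2/highgui/highgui.hpp>

int main()
{
    CvCapture* cap = cvCaptureFromCAM(0);      // any video source works
    IplImage* frame = cvQueryFrame(cap);
    if (!frame) return -1;

    // Allocates the per-pixel statistics tables; every subsequent
    // cvUpdateBGStatModel call runs the FGD update shown above.
    CvBGStatModel* bg = cvCreateFGDStatModel(frame);

    while ((frame = cvQueryFrame(cap)) != NULL) {
        cvUpdateBGStatModel(frame, bg, -1);
        cvShowImage("foreground", bg->foreground);
        if (cvWaitKey(10) == 27) break;        // Esc quits
    }

    cvReleaseBGStatModel(&bg);
    cvReleaseCapture(&cap);
    return 0;
}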