Beispiel #1
0
/// <summary>
/// Illumination normalization:
/// 1. convert the image to HSV colour space;
/// 2. shift the V channel so that its mean equals the requested threshold;
/// 3. convert back from HSV to RGB.
/// </summary>
/// <param name="src">input image, must have 3 channels</param>
/// <param name="dst">output image, same size/depth as src</param>
/// <param name="threshold">target mean value for the V (brightness) channel</param>
void CLightSet::LightNormalization(IplImage* src, IplImage* dst, int threshold){
	ASSERT(src->nChannels==3);
	// Convert colour space into dst; all further work happens on dst.
	cvCvtColor(src,dst,CV_RGB2HSV);

	// Split into single-channel planes (per-channel processing requires them).
	IplImage* imgChannel[3] = { 0, 0, 0 };
	for (int i=0;i<dst->nChannels;i++)
	{
		imgChannel[i] = cvCreateImage(cvGetSize( dst ), IPL_DEPTH_8U, 1);
	}
	cvSplit(dst, imgChannel[0], imgChannel[1], imgChannel[2], 0); // H, S, V

	// Shift the V plane so that its average becomes `threshold`.
	CvScalar avg = cvAvg(imgChannel[2]);
	cvCvtScale(imgChannel[2], imgChannel[2], 1.0, threshold - avg.val[0]);

	// BUG FIX: merge the adjusted planes back into dst (the image converted
	// below), not into src as the original code did — otherwise the V-shift
	// never reached the output and src was clobbered with HSV data.
	cvMerge( imgChannel[0], imgChannel[1], imgChannel[2], 0, dst );

	cvCvtColor(dst,dst,CV_HSV2RGB);

	for (int i=0;i<dst->nChannels;i++)
	{
		cvReleaseImage(&imgChannel[i] );
	}
}
// Update the global motion-history image (MHI) with one new frame and render
// it into dst as a blue 8-bit visualization.
//
// parameters:
//  img - input video frame (BGR, 8u)
//  dst - resultant motion picture (same size as img)
//  diff_threshold - binarization threshold applied to the frame difference
//  frameCount - unused in this variant
static void  update_mhi( IplImage* img, IplImage* dst, int diff_threshold, int frameCount){
	if(DEBUG){
		std::cout << "- UPDATING_MHI" << std::endl;
	}
	double timestamp = (double)clock()/CLOCKS_PER_SEC; // get current time in seconds
	CvSize size = cvSize(img->width,img->height); // get current frame size
	int i, idx1 = last, idx2;
	// NOTE(review): the locals below are never used in this truncated variant;
	// they look like leftovers from the full motempl sample.
	CvSeq* seq;
	CvRect comp_rect;
	CvRect roi;
	double count;
	double angle;
	CvPoint center;
	double magnitude;
	CvScalar color;

	// Allocate images at the beginning or reallocate them if the frame size is changed
	if( !mhi || mhi->width != size.width || mhi->height != size.height ) {
		if( buf == 0 ) {
			buf = (IplImage**)malloc(N*sizeof(buf[0]));
			memset( buf, 0, N*sizeof(buf[0]));
		}

		for( i = 0; i < N; i++ ) {
			cvReleaseImage( &buf[i] );
			buf[i] = cvCreateImage( size, IPL_DEPTH_8U, 1 );
			cvZero( buf[i] );
		}
		cvReleaseImage( &mhi );
		cvReleaseImage( &orient );
		cvReleaseImage( &segmask );
		cvReleaseImage( &mask );

		mhi = cvCreateImage( size, IPL_DEPTH_32F, 1 );
		cvZero( mhi ); // clear MHI at the beginning
		orient = cvCreateImage( size, IPL_DEPTH_32F, 1 );
		segmask = cvCreateImage( size, IPL_DEPTH_32F, 1 );
		mask = cvCreateImage( size, IPL_DEPTH_8U, 1 );
	}

	cvCvtColor( img, buf[last], CV_BGR2GRAY ); // convert frame to grayscale
	idx2 = (last + 1) % N; // index of (last - (N-1))th frame
	last = idx2;

	// silh is a file-level global; it aliases the ring-buffer slot just written.
	silh = buf[idx2];
	cvAbsDiff( buf[idx1], buf[idx2], silh ); // get difference between frames

	cvThreshold( silh, silh, diff_threshold, 255, CV_THRESH_BINARY); // and threshold it
	cvUpdateMotionHistory( silh, mhi, timestamp, MHI_DURATION ); // update MHI

	// convert MHI to blue 8u image
	cvCvtScale( mhi, mask, 255./MHI_DURATION, (MHI_DURATION - timestamp)*255./MHI_DURATION );
	cvZero( dst );
	cvMerge( mask, 0, 0, 0, dst ); // mask becomes the blue channel of dst
}
// Learn the background statistics for one more frame.
// Accumulates the frame sum (IavgF), the sum of absolute frame-to-frame
// differences (IdiffF) and the frame count (Icount) into file-level globals.
void accumulateBackground( IplImage *I ){
	static bool firstFrame = true;  // no previous frame exists on the very first call
	cvCvtScale( I, Iscratch, 1, 0 );  // widen input to float
	if( firstFrame ){
		firstFrame = false;  // nothing to diff against yet
	} else {
		cvAcc( Iscratch, IavgF );                  // running sum of frames
		cvAbsDiff( Iscratch, IprevF, Iscratch2 );  // |current - previous|
		cvAcc( Iscratch2, IdiffF );                // running sum of diffs
		Icount += 1.0;                             // diffs accumulated so far
	}
	cvCopy( Iscratch, IprevF );  // remember this frame for the next call
}
Beispiel #4
0
// Extract NUM_HAAR_FEATURES rectangle-sum (Haar-like) features from an image.
//
// img  - input image; 3-channel input is converted to grayscale
// haar - output 1 x NUM_HAAR_FEATURES row vector (CV_32FC1); overwritten
void extractHaarFeatures(const IplImage* img, Mat& haar)
{
  CvSize size = cvSize(IMAGE_RESIZE, IMAGE_RESIZE);
  CvSize size2 = cvSize(INTEGRAL_SIZE, INTEGRAL_SIZE);
  CvSize img_size = cvGetSize(img);
  // BUG FIX: cvCreateImage requires at least 1 channel; the original code
  // passed 0 channels to every cvCreateImage call in this function.
  IplImage* ipl = cvCreateImage(img_size, 8, 1);
  if(img->nChannels==3)
    {
      cvCvtColor(img,ipl,CV_BGR2GRAY);
    }
  else
    {
      cvCopy(img,ipl,0);
    }

  // Resize to IMAGE_RESIZE x IMAGE_RESIZE if the input has a different size.
  if((size.width!=img_size.width)|| (size.height!=img_size.height))
    {
      IplImage* tmpsize=cvCreateImage(size,IPL_DEPTH_8U,1);
      cvResize(ipl,tmpsize,CV_INTER_LINEAR);
      cvReleaseImage( &ipl);
      ipl=cvCreateImage(size,IPL_DEPTH_8U,1);
      cvCopy(tmpsize,ipl,0);
      cvReleaseImage( &tmpsize);
    }

  // Convert to double and normalize intensities to [0, 1].
  IplImage* temp = cvCreateImage(size, IPL_DEPTH_64F, 1);
  cvCvtScale(ipl,temp);
  cvNormalize(temp, temp, 0, 1, CV_MINMAX);
  haar.release();
  haar = Mat::zeros(1, NUM_HAAR_FEATURES, CV_32FC1);

  // Integral image is one pixel larger per dimension (INTEGRAL_SIZE).
  IplImage* integral=cvCreateImage(size2,IPL_DEPTH_64F,1);
  CvMat * sqSum = cvCreateMat(temp->height + 1, temp->width + 1, CV_64FC1);
  cvIntegral(temp, integral, sqSum);
  cvReleaseMat(&sqSum); // squared-sum plane is not needed

  int actualSize = 0;
  // Enumerate all axis-aligned rectangles on a 10-pixel grid:
  // (i,j) is the top-left corner, (m,n) the bottom-right corner.
  // NOTE(review): the literal 100 assumes IMAGE_RESIZE == 100 — confirm.
  for(int i = 0; i < 100; i+= 10) {
    for(int j = 0; j < 100; j+= 10) {
      // bottom right
      for(int m = i+10; m<=100; m+=10) {
        for(int n = j+10; n<=100; n+=10) {
          haar.at<float>(0, actualSize++) = getIntegralRectValue(integral, i, j, m, n);
        }
      }
    }
  }
  cvReleaseImage(&ipl);
  cvReleaseImage(&temp);
  cvReleaseImage(&integral);
}
// Accumulate the background statistics for one more frame
// We accumulate the images, the image differences and the count of images for the 
//    the routine createModelsfromStats() to work on after we're done accumulating N frames.
// I		Background image, 3 channel, 8u
// number	Camera number
void accumulateBackground(IplImage *I, int number)
{
	// NOTE(review): `first` is shared across ALL camera numbers. Once any
	// camera has been seen, the diff below also runs for another camera's
	// first frame against an IprevF[number] that was never filled in —
	// verify whether `first` should be tracked per camera.
	static int first = 1;
	cvCvtScale(I,Iscratch,1,0); //To float;
	if (!first){
		cvAcc(Iscratch,IavgF[number]);                // running sum of frames
		cvAbsDiff(Iscratch,IprevF[number],Iscratch2); // |current - previous|
		cvAcc(Iscratch2,IdiffF[number]);              // running sum of diffs
		Icount[number] += 1.0;                        // diffs accumulated
	}
	first = 0;
	cvCopy(Iscratch,IprevF[number]); // remember this frame for the next diff
}
// Create a mask.
// Sets Imask to 255 wherever any channel of I falls inside its per-channel
// background range [Ilow*, Ihi*]; per-channel results are OR-ed together.
// I      input image, 3 channel (converted to float via global Iscratch)
// Imask  output mask, 1 channel
void backgroundDiff( IplImage *I, IplImage *Imask ){
	cvCvtScale( I, Iscratch, 1, 0); // to float
	cvSplit( Iscratch, Igray1, Igray2, Igray3, 0 );

	// channel 1
	cvInRange( Igray1, Ilow1, Ihi1, Imask );
	// channel 2
	cvInRange( Igray2, Ilow2, Ihi2, Imaskt );
	cvOr( Imask, Imaskt, Imask );
	// channel 3
	cvInRange( Igray3, Ilow3, Ihi3, Imaskt );
	cvOr( Imask, Imaskt, Imask );
	// NOTE(review): unlike the other backgroundDiff variants in this file,
	// this one does NOT invert the mask, so 255 here marks in-range
	// (background) pixels rather than foreground — confirm intent.
}
	// Create a binary 0/255 mask where 255 means foreground pixel.
	// I		Input image, 3 channel, 8u
	// Imask	mask image to be created, 1 channel 8u
	// NOTE(review): the original header also documented a `num` camera
	// parameter that this signature does not have.
	//
void ofxBackground::backgroundDiff(IplImage *I,IplImage *Imask)  //Mask should be grayscale
{
	cvCvtScale(I,Iscratch,1,0); //To float;
								//Channel 1
	cvCvtPixToPlane( Iscratch, Igray1,Igray2,Igray3, 0 ); // legacy alias of cvSplit
	cvInRange(Igray1,Ilow1,Ihi1,Imask);
		//Channel 2
	cvInRange(Igray2,Ilow2,Ihi2,Imaskt);
	cvOr(Imask,Imaskt,Imask);
		//Channel 3
	cvInRange(Igray3,Ilow3,Ihi3,Imaskt);
	cvOr(Imask,Imaskt,Imask);
		//Finally, invert the results: in-range (background) -> 0, out-of-range (foreground) -> 255
	cvSubRS( Imask, cvScalar(255), Imask);
}
Beispiel #8
0
// Create a binary 0/255 mask where 255 means foreground pixel.
// I      Input image, 3 channel, 8u
// Imask  Mask image to be created, 1 channel 8u
//
void backgroundDiff(IplImage *I,IplImage *Imask)
{
   cvCvtScale(I,Iscratch,1,0); // to float
   cvSplit( Iscratch, Igray1,Igray2,Igray3, 0 );
   // Channel 1
   cvInRange(Igray1,Ilow1,Ihi1,Imask);
   // Channel 2
   cvInRange(Igray2,Ilow2,Ihi2,Imaskt);
   cvOr(Imask,Imaskt,Imask);
   // Channel 3
   cvInRange(Igray3,Ilow3,Ihi3,Imaskt);
   cvOr(Imask,Imaskt,Imask);  // BUG FIX: the original was missing the semicolon here
   // Finally, invert the results: background -> 0, foreground -> 255.
   // BUG FIX: cvSubRS takes a CvScalar, not a bare int (the sibling variants
   // in this file correctly pass cvScalar(255)).
   cvSubRS( Imask, cvScalar(255), Imask);
}
// Create a binary: 0,255 mask where 255 means foreground pixel.
//
// Parameters:
//   I:     input image, 3 channel, 8u
//   Imask: mask image to be created, 1 channel 8u
//   num:   camera number (indexes the per-camera threshold arrays)
//
void backgroundDiff(IplImage *I, IplImage *Imask, int num)  // Mask should be grayscale
{
	cvCvtScale(I, Iscratch, 1, 0);  // To float.

	// Channel 1
	cvCvtPixToPlane( Iscratch, Igray1, Igray2, Igray3, 0 ); // TODO: book uses cvSplit: check! (cvCvtPixToPlane is the legacy alias)
	cvInRange( Igray1, Ilow1[num], Ihi1[num], Imask);

	// Channel 2
	cvInRange( Igray2, Ilow2[num], Ihi2[num], Imaskt );
	cvOr( Imask, Imaskt, Imask );

	// Channel 3
	cvInRange( Igray3, Ilow3[num], Ihi3[num], Imaskt );
	cvOr( Imask, Imaskt, Imask );

	// Finally, invert the results: in-range (background) -> 0, foreground -> 255.
	cvSubRS( Imask, cvScalar(255), Imask);
}
// Accumulate the background statistics for one more frame.
// We accumulate the images, the image differences and the count of images for the 
//    the routine createModelsfromStats() to work on after we're done accumulating N frames.
//
// Parameters:
//   I:       Background image, 3 channel, 8u
//   number:	Camera number
void accumulateBackground(IplImage *I, int number)
{
	static int first = 1;  // nb. Not thread safe
	// NOTE(review): `first` is also shared across camera numbers — after the
	// first call for any camera, other cameras' first frames are diffed
	// against an uninitialized IprevF[number]. Verify intent.

  // Turn the raw background 8-bit-per-channel, three-color-channel image into a
  // floating-point three-channel image.
	cvCvtScale(I, Iscratch, 1, 0);

	if (!first)
  {
    // Learn the accumulated background image.
		cvAcc(Iscratch, IavgF[number]);

    // Learn the accumulated absolute value of frame-to-frame image differences.
		cvAbsDiff(Iscratch, IprevF[number], Iscratch2);
		cvAcc(Iscratch2, IdiffF[number]);

		Icount[number] += 1.0;
	}
	first = 0;
	cvCopy(Iscratch, IprevF[number]); // remember this frame for the next diff
}
Beispiel #11
0
// Update the motion-history image (MHI) with one frame and report whether a
// sufficiently large motion component overlaps `rect`.
//
// img  - input frame (BGR, 8u)
// dst  - visualization output (MHI rendered as blue, motion markers drawn)
// diff_threshold - binarization threshold for the frame difference
// rect - region to test motion overlap against
//
// Returns true when an accepted motion component overlaps `rect` with an
// area greater than 10 pixels.
public:bool analizarMhi( IplImage* img, IplImage* dst, int diff_threshold, CvRect rect ) {
        double timestamp = (double)clock()/CLOCKS_PER_SEC; // get current time in seconds
        int i, idx1 = last, idx2;
        IplImage* silh;
        CvSeq* seq;
        CvRect comp_rect;
        cv::Rect result; // empty (area 0) until a component overlaps rect
        double count;
        double angle;
        CvPoint center;
        double magnitude;
        CvScalar color;

        cvCvtColor( img, buf[last], CV_BGR2GRAY ); // convert frame to grayscale

        idx2 = (last + 1) % N; // index of (last - (N-1))th frame
        last = idx2;

        silh = buf[idx2];
        cvAbsDiff( buf[idx1], buf[idx2], silh ); // get difference between frames

        cvThreshold( silh, silh, diff_threshold, 1, CV_THRESH_BINARY ); // and threshold it
        cvUpdateMotionHistory( silh, mhi, timestamp, MHI_DURATION ); // update MHI

        // convert MHI to blue 8u image
        cvCvtScale( mhi, mask, 255./MHI_DURATION,
                    (MHI_DURATION - timestamp)*255./MHI_DURATION );
        cvZero( dst );
        cvCvtPlaneToPix( mask, 0, 0, 0, dst );

        // calculate motion gradient orientation and valid orientation mask
        cvCalcMotionGradient( mhi, mask, orient, MAX_TIME_DELTA, MIN_TIME_DELTA, 3 );

        if( !storage )
            storage = cvCreateMemStorage(0);
        else
            cvClearMemStorage(storage);

        // segment motion: get sequence of motion components
        // segmask is marked motion components map. It is not used further
        seq = cvSegmentMotion( mhi, segmask, storage, timestamp, MAX_TIME_DELTA );

        // iterate through the motion components,
        // One more iteration (i == -1) corresponds to the whole image (global motion)
    //    for( i = -1; i < seq->total; i++ ) {
        i = 1;
        comp_rect = cvRect( 0, 0, img->width, img->height );
        color = CV_RGB(255,255,255);
        magnitude = 100;
        // BUG FIX: the original used bitwise `&` here; `&&` is the intended
        // logical operator (and short-circuits).
        while (result.area() < 10 && i < seq->total) {

                comp_rect = ((CvConnectedComp*)cvGetSeqElem( seq, i ))->rect;
                if( comp_rect.width + comp_rect.height < 100 ) {// reject very small components
                    i++;
                    continue;
                }
                color = CV_RGB(255,0,0);
                magnitude = 30;

            // select component ROI
            cvSetImageROI( silh, comp_rect );
            cvSetImageROI( mhi, comp_rect );
            cvSetImageROI( orient, comp_rect );
            cvSetImageROI( mask, comp_rect );

            // calculate orientation
            angle = cvCalcGlobalOrientation( orient, mask, mhi, timestamp, MHI_DURATION);
            angle = 360.0 - angle;  // adjust for images with top-left origin

            count = cvNorm( silh, 0, CV_L1, 0 ); // calculate number of points within silhouette ROI

            cvResetImageROI( mhi );
            cvResetImageROI( orient );
            cvResetImageROI( mask );
            cvResetImageROI( silh );

            center = cvPoint( (comp_rect.x + comp_rect.width/2),
                              (comp_rect.y + comp_rect.height/2) );

            // draw a clock with an arrow indicating the motion direction
            cvCircle( dst, center, cvRound(magnitude*1.2), color, 3, CV_AA, 0 );
            cvLine( dst, center, cvPoint( cvRound( center.x + magnitude*cos(angle*CV_PI/180)),
                    cvRound( center.y - magnitude*sin(angle*CV_PI/180))), color, 3, CV_AA, 0 );

            // overlap between this component and the region of interest
            result = (cv::Rect)comp_rect & (cv::Rect)rect;
            i++;
        }

        if (result.area() > 10) {
            return true;
        } else {
            return false;
        }
    }
Beispiel #12
0
// Update the motion-history image (MHI) from one frame, render it into dst,
// draw motion markers on img, and pick a tracking target (tx,ty) according
// to the current zoom mode (Zmode).
static void  update_mhi( IplImage* img, IplImage* dst, int diff_threshold )
{
    double timestamp = (double)clock()/CLOCKS_PER_SEC; // get current time in seconds
    CvSize size = cvSize(img->width,img->height); // get current frame size
    int i, idx1 = last, idx2;
    IplImage* silh;
    CvSeq* seq;
    CvRect comp_rect;
    double count;
    double angle;
    CvPoint center;
    double magnitude;
    CvScalar color;

    unsigned  int tmpx=0,tmpy=0;
    int tmppcount=0; // counts every processed component, including the i == -1 whole-image pass
    // allocate images at the beginning or
    // reallocate them if the frame size is changed
    if( !mhi || mhi->width != size.width || mhi->height != size.height ) {
        if( mbuf == 0 ) {
            mbuf = (IplImage**)malloc(N*sizeof(mbuf[0]));
            memset( mbuf, 0, N*sizeof(mbuf[0]));
        }

        for( i = 0; i < N; i++ ) {
            cvReleaseImage( &mbuf[i] );
            mbuf[i] = cvCreateImage( size, IPL_DEPTH_8U, 1 );
            cvZero( mbuf[i] );
        }
        cvReleaseImage( &mhi );
        cvReleaseImage( &orient );
        cvReleaseImage( &segmask );
        cvReleaseImage( &mask );

        mhi = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        cvZero( mhi ); // clear MHI at the beginning
        orient = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        segmask = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        mask = cvCreateImage( size, IPL_DEPTH_8U, 1 );
    }

    cvCvtColor( img, mbuf[last], CV_BGR2GRAY ); // convert frame to grayscale

    idx2 = (last + 1) % N; // index of (last - (N-1))th frame
    last = idx2;

    silh=mbuf[idx2];
    cvAbsDiff( mbuf[idx1], mbuf[idx2], silh );                          // frame-to-frame difference

    cvThreshold( silh, silh, diff_threshold, 1, CV_THRESH_BINARY ); // binarize, but to 0/1 rather than 0/255
    cvUpdateMotionHistory( silh, mhi, timestamp, MHI_DURATION );    // update the time-stamped motion history image

    // convert the MHI to a displayable 8-bit image and merge it into the output
    cvCvtScale( mhi, mask, 255./MHI_DURATION,
                (MHI_DURATION - timestamp)*255./MHI_DURATION );
    cvZero( dst );
    cvMerge( mask,0,0,0,dst );

    // calculate motion gradient orientation and valid orientation mask
    cvCalcMotionGradient( mhi, mask, orient, MAX_TIME_DELTA, MIN_TIME_DELTA, 3 );// orient holds the gradient direction angle of each motion pixel

    if( !mstorage )
        mstorage = cvCreateMemStorage(0);
    else
        cvClearMemStorage(mstorage);

    // segment motion: get sequence of motion components
    // segmask is marked motion components map. It is not used further
    seq = cvSegmentMotion( mhi, segmask, mstorage, timestamp, MAX_TIME_DELTA );// split the motion history into motion components

    // iterate through the motion components,
    // One more iteration (i == -1) corresponds to the whole image (global motion)
    for( i = -1; i < seq->total; i++ ) {

        if( i < 0 ) { // case of the whole image
            comp_rect = cvRect( 0, 0, size.width, size.height );
            color = CV_RGB(255,255,255);
            magnitude = 100;
	   // printf("ALL image.\n");
        }
        else { // i-th motion component
            comp_rect = ((CvConnectedComp*)cvGetSeqElem( seq, i ))->rect;
            if( comp_rect.width + comp_rect.height < 30 ) // reject very small components
                continue;
            color = CV_RGB(255,0,0);
            magnitude = 30;
        }

        // select component ROI
        cvSetImageROI( silh, comp_rect );
        cvSetImageROI( mhi, comp_rect );
        cvSetImageROI( orient, comp_rect );
        cvSetImageROI( mask, comp_rect );

        // calculate orientation
        angle = cvCalcGlobalOrientation( orient, mask, mhi, timestamp, MHI_DURATION);// overall motion direction inside the ROI
        angle = 360.0 - angle;  // adjust for images with top-left origin

        count = cvNorm( silh, 0, CV_L1, 0 ); // calculate number of points within silhouette ROI

        cvResetImageROI( mhi );
        cvResetImageROI( orient );
        cvResetImageROI( mask );
        cvResetImageROI( silh );

        // check for the case of little motion
        if( count < comp_rect.width*comp_rect.height * 0.05*0.1  )
            continue;

        // draw a clock with arrow indicating the direction
        center = cvPoint( (comp_rect.x + comp_rect.width/2),
                          (comp_rect.y + comp_rect.height/2) );

	if(Zmode == ZMODE_Z && i !=-1)
	{
		// Stabilization rule in zoom-in mode: keep the candidate target
		// closest (by the metric below) among this frame's components.
		tmpx = comp_rect.x+comp_rect.width/2; // candidate target coordinates from this component
		tmpy = comp_rect.y+comp_rect.height/2;
		// NOTE(review): this distance uses tx/ty (the previous target), not
		// the freshly computed tmpx/tmpy, so tmpal1 is identical for every
		// component in this frame — looks like a bug; confirm intent.
		tmpal1 =  sqrt(abs(tx-ax))+sqrt(abs(ty-ay));

		if(tmpal2 > tmpal1 || tmppcount == 0)// closer than the best so far (or first candidate)
		{
			tmpal2 = tmpal1;
			tx = tmpx;
			ty = tmpy;
		}
		catchflag = 1;
	}
	
	if(Zmode == ZMOOE_S) // NOTE(review): possibly a typo of ZMODE_S — the constant is declared elsewhere
	{	
		// Target-selection rule in zoom-out mode: take the first real component.
		if(tmppcount == 0 && i != -1)
		{
			tx = comp_rect.x+comp_rect.width/2;
			ty = comp_rect.y+comp_rect.height/2;
			catchflag = 1;
		}
	}
	tmppcount++;
	printf("The %dth rect:(%d,%d)\n",tmppcount,comp_rect.x+comp_rect.width/2,comp_rect.y+comp_rect.height/2);
        cvCircle( img, center, cvRound(magnitude*1.2), color, 3, CV_AA, 0 );
        cvLine( img, center, cvPoint( cvRound( center.x + magnitude*cos(angle*CV_PI/180)),
                cvRound( center.y - magnitude*sin(angle*CV_PI/180))), color, 3, CV_AA, 0 );
    }

}
Beispiel #13
0
// Update the motion-history image (MHI) with one frame, render it into dst
// as a blue image, and draw a direction marker for each motion component.
//
// parameters:
//  img - input video frame (BGR, 8u)
//  dst - resultant motion picture (same size as img)
//  diff_threshold - binarization threshold for the frame difference
void  update_mhi( IplImage* img, IplImage* dst, int diff_threshold )
{
    double timestamp = (double)clock()/CLOCKS_PER_SEC; // get current time in seconds
    CvSize size = cvSize(img->width,img->height); // get current frame size
    int i, idx1 = last, idx2;
    IplImage* silh;
    CvSeq* seq;
    CvRect comp_rect;
    double count;
    double angle;
    CvPoint center;
    double magnitude;
    CvScalar color;

    // allocate images at the beginning or
    // reallocate them if the frame size is changed
    if( !mhi || mhi->width != size.width || mhi->height != size.height ) {
        if( buf == 0 ) {
            buf = (IplImage**)malloc(N*sizeof(buf[0]));
            memset( buf, 0, N*sizeof(buf[0]));
        }

        for( i = 0; i < N; i++ ) {
            cvReleaseImage( &buf[i] );
            buf[i] = cvCreateImage( size, IPL_DEPTH_8U, 1 );
            cvZero( buf[i] );
        }
        cvReleaseImage( &mhi );
        cvReleaseImage( &orient );
        cvReleaseImage( &segmask );
        cvReleaseImage( &mask );

        mhi = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        cvZero( mhi ); // clear MHI at the beginning
        orient = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        segmask = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        mask = cvCreateImage( size, IPL_DEPTH_8U, 1 );
    }

    cvCvtColor( img, buf[last], CV_BGR2GRAY ); // convert frame to grayscale

    idx2 = (last + 1) % N; // index of (last - (N-1))th frame
    last = idx2;

    silh = buf[idx2];
    cvAbsDiff( buf[idx1], buf[idx2], silh ); // get difference between frames

    cvThreshold( silh, silh, diff_threshold, 1, CV_THRESH_BINARY ); // and threshold it
    cvUpdateMotionHistory( silh, mhi, timestamp, MHI_DURATION ); // update MHI

    // convert MHI to blue 8u image
    cvCvtScale( mhi, mask, 255./MHI_DURATION,
                (MHI_DURATION - timestamp)*255./MHI_DURATION );
    cvZero( dst );
    cvMerge( mask, 0, 0, 0, dst );

    // calculate motion gradient orientation and valid orientation mask
    cvCalcMotionGradient( mhi, mask, orient, MAX_TIME_DELTA, MIN_TIME_DELTA, 3 );

    if( !storage )
        storage = cvCreateMemStorage(0);
    else
        cvClearMemStorage(storage);

    // segment motion: get sequence of motion components
    // segmask is marked motion components map. It is not used further
    seq = cvSegmentMotion( mhi, segmask, storage, timestamp, MAX_TIME_DELTA );

    // iterate through the motion components,
    // One more iteration (i == -1) corresponds to the whole image (global motion)
    for( i = -1; i < seq->total; i++ ) {

        if( i < 0 ) { // case of the whole image
            comp_rect = cvRect( 0, 0, size.width, size.height );
            color = CV_RGB(255,255,255);
            magnitude = 100;
        }
        else { // i-th motion component
            comp_rect = ((CvConnectedComp*)cvGetSeqElem( seq, i ))->rect;
            if( comp_rect.width + comp_rect.height < 100 ) // reject very small components
                continue;
            color = CV_RGB(255,0,0);
            magnitude = 30;
        }

        // select component ROI
        cvSetImageROI( silh, comp_rect );
        cvSetImageROI( mhi, comp_rect );
        cvSetImageROI( orient, comp_rect );
        cvSetImageROI( mask, comp_rect );

        // calculate orientation
        angle = cvCalcGlobalOrientation( orient, mask, mhi, timestamp, MHI_DURATION);
        angle = 360.0 - angle;  // adjust for images with top-left origin

        count = cvNorm( silh, 0, CV_L1, 0 ); // calculate number of points within silhouette ROI

        cvResetImageROI( mhi );
        cvResetImageROI( orient );
        cvResetImageROI( mask );
        cvResetImageROI( silh );

        // check for the case of little motion
        if( count < comp_rect.width*comp_rect.height * 0.05 )
            continue;

        // draw a clock with arrow indicating the direction
        center = cvPoint( (comp_rect.x + comp_rect.width/2),
                          (comp_rect.y + comp_rect.height/2) );

        cvCircle( dst, center, cvRound(magnitude*1.2), color, 3, CV_AA, 0 );
        cvLine( dst, center, cvPoint( cvRound( center.x + magnitude*cos(angle*CV_PI/180)),
                cvRound( center.y - magnitude*sin(angle*CV_PI/180))), color, 3, CV_AA, 0 );
    }
}
Beispiel #14
0
// Capture frames from three cameras, build a column-projected Hann-windowed
// signal from the current frame, FFT it into a ring of kFFTStoreSize rows,
// and phase-correlate the newest row against every stored row; the result is
// shown in the "poc" window. Runs until 'q' is pressed.
// NOTE(review): the capture objects and most images are never released, and
// there is no NULL check on cvCaptureFromCAM/cvQueryFrame — review before
// production use.
int main( int argc, char** argv )
{
    CvSize imgSize;                 
    imgSize.width = 320; 
    imgSize.height = 240; 
	
	int key= -1; 
	
	// set up opencv capture objects

    CvCapture* capture= cvCaptureFromCAM(0); 
	cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH, 320);
	cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT, 240);
	
    CvCapture* capture2= cvCaptureFromCAM(1); 
	cvSetCaptureProperty(capture2, CV_CAP_PROP_FRAME_WIDTH, 320);
	cvSetCaptureProperty(capture2, CV_CAP_PROP_FRAME_HEIGHT, 240);

    CvCapture* capture3= cvCaptureFromCAM(2); 
	cvSetCaptureProperty(capture3, CV_CAP_PROP_FRAME_WIDTH, 320);
	cvSetCaptureProperty(capture3, CV_CAP_PROP_FRAME_HEIGHT, 240);

    
	// allocate image storage (other createimage specifiers: IPL_DEPTH_32F, IPL_DEPTH_8U)
	
    IplImage* colourImage  = cvCloneImage(cvQueryFrame(capture)); 
    IplImage* greyImage    = cvCreateImage(cvGetSize(colourImage), IPL_DEPTH_8U, 1); 
    IplImage* hannImage    = cvCloneImage(greyImage); 
	IplImage *poc= cvCreateImage( cvSize( greyImage->width, kFFTStoreSize ), IPL_DEPTH_64F, 1 );
	IplImage *pocdisp= cvCreateImage( cvSize( greyImage->width, kFFTStoreSize ), IPL_DEPTH_8U, 1 );
	
	// set up opencv windows
	
    cvNamedWindow("hannImage", 1);
    cvNamedWindow("greyImage", 1); 
    cvNamedWindow("greyImage2", 1); 
    cvNamedWindow("greyImage3", 1); 
    cvNamedWindow("poc", 1);
	cvMoveWindow("greyImage", 40, 0);
	cvMoveWindow("hannImage", 40, 270);
	cvMoveWindow("poc", 365, 0);
	cvMoveWindow("greyImage2", 40, 540);
	cvMoveWindow("greyImage3", 365, 540);
	
	// set up storage for fftw
	
	fftw_complex *fftwSingleRow = ( fftw_complex* )fftw_malloc( sizeof( fftw_complex ) * kFFTWidth * 1 );
	fftw_complex *fftwSingleRow2 = ( fftw_complex* )fftw_malloc( sizeof( fftw_complex ) * kFFTWidth * 1 );
	fftw_complex *fftwStore = ( fftw_complex* )fftw_malloc( sizeof( fftw_complex ) * kFFTWidth * kFFTStoreSize );
		
	// loop
	
    while(key != 'q') 
	{ 

		//		double t = (double)cvGetTickCount();
		//		printf( "%g ms: start.\n", (cvGetTickCount() - t)/((double)cvGetTickFrequency()*1000.));

		// capture a frame, convert to greyscale, and show it
		
		cvCopyImage(cvQueryFrame(capture), colourImage);  // cvCopy because both are allocated already!
		cvCvtColor(colourImage,greyImage,CV_BGR2GRAY); 
		cvShowImage("greyImage",greyImage); 

        cvCopyImage(cvQueryFrame(capture2), colourImage);  // cvCopy because both are allocated already!
		cvCvtColor(colourImage,greyImage,CV_BGR2GRAY); 
		cvShowImage("greyImage2",greyImage); 

        cvCopyImage(cvQueryFrame(capture3), colourImage);  // cvCopy because both are allocated already!
		cvCvtColor(colourImage,greyImage,CV_BGR2GRAY); 
		cvShowImage("greyImage3",greyImage);
		// NOTE(review): greyImage is overwritten by each camera in turn, so all
		// processing below operates on the LAST camera's frame only.

        
        key = cvWaitKey(3);

		// project and calculate hann window
		
		int i, j, k;
		uchar 	*inData= ( uchar* ) greyImage->imageData;
		uchar 	*hannImageData= ( uchar* ) hannImage->imageData;
		unsigned long acc;
		
		for( j = 0 ; j < greyImage->width ; j++) {
			
			// sum input column
			
			acc= 0;
			for( i = 0; i < greyImage->height ; i++ ) {
				acc+= inData[i * greyImage->widthStep + j];
			}
			
			// hann window and output
			// NOTE(review): hannMultiplier depends only on j, so it is
			// loop-invariant here and could be hoisted above this loop.
			
			for( i = 0; i < 240 ; i++ ) {
				double hannMultiplier = 0.5 * (1 - cos(2*3.14159*j/(greyImage->width-1)));  // hann window coefficient
				hannImageData[i * hannImage->widthStep + j]=  hannMultiplier * (acc/greyImage->height);
			}
			
		}

		cvShowImage("hannImage",hannImage); 

		// set up forward FFT into store plan
		
		fftw_plan fft_plan = fftw_plan_dft_2d( 1 , kFFTWidth, fftwSingleRow, &(fftwStore[kFFTWidth * pocline]), FFTW_FORWARD,  FFTW_ESTIMATE );
				
		// load data for fftw
		// NOTE(review): this reads only row 0 of hannImage (index j, no row
		// stride); all rows of hannImage hold the same values, so this works,
		// but confirm it is intentional.
		
		for( int j = 0 ; j < kFFTWidth ; j++) {
			fftwSingleRow[j][0] = ( double )hannImageData[j];
			fftwSingleRow[j][1] = 0.0;
		}
		
		// run and release plan
		
		fftw_execute( fft_plan );
		fftw_destroy_plan( fft_plan );

		// compare pocline against ALL OTHER IN STORE

		for( int j = 0 ; j < kFFTStoreSize ; j++) {
			
			fftw_complex *img1= &(fftwStore[kFFTWidth * pocline]);
			fftw_complex *img2= &(fftwStore[kFFTWidth * j]);
			
			// obtain the cross power spectrum
			for( int i = 0; i < kFFTWidth ; i++ ) {
				
				// complex multiply complex img2 by complex conjugate of complex img1
				
				fftwSingleRow[i][0] = ( img2[i][0] * img1[i][0] ) - ( img2[i][1] * ( -img1[i][1] ) );
				fftwSingleRow[i][1] = ( img2[i][0] * ( -img1[i][1] ) ) + ( img2[i][1] * img1[i][0] );
				
				// set tmp to (real) absolute value of complex number res[i]
				
				double tmp = sqrt( pow( fftwSingleRow[i][0], 2.0 ) + pow( fftwSingleRow[i][1], 2.0 ) );
				
				// complex divide res[i] by (real) absolute value of res[i]
				// (this is the normalization step)
				
				if(tmp == 0) {
					fftwSingleRow[i][0]= 0;
					fftwSingleRow[i][1]= 0;
				}
				else {
					fftwSingleRow[i][0] /= tmp;
					fftwSingleRow[i][1] /= tmp;
				}
			}
				
			// run inverse
			
			fft_plan = fftw_plan_dft_2d( 1 , kFFTWidth, fftwSingleRow, fftwSingleRow2, FFTW_BACKWARD,  FFTW_ESTIMATE );
			fftw_execute(fft_plan);
			fftw_destroy_plan( fft_plan );

			// normalize and copy to result image

			double 	*poc_data = ( double* )poc->imageData;
			
			for( int k = 0 ; k < kFFTWidth ; k++ ) {
				poc_data[k+(j*kFFTWidth)] = (fftwSingleRow2[k][0] / ( double )kFFTWidth);
			}
				
			
		}
		
		
		

		// inc pocline
		// NOTE(review): wrapping at kFFTStoreSize-1 means the last store row
		// (index kFFTStoreSize-1) is never written — confirm intent.
		
		pocline++;
		if(pocline == kFFTStoreSize-1)
			pocline= 0;
		
		
		// display??
		

//		for(int i = 0 ; i < kFFTWidth ; i++ ) {
//			poc_data[i+(pocline*kFFTWidth)] = (fftwStore[(kFFTWidth * pocline)+i])[1];
//		}
		
		// find the maximum value and its location
		CvPoint minloc, maxloc;
		double  minval, maxval;
		cvMinMaxLoc( poc, &minval, &maxval, &minloc, &maxloc, 0 );
		
		// print it
//		printf( "Maxval at (%d, %d) = %2.4f\n", maxloc.x, maxloc.y, maxval );
		
//        cvConvertScale(dft_re,dft_orig,255,0); //255.0*(max-min),0);

        
        
		cvCvtScale(poc, pocdisp, (1.0/(maxval/2))*255, 0);
		
		cvShowImage("poc",pocdisp);
		
		
		// set up fftw plans
//		fftw_plan fft_plan = fftw_plan_dft_2d( 1 , kFFTWidth, img2, img2, FFTW_FORWARD,  FFTW_ESTIMATE );
//		fftw_plan ifft_plan = fftw_plan_dft_2d( 1 , kFFTWidth, res,  res,  FFTW_BACKWARD, FFTW_ESTIMATE );
		
		
		
		// TODO FROM HERE
		
		/*
		
		if(key == 'r') {
			cvReleaseImage(&ref);
			ref= cvCloneImage(testOutImage);
			cvShowImage("ref",ref); 
		}
		
		
		
		{  // try phase correlating full img
			
			tpl= cvCloneImage(testOutImage);
			//				ref= cvCloneImage(testOutImage);
//				cvShowImage("tpl",tpl); 
//				cvShowImage("ref",ref); 
			
			
			if(ref == 0)
				continue;
			
			if( ( tpl->width != ref->width ) || ( tpl->height != ref->height ) ) {
				fprintf( stderr, "Both images must have equal width and height!\n" );
				continue
				;
			}
			
			// get phase correlation of input images
			
			phase_correlation( ref, tpl, poc );
			
			// find the maximum value and its location
			CvPoint minloc, maxloc;
			double  minval, maxval;
			cvMinMaxLoc( poc, &minval, &maxval, &minloc, &maxloc, 0 );
			
			// print it
			printf( "Maxval at (%d, %d) = %2.4f\n", maxloc.x, maxloc.y, maxval );
			
			cvCvtScale(poc, pocdisp, 1.0/(maxval/2), 0);
			
			cvShowImage("poc",pocdisp);
			
			cvReleaseImage(&tpl);
		
			
		}*/

//			cvReleaseImage(&ref);
//			ref= cvCloneImage(testOutImage);

//			printf( "%g ms: done.\n", (cvGetTickCount() - t)/((double)cvGetTickFrequency()*1000.));
			

	} 
	
	
	cvReleaseImage(&poc);

	
	return 0;
}
Beispiel #15
0
// Homomorphic-filter style illumination preprocessing:
// shift the DFT quadrants, take the (negated) log, transform to the
// frequency domain, multiply by a high-/low-frequency emphasis filter,
// inverse-transform, exponentiate, and shift back.
//
// src  - input image (ROI honored when set)
// dest - output image, 32F, receives the filtered real plane
void CLightSet::RunLightPrep(IplImage* src,IplImage* dest)
{
	int M,N;
	M=0;
	N=0;
	if (src->roi)
	{
		 M = src->roi->width;
		 N = src->roi->height;
	}
	else
	{
		 M = src->width;
		 N = src->height;
	}

	CvMat *matD; // create mat for meshgrid frequency matrices
	matD = cvCreateMat(M,N,CV_32FC1);

	CDM(M,N,matD);

	CvMat *matH;
	matH = cvCreateMat(M,N,CV_32FC1); // mat for lowpass filter

	float D0 = 10.0;   // filter cutoff
	float rH,rL,c;
	rH = 2.0;          // high-frequency gain
	rL = 0.5;          // low-frequency gain
	c  = 1.0;          // slope sharpness
	lpfilter(matD,matH,D0,rH,rL,c);

	IplImage *srcshift; // shift quadrants so the zero frequency sits at the center
	srcshift = cvCloneImage(src);
	cvShiftDFT(srcshift,srcshift);

	IplImage *log, *temp;
	log = cvCreateImage(cvGetSize(src),IPL_DEPTH_32F,1);
	temp = cvCreateImage(cvGetSize(src),IPL_DEPTH_32F,1);

	cvCvtScale(srcshift,temp,1.0,0);
	cvLog(temp,log);
	cvCvtScale(log,log,-1.0,0); // negate the log image

	CvMat *Fourier;
	Fourier = cvCreateMat( M, N, CV_32FC2 );

	fft2(log,Fourier);
	IplImage* image_im;
	image_im = cvCreateImage(cvGetSize(src),IPL_DEPTH_32F,1);

	cvSplit(Fourier,dest,image_im,0,0); // real plane -> dest, imaginary -> image_im

	// Apply the filter to both planes.
	cvMul(dest,matH,dest); 
	cvMul(image_im,matH,image_im);

	IplImage *dst;
	dst  = cvCreateImage(cvGetSize(src),IPL_DEPTH_32F,2);

	cvMerge(dest,image_im,0,0,dst);
	cvDFT(dst,dst,CV_DXT_INV_SCALE); // inverse transform, scaled

	cvExp(dst,dst); // undo the log

	cvZero(dest);
	cvZero(image_im);

	cvSplit(dst,dest,image_im,0,0); 
	// shift quadrants back so the image displays in its original layout
	cvShiftDFT(dest,dest);

	// NOTE(review): min/max are computed but no normalization is applied —
	// this looks like an unfinished step.
	double max,min;
	cvMinMaxLoc(dest,&min,&max,NULL,NULL);

	cvReleaseImage(&image_im);
	cvReleaseImage(&srcshift);
 	cvReleaseImage(&dst);	
	cvReleaseImage(&log);
	cvReleaseImage(&temp);
	cvReleaseMat(&matD);
	cvReleaseMat(&matH);
	// BUG FIX: the Fourier matrix was leaked in the original.
	cvReleaseMat(&Fourier);
}
// Update Motion History Image: Calculate motion features and orientation.
//   image             - input video frame (BGR)
//   destination_image - optional visualization output (may be NULL)
//   motionInfo        - receives global and per-segment motion statistics
// Relies on file-scope globals: image_buffer, last_index, silhouette, mhi,
// orientation_mask, orientation, segment_mask, storage, and the constants
// N, DIFFERENCE_THRESHOLD, MHI_DURATION, MAX_TIME_DELTA, MIN_TIME_DELTA,
// MAX_SEGMENTS_X, MAX_SEGMENTS_Y, MIN_MOTION_FEATURE_AREA.
void motionDetection(IplImage* image, IplImage* destination_image, MotionInfo* motionInfo)
{
    double timestamp = (double)clock()/CLOCKS_PER_SEC; // get current time in seconds
    CvSize image_size = cvSize(image->width, image->height); // get current frame image_size
    int previous_frame_index = last_index, current_frame_index;
    
    initialize_images(image_size);
    
    cvCvtColor(image, image_buffer[last_index], CV_BGR2GRAY); // convert frame to grayscale
    
    current_frame_index = (last_index + 1) % N; // index of (last_index - (N-1))th frame
    last_index = current_frame_index;
    
    silhouette = image_buffer[current_frame_index];
    
    cvAbsDiff(image_buffer[previous_frame_index], image_buffer[current_frame_index], silhouette); // Get difference between frames
    cvThreshold(silhouette, silhouette, DIFFERENCE_THRESHOLD, 1, CV_THRESH_BINARY); // Add threshold
    //cvDilate(silhouette, silhouette, 0, 18);
    //cvErode(silhouette, silhouette, 0, 10);
    
    cvUpdateMotionHistory(silhouette, mhi, timestamp, MHI_DURATION); // Update MHI
    
    // Convert MHI to blue 8U image
    cvCvtScale(mhi, orientation_mask, 255./MHI_DURATION, (MHI_DURATION - timestamp)*255./MHI_DURATION);
    
    if (destination_image) {
      cvZero(destination_image);
      cvCvtPlaneToPix(orientation_mask, 0, 0, 0, destination_image);
    }
    
    // Calculate motion gradient orientation and valid orientation mask
    cvCalcMotionGradient(mhi, orientation_mask, orientation, MAX_TIME_DELTA, MIN_TIME_DELTA, 3);
    
    // motion_feature_sequence = extract_motion_features();
    if(!storage)
        storage = cvCreateMemStorage(0);
    else
        cvClearMemStorage(storage);
    
    CvSeq* motion_feature_sequence = cvSegmentMotion(mhi, segment_mask, storage, timestamp, MAX_TIME_DELTA);
    
    int SEGMENT_WIDTH = image_size.width / MAX_SEGMENTS_X;
    int SEGMENT_HEIGHT = image_size.height / MAX_SEGMENTS_Y;
    
    // Global motion: orientation over the whole frame
    CvRect global_motion_segment = cvRect(0, 0, image_size.width, image_size.height);
    motionInfo->global_angle = calculate_orientation(global_motion_segment, silhouette);
    
    if (destination_image)
      draw_orientation(destination_image, &global_motion_segment, motionInfo->global_angle, 100, CV_RGB(0, 255, 0), true);
    
    long area = 0;
    long totalArea = 0;
    int totalMovingSegments = 0;
    bool hasValidMovement = false;
    CvRect segmentRect;
    
    // Segmented motion: per-tile area, movement flag, and orientation
    for(int x = 0; x < MAX_SEGMENTS_X; x++)
    {
      for(int y = 0; y < MAX_SEGMENTS_Y; y++)
      {
        segmentRect = cvRect(x * SEGMENT_WIDTH, y * SEGMENT_HEIGHT, SEGMENT_WIDTH, SEGMENT_HEIGHT);
        area = calculate_motion(&segmentRect, motion_feature_sequence);
        hasValidMovement = (area > MIN_MOTION_FEATURE_AREA);
        
        motionInfo->segment_motion_areas[x][y] = area;
        motionInfo->segment_movements[x][y] = hasValidMovement;
        motionInfo->segment_angles[x][y] = calculate_orientation(segmentRect, silhouette);
        
        totalArea += area;
        totalMovingSegments += (area > MIN_MOTION_FEATURE_AREA);
        
        //printf("%i, ", area);
        //fflush(stdout);
        
        if (hasValidMovement)
          if (destination_image)
            draw_orientation(destination_image, &segmentRect, motionInfo->segment_angles[x][y], 20, CV_RGB(255, 0, 0), true);
      }
    }
    motionInfo->total_motion_area = totalArea;
    motionInfo->total_segments_with_movements = totalMovingSegments;
    motionInfo->SEGMENTS_X = MAX_SEGMENTS_X;
    motionInfo->SEGMENTS_Y = MAX_SEGMENTS_Y;
    
    // %ld: totalArea is long (the original "%i" was undefined behavior on LP64)
    printf("%ld, %f\n", totalArea, (float)totalArea / (float)(image_size.width*image_size.height));
    //fflush(stdout);
}
Beispiel #17
0
// Updates the global motion history image (MHI) from the current frame and
// draws the detected motion components into dst as circles with direction
// arrows (white = global motion over the whole frame, red = per component).
//   img            - input video frame (BGR)
//   dst            - output visualization image
//   diff_threshold - binarization threshold for the inter-frame difference
// Uses file-scope globals: buf (ring buffer of N grayscale frames), last,
// mhi, orient, segmask, mask, storage, and the constants N, MHI_DURATION,
// MAX_TIME_DELTA, MIN_TIME_DELTA.
void  update_mhi( IplImage* img, IplImage* dst, int diff_threshold )
{
    double timestamp = (double)clock()/CLOCKS_PER_SEC; // current time in seconds
    CvSize size = cvSize(img->width,img->height); // current frame size
    int i, idx1 = last, idx2;
    IplImage* silh;
    CvSeq* seq;
    CvRect comp_rect;
    double count;
    double angle;
    CvPoint center;
    double magnitude;         
    CvScalar color;
 
	// (Re)allocate the work images on first use or when the frame size changes.
	if( !mhi || mhi->width != size.width || mhi->height != size.height ) {
        if( buf == 0 ) {
            buf = (IplImage**)malloc(N*sizeof(buf[0]));
            memset( buf, 0, N*sizeof(buf[0]));
        }

        for( i = 0; i < N; i++ ) {
            cvReleaseImage( &buf[i] );
            buf[i] = cvCreateImage( size, IPL_DEPTH_8U, 1 );
            cvZero( buf[i] );
        }

        cvReleaseImage( &mhi );
        cvReleaseImage( &orient );
        cvReleaseImage( &segmask );
        cvReleaseImage( &mask );

        mhi = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        cvZero( mhi ); // clear the MHI at the beginning
        orient = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        segmask = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        mask = cvCreateImage( size, IPL_DEPTH_8U, 1 );
    }

    cvCvtColor( img, buf[last], CV_BGR2GRAY ); // convert frame to grayscale
    idx2 = (last + 1) % N; // index of the oldest frame in the ring buffer
    last = idx2;
    silh = buf[idx2];
    cvAbsDiff( buf[idx1], buf[idx2], silh ); // difference between frames
    cvThreshold( silh, silh, diff_threshold, 1, CV_THRESH_BINARY ); // binarize it
	cvUpdateMotionHistory( silh, mhi, timestamp, MHI_DURATION ); // update MHI

    // convert MHI to a blue 8u image for display
	cvCvtScale( mhi, mask, 255./MHI_DURATION,
    (MHI_DURATION - timestamp)*255./MHI_DURATION );
    cvZero( dst );
    cvCvtPlaneToPix( mask, 0, 0, 0, dst );

    // calculate motion gradient orientation and valid orientation mask
    cvCalcMotionGradient( mhi, mask, orient, MAX_TIME_DELTA, MIN_TIME_DELTA, 3 );
  
    if( !storage )
        storage = cvCreateMemStorage(0);
    else
        cvClearMemStorage(storage);

    // segment motion: get the sequence of motion components
    seq = cvSegmentMotion( mhi, segmask, storage, timestamp, MAX_TIME_DELTA );

    // iterate through the motion components; the extra iteration (i == -1)
    // corresponds to the whole image (global motion)
    for( i = -1; i < seq->total; i++ ) {
        if( i < 0 ) { // case of the whole image
            comp_rect = cvRect( 0, 0, size.width, size.height );
            color = CV_RGB(255,255,255);
            magnitude = 100;
        }
        else { // i-th motion component
            comp_rect = ((CvConnectedComp*)cvGetSeqElem( seq, i ))->rect;
            if( comp_rect.width + comp_rect.height < 100 ) // reject very small components
                continue;
            color = CV_RGB(255,0,0);
            magnitude = 30;
        }

        // select component ROI on all work images
        cvSetImageROI( silh, comp_rect );
        cvSetImageROI( mhi, comp_rect );
        cvSetImageROI( orient, comp_rect );
        cvSetImageROI( mask, comp_rect );

		angle = cvCalcGlobalOrientation( orient, mask, mhi, timestamp, MHI_DURATION);
        angle = 360.0 - angle;  // adjust for images with top-left origin
        count = cvNorm( silh, 0, CV_L1, 0 ); // number of silhouette points within the ROI
 
        cvResetImageROI( mhi );
        cvResetImageROI( orient );
        cvResetImageROI( mask );
        cvResetImageROI( silh );

        // skip components with little motion
        if( count < comp_rect.width*comp_rect.height * 0.05 )
            continue;
 
        // draw a clock with an arrow indicating the direction
        center = cvPoint( (comp_rect.x + comp_rect.width/2),
                          (comp_rect.y + comp_rect.height/2) );
 
         cvCircle( dst, center, cvRound(magnitude*1.2), color, 3, CV_AA, 0 );
		 cvLine( dst, center, cvPoint( cvRound( center.x + magnitude*cos(angle*CV_PI/180)),
                cvRound( center.y - magnitude*sin(angle*CV_PI/180))), color, 3, CV_AA, 0 );
    }
}
int main( int argc, char* argv[] ) {
srand(time(0));
float one=1.0f;
int oneinc=1;
float zero=0.0f;

float dt=.000001;
float rho=1;
float *B,*tB,*pB,*x,*w,*bb,*ff;
int ii,jj;
const int hh=480;
const int ww=640;
const int m = hh * ww;
const int n = 9;

B=(float*)malloc(m*n*sizeof(float));


// seed matrix B with random values
for (ii=0;ii<m*n;ii++){
	B[ii]=rand();
}

w=(float*)malloc(n*sizeof(float));
x=(float*)malloc(m*sizeof(float));
bb=(float*)malloc(m*sizeof(float));
ff=(float*)malloc(m*sizeof(float));

float *tau;
tau=(float*)malloc(m*sizeof(float));

float  twork=0;
int lwork=-1;
int info;

sgeqrf( &m, &n, B, &m, tau, &twork, &lwork, &info);	
lwork=(int) twork;	
//	printf("\n lwork=%d\n", lwork );		
float *work;
work=(float*)malloc(lwork*sizeof(float));

sgeqrf(&m, &n, B, &m, tau, work, &lwork, &info );
sorgqr(&m, &n, &n, B, &m, tau, work, &lwork, &info );

//cvNamedWindow( "selected location", 1 );
cvNamedWindow( "capture", 1 );
cvNamedWindow( "background?", 1 );
cvNamedWindow( "foreground?", 1 );


CvCapture* capture = cvCreateCameraCapture(1);
if(!capture){
	printf("failed to capture video from usb camera, trying built in camera\n");
	capture = cvCreateCameraCapture(CV_CAP_ANY);
	if(!capture){
		printf("failed to capture video\n");
		return(1);
	}
}   




CvFont font;
cvInitFont(&font, CV_FONT_HERSHEY_SIMPLEX, .5, .5, 0, 1, CV_AA);


IplImage* frame;
frame = cvQueryFrame(capture);

IplImage* outbw = cvCreateImage(cvGetSize(frame),IPL_DEPTH_8U,1);
IplImage* outg = cvCreateImage(cvGetSize(frame),IPL_DEPTH_32F,1);
IplImage* outgs = cvCreateImage(cvSize(ww,hh),IPL_DEPTH_32F,1);
IplImage* outgsb = cvCreateImage(cvSize(ww,hh),IPL_DEPTH_32F,1);
IplImage* outgsf = cvCreateImage(cvSize(ww,hh),IPL_DEPTH_32F,1);

//cvCopy(out,sframe,0);

#ifdef PROFILE_MODE
clock_t start_time(clock());
CStopWatch watch;
watch.startTimer();
#endif

char dtstring[40];
int c;

int names_count=0;
int classno=0;

double sample_percent=.1;
double  rm=double(RAND_MAX);

int use_number;
int* use_index;
use_index=(int*)malloc(m*sizeof(int));

int tcount=0;

int turbo=0;
float ff_l1_norm=0;
while( 1 ) {



    //if (tcount++>4) break;	
    frame = cvQueryFrame(capture);
    if( !frame ) break;
    cvCvtColor(frame,outbw,CV_BGR2GRAY);
    cvCvtScale(outbw,outg,.0039,0);//scale to 1/255
    cvResize(outg,outgs);

    x=(float*)outgs->imageData;

    #ifdef PROFILE_MODE
    //start_time = clock() - start_time;
    watch.stopTimer();
    sprintf(dtstring,"FPS = %.4f", 1.0 / watch.getElapsedTime());
    cvPutText(outgs,dtstring , cvPoint(10, 60), &font, cvScalar(0, 0, 0, 0));
    watch.startTimer();
    //start_time = clock();
    #endif

    sprintf(dtstring,"dt = %.8f",dt);
    cvPutText(outgs,dtstring , cvPoint(10, 40), &font, cvScalar(0, 0, 0, 0));
    cvShowImage("capture", outgs);

    rm=sample_percent*((double)RAND_MAX);
    use_number=0;	
    for (ii=0;ii<m;ii++){
        if (rand()<rm){
            use_index[use_number]=ii;
            use_number++;
        }
    }
    //fprintf(stderr,"use_number=%d\n",use_number);

    if (turbo<5) {
        grasta_step (B,x,w,m,n,dt,rho,20);
    }else{
        grasta_step_subsample (B,x,w,m,n,dt,rho,40,use_index,use_number);
    }

    sgemv("N",&m,&n,&one,B,&m,w,&oneinc,&zero,bb,&oneinc);

    // TODO examine what this loop is actually checking for -Steve
    // Update: looks like checking for changes in the L1 norm
    ff_l1_norm=0;	
    for (ii=0;ii<m;ii++){
        ff[ii]=x[ii]-bb[ii];
        if (fabs(ff[ii])>.05){
            ++ff_l1_norm;
            //ff_l1_norm += fabs(ff[ii]);
        }
    }
    //fprintf(stderr,"%f\n",ff_l1_norm);
    // If more than 60% of the L1 norms have changed
    if (ff_l1_norm>m*.6){
        turbo=0;
    }
    else{
        turbo++;
    }

/*  for(jj=0;jj<m;jj++){	
        g[jj]=g1[jj]-g2[jj];
    }*/


    outgsb->imageData = (char*)bb;
    outgsb->imageDataOrigin = outgsb->imageData;
    cvShowImage( "background?", outgsb);

    outgsf->imageData = (char*)ff;
    outgsf->imageDataOrigin = outgsf->imageData;
    cvNormalize(outgsf, outgsf,1,0,CV_MINMAX);
    cvShowImage( "foreground?", outgsf);

    //printf("%f\n",bb[556]);
    c = cvWaitKey(10);
    //c = cvWaitKey(80);
    if( (char)c == 27 )
        break;
    switch( (char) c )
    {
        case 'm':
            sample_percent=sample_percent+.05;
            printf("sample percent up %.8f \n",sample_percent);
            break;
        case 'l':
            sample_percent=sample_percent-.05;
            printf("sample percent down %.8f \n",sample_percent);
            break;
        case 'u':
            dt=3*dt/2;
            printf("dt up %.8f \n",dt);
            break;
        case 'd':
            dt=2*dt/3;
            printf("dt down %.8f\n",dt );
            break;
        default:
            ;
    }

} // End Frame Loop




free(use_index);
}
// Renders `image` onto the client device context `pDC` inside `rect`.
// If ROI is non-empty, the ROI of `image` is brightness-adjusted IN PLACE:
// the image is converted RGB->HSV, the V channel's mean is shifted to the
// constant YUZHI, and it is converted back; before/after previews are shown
// in separate windows and per-channel means are drawn on the clone image_show.
// NOTE(review): the LPBITMAPINFO returned by CtreateMapInfo appears to never
// be freed -- confirm whether CtreateMapInfo allocates.
void CFRManagerCtrl::ShowImageOnClient(HDC pDC, IplImage* image, CRect rect,CRect ROI,int flags)
{
    char info[100];
    IplImage* image_show=cvCloneImage(image);
    if (ROI.Width()!=0&&ROI.Height()!=0)
    {
        CvRect rc;
        rc.x=ROI.left;
        rc.y=ROI.top;
        rc.height=ROI.Height();
        rc.width=ROI.Width();
        cvSetImageROI(image,rc);

        // "before processing" preview window (window title is a runtime string)
        cvNamedWindow("处理前");
        cvShowImage("处理前",image);

        // convert color space (RGB -> HSV)
        cvCvtColor(image,image,CV_RGB2HSV);
        // split into individual channels
        IplImage* imgChannel[3] = { 0, 0, 0 };

        for (int i=0; i<image->nChannels; i++)
        {
            imgChannel[i] = cvCreateImage( cvGetSize( image ), IPL_DEPTH_8U, 1 );  // single-channel images are required for histogram equalization
        }

        cvSplit( image, imgChannel[0], imgChannel[1], imgChannel[2],0);//HSVA

        // draw the pre-adjustment per-channel means onto the clone
        CvFont font;
        cvInitFont( &font,CV_FONT_HERSHEY_PLAIN,1, 1, 0, 1, 8);
        for (int i=0; i<image->nChannels; i++)
        {
            CvScalar avg=cvAvg(imgChannel[i]);
            memset(info,'\0',100);
            sprintf(info,"%f",avg.val[0]);

            cvPutText(image_show, info , cvPoint(0,20*(i+1)), &font, CV_RGB(255,0,0));
        }

// 		CvScalar avg=cvAvg(image);
// 		memset(info,'\0',100);
// 		sprintf(info,"%f",avg.val[0]);
// 		CvFont font;
// 		cvInitFont( &font,CV_FONT_HERSHEY_PLAIN,1, 1, 0, 1, 8);
// 		cvPutText(image_show, info , cvPoint(0,20), &font, CV_RGB(255,0,0));

        /*cvCvtScale(image,image,1.0,100-avg.val[0]);*/
        // shift the V (brightness) channel so its mean becomes YUZHI
        CvScalar avg=cvAvg(imgChannel[2]);
        cvCvtScale(imgChannel[2],imgChannel[2],1.0,YUZHI-avg.val[0]);
        cvMerge( imgChannel[0], imgChannel[1], imgChannel[2], 0, image );
        // draw the post-adjustment per-channel means below the first set
        for (int i=0; i<image->nChannels; i++)
        {
            CvScalar avg=cvAvg(imgChannel[i]);
            memset(info,'\0',100);
            sprintf(info,"%f",avg.val[0]);

            cvPutText(image_show, info , cvPoint(0,20*(i+4)), &font, CV_RGB(255,0,0));
        }

        // convert back and show the "after processing" preview window
        cvCvtColor(image,image,CV_HSV2RGB);
        cvNamedWindow("处理后");
        cvShowImage("处理后",image);

        for (int i=0; i<image->nChannels; i++)
        {
            cvReleaseImage(&imgChannel[i] );
        }
// 		avg=cvAvg(image);
// 		memset(info,'\0',100);
// 		sprintf(info,"%f",avg.val[0]);
// 		cvPutText(image_show, info , cvPoint(0,40), &font, CV_RGB(255,0,0));
    }

    // blit the (annotated) clone onto the client DC
    char* imagedata=image_show->imageData;
    LPBITMAPINFO lpbitm;
    lpbitm=CtreateMapInfo(image_show,flags);
    StretchDIBits(pDC,
                  rect.left,rect.top,rect.Width(),rect.Height(),
                  0,0,image_show->width,image_show->height,
                  imagedata,lpbitm,DIB_RGB_COLORS,SRCCOPY);
    cvReleaseImage(&image_show);
    //pDC.Rectangle(rect);
}
Beispiel #20
0
// parameters:
//  img            - input video frame
//  dst            - resultant motion picture
//  diff_threshold - binarization threshold for the inter-frame difference
// Uses file-scope globals: buf, last, mhi, orient, segmask, mask, storage,
// and the constants N, MHI_DURATION, MAX_TIME_DELTA, MIN_TIME_DELTA.
void update_mhi(IplImage* img, IplImage* dst, int diff_threshold)
{
    // get the current time
    double timestamp = (double)clock()/CLOCKS_PER_SEC; // get current time in seconds

    // get the current frame size
    CvSize size = cvSize(img->width,img->height); // get current frame size
    int i, idx1 = last, idx2;
    IplImage* silh;
    CvSeq* seq;
    CvRect comp_rect;
    double count;
    double angle;
    CvPoint center;
    double magnitude;          
    CvScalar color;

    // allocate images at the beginning or reallocate when the frame size changes
    // allocate images at the beginning or
    // reallocate them if the frame size is changed
    if (!mhi || mhi->width != size.width || mhi->height != size.height) 
    {
        if (buf == 0) 
        {
            buf = (IplImage**)malloc(N*sizeof(buf[0]));
            memset( buf, 0, N*sizeof(buf[0]));
        }
        
        for( i = 0; i < N; i++ ) {
            cvReleaseImage( &buf[i] );
            buf[i] = cvCreateImage( size, IPL_DEPTH_8U, 1 );
            cvZero( buf[i] );
        }
        cvReleaseImage( &mhi );
        cvReleaseImage( &orient );
        cvReleaseImage( &segmask );
        cvReleaseImage( &mask );
        
        mhi = cvCreateImage( size, IPL_DEPTH_32F, 1 );

        // clear the MHI at the beginning
        cvZero( mhi ); // clear MHI at the beginning

		// create the work images with the same size as img
        orient = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        segmask = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        mask = cvCreateImage( size, IPL_DEPTH_8U, 1 );
    }

    // convert to grayscale
    cvCvtColor( img, buf[last], CV_BGR2GRAY ); // convert frame to grayscale

    idx2 = (last + 1) % N; // index of (last - (N-1))th frame
    last = idx2;

    silh = buf[idx2];

    // difference between the two frames, stored into silh
    cvAbsDiff(buf[idx1], buf[idx2], silh); // get difference between frames

    // binarize
    cvThreshold(silh, silh, diff_threshold, 1, CV_THRESH_BINARY); // and threshold it

    // use the silhouette to update the motion history image
    cvUpdateMotionHistory(silh, mhi, timestamp, MHI_DURATION); // update MHI

    // convert the MHI to a blue 8-bit image
    // convert MHI to blue 8u image
    cvCvtScale( mhi, mask, 255./MHI_DURATION,
                (MHI_DURATION - timestamp)*255./MHI_DURATION );
    cvZero( dst );
    cvCvtPlaneToPix( mask, 0, 0, 0, dst );

    // calculate the gradient orientation of the MHI
    // and the valid orientation mask
    // calculate motion gradient orientation and valid orientation mask
    cvCalcMotionGradient( mhi, mask, orient, MAX_TIME_DELTA, MIN_TIME_DELTA, 3 );
    
    if( !storage )
        storage = cvCreateMemStorage(0);
    else
        cvClearMemStorage(storage);

    // segment the whole motion into independent components;
    // segmask marks the motion-component map and is not used further
    // segment motion: get sequence of motion components
    // segmask is marked motion components map. It is not used further
    seq = cvSegmentMotion(mhi, segmask, storage, timestamp, MAX_TIME_DELTA);

	// loop over the motion components;
    // one extra iteration (i == -1)
    // corresponds to the whole image (global motion)
    // iterate through the motion components,
    // One more iteration (i == -1) corresponds to the whole image (global motion)
    for (i = -1; i < seq->total; i++) 
    {

        if (i < 0) 
        { 
            // global-motion case
            // case of the whole image
            // use the whole frame as the component rectangle
            comp_rect = cvRect( 0, 0, size.width, size.height );

			// draw global motion in white
			color = CV_RGB(255,255,255);

			// arrow magnitude 100 for global motion
            magnitude = 100;
        }
        else 
        { 
            // i-th motion component
            // i-th motion component
            // bounding rectangle of this motion component
            comp_rect = ((CvConnectedComp*)cvGetSeqElem( seq, i ))->rect;

			// discard very small components
			if( comp_rect.width + comp_rect.height < 100 ) // reject very small components
                continue;

			// draw per-component motion in red
			color = CV_RGB(255,0,0);

			// arrow magnitude 30 for components
            magnitude = 30;
        }

        // select the component region of interest
        // select component ROI
        cvSetImageROI( silh, comp_rect );
        cvSetImageROI( mhi, comp_rect );
        cvSetImageROI( orient, comp_rect );
        cvSetImageROI( mask, comp_rect );

        // compute the global motion direction within the selected region;
        // each component's direction is computed from the mask extracted
        // for that component
        // calculate orientation
        angle = cvCalcGlobalOrientation( orient, mask, mhi, timestamp, MHI_DURATION);

        // adjust the angle for images with a top-left origin
        angle = 360.0 - angle;  // adjust for images with top-left origin

        // L1 norm of the silhouette ROI =
        // number of moving points within the component
        count = cvNorm( silh, 0, CV_L1, 0 ); // calculate number of points within silhouette ROI

        cvResetImageROI( mhi );
        cvResetImageROI( orient );
        cvResetImageROI( mask );
        cvResetImageROI( silh );

        // skip the case of little motion
        // check for the case of little motion
        if (count < comp_rect.width*comp_rect.height * 0.05)
        {
            continue;
        }

        // draw a clock with an arrow indicating the direction
        // draw a clock with arrow indicating the direction
        center = cvPoint( (comp_rect.x + comp_rect.width/2),
                          (comp_rect.y + comp_rect.height/2) );

        cvCircle( dst, center, cvRound(magnitude*1.2), color, 3, CV_AA, 0 );
        cvLine( dst, center, cvPoint( cvRound( center.x + magnitude*cos(angle*CV_PI/180)),
                cvRound( center.y - magnitude*sin(angle*CV_PI/180))), color, 3, CV_AA, 0 );
    }
}