コード例 #1
0
// Compare `image` against the image stored at `file`.
// Returns the number of differing pixels (the three colour channels are
// averaged), or 1 if the file cannot be loaded.
// NOTE(review): the load-failure return of 1 is indistinguishable from
// "exactly one pixel differs" for callers that only check the count.
static int CheckImage(IplImage* image, char* file, char* /*funcname*/)
{
    //printf("loading %s\n", file );
    IplImage* read = cvLoadImage( file, 1 );  // 1 = force 3-channel colour load

    if( !read )
    {
        trsWrite( ATS_CON | ATS_LST, "can't read image\n" );
        return 1;
    }

    int err = 0;

#if 0
    // Interactive debugging aid: visualise the per-pixel difference.
    {
        IplImage* temp = cvCloneImage( read );
        cvAbsDiff( image, read, temp );
        cvThreshold( temp, temp, 0, 255, CV_THRESH_BINARY );
        cvvNamedWindow( "Original", 0 );
        cvvNamedWindow( "Diff", 0 );
        cvvShowImage( "Original", read );
        cvvShowImage( "Diff", temp );
        cvvWaitKey(0);
        cvvDestroyWindow( "Original" );
        cvvDestroyWindow( "Diff" );
    }
#endif

    // Mark every differing byte with 1, then count via the L1 norm;
    // dividing by 3 averages over the three colour channels.
    cvAbsDiff( image, read, read );
    cvThreshold( read, read, 0, 1, CV_THRESH_BINARY );
    err = cvRound( cvNorm( read, 0, CV_L1 ))/3;

    cvReleaseImage( &read );
    return err;
}
コード例 #2
0
// Three-frame differencing: motion mask = (|f1-f2| AND |f2-f3|) > 50,
// dilated to close small holes.  Returns the member-owned result image
// (callers must not release it), or NULL until three frames are available.
// Scratch buffers are lazily cloned from the first frame's format.
IplImage* CMotionDetector::calculus() {
	if (m_pImageTrois == NULL) return NULL;  // need a full triple of frames
	if (m_pImageUnDeux == NULL) m_pImageUnDeux = cvCloneImage(m_pImageUn);
	if (m_pImageDeuxTrois == NULL) m_pImageDeuxTrois = cvCloneImage(m_pImageUn);
	if (m_pImageResult == NULL) m_pImageResult = cvCloneImage(m_pImageUn);

	cvAbsDiff(m_pImageUn, m_pImageDeux, m_pImageUnDeux);        // |f1 - f2|
	cvAbsDiff(m_pImageDeux, m_pImageTrois, m_pImageDeuxTrois);  // |f2 - f3|
	cvAnd(m_pImageUnDeux, m_pImageDeuxTrois, m_pImageResult);   // motion present in both pairs
	cvThreshold(m_pImageResult, m_pImageResult, 50, 255, CV_THRESH_BINARY);
	cvDilate(m_pImageResult, m_pImageResult, 0, 4);  // 4 iterations, default 3x3 kernel
	emit calculusNewImage(m_pImageResult);           // Qt signal to listeners
	return m_pImageResult;

}
コード例 #3
0
/* Standard Deviation */
// Per-pixel sample standard deviation of mpFrame[0..mFrameNumber-1]
// around the background model m_imgBackgroundModel.
// Result is written to (and returned as) the member image
// m_imgStandardDeviation (8-bit, saturating conversion).
IplImage* motionDetection::getStandardDeviationFrame(void) {

	// Initialize: accumulate the sum of squared deviations in float precision
	cvZero(mSum);
	for (int i = 0; i < mFrameNumber; ++i) {
		// mTmp8U <= | frame[i] - Background Model |
		cvAbsDiff(mpFrame[i], m_imgBackgroundModel, mTmp8U);
		// uchar->float
		cvConvert(mTmp8U, mTmp);
		// mTmp = mTmp * mTmp
		cvPow(mTmp, mTmp, 2.0);
		// mSum += mTmp
		cvAdd(mSum, mTmp, mSum);
	}

	// sample variance: mTmp <= mSum / (mFrameNumber-1)
	// cvConvertScale replaces the original hand-rolled per-pixel loop:
	// same arithmetic, one call.
	// NOTE(review): assumes mFrameNumber >= 2 — confirm with callers.
	cvConvertScale(mSum, mTmp, 1.0 / (mFrameNumber - 1), 0);

	// standard deviation
	cvPow(mTmp, mTmp, 0.5);

	// float->uchar (saturating)
	cvConvert(mTmp, m_imgStandardDeviation);

	return m_imgStandardDeviation;
}
コード例 #4
0
// Background-subtraction object detector for one colour frame.
//   image      - input BGR frame
//   pBkImg     - background rendered as an 8-bit image (output, for display)
//   pFrImg     - binary foreground mask (output)
//   pFrameMat / pBkMat / pFrMat - float working matrices
//                (current frame / background model / difference)
//   thre_limit - binarisation threshold
// Side effect: increments the global frame counter nFrmNum and shows the
// foreground mask in a window.
void detect_object(IplImage *image, IplImage *pBkImg, IplImage *pFrImg, CvMat *pFrameMat, CvMat *pBkMat, CvMat *pFrMat,int thre_limit)
{
	nFrmNum++;
	cvCvtColor(image, pFrImg, CV_BGR2GRAY);
	cvConvert(pFrImg, pFrameMat);
	// Gaussian smoothing (3x3) to suppress sensor noise
	cvSmooth(pFrameMat, pFrameMat, CV_GAUSSIAN, 3, 0, 0);
	// absolute difference between the current frame and the background
	cvAbsDiff(pFrameMat, pBkMat, pFrMat);
	// binarise the foreground image
	cvThreshold(pFrMat, pFrImg,thre_limit, 255.0, CV_THRESH_BINARY);

	/* morphological filtering */
	// NOTE(review): if the custom-kernel variant below is re-enabled, the
	// kernel must be freed with cvReleaseStructuringElement, not `delete`.
	//IplConvKernel* element = cvCreateStructuringElementEx(2, 2, 0, 0, CV_SHAPE_RECT);
	//cvErode(pFrImg, pFrImg,element, 1);	// erode
	//delete element;

	//element = cvCreateStructuringElementEx(2, 2, 1, 1, CV_SHAPE_RECT);
	//cvDilate(pFrImg, pFrImg, element, 1);	// dilate
	//delete element;
	cvErode(pFrImg, pFrImg,0, 1);	// erode  (default 3x3 kernel)
	cvDilate(pFrImg, pFrImg,0, 1);	// dilate (morphological open overall)

	// running-average background update (learning rate 0.004)
	cvRunningAvg(pFrameMat, pBkMat, 0.004, 0);
	// convert the background matrix back to image format for display
	cvConvert(pBkMat, pBkImg);

	// NOTE(review): the window is named "background" but shows the
	// foreground mask; the commented-out call below shows the background.
	cvShowImage("background", pFrImg);
//	cvShowImage("background", pBkImg);
}
コード例 #5
0
//--------------------------------------------------------------------------------
// this = |mom - dad|.  Allocates this image to mom's dimensions when not
// yet allocated; all three ROIs must share the same size.
void ofxCvGrayscaleImage::absDiff( ofxCvGrayscaleImage& mom,
                                   ofxCvGrayscaleImage& dad ) {

	// Both source images must exist before we touch them.
	if( !mom.bAllocated ){
		ofLogError("ofxCvGrayscaleImage") << "absDiff(): first source image (mom) not allocated";
		return;	
	}
	if( !dad.bAllocated ){
		ofLogError("ofxCvGrayscaleImage") << "absDiff(): second source image (dad) not allocated";
		return;	
	}	
	if( !bAllocated ){
		ofLogNotice("ofxCvGrayscaleImage") << "absDiff(): allocating to match dimensions: "
			<< mom.getWidth() << " " << mom.getHeight();
		allocate(mom.getWidth(), mom.getHeight());
	}

	// cvAbsDiff requires all three ROIs to agree in size.
	ofRectangle roi = getROI();
	ofRectangle momRoi = mom.getROI();
	ofRectangle dadRoi = dad.getROI();
	bool momMatches = momRoi.width == roi.width && momRoi.height == roi.height;
	bool dadMatches = dadRoi.width == roi.width && dadRoi.height == roi.height;
	if( !momMatches || !dadMatches ){
		ofLogError("ofxCvGrayscaleImage") << "absDiff(): source image size mismatch between first (mom) & second (dad) image";
		return;
	}

	cvAbsDiff( mom.getCvImage(), dad.getCvImage(), cvImage );
	flagImageChanged();
}
コード例 #6
0
// Two-frame differencing OR-combined with an external foreground mask.
//   image      - current frame (colour)
//   image_pass - previous frame (updated to `image` on exit)
//   res0       - colour scratch for |image - image_pass|
//   res        - grayscale, thresholded difference (output)
//   pFrImg     - additional foreground mask to OR in
//   pFrame     - combined binary mask (output, zeroed first)
//   thre_limit - binarisation threshold
//
// Fix: pixels were addressed as imageData[i*width + j], which ignores the
// IplImage row stride (widthStep).  Rows are 4-byte aligned, so for widths
// that are not a multiple of 4 the old code read/wrote the wrong pixels.
void frame_dif(IplImage* image, IplImage* image_pass, IplImage* res,IplImage* res0, IplImage* pFrImg,IplImage* pFrame,int thre_limit)
{
	cvZero(pFrame);

	cvAbsDiff(image, image_pass, res0);
	cvCvtColor(res0, res, CV_RGB2GRAY);
	cvThreshold(res, res, thre_limit, 255, CV_THRESH_BINARY);

	int width = pFrame->width;
	int height = pFrame->height;
	for (int i = 0; i < height; i++)
	{
		// Address each row through widthStep to honour row padding.
		const unsigned char* resRow = (const unsigned char*)(res->imageData + i * res->widthStep);
		const unsigned char* frRow  = (const unsigned char*)(pFrImg->imageData + i * pFrImg->widthStep);
		unsigned char* outRow = (unsigned char*)(pFrame->imageData + i * pFrame->widthStep);
		for (int j = 0; j < width; j++)
		{
			// Union of frame-difference motion and the external mask.
			if (resRow[j] == 255 || frRow[j] == 255)
				outRow[j] = 255;
		}
	}

	// Current frame becomes "previous" for the next call.
	cvCopy(image, image_pass, NULL);

}
コード例 #7
0
ファイル: motion_detect.c プロジェクト: sumitsrv/vk
// Show the per-pixel absolute difference of the first HSV channel (hue)
// of img1 and img2 in the "video" window.  imgsize supplies dimensions only.
void x(IplImage *img1, IplImage *img2, IplImage *imgsize)
{
	IplImage *imggray1;
    IplImage *imggray2;
    IplImage *imggray3;
    
	// grayscale buffers
    imggray1 = cvCreateImage( cvSize( imgsize->width, imgsize->height ), IPL_DEPTH_8U, 1);
    imggray2 = cvCreateImage( cvSize( imgsize->width, imgsize->height ), IPL_DEPTH_8U, 1);
    imggray3 = cvCreateImage( cvSize( imgsize->width, imgsize->height ), IPL_DEPTH_8U, 1);
	
	IplImage *hsv1 = cvCloneImage(img1);
	IplImage *hsv2 = cvCloneImage(img2);
	
	// NOTE(review): this grayscale conversion is immediately overwritten by
	// the channel copy below — it appears redundant (same for imggray1).
	cvCvtColor( img2, imggray2, CV_RGB2GRAY );
    // NOTE(review): img1 is converted into hsv2 (a clone of img2) and img2
    // into hsv1 below — the 1/2 pairing looks swapped; confirm intent.
    cvCvtColor(img1, hsv2, CV_BGR2HSV);
    cvSetImageCOI(hsv2, 1);        // select channel 1 (hue)
    cvCopy(hsv2, imggray2, 0);     // imggray2 <= hue of img1
    // convert rgb to grayscale
    cvCvtColor( img1, imggray1, CV_RGB2GRAY );
    cvCvtColor(img2, hsv1, CV_BGR2HSV);
    cvSetImageCOI(hsv1, 1);
    cvCopy(hsv1, imggray1, 0);     // imggray1 <= hue of img2
    // compute difference
    cvAbsDiff( imggray1, imggray2, imggray3 );
    cvShowImage( "video", imggray3 );    
    cvReleaseImage(&imggray1);
    cvReleaseImage(&imggray2);
    cvReleaseImage(&imggray3);
    cvReleaseImage(&hsv1);
    cvReleaseImage(&hsv2);
}
コード例 #8
0
ファイル: gstmotiondetect.c プロジェクト: ekelly30/stb-tester
/* Decide whether the current frame differs from the reference frame by
 * more than the noise threshold, inside the given mask.
 * The reference image is consumed: it is overwritten in place with the
 * thresholded, eroded difference mask. */
static gboolean gst_motiondetect_apply (
    IplImage * cvReferenceImage, const IplImage * cvCurrentImage,
    const IplImage * cvMaskImage, float noiseThreshold)
{
  IplImage *cvAbsDiffImage = cvReferenceImage;  /* in-place difference */
  int threshold = (int) ((1 - noiseThreshold) * 255);
  IplConvKernel *kernel = cvCreateStructuringElementEx (3, 3, 1, 1,
      CV_SHAPE_ELLIPSE, NULL);
  double maxVal = -1.0;

  cvAbsDiff (cvReferenceImage, cvCurrentImage, cvAbsDiffImage);
  cvThreshold (cvAbsDiffImage, cvAbsDiffImage, threshold, 255,
      CV_THRESH_BINARY);
  /* One erosion with a 3x3 ellipse removes isolated noise pixels. */
  cvErode (cvAbsDiffImage, cvAbsDiffImage, kernel, 1);

  cvReleaseStructuringElement (&kernel);

  /* Any surviving non-zero pixel inside the mask counts as motion. */
  cvMinMaxLoc (cvAbsDiffImage, NULL, &maxVal, NULL, NULL, cvMaskImage);
  return (maxVal > 0) ? TRUE : FALSE;
}
コード例 #9
0
ファイル: diff.c プロジェクト: kthakore/simcam
// Usage: diff <image1> <image2> <output>
// Writes the grayscale absolute difference |image1 - image2| to <output>.
// Fixes: the original dereferenced argv[1..3] without checking argc,
// never checked the cvLoadImage results (NULL deref in cvCvtColor on a
// missing file), and leaked every image it created.
int main ( int argc, char **argv )
{
  // image data structures
  IplImage *img1;
  IplImage *img2;
  IplImage *imggray1;
  IplImage *imggray2;
  IplImage *imggray3;

  // need exactly: program, two inputs, one output path
  if ( argc < 4 )
    return 1;

  // load image one
  img1 = cvLoadImage( argv[1] );
  if ( !img1 )
    return 1;

  // load image two
  img2 = cvLoadImage( argv[2] );
  if ( !img2 ) {
    cvReleaseImage( &img1 );
    return 1;
  }

  // grayscale buffers, sized like the first image
  imggray1 = cvCreateImage( cvGetSize( img1 ), IPL_DEPTH_8U, 1);
  imggray2 = cvCreateImage( cvGetSize( img1 ), IPL_DEPTH_8U, 1);
  imggray3 = cvCreateImage( cvGetSize( img1 ), IPL_DEPTH_8U, 1);

  // convert rgb to grayscale
  cvCvtColor( img1, imggray1, CV_RGB2GRAY );
  cvCvtColor( img2, imggray2, CV_RGB2GRAY );

  // compute difference
  cvAbsDiff( imggray1, imggray2, imggray3 );

  cvSaveImage( argv[3], imggray3 );

  // release everything we created
  cvReleaseImage( &img1 );
  cvReleaseImage( &img2 );
  cvReleaseImage( &imggray1 );
  cvReleaseImage( &imggray2 );
  cvReleaseImage( &imggray3 );
  return 0;
}
コード例 #10
0
//--------------------------------------------------------------------------------
// this = |mom - dad|.  Allocates this image on demand to mom's dimensions;
// requires all three ROIs to have identical sizes.
void ofxCvGrayscaleImage::absDiff( ofxCvGrayscaleImage& mom,
                                   ofxCvGrayscaleImage& dad ) {

	// Both source images must be allocated first.
	if( !mom.bAllocated ){
		ofLog(OF_LOG_ERROR, "in absDiff, mom needs to be allocated");	
		return;	
	}
	if( !dad.bAllocated ){
		ofLog(OF_LOG_ERROR, "in absDiff, dad needs to be allocated");	
		return;	
	}	
	if( !bAllocated ){
		ofLog(OF_LOG_NOTICE, "in absDiff, allocating to match dimensions");			
		allocate(mom.getWidth(), mom.getHeight());
	}

	// cvAbsDiff needs all three ROIs to share the same dimensions.
	ofRectangle roi = getROI();
	ofRectangle momRoi = mom.getROI();
	ofRectangle dadRoi = dad.getROI();
	bool sizesMatch = momRoi.width == roi.width && momRoi.height == roi.height
	               && dadRoi.width == roi.width && dadRoi.height == roi.height;
	if( sizesMatch ){
		cvAbsDiff( mom.getCvImage(), dad.getCvImage(), cvImage );
		flagImageChanged();
	} else {
		ofLog(OF_LOG_ERROR, "in absDiff, images are different sizes");
	}
}
コード例 #11
0
// Convert the incoming AVFrame to GRAY8 into md->cur (rotating cur/prev),
// then report motion when more than 1% of pixels differ from the previous
// frame by more than the configured threshold.
// Returns 1 if motion was detected, 0 otherwise.
static int detect_motion(struct motion_detection *md, AVFrame *frame) {
  IplImage *tmp;
  AVPicture pict;

  // Rotate buffers: the previous "current" image becomes "previous".
  tmp = md->cur;
  md->cur = md->prev;
  md->prev = tmp;

  // Scale/convert the incoming frame to 8-bit grayscale via libswscale.
  avpicture_fill(&pict, md->buffer, PIX_FMT_GRAY8, md->cam->codec->width, md->cam->codec->height);
  sws_scale(md->img_convert_ctx, (const uint8_t* const*)frame->data, frame->linesize, 0, md->cam->codec->height, (uint8_t* const*)pict.data, pict.linesize);
  memcpy(md->cur->imageData, pict.data[0], md->cur->imageSize);
  // Keep the IplImage row stride in sync with what sws_scale produced.
  md->cur->widthStep = pict.linesize[0];

  cvAbsDiff(md->cur, md->prev, md->silh);
  cvThreshold(md->silh, md->silh, md->cam->threshold, 250, CV_THRESH_BINARY);

  // Count changed pixels ("density" of the motion silhouette),
  // honouring widthStep row padding.
  int density = 0;
  for(int i=0; i < md->silh->height; i++) {
    uint8_t* ptr = (uint8_t*)md->silh->imageData + i * md->silh->widthStep;
    for(int j=0; j < md->silh->width; j++)
      if(*(ptr+j) > 0)
        density += 1;
  }

  // Motion when more than 1% of the frame changed.
  if((float)density / (float)(md->silh->height * md->silh->width) > 0.01) {
    return 1;
  } else {
    return 0;
  }
}
コード例 #12
0
ファイル: chalk.c プロジェクト: roofilin/roofilin
// Load an image, Canny-edge the red and green channels separately, and
// keep edges present in only one of the two channels (absolute difference).
// image/red/green/red_edge/green_edge/edge/final are file-scope globals;
// `low`/`high` are the Canny thresholds.
// NOTE(review): this snippet is truncated below — the remainder of main()
// is not visible here.
int main( int argc, char** argv )
{
    char* filename = argc == 2 ? argv[1] : (char*)"1-small.jpg";

    if( (image = cvLoadImage( filename, CV_LOAD_IMAGE_COLOR)) == 0 )
        return -1;

    //cvNamedWindow("orig", CV_WINDOW_AUTOSIZE);
    //cvShowImage("orig", image);

    // Extract red channel of image
    red = cvCreateImage(cvSize(image->width,image->height), IPL_DEPTH_8U, 1);
    green = cvCreateImage(cvSize(image->width,image->height), IPL_DEPTH_8U, 1);
    // BGR layout: channel 0 (blue) discarded, 1 -> green, 2 -> red
    cvSplit(image, NULL, green, red, NULL);
    
    red_edge = cvCreateImage(cvSize(image->width,image->height), IPL_DEPTH_8U, 1);
    green_edge = cvCreateImage(cvSize(image->width,image->height), IPL_DEPTH_8U, 1);
    cvCanny(red, red_edge, low, high, 3);
    cvCanny(green, green_edge, low, high, 3);

    edge = cvCreateImage(cvSize(image->width,image->height), IPL_DEPTH_8U, 1);

    // Edges that appear in one channel but not the other.
    cvAbsDiff(red_edge, green_edge, edge);


    // Half-size output buffer (width/height rounded down to even first).
    final = cvCreateImage(cvSize((image->width&-2)/2,(image->height&-2)/2), IPL_DEPTH_8U, 1);
コード例 #13
0
ファイル: testApp.cpp プロジェクト: dasaki/cvcinema
/// ****************************************************
///
///                     CARTOON FILTER
///
/// ****************************************************
// Posterise `src` with mean-shift filtering, then darken Canny edge lines
// by subtracting them, and write the result into `dst` (w x h, RGB).
// Always returns true.
// Fix: the three temporary IplImages were leaked on every call.
bool testApp::cvFilterCartoon(ofxCvColorImage &src, ofxCvColorImage &dst, int w, int h)
{
    // Temporary storage.
    IplImage* pyr = cvCreateImage( cvSize(w,h), IPL_DEPTH_8U, 3 );
    IplImage* edges = cvCreateImage( cvSize(w,h), IPL_DEPTH_8U, 1 );
    IplImage* edgesRgb = cvCreateImage( cvSize(w,h), IPL_DEPTH_8U, 3 );

    // Grayscale copy of the source for edge detection.
    ofxCvGrayscaleImage tempGrayImg;
    tempGrayImg.allocate(w, h);
    tempGrayImg.setFromColorImage(src);

    // Posterise colours (spatial radius 10, colour radius 10).
    cvPyrMeanShiftFiltering(src.getCvImage(), pyr, 10, 10);

    // Dark edge lines: subtract the RGB-expanded Canny edges.
    cvCanny(tempGrayImg.getCvImage(), edges, 150,150);
    cvCvtColor(edges, edgesRgb, CV_GRAY2RGB);
    cvAbsDiff(pyr, edgesRgb, pyr);

    dst.setFromPixels((unsigned char *)pyr->imageData, w, h);

    // Release temporaries (previously leaked on every call).
    cvReleaseImage(&pyr);
    cvReleaseImage(&edges);
    cvReleaseImage(&edgesRgb);
    return true;
}
コード例 #14
0
// Compute the per-pixel normalised background distance
// w(p) = |I(p) - mean(p)| / sigma(p) and return a pointer to its maximum
// value, searched inside the foreground mask FrameData->FG.
//
// Fixes: the original passed a NULL double* to cvMinMaxLoc (which needs a
// valid address to write into), used the *minimum* output slot despite the
// function's purpose being the maximum, and leaked both temporary images
// on every call (the !IDif guard was always true for freshly-NULL locals).
//
// NOTE: returns a pointer to function-local static storage — overwritten
// by the next call, and not thread-safe.
double* ObtenerMaximo(IplImage* Imagen, STFrame* FrameData, CvRect Roi) {
	if (SHOW_VALIDATION_DATA == 1)
		printf(" \n\n Busqueda del máximo umbral...");

	CvSize modelSize = cvSize(FrameData->BGModel->width,
			FrameData->BGModel->height);
	// difference image abs(I(p) - u(p))
	IplImage* IDif = cvCreateImage(modelSize, IPL_DEPTH_8U, 1);
	// weight image w(p) (float)
	IplImage* peso = cvCreateImage(modelSize, IPL_DEPTH_32F, 1);
	cvZero(IDif);
	cvZero(peso);

	// |I(p)-u(p)| / sigma(p)
	cvAbsDiff(Imagen, FrameData->BGModel, IDif);
	cvDiv(IDif, FrameData->IDesvf, peso);

	// Find the maximum weight inside the foreground mask.
	static double Maximo;
	Maximo = 0;
	cvMinMaxLoc(peso, 0, &Maximo, 0, 0, FrameData->FG);

	cvReleaseImage(&IDif);
	cvReleaseImage(&peso);

	return &Maximo;
}
コード例 #15
0
ファイル: mainwindow.cpp プロジェクト: lkpjj/qt_demo
// Background-subtraction demo driven from the GUI: grabs frames from
// camera 0 until ESC is pressed, maintains a running-average background
// (pBkMat, learning rate `alpha` from the slider) and shows the frame,
// background and thresholded foreground via Display().
// All images/matrices are members allocated on the first frame.
void MainWindow::BackgroundDiff()
{
    ui->alpha_slider->setEnabled(true);

    cvReleaseCapture(&pCapture);
    pCapture=cvCaptureFromCAM(0);
  //  IplImage* pFrame=NULL;
    nFrameNum=0;

    while(pFrame = cvQueryFrame( pCapture ))
    {
        nFrameNum++;
        // First frame: allocate the buffers and initialise them.

        if(nFrameNum == 1)
        {
            pBkImg = cvCreateImage(cvSize(pFrame->width, pFrame->height),IPL_DEPTH_8U,1);
            pFrImg = cvCreateImage(cvSize(pFrame->width, pFrame->height), IPL_DEPTH_8U,1);
            pBkMat = cvCreateMat(pFrame->height, pFrame->width, CV_32FC1);
            pFrMat = cvCreateMat(pFrame->height, pFrame->width, CV_32FC1);
            pFrameMat = cvCreateMat(pFrame->height, pFrame->width, CV_32FC1);

            // Convert to a single-channel image before processing;
            // the first frame seeds the background model.
            cvCvtColor(pFrame, pBkImg, CV_BGR2GRAY);
            cvCvtColor(pFrame, pFrImg, CV_BGR2GRAY);
            cvConvert(pFrImg, pFrameMat);
            cvConvert(pFrImg, pFrMat);
            cvConvert(pFrImg, pBkMat);
        }
        else
        {
            cvCvtColor(pFrame, pFrImg, CV_BGR2GRAY);
            cvConvert(pFrImg, pFrameMat);
            // Gaussian-smooth first to reduce noise.
            cvSmooth(pFrameMat, pFrameMat, CV_GAUSSIAN, 3, 0, 0);
            // Absolute difference between the current frame and the background.
            cvAbsDiff(pFrameMat, pBkMat, pFrMat);
            // Morphological close on the float difference, then binarise.
            cvDilate(pFrMat,pFrMat);
            cvErode(pFrMat,pFrMat);

            cvThreshold(pFrMat, pFrImg, lowThreshold, 255.0, CV_THRESH_BINARY);
            // Update the background with a running average (weight = alpha).
            cvRunningAvg(pFrameMat, pBkMat, alpha,0);
            // Convert the background back to image format for display.
            cvConvert(pBkMat, pBkImg);
            // Bottom-left origin — presumably so the Qt display shows the
            // images upright; verify against Display().
            pFrame->origin = IPL_ORIGIN_BL;
            pFrImg->origin = IPL_ORIGIN_BL;
            pBkImg->origin = IPL_ORIGIN_BL;

        }

        // ESC quits; ~30 fps pacing.
        if(27==cvWaitKey(33))
            break;

        MainWindow::Display(pFrame,pBkImg,pFrImg);
    }

}
コード例 #16
0
//--------------------------------------------------------------------------------
// In-place absolute difference against `mom`: this = |this - mom|.
// Requires matching ROI sizes; logs an error otherwise.
void ofxCvGrayscaleImage::absDiff( ofxCvGrayscaleImage& mom ) {
    if( matchingROI(getROI(), mom.getROI()) ) {
        cvAbsDiff( cvImage, mom.getCvImage(), cvImageTemp );
        swapTemp();
        flagImageChanged();
    } else {
        // Fix: the message previously said "in *=" — copy-paste from operator*=.
        ofLog(OF_LOG_ERROR, "in absDiff, ROI mismatch");
    }
}
コード例 #17
0
ファイル: gstgcs.c プロジェクト: miguelao/gst_plugins_tsunami
/* Binary frame-difference mask: output = closed(threshold(|in - inprev|, 5)).
 * Both inputs are smoothed in place as a side effect.  Always TRUE. */
gboolean get_frame_difference( IplImage* in, IplImage* inprev, IplImage* output)
{
  /* Blur both frames first so sensor noise does not register as motion. */
  cvSmooth(inprev, inprev, CV_GAUSSIAN, 5);
  cvSmooth(in,     in,     CV_GAUSSIAN, 5);

  /* Any pixel differing by more than 5 levels counts as motion. */
  cvAbsDiff( in, inprev, output);
  cvThreshold( output, output, 5, 255, CV_THRESH_BINARY);

  /* Morphological close fills small holes in the mask. */
  cvMorphologyEx( output, output, 0, 0, CV_MOP_CLOSE, 1 );
  return TRUE;
}
コード例 #18
0
// parameters:
//  img - input video frame
//  dst - resultant motion picture
//  args - optional parameters
// Update the motion-history image (MHI) from one new frame and render it
// into dst's blue channel.  Uses the file-scope globals buf (ring buffer
// of the last N grayscale frames), last, mhi, orient, segmask, mask, silh.
static void  update_mhi( IplImage* img, IplImage* dst, int diff_threshold, int frameCount){
	if(DEBUG){
		std::cout << "- UPDATING_MHI" << std::endl;
	}
	double timestamp = (double)clock()/CLOCKS_PER_SEC; // get current time in seconds
	CvSize size = cvSize(img->width,img->height); // get current frame size
	int i, idx1 = last, idx2;
	// NOTE(review): seq/comp_rect/roi/count/angle/center/magnitude/color are
	// unused in this version — presumably left over from the fuller OpenCV
	// motempl sample (segmentation/orientation pass).
	CvSeq* seq;
	CvRect comp_rect;
	CvRect roi;
	double count;
	double angle;
	CvPoint center;
	double magnitude;
	CvScalar color;

	// Allocate images at the beginning or reallocate them if the frame size is changed
	if( !mhi || mhi->width != size.width || mhi->height != size.height ) {
		if( buf == 0 ) {
			// ring buffer of the last N grayscale frames
			buf = (IplImage**)malloc(N*sizeof(buf[0]));
			memset( buf, 0, N*sizeof(buf[0]));
		}

		for( i = 0; i < N; i++ ) {
			cvReleaseImage( &buf[i] );
			buf[i] = cvCreateImage( size, IPL_DEPTH_8U, 1 );
			cvZero( buf[i] );
		}
		cvReleaseImage( &mhi );
		cvReleaseImage( &orient );
		cvReleaseImage( &segmask );
		cvReleaseImage( &mask );

		mhi = cvCreateImage( size, IPL_DEPTH_32F, 1 );
		cvZero( mhi ); // clear MHI at the beginning
		orient = cvCreateImage( size, IPL_DEPTH_32F, 1 );
		segmask = cvCreateImage( size, IPL_DEPTH_32F, 1 );
		mask = cvCreateImage( size, IPL_DEPTH_8U, 1 );
	}

	cvCvtColor( img, buf[last], CV_BGR2GRAY ); // convert frame to grayscale
	idx2 = (last + 1) % N; // index of (last - (N-1))th frame
	last = idx2;

	// Silhouette = thresholded difference between the two ring-buffer slots.
	silh = buf[idx2];
	cvAbsDiff( buf[idx1], buf[idx2], silh ); // get difference between frames

	cvThreshold( silh, silh, diff_threshold, 255, CV_THRESH_BINARY); // and threshold it
	cvUpdateMotionHistory( silh, mhi, timestamp, MHI_DURATION ); // update MHI

	// convert MHI to blue 8u image (recent motion brightest)
	cvCvtScale( mhi, mask, 255./MHI_DURATION, (MHI_DURATION - timestamp)*255./MHI_DURATION );
	cvZero( dst );
	cvMerge( mask, 0, 0, 0, dst ); // MHI into the blue channel only
}
// Processing step: background subtraction on a colour input image.
// Copies the input into the member mOutputImage, optionally updates the
// background as a running average (mUpdateProportion) and equalises the
// per-channel mean (mCorrectMean), then subtracts according to mMode and
// publishes mOutputImage back into the pipeline.
void THISCLASS::OnStep() {
	// Get and check input image
	IplImage *inputimage = mCore->mDataStructureImageColor.mImage;
	if (! inputimage) {
		AddError(wxT("No input image."));
		return;
	}
	if (inputimage->nChannels != 3) {
		AddError(wxT("The input image is not a color image."));
		return;
	}

	// Check and update the background
	if (! mOutputImage) {
	  mOutputImage = cvCloneImage(inputimage);
	} else {
	  cvCopyImage(inputimage, mOutputImage);
	}
	if (! mBackgroundImage) {
		// The first frame seen becomes the initial background.
		mBackgroundImage = cvCloneImage(mOutputImage);
	} else if (mUpdateProportion > 0) {
		if ((cvGetSize(mOutputImage).height != cvGetSize(mBackgroundImage).height) || (cvGetSize(mOutputImage).width != cvGetSize(mBackgroundImage).width)) {
			AddError(wxT("Input and background images do not have the same size."));
			return;
		}

		// background <= p*current + (1-p)*background
		cvAddWeighted(mOutputImage, mUpdateProportion, mBackgroundImage, 1.0 - mUpdateProportion, 0, mBackgroundImage);
	}

	try {
		// Correct the tmpImage with the difference in image mean
		// (shift each channel so the input mean matches the background mean)
		if (mCorrectMean) {
			mBackgroundImageMean = cvAvg(mBackgroundImage);
			CvScalar tmpScalar = cvAvg(mOutputImage);
			cvAddS(mOutputImage, cvScalar(mBackgroundImageMean.val[0] - tmpScalar.val[0], mBackgroundImageMean.val[1] - tmpScalar.val[1], mBackgroundImageMean.val[2] - tmpScalar.val[2]), mOutputImage);
		}

		// Background subtraction; the mode selects signed or absolute difference
		if (mMode == sMode_SubImageBackground) {
			cvSub(mOutputImage, mBackgroundImage, mOutputImage);
		} else if (mMode == sMode_SubBackgroundImage) {
			cvSub(mBackgroundImage, mOutputImage, mOutputImage);
		} else {
			cvAbsDiff(mOutputImage, mBackgroundImage, mOutputImage);
		}
	} catch (...) {
		AddError(wxT("Background subtraction failed."));
	}
	mCore->mDataStructureImageColor.mImage = mOutputImage;
	// Set the display
	DisplayEditor de(&mDisplayOutput);
	if (de.IsActive()) {
		de.SetMainImage(mOutputImage);
	}
}
コード例 #20
0
ファイル: image-diff.cpp プロジェクト: squidforce/spot_hustle
int main ( int argc, char **argv )
{
  // use first camera attached to computer
  CvCapture *capture;
  capture = cvCaptureFromCAM( 0 );
  assert( capture );
 
  // image data structures
  IplImage *img1;   
  IplImage *img2;
  IplImage *imggray1;
  IplImage *imggray2;
  IplImage *imggray3;
         
  // get the camera image size
  IplImage *imgsize;
  imgsize = cvQueryFrame( capture );
  if( !imgsize ) return -1;
 
  // grayscale buffers
  imggray1 = cvCreateImage( cvGetSize( imgsize ), IPL_DEPTH_8U, 1);
  imggray2 = cvCreateImage( cvGetSize( imgsize ), IPL_DEPTH_8U, 1);
  imggray3 = cvCreateImage( cvGetSize( imgsize ), IPL_DEPTH_8U, 1);
   
  int key = 0;
  while ( key != 'q' ) {
    // load image one
    img1 = cvQueryFrame( capture ); 
  
   // convert rgb to grayscale
    cvCvtColor( img1, imggray1, CV_RGB2GRAY );
     
    // quit if user press 'q' and wait a bit between images
    key = cvWaitKey( 500 );
     
    // load image two
    img2 = cvQueryFrame( capture );
 
    // convert rgb to grayscale
    cvCvtColor( img2, imggray2, CV_RGB2GRAY );
     
    // compute difference
    cvAbsDiff( imggray1, imggray2, imggray3 );
     
    // display difference
    cvNamedWindow( "video", 1 );
    cvShowImage( "video", imggray3 );
  }
 
  // release camera and clean up resources when "q" is pressed
  cvReleaseCapture( &capture );
  cvDestroyWindow( "video" );
  return 0;
}
コード例 #21
0
ファイル: mainwindow.cpp プロジェクト: lkpjj/qt_demo
// Frame-differencing demo: grabs frames from camera 0 until ESC, computes
// threshold(|frame(t) - frame(t-1)|) and shows the source, colour diff and
// binary mask via Display().  Working images are members allocated on the
// first frame.
void MainWindow::FrameDiff()
{

    ui->alpha_slider->setDisabled(true);  // alpha only applies to background mode

    cvReleaseCapture(&pCapture);
    pCapture=cvCaptureFromCAM(0);
    IplImage* frame=NULL;
    nFrameNum=0;

    while(frame = cvQueryFrame(pCapture))
    {
//        cvCvtColor(frame,frame,CV_BGR2RGB);
        nFrameNum++;

        cvReleaseImage(&src);//memory control: drop last iteration's copy

        src=cvCreateImage(cvSize(frame->width,frame->height),IPL_DEPTH_8U,3);
        cvCopy(frame,src);

        if( nFrameNum==1 )
        {
            // First frame: allocate working images and seed last_img.
            last_img=cvCreateImage(cvSize(src->width,src->height),IPL_DEPTH_8U,3);
            img_diff=cvCreateImage(cvSize(src->width,src->height),IPL_DEPTH_8U,3);
            gray_diff=cvCreateImage(cvSize(src->width,src->height),IPL_DEPTH_8U,1);
            binary_img=cvCreateImage(cvSize(src->width,src->height),IPL_DEPTH_8U,1);

            cvCopy(src,last_img);
            cvZero(binary_img);
        }
        else
        {
            // |current - previous| in colour, then grayscale + smooth + threshold.
            cvAbsDiff(src,last_img,img_diff);

            cvCvtColor(img_diff,gray_diff,CV_BGR2GRAY);
            cvSmooth(gray_diff,gray_diff,CV_GAUSSIAN,3,0,0);//gaussian smooth

            cvThreshold(gray_diff,binary_img,lowThreshold,255,CV_THRESH_BINARY);

            // Morphological close: fills small holes in the motion mask.
            cvDilate(binary_img,binary_img);
            cvErode(binary_img,binary_img);

            cvCopy(src,last_img);  // current frame becomes "previous"
        }

        // ESC quits; ~30 fps pacing.
        if(27==cvWaitKey(33))
            break;

        MainWindow::Display(src,img_diff,binary_img);

  //      MainWindow::releaseMemory();

    }
}
コード例 #22
0
int main()
{
    CvCapture* capture = cvCreateCameraCapture(1);
    if(!capture)
    {
        printf("Camera error.\n");
        return -1;
    }

    // set camera property
    cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH, FRAMEWIDTH);
    cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT, FRAMEHEIGHT);

    IplImage* frame = cvQueryFrame(capture);
    IplImage* frame_prior = cvCreateImage(
        cvGetSize(frame), frame->depth, frame->nChannels);
    IplImage * diff12 = cvCreateImage(
        cvGetSize(frame), frame->depth, frame->nChannels);

    if(!frame)
    {
        return -1;
    }

    cvCopy(frame, frame_prior);
    char c;
    char file_name[128];
    int count_frame = 0;
    while( 1 ) 
    { 
        frame = cvQueryFrame(capture);
        if(!frame)
        {
            return -1;
        }

        cvAbsDiff(frame, frame_prior, diff12);
        cvCopy(frame, frame_prior);
        sprintf(file_name, "%sframe_%d.bmp", SAVEIMGDIR, count_frame++ );
       cvSaveImage(file_name, frame);
        printf("%d: %s\n", count_frame, file_name);

        cvShowImage("diff", diff12);
        c = cvWaitKey(50);
        if(c == 27)
        {
            break;
        }
    }

    cvDestroyAllWindows();
    cvReleaseImage(&frame);
}
コード例 #23
0
// Learn the background statistics for one more frame
// Accumulates the running frame sum (IavgF), the sum of absolute
// frame-to-frame differences (IdiffF) and the frame count (Icount) —
// all file-scope globals — for later conversion into a background model.
// The very first frame only primes IprevF; no statistics are recorded.
void accumulateBackground( IplImage *I ){
	static int first = 1;                  // true only on the very first call
	cvCvtScale( I, Iscratch, 1, 0 );       // convert to float working image
	if( !first ){
		cvAcc( Iscratch, IavgF );                  // running sum of frames
		cvAbsDiff( Iscratch, IprevF, Iscratch2 );  // frame-to-frame change
		cvAcc( Iscratch2, IdiffF );                // running sum of changes
		Icount += 1.0;
	}
	first = 0;
	cvCopy( Iscratch, IprevF );            // remember this frame for the next diff
}
コード例 #24
0
// Accumulate the background statistics for one more frame
// We accumulate the images, the image differences and the count of images for the 
//    the routine createModelsfromStats() to work on after we're done accumulating N frames.
// I		Background image, 3 channel, 8u
// number	Camera number
void accumulateBackground(IplImage *I, int number)
{
	// NOTE(review): `first` is shared across all cameras — the first call
	// (whichever camera it is for) is the only one whose diff is skipped;
	// confirm that per-camera priming is not required.
	static int first = 1;
	cvCvtScale(I,Iscratch,1,0); //To float;
	if (!first){
		cvAcc(Iscratch,IavgF[number]);                 // running sum of frames
		cvAbsDiff(Iscratch,IprevF[number],Iscratch2);  // frame-to-frame change
		cvAcc(Iscratch2,IdiffF[number]);               // running sum of changes
		Icount[number] += 1.0;
	}
	first = 0;
	cvCopy(Iscratch,IprevF[number]);   // remember this frame for the next diff
}
コード例 #25
0
ファイル: motiondetect.c プロジェクト: shohei/zybo-book
// Show the live camera feed, the previous grayscale frame, and their
// absolute difference, until any key is pressed.
// Fixes: the first cvQueryFrame result was dereferenced (img->width)
// without a NULL check, and the three created images were never released
// (frames from cvQueryFrame are capture-owned and are correctly left alone).
int main(int argc, char** argv) {
  CvCapture *capture = NULL;

  capture = cvCreateCameraCapture(0);
  if(capture == NULL){
    printf("can not find a camera!!");
    return -1;
  }

  IplImage *img = NULL;
  img = cvQueryFrame(capture);
  if(img == NULL){  // check before reading width/height below
    cvReleaseCapture(&capture);
    return -1;
  }
  const int w = img->width;
  const int h = img->height;

  IplImage *imgBef = cvCreateImage(cvSize(w, h), IPL_DEPTH_8U, 1);
  IplImage *imgGray = cvCreateImage(cvSize(w, h), IPL_DEPTH_8U, 1);
  IplImage *imgDiff = cvCreateImage(cvSize(w, h), IPL_DEPTH_8U, 1);

  char winNameCapture[] = "Capture";
  char winNameDiff[] = "Difference";
  char winNameBef[] = "Old Frame";

  cvNamedWindow(winNameCapture, CV_WINDOW_AUTOSIZE);
  cvNamedWindow(winNameBef, CV_WINDOW_AUTOSIZE);
  cvNamedWindow(winNameDiff, CV_WINDOW_AUTOSIZE);

  // Prime the "previous frame" buffer.
  img = cvQueryFrame(capture);
  cvCvtColor(img, imgBef, CV_BGR2GRAY);  

  while (1) {
    img = cvQueryFrame(capture);
    cvCvtColor(img, imgGray,CV_BGR2GRAY);
    cvAbsDiff(imgGray, imgBef, imgDiff);  // |current - previous|

    cvShowImage(winNameCapture, img);
    cvShowImage(winNameBef, imgBef);
    cvShowImage(winNameDiff, imgDiff);

    cvCopy(imgGray, imgBef, 0);  // current becomes "previous"

    if(cvWaitKey(1) >= 0)
	break;
  }
  
  cvDestroyWindow(winNameCapture);
  cvDestroyWindow(winNameDiff);
  cvDestroyWindow(winNameBef);
  // Release what we created (previously leaked).
  cvReleaseImage(&imgBef);
  cvReleaseImage(&imgGray);
  cvReleaseImage(&imgDiff);
  cvReleaseCapture(&capture);

  return 0;
}
コード例 #26
0
ファイル: analysis.cpp プロジェクト: terrencebrown/iwb
    // Return the binarised grayscale difference of two colour frames:
    // threshold(|gray(frame1) - gray(frame2)|, constants::threshold).
    // The caller owns the returned image and must cvReleaseImage it.
    // Fix: the two grayscale temporaries were leaked on every call.
    IplImage* Analysis::getDiff(IplImage *frame1, IplImage *frame2) {
        IplImage* diff;
        IplImage* image2;
        IplImage* image1;
        image1 = cvCreateImage(cvGetSize(frame1), frame1->depth, 1);
        image2 = cvCreateImage(cvGetSize(frame1), frame1->depth, 1);
        diff = cvCreateImage(cvGetSize(frame1), frame1->depth, 1);
        cvCvtColor(frame1, image1, CV_BGR2GRAY);
        cvCvtColor(frame2, image2, CV_BGR2GRAY);

        cvAbsDiff(image1, image2, diff);
        cvThreshold(diff, diff, constants::threshold, 255, CV_THRESH_BINARY);

        // Release the temporaries (previously leaked).
        cvReleaseImage(&image1);
        cvReleaseImage(&image2);
        return diff;
    }
コード例 #27
0
//--------------------------------------------------------------------------------
// this = |mom - dad|, provided all three ROIs have identical dimensions;
// otherwise logs an error and leaves this image untouched.
void ofxCvGrayscaleImage::absDiff( ofxCvGrayscaleImage& mom,
                                   ofxCvGrayscaleImage& dad ) {
    ofRectangle roi = getROI();
    ofRectangle momRoi = mom.getROI();
    ofRectangle dadRoi = dad.getROI();
    bool momFits = momRoi.width == roi.width && momRoi.height == roi.height;
    bool dadFits = dadRoi.width == roi.width && dadRoi.height == roi.height;
    if( !momFits || !dadFits ){
        ofLog(OF_LOG_ERROR, "in absDiff, images are different sizes");
        return;
    }
    cvAbsDiff( mom.getCvImage(), dad.getCvImage(), cvImage );
    flagImageChanged();
}
コード例 #28
0
// Compare this feature's image against the other task's image and emit
// the summed per-channel mean of |a - b| as output parameter "result".
//
// Fixes: the original created the diff buffer with
// cvCreateImage(sz, image->dims, 3) — `dims` (2) is not a pixel-depth
// constant — leaked that IplImage, and passed cv::Mat* pointers to the
// C-API cvAbsDiff, which expects CvArr* (IplImage*/CvMat*).  Using the
// C++ API throughout avoids all three problems; absdiff allocates
// diffImage to match the inputs.
void PixelwiseComparison::compare(Feature *task)
{
	verbosePrintln(string("comparing"));

	Mat* otherImg  = ((PixelwiseComparison*)task)->getImage();
	Mat diffImage;
	absdiff(*image, *otherImg, diffImage);  // per-pixel |image - otherImg|

	Scalar s = mean(diffImage);

	DoubleOutputParameter *param1 = new DoubleOutputParameter("result");
	param1->setData(s.val[0] + s.val[1] + s.val[2]);
	// NOTE(review): addOutputParameter takes a reference — confirm it takes
	// ownership or copies, otherwise param1 leaks.
	addOutputParameter(*param1);
}
コード例 #29
0
// Processing step: subtract a pre-loaded, fixed background image from the
// grayscale input, in place, with optional mean correction.  Unlike the
// colour variant, the background here is loaded elsewhere and never updated.
void THISCLASS::OnStep() {
	// Get and check input image
	IplImage *inputimage = mCore->mDataStructureImageGray.mImage;
	if (! inputimage) {
		AddError(wxT("No input image."));
		return;
	}
	if (inputimage->nChannels != 1) {
		AddError(wxT("The input image is not a grayscale image."));
		return;
	}

	// Check the background image
	if (! mBackgroundImage) {
		AddError(wxT("No background image loaded."));
		return;
	}
	if ((cvGetSize(inputimage).height != cvGetSize(mBackgroundImage).height) || (cvGetSize(inputimage).width != cvGetSize(mBackgroundImage).width)) {
		AddError(wxT("Input and background images don't have the same size."));
		return;
	}

	try {
		// Correct the inputimage with the difference in image mean
		// (shift the input so its mean matches the precomputed background mean)
		if (mCorrectMean) {
			cvAddS(inputimage, cvScalar(mBackgroundImageMean.val[0] - cvAvg(inputimage).val[0]), inputimage);
		}

		// Background subtraction, in place on the input image;
		// the mode selects signed or absolute difference
		if (mMode == sMode_SubImageBackground) {
			cvSub(inputimage, mBackgroundImage, inputimage);
		} else if (mMode == sMode_SubBackgroundImage) {
			cvSub(mBackgroundImage, inputimage, inputimage);
		} else {
			cvAbsDiff(inputimage, mBackgroundImage, inputimage);
		}
	} catch (...) {
		AddError(wxT("Background subtraction failed."));
	}

	// Set the display
	DisplayEditor de(&mDisplayOutput);
	if (de.IsActive()) {
		de.SetMainImage(inputimage);
	}
}
コード例 #30
0
//--------------------------------------------------------------------------------
// In-place absolute difference: this = |this - mom|.
// Allocates this image to mom's dimensions when not yet allocated;
// requires matching ROIs, otherwise logs an error and does nothing.
void ofxCvGrayscaleImage::absDiff( ofxCvGrayscaleImage& mom ){
	if( !mom.bAllocated ){
		ofLog(OF_LOG_ERROR, "in absDiff, mom needs to be allocated");	
		return;	
	}
	if( !bAllocated ){
		ofLog(OF_LOG_NOTICE, "in absDiff, allocating to match dimensions");			
		allocate(mom.getWidth(), mom.getHeight());
	}	

    if( matchingROI(getROI(), mom.getROI()) ) {
        cvAbsDiff( cvImage, mom.getCvImage(), cvImageTemp );
        swapTemp();
        flagImageChanged();
    } else {
        // Fix: the message previously said "in *=" — copy-paste from operator*=.
        ofLog(OF_LOG_ERROR, "in absDiff, ROI mismatch");
    }
}