コード例 #1
0
/* Standard Deviation */
// Computes the per-pixel sample standard deviation of the captured frame
// stack relative to the background model and returns it as an 8-bit image.
//
// Member buffers used:
//   mpFrame[0..mFrameNumber-1] - captured frames (8U)
//   m_imgBackgroundModel       - current background model (8U)
//   mTmp8U / mTmp / mSum       - 8U and float scratch buffers
//   m_imgStandardDeviation     - 8U result image (returned)
//
// NOTE: mFrameNumber must be >= 2 for the (mFrameNumber - 1) divisor.
IplImage* motionDetection::getStandardDeviationFrame(void) {

	// Initialize the accumulator
	cvZero(mSum);
	for (int i = 0; i < mFrameNumber; ++i) {
		// mTmp8U <= | frame[i] - Background Model |
		cvAbsDiff(mpFrame[i], m_imgBackgroundModel, mTmp8U);
		// uchar -> float
		cvConvert(mTmp8U, mTmp);
		// mTmp = mTmp * mTmp
		cvPow(mTmp, mTmp, 2.0);
		// mSum += mTmp
		cvAdd(mSum, mTmp, mSum);
	}

	// sample variance: mTmp <= mSum / (mFrameNumber - 1)
	// (replaces the previous hand-rolled per-pixel loop with the
	// equivalent single library call)
	cvConvertScale(mSum, mTmp, 1.0 / (mFrameNumber - 1), 0);

	// standard deviation
	cvPow(mTmp, mTmp, 0.5);

	// float -> uchar (saturating)
	cvConvert(mTmp, m_imgStandardDeviation);

	return m_imgStandardDeviation;
}
コード例 #2
0
// Frame-differencing object detector: subtracts the running background
// model from the current frame, thresholds the difference into a binary
// foreground mask, cleans it with morphology, and updates the background.
//
// image      - current BGR frame (input, not modified)
// pBkImg     - 8U background image, refreshed from pBkMat for display
// pFrImg     - 8U scratch image; ends up holding the binary foreground mask
// pFrameMat  - 32F matrix receiving the smoothed current frame
// pBkMat     - 32F running-average background model (updated in place)
// pFrMat     - 32F |frame - background| difference
// thre_limit - binarization threshold applied to the difference
void detect_object(IplImage *image, IplImage *pBkImg, IplImage *pFrImg, CvMat *pFrameMat, CvMat *pBkMat, CvMat *pFrMat,int thre_limit)
{
	nFrmNum++;	// global frame counter -- presumably read elsewhere; TODO confirm
	cvCvtColor(image, pFrImg, CV_BGR2GRAY);
	cvConvert(pFrImg, pFrameMat);
	// Gaussian smoothing (3x3) to suppress pixel noise before differencing
	cvSmooth(pFrameMat, pFrameMat, CV_GAUSSIAN, 3, 0, 0);
	// absolute difference between the current frame and the background model
	cvAbsDiff(pFrameMat, pBkMat, pFrMat);
	// binarize the foreground image
	cvThreshold(pFrMat, pFrImg,thre_limit, 255.0, CV_THRESH_BINARY);

	/* morphological filtering */
	//IplConvKernel* element = cvCreateStructuringElementEx(2, 2, 0, 0, CV_SHAPE_RECT);
	//cvErode(pFrImg, pFrImg,element, 1);	// erode
	//delete element;

	//element = cvCreateStructuringElementEx(2, 2, 1, 1, CV_SHAPE_RECT);
	//cvDilate(pFrImg, pFrImg, element, 1);	//dilate
	//delete element;
	cvErode(pFrImg, pFrImg,0, 1);	// erode (default 3x3 kernel) to remove speckle noise
	cvDilate(pFrImg, pFrImg,0, 1);	// dilate to restore eroded blob area

	// update the background with a running average (learning rate 0.004)
	cvRunningAvg(pFrameMat, pBkMat, 0.004, 0);
	// convert the background matrix back to image format for display
	cvConvert(pBkMat, pBkImg);

	// NOTE(review): the "background" window actually shows the foreground
	// mask (pFrImg); the commented-out line below would show the background.
	cvShowImage("background", pFrImg);
//	cvShowImage("background", pBkImg);
}
コード例 #3
0
ファイル: mainwindow.cpp プロジェクト: lkpjj/qt_demo
// Camera-driven background subtraction: (re)opens camera 0, maintains a
// running-average background model (rate = member 'alpha'), thresholds the
// frame difference at 'lowThreshold', and hands the frames to Display()
// until ESC is pressed. Uses the member pointers pCapture/pFrame/pBkImg/
// pFrImg/pBkMat/pFrMat/pFrameMat and the counter nFrameNum.
void MainWindow::BackgroundDiff()
{
    ui->alpha_slider->setEnabled(true);

    cvReleaseCapture(&pCapture);
    pCapture=cvCaptureFromCAM(0);
  //  IplImage* pFrame=NULL;
    nFrameNum=0;

    while(pFrame = cvQueryFrame( pCapture ))
    {
        nFrameNum++;
        // first frame: allocate the buffers and initialize the model

        if(nFrameNum == 1)
        {
            pBkImg = cvCreateImage(cvSize(pFrame->width, pFrame->height),IPL_DEPTH_8U,1);
            pFrImg = cvCreateImage(cvSize(pFrame->width, pFrame->height), IPL_DEPTH_8U,1);
            pBkMat = cvCreateMat(pFrame->height, pFrame->width, CV_32FC1);
            pFrMat = cvCreateMat(pFrame->height, pFrame->width, CV_32FC1);
            pFrameMat = cvCreateMat(pFrame->height, pFrame->width, CV_32FC1);

            // convert to a single-channel image before processing
            cvCvtColor(pFrame, pBkImg, CV_BGR2GRAY);
            cvCvtColor(pFrame, pFrImg, CV_BGR2GRAY);
            cvConvert(pFrImg, pFrameMat);
            cvConvert(pFrImg, pFrMat);
            cvConvert(pFrImg, pBkMat);
        }
        else
        {
            cvCvtColor(pFrame, pFrImg, CV_BGR2GRAY);
            cvConvert(pFrImg, pFrameMat);
            // Gaussian smoothing first, to reduce image noise
            cvSmooth(pFrameMat, pFrameMat, CV_GAUSSIAN, 3, 0, 0);
            // subtract the background model from the current frame
            cvAbsDiff(pFrameMat, pBkMat, pFrMat);
            // binarize the foreground image
            // NOTE(review): dilate/erode run on the float difference before
            // thresholding here, unlike the sibling examples which filter
            // the binary mask afterwards -- confirm this is intentional
            cvDilate(pFrMat,pFrMat);
            cvErode(pFrMat,pFrMat);

            cvThreshold(pFrMat, pFrImg, lowThreshold, 255.0, CV_THRESH_BINARY);
            // update the background model
            cvRunningAvg(pFrameMat, pBkMat, alpha,0);
            // convert the background back to image format for display
            cvConvert(pBkMat, pBkImg);
            pFrame->origin = IPL_ORIGIN_BL;
            pFrImg->origin = IPL_ORIGIN_BL;
            pBkImg->origin = IPL_ORIGIN_BL;

        }

        // ESC (27) quits the capture loop
        if(27==cvWaitKey(33))
            break;

        MainWindow::Display(pFrame,pBkImg,pFrImg);
    }

}
コード例 #4
0
ファイル: cvparticlefilter.cpp プロジェクト: liangfu/dnn
// Configures a particle filter: identity dynamics, per-state noise sigmas,
// and per-state [lower, upper, circular] bounds.
//
// p      - particle filter; its dynamics/std/bound matrices are filled in.
//          The 5-element arrays below assume p->num_states == 5
//          (center.x, center.y, width, height, angle) -- TODO confirm.
// imsize - image size used to scale the state bounds
// std    - noise standard deviations packed into a CvBox2D
CVAPI(void) cvParticleStateConfig( CvParticle * p,
                                   const CvSize imsize, CvBox2D std )
{
  // config dynamics model: identity => constant-position motion model
  CvMat * dynamicsmat =
      cvCreateMat( p->num_states, p->num_states, CV_64F );
  cvSetIdentity(dynamicsmat, cvRealScalar(1));

  // config random noise standard deviation
  // CvRNG rng = cvRNG( time( NULL ) );
  double stdarr[] = {
    // std.x, std.y,
    // std.width, std.height,
    std.center.x,   std.center.y,
    std.size.width, std.size.height,
    std.angle
  };
  CvMat stdmat = cvMat( p->num_states, 1, CV_64FC1, stdarr );

  // config minimum and maximum values of states
  // lowerbound, upperbound, circular flag (useful for degree)
  // lowerbound == upperbound to express no bounding
  // const CvSize tsize = cvSize(24.*(imsize.width /160.),
  //                             30.*(imsize.height/120.));
  const CvSize tsize = cvSize(30.*(imsize.width /160.),
                              36.*(imsize.height/120.));
  const float space = // space for rotation of box
      (10.*(imsize.width /160.)+                    // additional bound
       MAX(tsize.width, tsize.height))*0.1415+      // rotated bound
      1.0;                                          // additional pixel
  double boundarr[] = {
    space, imsize.width - (tsize.width +1.) - space, false,   // position.x
    space, imsize.height -(tsize.height+1.) - space, false,   // position.y
    12.*(imsize.width/160.),  tsize.width , false, // winsize.x
    15.*(imsize.height/160.), tsize.height, false, // winsize.y
    // 0, 360, true                              // dtheta + circular flag
    -30, 30, false                 // dtheta
  };

  CvMat boundmat = cvMat( p->num_states, 3, CV_64FC1, boundarr );

  //cvParticleSetDynamics( p, dynamicsmat );
  cvConvert(dynamicsmat, p->dynamics);
  // cvParticleSetNoise( p, rng, &stdmat );
  p->rng = cvRNG( time( NULL ) );
  cvConvert(&stdmat,p->std);
  // cvParticleSetBound( p, &boundmat );
  cvConvert(&boundmat,p->bound);
  
  // stdmat/boundmat are stack headers over local arrays; only the
  // heap-allocated dynamics matrix needs releasing
  cvReleaseMat(&dynamicsmat);
}
コード例 #5
0
void ImgAli_common::setTemplate(char *name)
{
	IplImage *tmp=cvLoadImage(name,0);
	Template=cvCreateMat(tmp->height,tmp->width,CV_64FC1);
	cvConvert(tmp,Template);

	//cvNamedWindow("1");
	//
	//cvDrawLine(Template,cvPoint(0,0),cvPoint(50,50),cvScalar(255,0,0));
	//cvShowImage("1",Template);
	//cvWaitKey();

	width=Template->cols;
	height=Template->rows;

	/*SD_ic=new double**[width];
	for (int i=0;i<width;i++)
	{
		SD_ic[i]=new double *[height];
		for (int j=0;j<height;j++)
		{
			SD_ic[i][j]=new double[shapeDim];
		}
	}*/

	//
	setParameters();
}
コード例 #6
0
// Set Pixel Data - Arrays
//--------------------------------------------------------------------------------
// Copies pixel data from a grayscale image into this float image,
// converting uchar -> float element-wise. Sizes must match; otherwise an
// error is reported and nothing is copied.
void ofxCvFloatImage::operator =	( ofxCvGrayscaleImage& mom ) {
	const bool sameSize = ( width == mom.width ) && ( height == mom.height );
	if( !sameSize ) {
		cout << "error in =, images are different sizes" << endl;
		return;
	}
	// cvConvert performs the element-wise depth conversion (8U -> 32F)
	cvConvert( mom.getCvImage(), cvImage );
}
コード例 #7
0
// Fits the minimum-area ellipse to 'contour' and draws it on srcImg.
// Returns the fitted CvBox2D. If the contour has fewer than 6 points
// (cvFitEllipse2's minimum) a zero-initialized box is returned and
// nothing is drawn.
CvBox2D DrawMinAreaEllipse(IplImage *srcImg,CvSeq *contour,CvScalar color/*=CV_RGB(255,0,0)*/)
{
	int count = contour->total; // number of points in the contour
	CvPoint center;
	CvSize size;
	CvBox2D box = {};	// fix: was returned uninitialized when count < 6 (UB)

	// cvFitEllipse2 requires at least 6 points
	if( count < 6 )
		return box;

	// Convert the integer contour points to float in place: points_i is an
	// int (CV_32SC2) header over points_f's buffer, so cvConvert widens
	// to CV_32FC2 without a second allocation.
	CvMat* points_f = cvCreateMat( 1, count, CV_32FC2 );
	CvMat points_i = cvMat( 1, count, CV_32SC2, points_f->data.ptr );
	cvCvtSeqToArray( contour, points_f->data.ptr, CV_WHOLE_SEQ );
	cvConvert( &points_i, points_f );
	// ellipse fitting
	box = cvFitEllipse2( points_f );
	cout<<"拟合的椭圆参数:angle="<<box.angle<<",center=("<<box.center.x<<","
		<<box.center.y<<")"<<",size(w,h)=("<<box.size.width<<","<<box.size.height<<")"<<endl;
	// extract the ellipse parameters (half-axes, +1 pixel margin)
	center = cvPointFrom32f(box.center);
	size.width = cvRound(box.size.width*0.5)+1;
	size.height = cvRound(box.size.height*0.5)+1;
	// draw the ellipse
	cvEllipse(srcImg, center, size,
		-box.angle, 0, 360,	color, 1, CV_AA, 0);
	cvReleaseMat(&points_f);
	return box;
}
コード例 #8
0
ファイル: scancell.cpp プロジェクト: FranoTech/ws-workflow
// Cell-extraction loop: repeatedly find external contours in the
// morphology image, copy small blobs (area < 3000, or any blob after 10
// stalled erosion rounds) into out_single, erase them from input_morph,
// and count them as cells.
//
// NOTE(review): this snippet does not compile as-is:
//  - `int main (int argv, char** argc[])` swaps the conventional
//    argc/argv names and declares argv-of-arrays (char***);
//  - the enclosing while-loop is commented out, leaving a bare `break`
//    outside any loop;
//  - input_morph, tmp8UC1, storage, first_con, cur and out_single are
//    used but never declared here (presumably file-scope globals in the
//    original project -- TODO confirm);
//  - `main` declares int but never returns a value.
int main (int argv, char** argc[])
{
    int ncell = 0, prev_ncontour = 0, same_count = 0;
	////while (!worker->CancellationPending) {
		////worker->ReportProgress(50, String::Format(rm->GetString("Progress_Analyze_FoundNCell"), title, ncell));
		cvConvert(input_morph, tmp8UC1);
		cvClearMemStorage(storage);
		int ncontour = cvFindContours(tmp8UC1, storage, &first_con, sizeof(CvContour), CV_RETR_EXTERNAL);
		if (ncontour == 0)
			break; // finish extract cell
		// if the contour count stopped changing, erode to split touching cells
		if (ncontour == prev_ncontour) {
			cvErode(input_morph, input_morph);
			same_count++;
		} else
			same_count = 0;
		prev_ncontour = ncontour;
		cur = first_con;
		// walk the contour list; accept small blobs as single cells
		while (cur != nullptr) {
			double area = fabs(cvContourArea(cur));
			if ((area < 3000.0) || (same_count > 10)) {
				int npts = cur->total;
				CvPoint *p = new CvPoint[npts];
				cvCvtSeqToArray(cur, p);
				cvFillPoly(out_single, &p, &npts, 1, cvScalar(255.0)); // move to single
				cvFillPoly(input_morph, &p, &npts, 1, cvScalar(0.0)); // remove from input
				delete[] p;
				ncell++;
			}
			cur = cur->h_next;
		}
	////}
}
コード例 #9
0
ファイル: fitellipse.cpp プロジェクト: AndrewShmig/FaceDetect
// Define trackbar callback functon. This function find contours,
// draw it and approximate it by ellipses.
void process_image(int h)
{
    CvMemStorage* storage;
    CvSeq* contour;

    // Create dynamic structure and sequence.
    storage = cvCreateMemStorage(0);
    contour = cvCreateSeq(CV_SEQ_ELTYPE_POINT, sizeof(CvSeq), sizeof(CvPoint) , storage);

    // Threshold the source image. This needful for cvFindContours().
    cvThreshold( image03, image02, slider_pos, 255, CV_THRESH_BINARY );

    // Find all contours.
    cvFindContours( image02, storage, &contour, sizeof(CvContour),
                    CV_RETR_LIST, CV_CHAIN_APPROX_NONE, cvPoint(0,0));

    // Clear images. IPL use.
    cvZero(image02);
    cvZero(image04);

    // This cycle draw all contours and approximate it by ellipses.
    for(;contour;contour = contour->h_next)
    {
        int count = contour->total; // This is number point in contour
        CvPoint center;
        CvSize size;
        CvBox2D box;

        // Number point must be more than or equal to 6 (for cvFitEllipse_32f).
        if( count < 6 )
            continue;

        CvMat* points_f = cvCreateMat( 1, count, CV_32FC2 );
        CvMat points_i = cvMat( 1, count, CV_32SC2, points_f->data.ptr );
        cvCvtSeqToArray( contour, points_f->data.ptr, CV_WHOLE_SEQ );
        cvConvert( &points_i, points_f );

        // Fits ellipse to current contour.
        box = cvFitEllipse2( points_f );

        // Draw current contour.
        cvDrawContours(image04,contour,CV_RGB(255,255,255),CV_RGB(255,255,255),0,1,8,cvPoint(0,0));

        // Convert ellipse data from float to integer representation.
        center = cvPointFrom32f(box.center);
        size.width = cvRound(box.size.width*0.5);
        size.height = cvRound(box.size.height*0.5);

        // Draw ellipse.
        cvEllipse(image04, center, size,
                  -box.angle, 0, 360,
                  CV_RGB(0,0,255), 1, CV_AA, 0);

        cvReleaseMat(&points_f);
    }

    // Show image. HighGUI use.
    cvShowImage( "Result", image04 );
}
コード例 #10
0
//--------------------------------------------------------------------------------
// Assigns a float image into this color image, converting the pixel data
// element-wise (saturating 32F -> 8U). Sizes must match; otherwise an
// error is printed and nothing is copied.
void ofxCvColorImage::operator =	( ofxCvFloatImage& mom ) {
	if( mom.width != width || mom.height != height ) {
		cout << "error in =, images are different sizes" << endl;
		return;
	}
	// cvConvert is equivalent to cvConvertScale with scale 1, shift 0
	cvConvert( mom.getCvImage(), cvImage );
}
コード例 #11
0
ファイル: transfer_svhn_dram.cpp プロジェクト: liangfu/dnn
// Reads up to response->rows SVHN images named by the printf-style pattern
// 'filename' (1-based index substituted), crops/warps each to a 64x64
// grayscale patch covering the union of its digit boxes (with 15% margin),
// normalizes the patch to zero mean / unit variance, and packs the patches
// row-wise into the returned CV_32F matrix.
//
// filename - printf-style path pattern containing one integer conversion
// response - per-image int annotations: [count, x, y, w, h, (x,y,w,h)...]
// returns  - max_samples x 4096 CV_32F matrix; data->rows is shrunk to the
//            number of images actually read (header-only adjustment)
CvMat * icvReadSVHNImages(char * filename, CvMat * response)
{
  const int max_samples = response->rows;
  const int max_strlen = 64*64;
  CvMat * data = cvCreateMat(max_samples,max_strlen,CV_32F);
  char datastr[1000];
  int ii;                                     // unused 'CvMat hdr' removed
  CvMat * sample = cvCreateMat(64,64,CV_32F);
  CvMat * warp = cvCreateMat(2,3,CV_32F);
  for (ii=0;;ii++){
    // 'filename' is used as the printf format; it must contain exactly
    // one integer placeholder (e.g. "%d.png")
    sprintf(datastr,filename,ii+1);
    IplImage * img = cvLoadImage(datastr, CV_LOAD_IMAGE_GRAYSCALE);
    if (!img || ii==max_samples){break;}
    CvMat * mat = cvCreateMat(img->height,img->width,CV_32F);
    cvConvert(img,mat);
    // bounding box of the first digit (annotations store centers)
    int nimages = CV_MAT_ELEM(*response,int,ii,0);
    float ww = CV_MAT_ELEM(*response,int,ii,3);
    float hh = CV_MAT_ELEM(*response,int,ii,4);
    float xx = CV_MAT_ELEM(*response,int,ii,1)-ww*.5f;
    float yy = CV_MAT_ELEM(*response,int,ii,2)-hh*.5f;
    // grow the box to cover the remaining digits
    for (int jj=1;jj<nimages;jj++){
      float www = CV_MAT_ELEM(*response,int,ii,1+4*jj+2);
      float hhh = CV_MAT_ELEM(*response,int,ii,1+4*jj+3);
      float xxx = CV_MAT_ELEM(*response,int,ii,1+4*jj+0)-www*.5f;
      float yyy = CV_MAT_ELEM(*response,int,ii,1+4*jj+1)-hhh*.5f;
      xx = MIN(xx,xxx); yy = MIN(yy,yyy);
      ww = MAX(xxx+www-xx,ww);
      hh = MAX(yyy+hhh-yy,hh);
    }
    // center of the union box; ss = side of the square crop
    xx+=ww*.5f;yy+=hh*.5f; float ss = MAX(ww,hh);
    cvZero(warp);cvZero(sample);
    // affine warp mapping the (1.3*ss)-sized square around (xx,yy) to 64x64
    warp->data.fl[2]=xx-ss*.5f-ss*.15f;
    warp->data.fl[5]=yy-ss*.5f-ss*.15f;
    warp->data.fl[0]=warp->data.fl[4]=ss*1.3f/64.f; //
    icvWarp(mat,sample,warp);
    // per-patch normalization: zero mean, unit variance
    CvScalar avg,sdv;
    cvAvgSdv(sample,&avg,&sdv);
    cvSubS(sample,avg,sample);
    cvScale(sample,sample,1.f/sdv.val[0]);
#if 0 // debug
    cvPrintf(stderr,"%f,",warp);
    cvAvgSdv(sample,&avg,&sdv); // re-calculate
    fprintf(stderr,"avg: %f, sdv: %f\n--\n",avg.val[0],sdv.val[0]);
    CV_SHOW(sample); // CV_SHOW(mat);
#endif
    memcpy(data->data.fl+max_strlen*ii,sample->data.ptr,max_strlen*sizeof(float));
    cvReleaseImage(&img);
    cvReleaseMat(&mat);
  }
  // shrink the visible row count to the samples actually loaded; the
  // allocation still holds max_samples rows
  data->rows = ii;
  cvReleaseMat(&sample);
  cvReleaseMat(&warp);
  assert(CV_MAT_TYPE(data->type)==CV_32F);
  return data;
}
コード例 #12
0
//--------------------------------------------------------------------------------
// Assigns the pixels of a float image into this filter's buffer.
// The ROIs must match; on mismatch an error is logged and nothing changes.
void setfilter::operator = ( const ofxCvFloatImage& _mom ) {
	//-- for using a floatimage --//

	// getCvImage() is non-const in ofxOpenCv, so shed constness here
	ofxCvFloatImage& src = const_cast<ofxCvFloatImage&>(_mom);

	if( !matchingROI(getROI(), src.getROI()) ) {
		ofLog(OF_LOG_ERROR, "in =, ROI mismatch");
		return;
	}

	cvConvert( src.getCvImage(), cvImage );
	flagImageChanged();
}
コード例 #13
0
ファイル: CPUImageFilter.cpp プロジェクト: jr-weber/BigBlobby
//--------------------------------------------------------------------------------
// Copies pixel data from a float image into this image, restricted to the
// intersection of the two ROIs; both ROIs are restored afterwards.
void CPUImageFilter::operator = ( const ofxCvFloatImage& _mom ) {
	// cast non-const; safe, every change made below is reversed
	ofxCvFloatImage& src = const_cast<ofxCvFloatImage&>(_mom);

	if( !pushSetBothToTheirIntersectionROI(*this,src) ) {
		ofLog(OF_LOG_ERROR, "in =, ROI mismatch");
		return;
	}

	//cvConvertScale( src.getCvImage(), cvImage, 1.0f, 0);
	cvConvert( src.getCvImage(), cvImage );
	popROI();       // restore previous ROI
	src.popROI();   // restore previous ROI
	flagImageChanged();
}
コード例 #14
0
// NOTE(review): the matching "#if defined(WINDOWS_SYS)" for the #elif
// below lies above this excerpt; the thread entry returns UINT on
// Windows and void* under POSIX.
UINT WINAPI
//DWORD WINAPI
#elif defined(POSIX_SYS)
// using pthread
void *
#endif
	ChessRecognition::LineSearchThread(
#if defined(WINDOWS_SYS)
	LPVOID
#elif defined(POSIX_SYS)
	void *
#endif
	Param) {
	// Thread function performing chessboard recognition via mode 2
	// (line-search based detection). Param is the owning ChessRecognition.
	ChessRecognition *_TChessRecognition = (ChessRecognition *)Param;
	_TChessRecognition->_LineSearchBased = new LineSearchBased();

	IplImage *_TGray = cvCreateImage(cvSize(_TChessRecognition->_Width, _TChessRecognition->_Height), IPL_DEPTH_8U, 1);

	while (_TChessRecognition->_EnableThread != false) {
		// Copy the image needed for processing from main (under the mutex).
		// NOTE(review): cvConvert does not change channel counts, so a
		// multi-channel input into the 1-channel _TGray likely needs
		// cvCvtColor instead -- confirm against the producer of this image.
		_TChessRecognition->_ChessBoardDetectionInternalImageProtectMutex.lock();
		if (_TChessRecognition->_ChessBoardDetectionInternalImage->nChannels != 1)
			cvConvert(_TChessRecognition->_ChessBoardDetectionInternalImage, _TGray);
		else
			cvCopy(_TChessRecognition->_ChessBoardDetectionInternalImage, _TGray);
		_TChessRecognition->_ChessBoardDetectionInternalImageProtectMutex.unlock();

		// Use the copied image for the actual line-search computation.
		_TChessRecognition->_ChessBoardDetectionInternalImageProtectMutex.lock();
		_TChessRecognition->_LineSearchBased->ChessLineSearchProcess(_TGray, &_TChessRecognition->_CP);
		_TChessRecognition->_ChessBoardDetectionInternalImageProtectMutex.unlock();

		Sleep(2);
	}

	// Free the image allocation.
	cvReleaseImage(&_TGray);
	//cvReleaseImage(&img_process);
	delete _TChessRecognition->_LineSearchBased;

#if defined(WINDOWS_SYS)
	_endthread();
#elif defined(POSIX_SYS)

#endif
	_TChessRecognition->_EndThread = true;
	return 0;
}
コード例 #15
0
ファイル: xform.c プロジェクト: BreezeLee/LibSift
/*
Calculates a least-squares planar homography from point correspondeces.
@param pts array of points
@param mpts array of corresponding points; each pts[i], i=0..n-1, corresponds to mpts[i]
@param n number of points in both pts and mpts; must be at least 4
@return Returns the 3 x 3 least-squares planar homography matrix that
	transforms points in pts to their corresponding points in mpts or NULL if
	fewer than 4 correspondences were provided
*/
CvMat* lsq_homog( CvPoint2D64f* pts, CvPoint2D64f* mpts, int n )
{
	CvMat* H, * A, * B, X;
    double x[9];// x holds the entries of the homography matrix H
	int i;

    // fewer than 4 point pairs: the system is under-determined
	if( n < 4 )
	{
		fprintf( stderr, "Warning: too few points in lsq_homog(), %s line %d\n",
			__FILE__, __LINE__ );
		return NULL;
	}

    // Unstack H (minus the fixed H[2][2]=1) into an 8-vector X so that
    // AX = B; one linear solve yields X, and H is rebuilt from it.
	/* set up matrices so we can unstack homography into X; AX = B */
    A = cvCreateMat( 2*n, 8, CV_64FC1 );// 2n x 8 (8x8 in the minimal 4-point case)
    B = cvCreateMat( 2*n, 1, CV_64FC1 );// 2n x 1 (8x1 in the minimal case)
    X = cvMat( 8, 1, CV_64FC1, x );// 8x1 header backed by x
    H = cvCreateMat(3, 3, CV_64FC1);// 3x3 result
    cvZero( A );// clear A

    // fill A and B: each correspondence contributes one row in the top
    // half (for x') and one in the bottom half (for y')
	for( i = 0; i < n; i++ )
	{
        cvmSet( A, i, 0, pts[i].x );// A[i][0] = pts[i].x
		cvmSet( A, i+n, 3, pts[i].x );
		cvmSet( A, i, 1, pts[i].y );
		cvmSet( A, i+n, 4, pts[i].y );
		cvmSet( A, i, 2, 1.0 );
		cvmSet( A, i+n, 5, 1.0 );
		cvmSet( A, i, 6, -pts[i].x * mpts[i].x );
		cvmSet( A, i, 7, -pts[i].y * mpts[i].x );
		cvmSet( A, i+n, 6, -pts[i].x * mpts[i].y );
		cvmSet( A, i+n, 7, -pts[i].y * mpts[i].y );
		cvmSet( B, i, 0, mpts[i].x );
		cvmSet( B, i+n, 0, mpts[i].y );
	}

    // least-squares solve of the (generally overdetermined) system
    cvSolve( A, B, &X, CV_SVD );// find X such that AX = B
    x[8] = 1.0;// H[2][2] is fixed at 1
	X = cvMat( 3, 3, CV_64FC1, x );
    cvConvert( &X, H );// view x as a 3x3 matrix and copy it into H

	cvReleaseMat( &A );
	cvReleaseMat( &B );
	return H;
}
コード例 #16
0
ファイル: average.cpp プロジェクト: mvanderkolff/navi-misc
// Grabs YUV frames from the first DC1394 camera, keeps an exponential
// running average (alpha = 0.1) of the stream, and displays the averaged
// image until the SDL event loop asks to quit.
int main() {
  IplImage *frame, *accumulator=NULL;

  cv_dc1394_init();

  while (cv_sdl_process_events()) {
    frame = cv_dc1394_capture_yuv(1)[0];

    // lazily allocate a 32-bit float accumulator matching the frame size
    if (!accumulator)
      accumulator = cvCreateImage(cvGetSize(frame), 32, 3);

    // accumulator = 0.1*frame + 0.9*accumulator
    cvRunningAvg(frame, accumulator, 0.1);
    // convert the float average back into the frame buffer for display
    cvConvert(accumulator, frame);

    cv_sdl_show_yuv_image(frame);
  }

  // fix: release the accumulator and return an explicit status
  if (accumulator)
    cvReleaseImage(&accumulator);
  return 0;
}
コード例 #17
0
ファイル: featurepyramid.cpp プロジェクト: outernets/opencv
/*
// Getting feature pyramid
//
// API
// int getFeaturePyramid(IplImage * image, CvLSVMFeaturePyramid **maps);
// INPUT
// image             - image
// OUTPUT
// maps              - feature maps for all levels
// RESULT
// Error status
*/
// Builds the latent-SVM feature pyramid for 'image' into *maps.
// Non-32F inputs are first converted into a temporary 3-channel 32F copy
// (assumes the input has 3 channels -- TODO confirm; cvConvert requires
// matching channel counts). Returns LATENT_SVM_OK.
int getFeaturePyramid(IplImage * image, CvLSVMFeaturePyramid **maps)
{
    IplImage *imgResize;
    float step;
    int   numStep;
    int   maxNumCells;
    int   W, H;

    if(image->depth == IPL_DEPTH_32F)
    {
        // already float: alias the input (deliberately not released below)
        imgResize = image;
    }
    else
    {
        imgResize = cvCreateImage(cvSize(image->width , image->height) ,
                                  IPL_DEPTH_32F , 3);
        cvConvert(image, imgResize);
    }

    W = imgResize->width;
    H = imgResize->height;

    // scale factor between adjacent pyramid levels: 2^(1/LAMBDA)
    step = powf(2.0f, 1.0f / ((float)LAMBDA));
    // the coarsest usable level must still hold ~5 cells per side
    maxNumCells = W / SIDE_LENGTH;
    if( maxNumCells > H / SIDE_LENGTH )
    {
        maxNumCells = H / SIDE_LENGTH;
    }
    numStep = (int)(logf((float) maxNumCells / (5.0f)) / logf( step )) + 1;

    allocFeaturePyramidObject(maps, numStep + LAMBDA);

    // fine levels (half cell size) first, then the regular levels
    getPathOfFeaturePyramid(imgResize, step   , LAMBDA, 0,
                            SIDE_LENGTH / 2, maps);
    getPathOfFeaturePyramid(imgResize, step, numStep, LAMBDA,
                            SIDE_LENGTH    , maps);

    // release only the copy we made; imgResize may alias 'image'
    if(image->depth != IPL_DEPTH_32F)
    {
        cvReleaseImage(&imgResize);
    }

    return LATENT_SVM_OK;
}
コード例 #18
0
ファイル: xform.c プロジェクト: BreezeLee/LibSift
/*
Calculates a planar homography from point correspondeces using the direct
linear transform.  Intended for use as a ransac_xform_fn.
  
@param pts array of points
@param mpts array of corresponding points; each pts[i], i=0..n-1,
  corresponds to mpts[i]
@param n number of points in both pts and mpts; must be at least 4
  
@return Returns the 3x3 planar homography matrix that transforms points
  in pts to their corresponding points in mpts or NULL if fewer than 4
  correspondences were provided
*/
CvMat* dlt_homog( CvPoint2D64f* pts, CvPoint2D64f* mpts, int n )
{
	CvMat* H, * A, * VT, * D, h, v9;
	double _h[9];
	int i;

	// at least 4 correspondences are required
	if( n < 4 )
	    return NULL;

	/* set up matrices so we can unstack homography into h; Ah = 0 */
	A = cvCreateMat( 2*n, 9, CV_64FC1 );
	cvZero( A );
	// each correspondence contributes two rows to A
	for( i = 0; i < n; i++ )
	{
		cvmSet( A, 2*i, 3, -pts[i].x );
		cvmSet( A, 2*i, 4, -pts[i].y );
		cvmSet( A, 2*i, 5, -1.0  );
		cvmSet( A, 2*i, 6, mpts[i].y * pts[i].x );
		cvmSet( A, 2*i, 7, mpts[i].y * pts[i].y );
		cvmSet( A, 2*i, 8, mpts[i].y );
		cvmSet( A, 2*i+1, 0, pts[i].x );
		cvmSet( A, 2*i+1, 1, pts[i].y );
		cvmSet( A, 2*i+1, 2, 1.0  );
		cvmSet( A, 2*i+1, 6, -mpts[i].x * pts[i].x );
		cvmSet( A, 2*i+1, 7, -mpts[i].x * pts[i].y );
		cvmSet( A, 2*i+1, 8, -mpts[i].x );
	}
	// the homography is the right singular vector of A for the smallest
	// singular value, i.e. the last row of VT
	D = cvCreateMat( 9, 9, CV_64FC1 );
	VT = cvCreateMat( 9, 9, CV_64FC1 );
	cvSVD( A, D, NULL, VT, CV_SVD_MODIFY_A + CV_SVD_V_T );
	v9 = cvMat( 1, 9, CV_64FC1, NULL );	// header only; data set by cvGetRow
	cvGetRow( VT, &v9, 8 );
	h = cvMat( 1, 9, CV_64FC1, _h );
	cvCopy( &v9, &h, NULL );
	// reinterpret the 9-vector as a 3x3 matrix and copy it out
	h = cvMat( 3, 3, CV_64FC1, _h );
	H = cvCreateMat( 3, 3, CV_64FC1 );
	cvConvert( &h, H );

	cvReleaseMat( &A );
	cvReleaseMat( &D );
	cvReleaseMat( &VT );
	return H;
}
コード例 #19
0
ファイル: main.cpp プロジェクト: lihw/lensflare
static 
void ShowResult()
{
    // Render the currently selected effect's result into the shared
    // display image g_pImage; unknown effect ids clear the image and
    // report an error. The image is then shown in the "Lens Flare" window.
    switch (g_effectId)
    {
        case EFFECT01: cvConvert(g_pEffect01->GetResult(), g_pImage); break;
        case EFFECT02: cvConvert(g_pEffect02->GetResult(), g_pImage); break;
        case EFFECT03: cvConvert(g_pEffect03->GetResult(), g_pImage); break;
        case EFFECT05: cvConvert(g_pEffect05->GetResult(), g_pImage); break;
        case EFFECT09: cvConvert(g_pEffect09->GetResult(), g_pImage); break;
        case EFFECT10: cvConvert(g_pEffect10->GetResult(), g_pImage); break;
        case EFFECT15: cvConvert(g_pEffect15->GetResult(), g_pImage); break;
        case EFFECT19: cvConvert(g_pEffect19->GetResult(), g_pImage); break;
        default:
            cvZero(g_pImage);
            fprintf(stderr, "Err: Not available!\n");
            break;
    }

    cvShowImage("Lens Flare", g_pImage);
}
コード例 #20
0
ファイル: xform.cpp プロジェクト: Mizutome/Cpp-SIFTICGM
/*
  Calculates a least-squares planar homography from point correspondeces.
  
  @param pts array of points
  @param mpts array of corresponding points; each pts[i], i=1..n, corresponds
    to mpts[i]
  @param n number of points in both pts and mpts; must be at least 4
  
  @return Returns the 3 x 3 least-squares planar homography matrix that
    transforms points in pts to their corresponding points in mpts or NULL if
    fewer than 4 correspondences were provided
*/
CvMat* lsq_homog( CvPoint2D64f* pts, CvPoint2D64f* mpts, int n )
{
  CvMat* H, * A, * B, X;
  double x[9];	// x holds the entries of the homography H
  int i;

  // the system is under-determined with fewer than 4 correspondences
  if( n < 4 )
    {
      fprintf( stderr, "Warning: too few points in lsq_homog(), %s line %d\n",
	       __FILE__, __LINE__ );
      return NULL;
    }

  /* set up matrices so we can unstack homography into X; AX = B */
  A = cvCreateMat( 2*n, 8, CV_64FC1 );	// 2n x 8 coefficient matrix
  B = cvCreateMat( 2*n, 1, CV_64FC1 );	// 2n x 1 right-hand side
  X = cvMat( 8, 1, CV_64FC1, x );	// 8x1 header backed by x
  H = cvCreateMat(3, 3, CV_64FC1);
  cvZero( A );
  // each correspondence fills one row in the top half (x') and one in
  // the bottom half (y') of A/B
  for( i = 0; i < n; i++ )
    {
      cvmSet( A, i, 0, pts[i].x );
      cvmSet( A, i+n, 3, pts[i].x );
      cvmSet( A, i, 1, pts[i].y );
      cvmSet( A, i+n, 4, pts[i].y );
      cvmSet( A, i, 2, 1.0 );
      cvmSet( A, i+n, 5, 1.0 );
      cvmSet( A, i, 6, -pts[i].x * mpts[i].x );
      cvmSet( A, i, 7, -pts[i].y * mpts[i].x );
      cvmSet( A, i+n, 6, -pts[i].x * mpts[i].y );
      cvmSet( A, i+n, 7, -pts[i].y * mpts[i].y );
      cvmSet( B, i, 0, mpts[i].x );
      cvmSet( B, i+n, 0, mpts[i].y );
    }
  // least-squares solve via SVD, then rebuild H with H[2][2] fixed at 1
  cvSolve( A, B, &X, CV_SVD );
  x[8] = 1.0;
  X = cvMat( 3, 3, CV_64FC1, x );
  cvConvert( &X, H );

  cvReleaseMat( &A );
  cvReleaseMat( &B );
  return H;
}
コード例 #21
0
// Builds the 2x3 affine matrix for a 180-degree (CV_PI) rotation about
// 'center' -- same layout as cv2DRotationMatrix -- and writes it into
// 'matrix' via cvConvert. Returns 'matrix' for call chaining.
CvMat* myCvGetRotationMatrix(CvPoint2D32f center, CvMat* matrix)
{
    const double theta = CV_PI;     // rotation angle (fixed at 180 degrees)
    const double c = cos(theta);
    const double s = sin(theta);

    // [  c   s  (1-c)*cx - s*cy ]
    // [ -s   c  s*cx + (1-c)*cy ]
    double m[2][3] = {
        {  c, s, (1 - c) * center.x - s * center.y },
        { -s, c, s * center.x + (1 - c) * center.y },
    };
    CvMat M = cvMat(2, 3, CV_64FC1, m);

    cvConvert(&M, matrix);

    return matrix;
}
コード例 #22
0
ファイル: ClothResizer.cpp プロジェクト: umutgultepe/Thesis
// Optimizes the global depth map dImage using the user mask uImage:
// fills the non-user region with the user's mean depth, Gaussian-smooths
// the map, then zeroes the background again. Always returns true.
bool optimizeDepthMap()
{	
	cvErode(uImage,uImage,0,2);		//Smoothen the User Map as well
	cvDilate(uImage,uImage,0,2);
	CvScalar depthMean=cvAvg(dImage,uImage);							//Get the Average Depth Value of the User Pixels
	cvNot(uImage,uImage);												//Invert the user pixels to paint the rest of the image with average user depth									 
	//viewImage(dImage);
	cvSet(dImage,depthMean,uImage);										 
	IplImage* tempImage=cvCreateImage(dSize,IPL_DEPTH_8U,1);
	// scale down to 8U for smoothing -- assumes dImage is 16-bit; TODO confirm
	cvConvertScale(dImage,tempImage,1.0/256);
	cvSmooth(tempImage,tempImage,CV_GAUSSIAN,7);//Perform Gaussian Smoothing, depth map is optimized.
	cvConvert(tempImage,dImage);
	cvScale(dImage,dImage,256);	// scale back to the original depth range
	cvSet(dImage,cvScalar(0),uImage);	
	//viewImage(dImage);
	//cvSmooth(dImage,dImage,CV_GAUSSIAN,gaussian_m,gaussian_n,gaussian_e);//Perform Gaussian Smoothing, depth map is optimized.
	cvNot(uImage,uImage);	// restore the user mask to its original sense
	cvReleaseImage(&tempImage);
	return true;
}
コード例 #23
0
ファイル: Diff2.cpp プロジェクト: KingBing/OpenCV_OLD
// Background-subtraction demo over "bike.avi": maintains a running-average
// background model, thresholds the frame difference into a foreground mask
// and shows video/background/foreground windows until a key is pressed.
// Returns 0 on success, -2 if the video cannot be opened.
int diff2_main( int argc, char** argv )
{
	// declare IplImage pointers
	IplImage* pFrame = NULL; 
	IplImage* pFrImg = NULL;
	IplImage* pBkImg = NULL;

	CvMat* pFrameMat = NULL;
	CvMat* pFrMat = NULL;
	CvMat* pBkMat = NULL;

	CvCapture* pCapture = NULL;

	int nFrmNum = 0;

	// create the display windows
	cvNamedWindow("video", 1);
	cvNamedWindow("background",1);
	cvNamedWindow("foreground",1);
	// arrange the windows side by side
	cvMoveWindow("video", 30, 0);
	cvMoveWindow("background", 360, 0);
	cvMoveWindow("foreground", 690, 0);


	if( !(pCapture = cvCaptureFromAVI("bike.avi")))
	{
		//pCapture = cvCaptureFromCAM(-1))
		fprintf(stderr, "Can not open camera.\n");
		return -2;
	}


	// read the video frame by frame
	while(pFrame = cvQueryFrame( pCapture ))
	{
		nFrmNum++;

		// first frame: allocate memory and initialize the model
		if(nFrmNum == 1)
		{
			pBkImg = cvCreateImage(cvSize(pFrame->width, pFrame->height),  IPL_DEPTH_8U,1);
			pFrImg = cvCreateImage(cvSize(pFrame->width, pFrame->height),  IPL_DEPTH_8U,1);

			pBkMat = cvCreateMat(pFrame->height, pFrame->width, CV_32FC1);
			pFrMat = cvCreateMat(pFrame->height, pFrame->width, CV_32FC1);
			pFrameMat = cvCreateMat(pFrame->height, pFrame->width, CV_32FC1);

			// convert to single-channel images before processing
			cvCvtColor(pFrame, pBkImg, CV_BGR2GRAY);
			cvCvtColor(pFrame, pFrImg, CV_BGR2GRAY);

			cvConvert(pFrImg, pFrameMat);
			cvConvert(pFrImg, pFrMat);
			cvConvert(pFrImg, pBkMat);
		}
		else
		{
			cvCvtColor(pFrame, pFrImg, CV_BGR2GRAY);
			cvConvert(pFrImg, pFrameMat);
			// optional Gaussian smoothing to reduce noise
			//cvSmooth(pFrameMat, pFrameMat, CV_GAUSSIAN, 3, 0, 0);

			// subtract the background model from the current frame
			cvAbsDiff(pFrameMat, pBkMat, pFrMat);

			// binarize the foreground image
			cvThreshold(pFrMat, pFrImg, 60, 255.0, CV_THRESH_BINARY);

			// optional morphological filtering to remove noise
			//cvErode(pFrImg, pFrImg, 0, 1);
			//cvDilate(pFrImg, pFrImg, 0, 1);

			// update the background model (learning rate 0.003)
			cvRunningAvg(pFrameMat, pBkMat, 0.003, 0);
			// convert the background back to image format for display
			cvConvert(pBkMat, pBkImg);

			// show the images
			cvShowImage("video", pFrame);
			cvShowImage("background", pBkImg);
			cvShowImage("foreground", pFrImg);

			// break the loop on any key press; this wait also gives
			// cvShowImage time to paint (adjust the delay per CPU speed)
			if( cvWaitKey(20) >= 0 )
			{
				break;
			}
		}
	}
	cvWaitKey();

	// destroy the windows
	cvDestroyWindow("video");
	cvDestroyWindow("background");
	cvDestroyWindow("foreground");

	// release images and matrices
	cvReleaseImage(&pFrImg);
	cvReleaseImage(&pBkImg);

	cvReleaseMat(&pFrameMat);
	cvReleaseMat(&pFrMat);
	cvReleaseMat(&pBkMat);

	cvReleaseCapture(&pCapture);

	return 0;
}
コード例 #24
0
// GPU-stream variant of getFeaturePyramid: builds the latent-SVM feature
// pyramid for 'image' into *maps, computing the per-level feature maps on
// the GPU. Non-32F inputs are converted to a temporary 3-channel 32F copy
// first (assumes a 3-channel input -- TODO confirm). bx/by are the extra
// border sizes forwarded to the per-level computation.
// Returns LATENT_SVM_OK.
int getFeaturePyramid(IplImage * image, CvLSVMFeaturePyramid **maps,
        const int bx, const int by)
{
    IplImage *imgResize;
    float step;
    unsigned int numStep;
    unsigned int maxNumCells;
    unsigned int W, H;

    if (image->depth == IPL_DEPTH_32F)
    {
        // already float: alias the input (deliberately not released below)
        imgResize = image;
    }
    else
    {
        imgResize = cvCreateImage(cvSize(image->width, image->height),
                IPL_DEPTH_32F, 3);
        cvConvert(image, imgResize);
    }

    W = imgResize->width;
    H = imgResize->height;

    // scale factor between adjacent pyramid levels: 2^(1/Lambda)
    step = powf(2.0f, 1.0f / ((float) Lambda));
    // the coarsest usable level must still hold ~5 cells per side
    maxNumCells = W / Side_Length;
    if (maxNumCells > H / Side_Length)
    {
        maxNumCells = H / Side_Length;
    }
    numStep = (int) (logf((float) maxNumCells / (5.0f)) / logf(step)) + 1;

    allocFeaturePyramidObject(maps, numStep + Lambda);

#ifdef PROFILE
    TickMeter tm;

    tm.start();
    cout << "(featurepyramid.cpp)getPathOfFeaturePyramid START " << endl;
#endif

    // stage the image once on the GPU, compute both level groups, clean up
    uploadImageToGPU1D(imgResize);

    // fine levels (half cell size) first, then the regular levels
    getPathOfFeaturePyramidGPUStream(imgResize, step , Lambda, 0,
            Side_Length / 2, bx, by, maps);

    getPathOfFeaturePyramidGPUStream(imgResize, step, numStep, Lambda,
            Side_Length , bx, by, maps);

    cleanImageFromGPU1D();

#ifdef PROFILE
    tm.stop();
    cout << "(featurepyramid.cpp)getPathOfFeaturePyramid END time = "
            << tm.getTimeSec() << " sec" << endl;
#endif

    // release only the copy we made; imgResize may alias 'image'
    if (image->depth != IPL_DEPTH_32F)
    {
        cvReleaseImage(&imgResize);
    }

    return LATENT_SVM_OK;
}
コード例 #25
0
ファイル: cvtcolor.cpp プロジェクト: acbalingit/scilab-sivp
// Scilab gateway for color-space conversion of a 3-channel double image.
// Rhs 1: image matrix; Rhs 2: conversion-code string, one of
// "rgb2hsv", "hsv2rgb", "rgb2ycrcb", "ycrcb2rgb".
// cvCvtColor does not operate on 64F data, so the pipeline is
// 64F -> 32F -> cvCvtColor -> 64F. Returns 0 on success, -1 on error.
int int_cvtcolor(char *fname)
{
  char * cvt_str_code = NULL; //input string code
  int cvt_code; //opencv cvt_color code

  IplImage * src_image = NULL;
  IplImage * src_f32_image = NULL;
  IplImage * dst_f32_image = NULL;
  IplImage * dst_f64_image = NULL;

  int m2, n2, l2;

  //check the number of input and output parameters
  CheckRhs(2,2);
  CheckLhs(1,1);

  GetRhsVar(2, "c", &m2, &n2, &l2);
  cvt_str_code = cstk(l2);

  //load the input image
  src_image = Mat2IplImg(1);

  // check if input image is correctly loaded
  if(src_image == NULL)
    {
      sciprint("%s error: can't load the input image.\r\n", fname);
      return -1;
    }
  //check the number of channels
  if(src_image->nChannels != 3)
    {
      sciprint("%s error: The input image must be 3-channel image.\r\n", fname);
      cvReleaseImage(&src_image);
      return -1;
    }
  //this function is called
  //from scilab macros rgb2hsv/hsv2rgb/...
  //the input image can only be double float.
  if(src_image->depth != IPL_DEPTH_64F)
    {
      sciprint("%s error: The input image must be double type.\r\n", fname);
      cvReleaseImage(&src_image);
      return -1;
    }

  // map the string code to the OpenCV conversion constant
  if( strcmp(cvt_str_code, "rgb2hsv") == 0)
    cvt_code = CV_BGR2HSV; //the channel order of IplImage in SIVP is BGR
  else if( strcmp(cvt_str_code, "hsv2rgb") == 0)
    cvt_code = CV_HSV2BGR;
  else if( strcmp(cvt_str_code, "rgb2ycrcb") == 0)
    cvt_code = CV_BGR2YCrCb;
  else if( strcmp(cvt_str_code, "ycrcb2rgb") == 0)
    cvt_code = CV_YCrCb2BGR;
  else
    {
      sciprint("%s error: unsupport convertion code %s.\r\n", fname, cvt_str_code);
      cvReleaseImage(&src_image);
      return -1;
    }
  //create images needed (32F working copies and the 64F result)
  src_f32_image = cvCreateImage(cvGetSize(src_image),
        IPL_DEPTH_32F,
        3);
  dst_f32_image = cvCreateImage(cvGetSize(src_image),
        IPL_DEPTH_32F,
        3);
  dst_f64_image = cvCreateImage(cvGetSize(src_image),
        IPL_DEPTH_64F,
        3);

  if(src_f32_image == NULL ||
     dst_f32_image == NULL ||
     dst_f64_image == NULL)
    {
      sciprint("Error: create image error.\r\n");
      cvReleaseImage(&src_image);
      cvReleaseImage(&src_f32_image);
      cvReleaseImage(&dst_f32_image);
      cvReleaseImage(&dst_f64_image);
      return -1;
    }

  // 64F -> 32F, convert the color space, then 32F -> 64F
  cvConvert(src_image, src_f32_image);

  cvCvtColor(src_f32_image, dst_f32_image, cvt_code);

  cvConvert(dst_f32_image, dst_f64_image);

  IplImg2Mat(dst_f64_image, 3);
  //send the result
  LhsVar(1)=3;

  cvReleaseImage(&src_image);
  cvReleaseImage(&src_f32_image);
  cvReleaseImage(&dst_f32_image);
  cvReleaseImage(&dst_f64_image);

  return 0;
}
コード例 #26
0
int main()
{
	/*读配置文件*/
	FILE* fp;
	char readin[100],file[100];
	int value1=0, value2=0, value3=0,value4=0,value5=0;
	fopen_s(&fp, "configure/configure.txt", "r");
	fscanf_s(fp, "video: %s", readin, sizeof(readin));
	sprintf(file, readin);
	memset(readin, 0, sizeof(readin));
	fgets(readin, sizeof(readin),fp);
	fscanf_s(fp, "value1: %d", &value1, sizeof(int));
	memset(readin, 0, sizeof(readin));
	fgets(readin, sizeof(readin), fp);
	fscanf_s(fp, "value2: %d", &value2, sizeof(int));
	memset(readin, 0, sizeof(readin));
	fgets(readin, sizeof(readin), fp);;
	fscanf_s(fp, "value3: %d", &value3, sizeof(int));
	memset(readin, 0, sizeof(readin));
	fgets(readin, sizeof(readin), fp);
	fscanf_s(fp, "value4: %d", &value4, sizeof(int));
	memset(readin, 0, sizeof(readin));
	fgets(readin, sizeof(readin), fp);
	fscanf_s(fp, "value5: %d", &value5, sizeof(int));
	printf("%s %d %d %d\n", file,value1,value2,value3);


	/*声明IplImage指针*/
	IplImage *image0 = NULL;		//原始帧
	IplImage *image = NULL;			//当前帧
	IplImage *image_pass = NULL;	//上一帧
	IplImage *res = NULL;			//帧差
	IplImage *res0 = NULL;			//帧差
	IplImage *pFrame = NULL;
	IplImage *pFrImg = NULL;
	IplImage *pBkImg = NULL;

	/*声明CvMat指针*/
	CvMat* pFrameMat = NULL;
	CvMat* pFrMat = NULL;
	CvMat* pBkMat = NULL;
	CvMat* IndexMat = NULL;


	/*声明caputer指针*/
	CvCapture *capture = NULL;
	capture = cvCaptureFromFile(file);
	image0 = cvQueryFrame(capture);
	nFrmNum++;

	// 创建窗口
	//cvNamedWindow("video", 1);
	cvNamedWindow("background", 1);
	cvNamedWindow("tracking", 1);

	// 排列窗口
	//cvMoveWindow("video", 30, 80);
	cvMoveWindow("background",10, 100);
	cvMoveWindow("tracking", 660, 100);

	/*移动物体的队列*/
	movingObject *curr_head = new movingObject();
	movingObject *prev_head = new  movingObject();
	movingObject *p_obj = new movingObject();
	movingObject *share_head = new movingObject();
	curr_head->next = NULL;
	curr_head->share_next = NULL;
	prev_head->next = NULL;
	prev_head->share_next = NULL;
	share_head->next = NULL;

	CvScalar color[7] = { { 0, 0, 0 }, { 0, 255, 0 }, {255, 0, 0 }, { 255, 201, 14 }, { 255, 0, 255 }, { 0, 166, 0 }, {121,255,121} };

	image = cvCreateImage(cvSize(640,360), IPL_DEPTH_8U, 3);
	cvResize(image0, image, CV_INTER_LINEAR);

	int image_width = image->width;
	int image_height = image->height;

	image_pass = cvCreateImage(cvSize(image_width, image_height), IPL_DEPTH_8U, 3);
	res = cvCreateImage(cvSize(image_width, image_height), IPL_DEPTH_8U, 1);
	res0 = cvCreateImage(cvSize(image_width, image_height), IPL_DEPTH_8U, 3);
	pBkImg = cvCreateImage(cvSize(image_width, image_height), IPL_DEPTH_8U, 1);
	pFrImg = cvCreateImage(cvSize(image_width, image_height), IPL_DEPTH_8U, 1);
	pFrame = cvCreateImage(cvSize(image_width, image_height), IPL_DEPTH_8U, 1);

	pBkMat = cvCreateMat(pFrame->height, pFrame->width, CV_32FC1);
	pFrMat = cvCreateMat(pFrame->height, pFrame->width, CV_32FC1);
	pFrameMat = cvCreateMat(pFrame->height, pFrame->width, CV_32FC1);
	IndexMat = cvCreateMat(image_height, image_width, CV_32FC1);

	cvCopy(image, image_pass, NULL);


	/*背景*/
	cvCvtColor(image, pBkImg, CV_BGR2GRAY);
	cvConvert(pBkImg, pBkMat);

	while (1)
	{
		image0 = cvQueryFrame(capture);
		if (!image0) break;	

		cvResize(image0, image, CV_INTER_LINEAR);


		/*高斯平滑*/
		cvSmooth(image, image, CV_GAUSSIAN, 3, 0, 0);

		/*运动目标检测*/
		detect_object(image, pBkImg, pFrImg, pFrameMat, pBkMat, pFrMat,value1);

		/*帧差法优化*/
		frame_dif(image, image_pass, res,res0, pFrImg,pFrame,value2);

		cvConvert(pFrame, pFrMat);

		/*计算连通区域和质心*/
		computeObject(pFrMat, image_width, image_height,IndexMat, curr_head,value3);

		/*画出质心*/
		p_obj = curr_head;
		while (p_obj->next != NULL)
		{
			p_obj = p_obj->next;
			cvRectangle(image, cvPoint(p_obj->x - 1, p_obj->y - 1), cvPoint(p_obj->x + 1, p_obj->y + 1), cvScalar(0, 0, 255), 2, 8, 0);
		}


		if (nFrmNum == 2)
		{
			movingObject* q = NULL;
			computeObject(pFrMat, image_width, image_height, IndexMat, prev_head,value3);

		}
		/*画出跟踪框*/
		if (nFrmNum > 2)
		{
			movingObject *q = NULL, *p = NULL;
			movingObject *last=curr_head;

			for (q = curr_head->next; q; )
			{
				int close = 0;
 				share_head->share_next = NULL;
  				for (p = prev_head->next; p; p = p->next)
				{
					int dist = cal_dist(p->x, p->y, q->x, q->y);
					if (dist <= value5)
					{
						close++;
						p->share_next = share_head->share_next; 
						share_head->share_next= p;
					}

				}

				if (close == 1)
				{
					if (share_head->share_next->track == 1)	//已被用,删掉当前结点
					{
						last->next = q->next;
						movingObject* t=q;
						q = q->next;
						delete t;
						continue;
					}
					q->label = (share_head->share_next)->label;
					cvRectangle(image, q->points[0], q->points[1], color[q->label],2);
					q->move = q->x - (share_head->share_next)->x;	//两帧位移
					share_head->share_next->track = 1;
					q->track = 0; 
					q->keeptime = 0;
					last = q;
					q = q->next;
				}
				else if (close==0)
				{
					//生成新标签
					q->label = ++label_num;
					cvRectangle(image, q->points[0], q->points[1], color[q->label],2);
					q->track = 0;
					q->keeptime = 0;
					last = q;
					q = q->next;
				}
				else if (close>1)
				{
					movingObject* t = share_head->share_next;
					while ( t != NULL)
					{
						if (t->track==1)
						t = t->share_next;
						else break;
					}
					if (t==NULL)	//全部跟踪完毕
					{
						last->next = q->next;
						t = q;
						q = q->next;
						delete t;
						continue;
					}

					//重用当前队列的这一object
					q->label = t->label;
					q->move = t->move;
					q->area = t->area;
					q->x = t->x + t->move;
					q->y = t->y;
					q->points[0].x = t->points[0].x+t->move;
					q->points[0].y = t->points[0].y;
					q->points[1].x = t->points[1].x + t->move;
					q->points[1].y = t->points[1].y;
					q->track = 0;
					t->track = 1;
					q->keeptime = 0;
					cvRectangle(image, q->points[0], q->points[1], color[q->label],2);

					t = t->share_next;
					while (t)
					{
						if (t->track == 1)
						{
							t = t->share_next;
							continue;
						}
						movingObject* newobject = new movingObject();
						newobject->area = t->area;
						newobject->label = t->label;
						newobject->move = t->move;
						newobject->next = q->next;
						q->next = newobject;
						q = newobject;
						newobject->points[0].x = t->points[0].x + t->move;
						newobject->points[0].y = t->points[0].y;
						newobject->points[1].x = t->points[1].x + t->move;
						newobject->points[1].y = t->points[1].y;
						newobject->x = t->x + t->move;
						newobject->y = t->y;
						newobject->track = 0;
						newobject->keeptime = 0;
						cvRectangle(image, newobject->points[0], newobject->points[1], color[newobject->label],2);
						t->track = 1;			//已跟踪这一个prev目标
						t = t->share_next;
					}
					last = q;
					q = q->next;

				}

			}//end for
			detect_hiding(prev_head, image_width, curr_head,value4);
		}//end if
		
		cvShowImage("tracking", image);
		release_link(prev_head,share_head);
		prev_head->next = curr_head->next;
		curr_head->next = NULL;
		
		int ctrl;
		if ( (ctrl=cvWaitKey(20)) == 27)//ESC退出
		{
			break;
		}
		else if (ctrl == 32)	//空格暂停
		{
			while ((ctrl=cvWaitKey(0))!=13)//回车继续
			{
				if (ctrl == 27)
				exit(0);
				continue;
			}

			
		}
	}

	cvReleaseImage(&res);
	cvReleaseImage(&res0);
	cvReleaseImage(&image_pass);
	cvReleaseImage(&pBkImg);
	cvReleaseImage(&pFrImg);
	cvReleaseImage(&pFrame);
	cvReleaseCapture(&capture);

	return 1;
}
コード例 #27
0
ファイル: mlem.cpp プロジェクト: glo/ee384b
// K-means clustering over float sample vectors, used to seed EM.
// train_data: nsamples x dims float vectors; nclusters is clamped to nsamples.
// labels (in/out, 1 x nsamples CV_32S) receives the cluster index of each sample.
// centers0 (optional, nclusters x dims): initial centers; if absent, labels are
// seeded with a shuffled balanced assignment instead.
// Iterates assign/update until termcrit.max_iter or until the max squared
// center displacement drops below termcrit.epsilon (squared below), then
// guarantees no cluster is left empty by stealing samples from larger ones.
void CvEM::kmeans( const CvVectors& train_data, int nclusters, CvMat* labels,
                   CvTermCriteria termcrit, const CvMat* centers0 )
{
    CvMat* centers = 0;
    CvMat* old_centers = 0;
    CvMat* counters = 0;

    CV_FUNCNAME( "CvEM::kmeans" );

    __BEGIN__;

    CvRNG rng = cvRNG(-1);
    int i, j, k, nsamples, dims;
    int iter = 0;
    double max_dist = DBL_MAX;

    termcrit = cvCheckTermCriteria( termcrit, 1e-6, 100 );
    // distances below are kept squared, so square epsilon once here
    termcrit.epsilon *= termcrit.epsilon;
    nsamples = train_data.count;
    dims = train_data.dims;
    nclusters = MIN( nclusters, nsamples );

    CV_CALL( centers = cvCreateMat( nclusters, dims, CV_64FC1 ));
    CV_CALL( old_centers = cvCreateMat( nclusters, dims, CV_64FC1 ));
    CV_CALL( counters = cvCreateMat( 1, nclusters, CV_32SC1 ));
    cvZero( old_centers );

    if( centers0 )
    {
        CV_CALL( cvConvert( centers0, centers ));
    }
    else
    {
        // no initial centers: balanced label assignment, then shuffle
        for( i = 0; i < nsamples; i++ )
            labels->data.i[i] = i*nclusters/nsamples;
        cvRandShuffle( labels, &rng );
    }

    for( ;; )
    {
        CvMat* temp;

        // assignment step (skipped on the first pass unless centers were given)
        if( iter > 0 || centers0 )
        {
            for( i = 0; i < nsamples; i++ )
            {
                const float* s = train_data.data.fl[i];
                int k_best = 0;
                double min_dist = DBL_MAX;

                for( k = 0; k < nclusters; k++ )
                {
                    const double* c = (double*)(centers->data.ptr + k*centers->step);
                    double dist = 0;

                    // squared Euclidean distance, unrolled 4 at a time
                    for( j = 0; j <= dims - 4; j += 4 )
                    {
                        double t0 = c[j] - s[j];
                        double t1 = c[j+1] - s[j+1];
                        dist += t0*t0 + t1*t1;
                        t0 = c[j+2] - s[j+2];
                        t1 = c[j+3] - s[j+3];
                        dist += t0*t0 + t1*t1;
                    }

                    for( ; j < dims; j++ )
                    {
                        double t = c[j] - s[j];
                        dist += t*t;
                    }

                    if( min_dist > dist )
                    {
                        min_dist = dist;
                        k_best = k;
                    }
                }

                labels->data.i[i] = k_best;
            }
        }

        if( ++iter > termcrit.max_iter )
            break;

        // double-buffer: previous centers become old_centers
        CV_SWAP( centers, old_centers, temp );
        cvZero( centers );
        cvZero( counters );

        // update centers
        for( i = 0; i < nsamples; i++ )
        {
            const float* s = train_data.data.fl[i];
            k = labels->data.i[i];
            double* c = (double*)(centers->data.ptr + k*centers->step);

            // accumulate the sample into its cluster's sum, unrolled by 4
            for( j = 0; j <= dims - 4; j += 4 )
            {
                double t0 = c[j] + s[j];
                double t1 = c[j+1] + s[j+1];

                c[j] = t0;
                c[j+1] = t1;

                t0 = c[j+2] + s[j+2];
                t1 = c[j+3] + s[j+3];

                c[j+2] = t0;
                c[j+3] = t1;
            }
            for( ; j < dims; j++ )
                c[j] += s[j];
            counters->data.i[k]++;
        }

        if( iter > 1 )
            max_dist = 0;

        for( k = 0; k < nclusters; k++ )
        {
            double* c = (double*)(centers->data.ptr + k*centers->step);
            if( counters->data.i[k] != 0 )
            {
                // mean = sum / count
                double scale = 1./counters->data.i[k];
                for( j = 0; j < dims; j++ )
                    c[j] *= scale;
            }
            else
            {
                // empty cluster: re-seed its center from a random sample that
                // belongs to a cluster with more than one member (10 tries)
                const float* s;
                for( j = 0; j < 10; j++ )
                {
                    i = cvRandInt( &rng ) % nsamples;
                    if( counters->data.i[labels->data.i[i]] > 1 )
                        break;
                }
                s = train_data.data.fl[i];
                for( j = 0; j < dims; j++ )
                    c[j] = s[j];
            }

            if( iter > 1 )
            {
                // track the largest squared center displacement for convergence
                double dist = 0;
                const double* c_o = (double*)(old_centers->data.ptr + k*old_centers->step);
                for( j = 0; j < dims; j++ )
                {
                    double t = c[j] - c_o[j];
                    dist += t*t;
                }
                if( max_dist < dist )
                    max_dist = dist;
            }
        }

        if( max_dist < termcrit.epsilon )
            break;
    }

    // recount cluster sizes from the final labels
    cvZero( counters );
    for( i = 0; i < nsamples; i++ )
        counters->data.i[labels->data.i[i]]++;

    // ensure that we do not have empty clusters
    for( k = 0; k < nclusters; k++ )
        if( counters->data.i[k] == 0 )
            for(;;)
            {
                // steal a random sample from a cluster that has more than one
                i = cvRandInt(&rng) % nsamples;
                j = labels->data.i[i];
                if( counters->data.i[j] > 1 )
                {
                    labels->data.i[i] = k;
                    counters->data.i[j]--;
                    counters->data.i[k]++;
                    break;
                }
            }

    __END__;

    cvReleaseMat( &centers );
    cvReleaseMat( &old_centers );
    cvReleaseMat( &counters );
}
コード例 #28
0
ファイル: mlem.cpp プロジェクト: glo/ee384b
// Automatic initialization of the EM model (means, covs, weights) from data.
// If nclusters == nsamples, each sample becomes its own cluster mean with a
// zero covariance; otherwise the samples are clustered with kmeans (when
// nclusters > 1) and each cluster's mean/covariance/weight is estimated from
// its members. Writes into the member fields `means`, `covs`,
// `cov_rotate_mats`, `probs`, `weights` (declared elsewhere in the class).
void CvEM::init_auto( const CvVectors& train_data )
{
    CvMat* hdr = 0;
    const void** vec = 0;
    CvMat* class_ranges = 0;
    CvMat* labels = 0;

    CV_FUNCNAME( "CvEM::init_auto" );

    __BEGIN__;

    int nclusters = params.nclusters, nsamples = train_data.count, dims = train_data.dims;
    int i, j;

    if( nclusters == nsamples )
    {
        // degenerate case: one sample per cluster
        CvMat src = cvMat( 1, dims, CV_32F );
        CvMat dst = cvMat( 1, dims, CV_64F );
        for( i = 0; i < nsamples; i++ )
        {
            // alias the i-th sample / i-th mean row, then convert 32F -> 64F
            src.data.ptr = train_data.data.ptr[i];
            dst.data.ptr = means->data.ptr + means->step*i;
            cvConvert( &src, &dst );
            cvZero( covs[i] );
            cvSetIdentity( cov_rotate_mats[i] );
        }
        cvSetIdentity( probs );
        cvSet( weights, cvScalar(1./nclusters) );
    }
    else
    {
        int max_count = 0;

        // class_ranges[i]..class_ranges[i+1] delimits cluster i's samples
        // after sorting by class label
        CV_CALL( class_ranges = cvCreateMat( 1, nclusters+1, CV_32SC1 ));
        if( nclusters > 1 )
        {
            CV_CALL( labels = cvCreateMat( 1, nsamples, CV_32SC1 ));
            kmeans( train_data, nclusters, labels, cvTermCriteria( CV_TERMCRIT_ITER,
                    params.means ? 1 : 10, 0.5 ), params.means );
            CV_CALL( cvSortSamplesByClasses( (const float**)train_data.data.fl,
                                            labels, class_ranges->data.i ));
        }
        else
        {
            class_ranges->data.i[0] = 0;
            class_ranges->data.i[1] = nsamples;
        }

        for( i = 0; i < nclusters; i++ )
        {
            int left = class_ranges->data.i[i], right = class_ranges->data.i[i+1];
            max_count = MAX( max_count, right - left );
        }
        // build an array of CvMat headers (one row each) so cvCalcCovarMatrix
        // can be fed pointers to the raw float sample rows
        CV_CALL( hdr = (CvMat*)cvAlloc( max_count*sizeof(hdr[0]) ));
        CV_CALL( vec = (const void**)cvAlloc( max_count*sizeof(vec[0]) ));
        hdr[0] = cvMat( 1, dims, CV_32F );
        for( i = 0; i < max_count; i++ )
        {
            vec[i] = hdr + i;
            hdr[i] = hdr[0];
        }

        for( i = 0; i < nclusters; i++ )
        {
            int left = class_ranges->data.i[i], right = class_ranges->data.i[i+1];
            int cluster_size = right - left;
            CvMat avg;

            if( cluster_size <= 0 )
                continue;

            // point the reusable headers at this cluster's sample rows
            for( j = left; j < right; j++ )
                hdr[j - left].data.fl = train_data.data.fl[j];

            CV_CALL( cvGetRow( means, &avg, i ));
            CV_CALL( cvCalcCovarMatrix( vec, cluster_size, covs[i],
                &avg, CV_COVAR_NORMAL | CV_COVAR_SCALE ));
            // mixture weight = fraction of samples in the cluster
            weights->data.db[i] = (double)cluster_size/(double)nsamples;
        }
    }

    __END__;

    cvReleaseMat( &class_ranges );
    cvReleaseMat( &labels );
    cvFree( &hdr );
    cvFree( &vec );
}
コード例 #29
0
ファイル: mlem.cpp プロジェクト: glo/ee384b
// Initializes the EM iteration state according to params.start_step:
// - START_AUTO_STEP (or trivial nclusters): derive everything from the data
//   via init_auto;
// - START_M_STEP: sanitize the user-supplied probs (clip negatives,
//   L1-normalize each row) and return early — covariances are built during
//   the first M-step;
// - START_E_STEP: take user-supplied means/weights/covs (when complete),
//   otherwise fall back to init_auto.
// Finally, each covariance matrix is decomposed with SVD and reshaped to the
// requested cov_mat_type (spherical / diagonal / generic).
void CvEM::init_em( const CvVectors& train_data )
{
    CvMat *w = 0, *u = 0, *tcov = 0;

    CV_FUNCNAME( "CvEM::init_em" );

    __BEGIN__;

    double maxval = 0;
    int i, force_symm_plus = 0;
    int nclusters = params.nclusters, nsamples = train_data.count, dims = train_data.dims;

    if( params.start_step == START_AUTO_STEP || nclusters == 1 || nclusters == nsamples )
        init_auto( train_data );
    else if( params.start_step == START_M_STEP )
    {
        for( i = 0; i < nsamples; i++ )
        {
            CvMat prob;
            cvGetRow( params.probs, &prob, i );
            cvMaxS( &prob, 0., &prob );             // clip negative probabilities
            cvMinMaxLoc( &prob, 0, &maxval );
            if( maxval < FLT_EPSILON )
                cvSet( &prob, cvScalar(1./nclusters) );  // degenerate row -> uniform
            else
                cvNormalize( &prob, &prob, 1., 0, CV_L1 );
        }
        EXIT; // do not preprocess covariation matrices,
              // as in this case they are initialized at the first iteration of EM
    }
    else
    {
        CV_ASSERT( params.start_step == START_E_STEP && params.means );
        if( params.weights && params.covs )
        {
            cvConvert( params.means, means );
            // reshape so the copy works for either row or column weight vectors
            cvReshape( weights, weights, 1, params.weights->rows );
            cvConvert( params.weights, weights );
            cvReshape( weights, weights, 1, 1 );
            cvMaxS( weights, 0., weights );
            cvMinMaxLoc( weights, 0, &maxval );
            if( maxval < FLT_EPSILON )
                cvSet( weights, cvScalar(1./nclusters) );
            cvNormalize( weights, weights, 1., 0, CV_L1 );
            for( i = 0; i < nclusters; i++ )
                CV_CALL( cvConvert( params.covs[i], covs[i] ));
            // user-supplied covariances may not be exactly symmetric
            force_symm_plus = 1;
        }
        else
            init_auto( train_data );
    }

    CV_CALL( tcov = cvCreateMat( dims, dims, CV_64FC1 ));
    CV_CALL( w = cvCreateMat( dims, dims, CV_64FC1 ));
    if( params.cov_mat_type == COV_MAT_GENERIC )
        CV_CALL( u = cvCreateMat( dims, dims, CV_64FC1 ));

    for( i = 0; i < nclusters; i++ )
    {
        if( force_symm_plus )
        {
            // symmetrize: tcov = (covs[i] + covs[i]^T) / 2
            cvTranspose( covs[i], tcov );
            cvAddWeighted( covs[i], 0.5, tcov, 0.5, 0, tcov );
        }
        else
            cvCopy( covs[i], tcov );
        cvSVD( tcov, w, u, 0, CV_SVD_MODIFY_A + CV_SVD_U_T + CV_SVD_V_T );
        if( params.cov_mat_type == COV_MAT_SPHERICAL )
            cvSetIdentity( covs[i], cvScalar(cvTrace(w).val[0]/dims) );  // mean eigenvalue on the diagonal
        else if( params.cov_mat_type == COV_MAT_DIAGONAL )
            cvCopy( w, covs[i] );
        else
        {
            // generic case: covs[i] = (u')'*max(w,0)*u'
            cvGEMM( u, w, 1, 0, 0, tcov, CV_GEMM_A_T );
            cvGEMM( tcov, u, 1, 0, 0, covs[i], 0 );
        }
    }

    __END__;

    cvReleaseMat( &w );
    cvReleaseMat( &u );
    cvReleaseMat( &tcov );
}
コード例 #30
0
// FFT-based cross-correlation of `_img` with kernel `_templ`, writing into
// `_corr` (used as the workhorse of cvMatchTemplate / cvFilter2D-style ops).
// Works block-wise: the output is tiled, each tile's input patch and the
// template are transformed with cvDFT, multiplied in the spectrum domain
// (conjugate multiply => correlation), and transformed back. Multi-channel
// inputs are split into planes; with a 1-channel output the per-plane
// correlations are accumulated. `anchor` positions the kernel, `delta` is
// added to the (single-channel) result, `borderType` pads out-of-image areas.
void
icvCrossCorr( const CvArr* _img, const CvArr* _templ, CvArr* _corr,
              CvPoint anchor, double delta, int borderType )
{
    // disable OpenMP in the case of Visual Studio,
    // otherwise the performance drops significantly
#undef USE_OPENMP
#if !defined _MSC_VER || defined CV_ICC
    #define USE_OPENMP 1
#endif

    const double block_scale = 4.5;
    const int min_block_size = 256;
    cv::Ptr<CvMat> dft_img[CV_MAX_THREADS];   // per-thread DFT scratch buffers
    cv::Ptr<CvMat> dft_templ;                 // DFT of each template plane, stacked vertically
    std::vector<uchar> buf[CV_MAX_THREADS];   // per-thread depth-conversion buffers
    int k, num_threads = 0;
    
    CvMat istub, *img = (CvMat*)_img;
    CvMat tstub, *templ = (CvMat*)_templ;
    CvMat cstub, *corr = (CvMat*)_corr;
    CvSize dftsize, blocksize;
    int depth, templ_depth, corr_depth, max_depth = CV_32F,
        cn, templ_cn, corr_cn, buf_size = 0,
        tile_count_x, tile_count_y, tile_count;

    img = cvGetMat( img, &istub );
    templ = cvGetMat( templ, &tstub );
    corr = cvGetMat( corr, &cstub );

    if( CV_MAT_DEPTH( img->type ) != CV_8U &&
        CV_MAT_DEPTH( img->type ) != CV_16U &&
        CV_MAT_DEPTH( img->type ) != CV_32F &&
        CV_MAT_DEPTH( img->type ) != CV_64F )
        CV_Error( CV_StsUnsupportedFormat,
        "The function supports only 8u, 16u and 32f data types" );

    if( !CV_ARE_DEPTHS_EQ( img, templ ) && CV_MAT_DEPTH( templ->type ) != CV_32F )
        CV_Error( CV_StsUnsupportedFormat,
        "Template (kernel) must be of the same depth as the input image, or be 32f" );
    
    if( !CV_ARE_DEPTHS_EQ( img, corr ) && CV_MAT_DEPTH( corr->type ) != CV_32F &&
        CV_MAT_DEPTH( corr->type ) != CV_64F )
        CV_Error( CV_StsUnsupportedFormat,
        "The output image must have the same depth as the input image, or be 32f/64f" );

    if( (!CV_ARE_CNS_EQ( img, corr ) || CV_MAT_CN(templ->type) > 1) &&
        (CV_MAT_CN( corr->type ) > 1 || !CV_ARE_CNS_EQ( img, templ)) )
        CV_Error( CV_StsUnsupportedFormat,
        "The output must have the same number of channels as the input (when the template has 1 channel), "
        "or the output must have 1 channel when the input and the template have the same number of channels" );

    depth = CV_MAT_DEPTH(img->type);
    cn = CV_MAT_CN(img->type);
    templ_depth = CV_MAT_DEPTH(templ->type);
    templ_cn = CV_MAT_CN(templ->type);
    corr_depth = CV_MAT_DEPTH(corr->type);
    corr_cn = CV_MAT_CN(corr->type);

    CV_Assert( corr_cn == 1 || delta == 0 );

    // all DFT work happens at the widest depth involved (at least 32F)
    max_depth = MAX( max_depth, templ_depth );
    max_depth = MAX( max_depth, depth );
    max_depth = MAX( max_depth, corr_depth );
    if( depth > CV_8U )
        max_depth = CV_64F;

    /*if( img->cols < templ->cols || img->rows < templ->rows )
        CV_Error( CV_StsUnmatchedSizes,
        "Such a combination of image and template/filter size is not supported" );*/

    if( corr->rows > img->rows + templ->rows - 1 ||
        corr->cols > img->cols + templ->cols - 1 )
        CV_Error( CV_StsUnmatchedSizes,
        "output image should not be greater than (W + w - 1)x(H + h - 1)" );

    // choose an output tile size roughly block_scale x the template size,
    // but at least min_block_size worth of work, and no bigger than corr
    blocksize.width = cvRound(templ->cols*block_scale);
    blocksize.width = MAX( blocksize.width, min_block_size - templ->cols + 1 );
    blocksize.width = MIN( blocksize.width, corr->cols );
    blocksize.height = cvRound(templ->rows*block_scale);
    blocksize.height = MAX( blocksize.height, min_block_size - templ->rows + 1 );
    blocksize.height = MIN( blocksize.height, corr->rows );

    dftsize.width = cvGetOptimalDFTSize(blocksize.width + templ->cols - 1);
    if( dftsize.width == 1 )
        dftsize.width = 2;
    dftsize.height = cvGetOptimalDFTSize(blocksize.height + templ->rows - 1);
    if( dftsize.width <= 0 || dftsize.height <= 0 )
        CV_Error( CV_StsOutOfRange, "the input arrays are too big" );

    // recompute block size
    blocksize.width = dftsize.width - templ->cols + 1;
    blocksize.width = MIN( blocksize.width, corr->cols );
    blocksize.height = dftsize.height - templ->rows + 1;
    blocksize.height = MIN( blocksize.height, corr->rows );

    dft_templ = cvCreateMat( dftsize.height*templ_cn, dftsize.width, max_depth );

#ifdef USE_OPENMP
    num_threads = cvGetNumThreads();
#else
    num_threads = 1;
#endif

    for( k = 0; k < num_threads; k++ )
        dft_img[k] = cvCreateMat( dftsize.height, dftsize.width, max_depth );

    // size the scratch buffer for whichever depth conversion is largest
    if( templ_cn > 1 && templ_depth != max_depth )
        buf_size = templ->cols*templ->rows*CV_ELEM_SIZE(templ_depth);

    if( cn > 1 && depth != max_depth )
        buf_size = MAX( buf_size, (blocksize.width + templ->cols - 1)*
            (blocksize.height + templ->rows - 1)*CV_ELEM_SIZE(depth));

    if( (corr_cn > 1 || cn > 1) && corr_depth != max_depth )
        buf_size = MAX( buf_size, blocksize.width*blocksize.height*CV_ELEM_SIZE(corr_depth));

    if( buf_size > 0 )
    {
        for( k = 0; k < num_threads; k++ )
            buf[k].resize(buf_size);
    }

    // compute DFT of each template plane
    for( k = 0; k < templ_cn; k++ )
    {
        CvMat dstub, *src, *dst, temp;
        CvMat* planes[] = { 0, 0, 0, 0 };
        int yofs = k*dftsize.height;   // plane k lives at row offset k*dftsize.height in dft_templ

        src = templ;
        dst = cvGetSubRect( dft_templ, &dstub, cvRect(0,yofs,templ->cols,templ->rows));
    
        if( templ_cn > 1 )
        {
            // split out plane k, converting depth via the scratch buffer if needed
            planes[k] = templ_depth == max_depth ? dst :
                cvInitMatHeader( &temp, templ->rows, templ->cols, templ_depth, &buf[0][0] );
            cvSplit( templ, planes[0], planes[1], planes[2], planes[3] );
            src = planes[k];
            planes[k] = 0;
        }

        if( dst != src )
            cvConvert( src, dst );

        if( dft_templ->cols > templ->cols )
        {
            // zero the right-hand padding of the DFT buffer
            cvGetSubRect( dft_templ, dst, cvRect(templ->cols, yofs,
                          dft_templ->cols - templ->cols, templ->rows) );
            cvZero( dst );
        }
        cvGetSubRect( dft_templ, dst, cvRect(0,yofs,dftsize.width,dftsize.height) );
        // nonzero_rows = templ->rows lets cvDFT skip the zero padding rows
        cvDFT( dst, dst, CV_DXT_FORWARD + CV_DXT_SCALE, templ->rows );
    }

    tile_count_x = (corr->cols + blocksize.width - 1)/blocksize.width;
    tile_count_y = (corr->rows + blocksize.height - 1)/blocksize.height;
    tile_count = tile_count_x*tile_count_y;

#if defined _OPENMP && defined USE_OPENMP
    #pragma omp parallel for num_threads(num_threads) schedule(dynamic)
#endif
    // calculate correlation by blocks
    for( k = 0; k < tile_count; k++ )
    {
#ifdef USE_OPENMP
        int thread_idx = cvGetThreadNum();
#else
        int thread_idx = 0;
#endif
        // tile's top-left corner in the output
        int x = (k%tile_count_x)*blocksize.width;
        int y = (k/tile_count_x)*blocksize.height;
        int i, yofs;
        CvMat sstub, dstub, *src, *dst, temp;
        CvMat* planes[] = { 0, 0, 0, 0 };
        CvMat* _dft_img = dft_img[thread_idx];
        uchar* _buf = buf_size > 0 ? &buf[thread_idx][0] : 0;
        CvSize csz = { blocksize.width, blocksize.height }, isz;
        // input patch origin (may fall outside the image; clipped below)
        int x0 = x - anchor.x, y0 = y - anchor.y;
        int x1 = MAX( 0, x0 ), y1 = MAX( 0, y0 ), x2, y2;
        csz.width = MIN( csz.width, corr->cols - x );
        csz.height = MIN( csz.height, corr->rows - y );
        isz.width = csz.width + templ->cols - 1;
        isz.height = csz.height + templ->rows - 1;
        x2 = MIN( img->cols, x0 + isz.width );
        y2 = MIN( img->rows, y0 + isz.height );
        
        for( i = 0; i < cn; i++ )
        {
            CvMat dstub1, *dst1;
            yofs = i*dftsize.height;

            src = cvGetSubRect( img, &sstub, cvRect(x1,y1,x2-x1,y2-y1) );
            dst = cvGetSubRect( _dft_img, &dstub,
                cvRect(0,0,isz.width,isz.height) );
            dst1 = dst;
            
            // if the patch was clipped at the image border, copy into the
            // interior and synthesize the border afterwards
            if( x2 - x1 < isz.width || y2 - y1 < isz.height )
                dst1 = cvGetSubRect( _dft_img, &dstub1,
                    cvRect( x1 - x0, y1 - y0, x2 - x1, y2 - y1 ));

            if( cn > 1 )
            {
                planes[i] = dst1;
                if( depth != max_depth )
                    planes[i] = cvInitMatHeader( &temp, y2 - y1, x2 - x1, depth, _buf );
                cvSplit( src, planes[0], planes[1], planes[2], planes[3] );
                src = planes[i];
                planes[i] = 0;
            }

            if( dst1 != src )
                cvConvert( src, dst1 );

            if( dst != dst1 )
                cvCopyMakeBorder( dst1, dst, cvPoint(x1 - x0, y1 - y0), borderType );

            if( dftsize.width > isz.width )
            {
                // zero the right-hand DFT padding
                cvGetSubRect( _dft_img, dst, cvRect(isz.width, 0,
                      dftsize.width - isz.width,dftsize.height) );
                cvZero( dst );
            }

            cvDFT( _dft_img, _dft_img, CV_DXT_FORWARD, isz.height );
            cvGetSubRect( dft_templ, dst,
                cvRect(0,(templ_cn>1?yofs:0),dftsize.width,dftsize.height) );

            // conjugate spectrum product => correlation (not convolution)
            cvMulSpectrums( _dft_img, dst, _dft_img, CV_DXT_MUL_CONJ );
            cvDFT( _dft_img, _dft_img, CV_DXT_INVERSE, csz.height );

            src = cvGetSubRect( _dft_img, &sstub, cvRect(0,0,csz.width,csz.height) );
            dst = cvGetSubRect( corr, &dstub, cvRect(x,y,csz.width,csz.height) );

            if( corr_cn > 1 )
            {
                // multi-channel output: write plane i of the result
                planes[i] = src;
                if( corr_depth != max_depth )
                {
                    planes[i] = cvInitMatHeader( &temp, csz.height, csz.width,
                                                 corr_depth, _buf );
                    cvConvertScale( src, planes[i], 1, delta );
                }
                cvMerge( planes[0], planes[1], planes[2], planes[3], dst );
                planes[i] = 0;                    
            }
            else
            {
                // single-channel output: first plane initializes, the rest accumulate
                if( i == 0 )
                    cvConvertScale( src, dst, 1, delta );
                else
                {
                    if( max_depth > corr_depth )
                    {
                        cvInitMatHeader( &temp, csz.height, csz.width,
                                         corr_depth, _buf );
                        cvConvert( src, &temp );
                        src = &temp;
                    }
                    cvAcc( src, dst );
                }
            }
        }
    }
}