Example #1
0
/**
 * @brief Apply the Gabor filter at the given frequency/orientation and return
 *        the magnitude response sqrt(real^2 + imag^2) as a new image.
 * @param Image       input image; not modified.
 * @param Frequency   frequency index passed to CalculateKernel().
 * @param Orientation orientation index; also stored in this->orientation.
 * @return newly allocated image (caller owns / must release it).
 */
Img GaborImage::GaborTransform(Img Image, int Frequency, int Orientation) {
	orientation = Orientation;
	CalculateKernel(Orientation, Frequency);

	// Responses of the real and imaginary kernel halves.
	Img gabor_real = (IplImage*) cvClone(Image);
	Img gabor_img  = (IplImage*) cvClone(Image);
	cvFilter2D(Image, gabor_real, KernelRealData);	//image.Convolution(this.KernelRealData);
	cvFilter2D(Image, gabor_img , KernelImgData);	//image.Convolution(this.KernelImgData);

	// Square each response in place.
	cvPow(gabor_real, gabor_real, 2);
	cvPow(gabor_img,  gabor_img,  2);

	// Magnitude = sqrt(real^2 + imag^2). The sum of squares is non-negative,
	// so cvPow(x, 0.5) is exactly sqrt(x) here — no need for the original's
	// detour through cv::Mat + a second clone (which also leaked the first
	// clone of the input when retImg was reassigned).
	Img retImg = (IplImage*) cvClone(Image);
	cvAdd(gabor_real, gabor_img, retImg);
	cvPow(retImg, retImg, 0.5);

	cvReleaseImage(&gabor_real);
	cvReleaseImage(&gabor_img);

	return retImg;
}
/* Standard Deviation */
/**
 * @brief Per-pixel sample standard deviation of the buffered frames with
 *        respect to the background model.
 * @return m_imgStandardDeviation (member buffer, 8-bit), overwritten each call.
 * @note Uses the sample variance (divides by mFrameNumber - 1), so
 *       mFrameNumber must be >= 2.
 */
IplImage* motionDetection::getStandardDeviationFrame(void) {

	// Accumulate the sum of squared deviations into mSum (32F scratch).
	cvZero(mSum);
	for (int i = 0; i < mFrameNumber; ++i) {
		// frame[i] <= | frame[i] - Background Model |
		cvAbsDiff(mpFrame[i], m_imgBackgroundModel, mTmp8U);
		// uchar->float
		cvConvert(mTmp8U, mTmp);
		// mTmp = mTmp * mTmp
		cvPow(mTmp, mTmp, 2.0);
		// add mSum += mTmp
		cvAdd(mSum, mTmp, mSum);
	}

	// Sample variance: mTmp <= mSum / (mFrameNumber-1).
	// cvConvertScale does this in one call, replacing the original
	// hand-rolled per-pixel division loop.
	cvConvertScale(mSum, mTmp, 1.0 / (mFrameNumber - 1), 0);

	// standard deviation
	cvPow(mTmp, mTmp, 0.5);

	// float->uchar
	cvConvert(mTmp, m_imgStandardDeviation);

	return m_imgStandardDeviation;
}
Example #3
0
/**
 * @brief Convolve the source image with the stored Gabor kernel and write the
 *        selected response into dst.
 * @param src  8-bit input image (read element-wise as uchar).
 * @param dst  output image; when dst->depth == IPL_DEPTH_8U the response is
 *             min/max-normalized to [0,255] and rounded into it.
 * @param Type response to compute: CV_GABOR_REAL, CV_GABOR_IMAG,
 *             CV_GABOR_MAG (magnitude), or CV_GABOR_PHASE (not implemented).
 *
 * NOTE(review): the work matrices are created as (width x height), so the
 * image is transposed into `mat` and transposed back on output — consistent
 * within this function. `mat`, `rmat` and `imat` are not released in the
 * visible code; confirm they are freed after this excerpt.
 */
void CvGabor::conv_img(IplImage *src, IplImage *dst, int Type)   // function: conv_img
{
// printf("CvGabor::conv_img 1\n");
  double ve; //, re,im;
  
  // Copy the uchar image into a float matrix (transposed: mat is width x height).
  CvMat *mat = cvCreateMat(src->width, src->height, CV_32FC1);
  for (int i = 0; i < src->width; i++) {
    for (int j = 0; j < src->height; j++) {
      ve = CV_IMAGE_ELEM(src, uchar, j, i);   // CV_IMAGE_ELEM reads the pixel at row j, column i
      CV_MAT_ELEM(*mat, float, i, j) = (float)ve;  // convert to float
    }
  }
  
// printf("CvGabor::conv_img 2\n");
  CvMat *rmat = cvCreateMat(src->width, src->height, CV_32FC1);
  CvMat *imat = cvCreateMat(src->width, src->height, CV_32FC1);
  
  switch (Type)
  {
    case CV_GABOR_REAL:
      // Response of the real (even) kernel half, filtered in place.
      cvFilter2D( (CvMat*)mat, (CvMat*)mat, (CvMat*)Real, cvPoint( (Width-1)/2, (Width-1)/2));
      break;
    case CV_GABOR_IMAG:
      // Response of the imaginary (odd) kernel half, filtered in place.
      cvFilter2D( (CvMat*)mat, (CvMat*)mat, (CvMat*)Imag, cvPoint( (Width-1)/2, (Width-1)/2));
      break;
    case CV_GABOR_MAG:
      // Magnitude: sqrt(real^2 + imag^2) of the two responses.
      cvFilter2D( (CvMat*)mat, (CvMat*)rmat, (CvMat*)Real, cvPoint( (Width-1)/2, (Width-1)/2));
      cvFilter2D( (CvMat*)mat, (CvMat*)imat, (CvMat*)Imag, cvPoint( (Width-1)/2, (Width-1)/2));
      
      cvPow(rmat,rmat,2); 
      cvPow(imat,imat,2);
      cvAdd(imat,rmat,mat); 
      cvPow(mat,mat,0.5); 
      break;
    case CV_GABOR_PHASE:
      // Phase response: not implemented.
      break;
  }
  
// printf("CvGabor::conv_img 3\n");
  if (dst->depth == IPL_DEPTH_8U)
  {
    // Normalize to [0,255] and copy back (transposing again) with rounding.
    cvNormalize((CvMat*)mat, (CvMat*)mat, 0, 255, CV_MINMAX);
    for (int i = 0; i < mat->rows; i++)
    {
      for (int j = 0; j < mat->cols; j++)
      {
        ve = CV_MAT_ELEM(*mat, float, i, j);
        CV_IMAGE_ELEM(dst, uchar, j, i) = (uchar)cvRound(ve);
      }
    }
  }
Example #4
0
/* For each pixel row, write the smaller eigenvalue of the 2x2 covariance
   matrix [[a, b], [b, c]] (stored as interleaved triples in `cov`) into
   `dst`. `buffer` provides one scratch row of at least 2*size.width floats:
   the first half holds the pre-sqrt discriminant, the second half (a+c)/2. */
static void
icvCalcMinEigenVal( const float* cov, int cov_step, float* dst,
                    int dst_step, CvSize size, CvMat* buffer )
{
    float* scratch = buffer->data.fl;

    /* Convert the byte strides into element strides. */
    cov_step /= sizeof(cov[0]);
    dst_step /= sizeof(dst[0]);
    buffer->rows = 1;

    for( ; size.height--; cov += cov_step, dst += dst_step )
    {
        int col;

        for( col = 0; col < size.width; col++ )
        {
            const double a = cov[col*3]   * 0.5;
            const double b = cov[col*3+1];
            const double c = cov[col*3+2] * 0.5;

            scratch[col + size.width] = (float)(a + c);
            scratch[col] = (float)((a - c)*(a - c) + b*b);
        }

        /* Square-root the whole scratch row in one vectorized call. */
        cvPow( buffer, buffer, 0.5 );

        /* min eigenvalue = (a+c)/2 - sqrt(((a-c)/2)^2 + b^2) */
        for( col = 0; col < size.width; col++ )
            dst[col] = (float)(scratch[col + size.width] - scratch[col]);
    }
}
Example #5
0
 void  cvbReciprocal( const float* x, float* y, int len )
{
    CvMat mx = cvMat( 1, len, CV_32F, (void*)x );
    CvMat my = mx;
    my.data.fl = (float*)y;

    cvPow( &mx, &my, -1 );
}
Example #6
0
 void  cvbInvSqrt( const float* x, float* y, int len )
{
    CvMat mx = cvMat( 1, len, CV_32F, (void*)x );
    CvMat my = mx;
    my.data.fl = (float*)y;

    cvPow( &mx, &my, -0.5 );
}
Example #7
0
/**
 * @brief Inverse-DFT dft_A (in place), then display log(1 + magnitude)
 *        normalized to [0,1] in a window named "DFT INVERSE - <src>".
 * @param im    original image; only kept for signature compatibility.
 * @param dft_A complex (2-channel) spectrum; OVERWRITTEN with its inverse.
 * @param dft_M rows of the DFT matrix (passed as nonzero_rows to cvDFT).
 * @param dft_N cols of the DFT matrix.
 * @param src   label appended to the window title.
 */
void cvShowInvDFT1(IplImage* im, CvMat* dft_A, int dft_M, int dft_N,char* src)
{
    IplImage * image_Re;
    IplImage * image_Im;

    double m, M;
    char str[80];

    image_Re = cvCreateImage( cvSize(dft_N, dft_M), IPL_DEPTH_64F, 1);
    image_Im = cvCreateImage( cvSize(dft_N, dft_M), IPL_DEPTH_64F, 1);

    //cvDFT( dft_A, dft_A, CV_DXT_INV_SCALE, complexInput->height );
    cvDFT( dft_A, dft_A, CV_DXT_INV_SCALE, dft_M);

    // snprintf cannot overflow the 80-byte buffer, unlike the original
    // strcpy/strcat pair with an arbitrary-length `src`.
    snprintf(str, sizeof(str), "DFT INVERSE - %s", src);
    cvNamedWindow(str, 0);

    // Split Fourier in real and imaginary parts
    cvSplit( dft_A, image_Re, image_Im, 0, 0 );

    // Compute the magnitude of the spectrum Mag = sqrt(Re^2 + Im^2)
    cvPow( image_Re, image_Re, 2.0);
    cvPow( image_Im, image_Im, 2.0);
    cvAdd( image_Re, image_Im, image_Re, NULL);
    cvPow( image_Re, image_Re, 0.5 );

    // Compute log(1 + Mag)
    cvAddS( image_Re, cvScalarAll(1.0), image_Re, NULL ); // 1 + Mag
    cvLog( image_Re, image_Re ); // log(1 + Mag)

    // Linearly map [min, max] onto [0, 1] for display.
    cvMinMaxLoc(image_Re, &m, &M, NULL, NULL, NULL);
    cvScale(image_Re, image_Re, 1.0/(M-m), 1.0*(-m)/(M-m));
    //cvCvtColor(image_Re, image_Re, CV_GRAY2RGBA);

    cvShowImage(str, image_Re);

    // cvShowImage copies the data into the window, so the temporaries can be
    // released. The original leaked these two images plus three more
    // (realInput/imaginaryInput/complexInput) it allocated but never used.
    cvReleaseImage(&image_Re);
    cvReleaseImage(&image_Im);
}
// Thread worker: computes f1 = (f - f4)^2 into Arg[1].
// Arg layout (by index): 0 = input image f, 1 = output f1, 4 = f4 buffer,
// 5 = source image and 6 = float threshold consumed by cvAndDiff.
// NOTE(review): cvAndDiff is not a standard OpenCV function — presumably a
// project helper that fills Arg[4] from Arg[5] using the scalar; confirm its
// contract against its definition.
void* multiThread1_1(void* Arg1)
{
	void** Arg = (void**) Arg1;
	cvZero( (IplImage*)Arg[1] );		
	cvZero( (IplImage*)Arg[4] );
	cvAndDiff( (IplImage*)Arg[4], cvScalar(*(float*)Arg[6]), (IplImage*)Arg[5] ); 
	cvSub( (IplImage*)Arg[0], (IplImage*)Arg[4], (IplImage*)Arg[1] ); //f1=f-f4
	// Square the difference in place.
	cvPow( (IplImage*)Arg[1], (IplImage*)Arg[1], 2.0 );
	return NULL;
}
Example #9
0
/**
 * @brief Wiener deconvolution of one complex (2-channel, 64F) image channel:
 *        channel <= IDFT( DFT(channel)*conj(DFT(kernel)) / (|DFT(kernel)|^2 + sigma) ).
 * @param channel in/out complex image, deconvolved in place.
 * @param kernel  blur kernel in the same complex layout; its size must match
 *                channel's for cvMulSpectrums.
 * @param sigma   Wiener regularization constant added to the denominator.
 */
void BModel::wiener_filter_chanel(IplImage *channel, IplImage *kernel, const double sigma)
{
    IplImage *fKernel  = cvCreateImage(cvGetSize(kernel), IPL_DEPTH_64F, 2);
    IplImage *fChannel = cvCreateImage(cvGetSize(channel), IPL_DEPTH_64F, 2);
    IplImage *answ = cvCreateImage(cvGetSize(channel), IPL_DEPTH_64F, 2);
    IplImage *reFKernel = cvCreateImage(cvGetSize(kernel), IPL_DEPTH_64F, 1);
    IplImage *imFKernel = cvCreateImage(cvGetSize(kernel), IPL_DEPTH_64F, 1);
    IplImage *reAnsw = cvCreateImage(cvGetSize(channel), IPL_DEPTH_64F, 1);
    IplImage *imAnsw = cvCreateImage(cvGetSize(channel), IPL_DEPTH_64F, 1);
    // (The original also allocated re/imFChannel images that were never used
    // anywhere in the function; they have been dropped.)

    cvDFT(kernel, fKernel, CV_DXT_FORWARD, channel->height);
    cvDFT(channel, fChannel, CV_DXT_FORWARD, channel->height);

    // Numerator: DFT(channel) * conj(DFT(kernel)), split into re/im planes.
    cvMulSpectrums(fChannel, fKernel, answ, CV_DXT_MUL_CONJ);
    cvSplit(answ, reAnsw, imAnsw, 0, 0 );
    cvSplit(fKernel, reFKernel, imFKernel, 0, 0 );

    // Denominator: |DFT(kernel)|^2 + sigma, accumulated into reFKernel.
    cvPow(reFKernel, reFKernel, 2);
    cvPow(imFKernel, imFKernel, 2);
    cvAdd(reFKernel, imFKernel, reFKernel, 0);
    cvAddS(reFKernel, cvScalarAll(sigma), reFKernel, 0);

    cvDiv(reAnsw, reFKernel, reAnsw, 1);
    cvDiv(imAnsw, reFKernel, imAnsw, 1);

    cvMerge(reAnsw, imAnsw, NULL, NULL, answ);

    // Back to the spatial domain (scaled) and over the input channel.
    cvDFT(answ, answ, CV_DXT_INV_SCALE, channel->height);
    cvCopy(answ, channel);

    cvReleaseImage(&fKernel);
    cvReleaseImage(&fChannel);
    cvReleaseImage(&answ);
    cvReleaseImage(&reFKernel);
    cvReleaseImage(&imFKernel);
    cvReleaseImage(&reAnsw);
    cvReleaseImage(&imAnsw);
}
Example #10
0
// Thread worker: computes f2 = (f - f3)^2 into Arg[2].
// Arg layout (by index): 0 = input image f, 2 = output f2, 3 = f3 buffer,
// 5 = source image and 7 = float threshold consumed by cvAndDiff.
// NOTE(review): cvAndDiff is not a standard OpenCV function — presumably a
// project helper that fills Arg[3] from Arg[5] using the scalar; confirm its
// contract against its definition.
void* multiThread1_2(void* Arg1)
{
	void** Arg = (void**) Arg1;
	//fprintf(stderr, "MT %d\n", __LINE__);
	cvZero( (IplImage*)Arg[2] );
	cvZero( (IplImage*)Arg[3] );
	//fprintf(stderr, "MT %d\n", __LINE__);
	cvAndDiff( (IplImage*)Arg[3], cvScalar(*(float*)Arg[7]), (IplImage*)Arg[5] ); 
	cvSub( (IplImage*)Arg[0], (IplImage*)Arg[3], Arg[2] );  //f2=f-f3
	// Square the difference in place.
	cvPow( (IplImage*)Arg[2], (IplImage*)Arg[2], 2.0 );
	return NULL;
}
Example #11
0
// Finds the "center" of the binary shape in m_transImage: the pixel with the
// largest distance-transform value, i.e. the point deepest inside the shape.
// Side effects: updates m_center and draws a small marker ellipse on m_image.
CvPoint CTransformImage::findCenter()
{
	IplImage* dist8u  = cvCloneImage(m_transImage);
	IplImage* dist32f = cvCreateImage(cvGetSize(m_transImage), IPL_DEPTH_32F, 1);
	IplImage* dist32s = cvCreateImage(cvGetSize(m_transImage), IPL_DEPTH_32S, 1);

	// User-defined distance mask: 1 for axial steps, 1.5 for diagonal steps
	float mask[3] = {1.f, 1.5f, 0};

	// Distance transform: each pixel gets its distance to the nearest zero pixel
	cvDistTransform(m_transImage, dist32f, CV_DIST_USER, 3, mask, NULL);

	// Rescale (x1000 then sqrt) so the result spans a visible range
	cvConvertScale(dist32f, dist32f, 1000, 0);
	cvPow(dist32f, dist32f, 0.5);

	// float -> int (+0.5 shift for rounding), clamp to 8 bits, then -> uchar
	cvConvertScale(dist32f, dist32s, 1.0, 0.5);
	cvAndS(dist32s, cvScalarAll(255), dist32s, 0);
	cvConvertScale(dist32s, dist8u, 1, 0);

	// Scan for the pixel with the largest distance value
	int max;
	for(int i = max = 0; i < dist8u->height; ++i)
	{
		int index = i * dist8u->widthStep;
		for(int j = 0; j < dist8u->width; ++j)
		{
			if((unsigned char)dist8u->imageData[index+j] > max)
			{
				max = (unsigned char)dist8u->imageData[index+j];
				m_center.x = j, m_center.y = i;
			}
		}
	}

	cvReleaseImage(&dist8u);
	cvReleaseImage(&dist32f);
	cvReleaseImage(&dist32s);

	// Fall back to the origin if no valid center was recorded
	if(m_center.x < 0 || m_center.y < 0)
		m_center.x = 0, m_center.y = 0;

	// Draw a small marker ellipse at the detected center
	CvBox2D box;
	box.center = cvPoint2D32f(m_center.x, m_center.y);
	box.size   = cvSize2D32f(3, 3);
	box.angle  = 90;
	cvEllipseBox(m_image, box, CV_RGB(255,242,0), 3);

	return m_center;
}
Example #12
0
// Space-time (3D) Harris corner response: dst = det(C) - k * trace(C)^3,
// where C is the symmetric 3x3 second-moment matrix whose entries live in the
// member buffers cxx, cyy, ctt, cxy, cxt, cyt. tmp1/tmp2 are member scratch
// buffers; statement order matters because tmp2 is reused repeatedly.
void HarrisBuffer::HarrisFunction(double k, IplImage* dst)
{
  // Harris function in 3D
  // original space-time Harris
  /*detC=  
  cxx.*cyy.*ctt +		xx yy tt
  cxy.*cyt.*cxt +		2 * xy yt xt
  cxt.*cxy.*cyt -		.
  cxx.*cyt.*cyt -		xx yt^2
  cxy.*cxy.*ctt -		tt xy^2	
  cxt.*cyy.*cxt ;		yy xt^2
  */
  // tmp1 = cxx * cyy * ctt
  cvMul(cxx, cyy, tmp1);
  cvMul(ctt, tmp1, tmp1);

  // tmp2 = 2 * cxy * cxt * cyt — the scale argument of cvMul supplies the
  // factor 2, covering both symmetric off-diagonal products at once.
  cvMul(cxy, cxt, tmp2);
  cvMul(cyt, tmp2, tmp2,2);

  cvAdd(tmp1,tmp2,tmp1);

  // tmp1 -= cxx * cyt^2
  cvMul(cyt,cyt,tmp2);
  cvMul(cxx,tmp2,tmp2);

  cvSub(tmp1,tmp2,tmp1);

  // tmp1 -= ctt * cxy^2
  cvMul(cxy,cxy,tmp2);
  cvMul(ctt,tmp2,tmp2);

  cvSub(tmp1,tmp2,tmp1);

  // tmp1 -= cyy * cxt^2  (tmp1 now holds det(C))
  cvMul(cxt,cxt,tmp2);
  cvMul(cyy,tmp2,tmp2);

  cvSub(tmp1,tmp2,tmp1);

  //trace3C=(cxx+cyy+ctt).^3;
  cvAdd(cxx,cyy,tmp2);
  cvAdd(ctt,tmp2,tmp2);
  cvPow(tmp2,tmp2,3);

  //H=detC-stharrisbuffer.kparam*trace3C;
  cvScale(tmp2,tmp2,k,0);
  cvSub(tmp1,tmp2,dst);
}
Example #13
0
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
CvMat *tgso (CvMat &tmap, int ntex, double sigma, double theta, CvMat &tsim, int useChi2) {


	CvMat *roundTmap=cvCreateMat(tmap.rows,tmap.cols,CV_32FC1);
	CvMat *comp=cvCreateMat(tmap.rows,tmap.cols,CV_32FC1);

	for (int i=0;i<tmap.rows;i++)
		for (int j=0;j<tmap.cols;j++)
			cvSetReal2D(roundTmap,i,j,cvRound(cvGetReal2D(&tmap,i,j)));

	cvSub(&tmap,roundTmap,comp);
	if (cvCountNonZero(comp)) {
		printf("texton labels not integral");
		cvReleaseMat(&roundTmap);
		cvReleaseMat(&comp);
		exit(1);
	}

	double min,max;
	cvMinMaxLoc(&tmap,&min,&max);
	if (min<1 && max>ntex) {
		char *msg=new char[50];
		printf(msg,"texton labels out of range [1,%d]",ntex);
		cvReleaseMat(&roundTmap);
		cvReleaseMat(&comp);
		exit(1);
	}

	cvReleaseMat(&roundTmap);
	cvReleaseMat(&comp);


	double wr=floor(sigma); //sigma=radius (Leo) 

	CvMat *x=cvCreateMat(1,wr-(-wr)+1, CV_64FC1);
	CvMat *y=cvCreateMat(wr-(-wr)+1,1, CV_64FC1);

	CvMat *u=cvCreateMat(wr-(-wr)+1,wr-(-wr)+1, CV_64FC1);
	CvMat *v=cvCreateMat(wr-(-wr)+1,wr-(-wr)+1, CV_64FC1);
	CvMat *gamma=cvCreateMat(u->rows,v->rows, CV_64FC1);

	// Set x,y directions 
	for (int j=-wr;j<=wr;j++) {
		cvSetReal2D(x,0,(j+wr),j);
		cvSetReal2D(y,(j+wr),0,j);
	}

	// Set u,v, meshgrids
	for (int i=0;i<u->rows;i++) {
		cvRepeat(x,u);
		cvRepeat(y,v);
	}

	// Compute the gamma matrix from the grid
	for (int i=0;i<u->rows;i++) 
		for (int j=0;j<u->cols;j++)
			cvSetReal2D(gamma,i,j,atan2(cvGetReal2D(v,i,j),cvGetReal2D(u,i,j)));

	cvReleaseMat(&x);
	cvReleaseMat(&y);

	CvMat *sum=cvCreateMat(u->rows,u->cols, CV_64FC1);
	cvMul(u,u,u);
	cvMul(v,v,v);
	cvAdd(u,v,sum);
	CvMat *mask=cvCreateMat(u->rows,u->cols, CV_8UC1);
	cvCmpS(sum,sigma*sigma,mask,CV_CMP_LE);
	cvConvertScale(mask,mask,1.0/255);
	cvSetReal2D(mask,wr,wr,0);
	int count=cvCountNonZero(mask);

	cvReleaseMat(&u);
	cvReleaseMat(&v);
	cvReleaseMat(&sum);

	CvMat *sub=cvCreateMat(mask->rows,mask->cols, CV_64FC1);
	CvMat *side=cvCreateMat(mask->rows,mask->cols, CV_8UC1);

	cvSubS(gamma,cvScalar(theta),sub);
	cvReleaseMat(&gamma);

	for (int i=0;i<mask->rows;i++){
		for (int j=0;j<mask->cols;j++) {
			double n=cvmGet(sub,i,j);
			double n_mod = n-floor(n/(2*M_PI))*2*M_PI;
			cvSetReal2D(side,i,j, 1 + int(n_mod < M_PI));
		}
	}

	cvMul(side,mask,side);
	cvReleaseMat(&sub);
	cvReleaseMat(&mask);

	CvMat *lmask=cvCreateMat(side->rows,side->cols, CV_8UC1);
	CvMat *rmask=cvCreateMat(side->rows,side->cols, CV_8UC1);
	cvCmpS(side,1,lmask,CV_CMP_EQ);
	cvCmpS(side,2,rmask,CV_CMP_EQ);
	int count1=cvCountNonZero(lmask), count2=cvCountNonZero(rmask);
	if (count1 != count2) {
		printf("Bug: imbalance\n");
	}

	CvMat *rlmask=cvCreateMat(side->rows,side->cols, CV_32FC1);
	CvMat *rrmask=cvCreateMat(side->rows,side->cols, CV_32FC1);
	cvConvertScale(lmask,rlmask,1.0/(255*count)*2);
	cvConvertScale(rmask,rrmask,1.0/(255*count)*2);


	cvReleaseMat(&lmask);
	cvReleaseMat(&rmask);
	cvReleaseMat(&side);

	int h=tmap.rows;
	int w=tmap.cols;


	CvMat *d       = cvCreateMat(h*w,ntex,CV_32FC1);
	CvMat *coltemp = cvCreateMat(h*w,1,CV_32FC1);
	CvMat *tgL     = cvCreateMat(h,w, CV_32FC1);
	CvMat *tgR     = cvCreateMat(h,w, CV_32FC1);
	CvMat *temp    = cvCreateMat(h,w,CV_8UC1);
	CvMat *im      = cvCreateMat(h,w, CV_32FC1);
	CvMat *sub2    = cvCreateMat(h,w,CV_32FC1);
	CvMat *sub2t   = cvCreateMat(w,h,CV_32FC1);
	CvMat *prod    = cvCreateMat(h*w,ntex,CV_32FC1);
	CvMat reshapehdr,*reshape;

	CvMat* tgL_pad = cvCreateMat(h+rlmask->rows-1,w+rlmask->cols-1,CV_32FC1);
	CvMat* tgR_pad = cvCreateMat(h+rlmask->rows-1,w+rlmask->cols-1,CV_32FC1);
	CvMat* im_pad  = cvCreateMat(h+rlmask->rows-1,w+rlmask->cols-1,CV_32FC1);

	CvMat *tg=cvCreateMat(h,w,CV_32FC1);
	cvZero(tg);
	
	if (useChi2 == 1){
		CvMat* temp_add1 = cvCreateMat(h,w,CV_32FC1);
		for (int i=0;i<ntex;i++) {
			cvCmpS(&tmap,i+1,temp,CV_CMP_EQ); 
			cvConvertScale(temp,im,1.0/255);

			cvCopyMakeBorder(tgL,tgL_pad,cvPoint((rlmask->cols-1)/2,(rlmask->rows-1)/2),IPL_BORDER_CONSTANT);
			cvCopyMakeBorder(tgR,tgR_pad,cvPoint((rlmask->cols-1)/2,(rlmask->rows-1)/2),IPL_BORDER_CONSTANT);
			cvCopyMakeBorder(im,im_pad,cvPoint((rlmask->cols-1)/2,(rlmask->rows-1)/2),IPL_BORDER_CONSTANT);

			cvFilter2D(im_pad,tgL_pad,rlmask,cvPoint((rlmask->cols-1)/2,(rlmask->rows-1)/2));
			cvFilter2D(im_pad,tgR_pad,rrmask,cvPoint((rlmask->cols-1)/2,(rlmask->rows-1)/2));

			cvGetSubRect(tgL_pad,tgL,cvRect((rlmask->cols-1)/2,(rlmask->rows-1)/2,tgL->cols,tgL->rows));
			cvGetSubRect(tgR_pad,tgR,cvRect((rlmask->cols-1)/2,(rlmask->rows-1)/2,tgR->cols,tgR->rows));

			cvSub(tgL,tgR,sub2);
			cvPow(sub2,sub2,2.0);
			cvAdd(tgL,tgR,temp_add1);
			cvAddS(temp_add1,cvScalar(0.0000000001),temp_add1);
			cvDiv(sub2,temp_add1,sub2);
			cvAdd(tg,sub2,tg);
		}
		cvScale(tg,tg,0.5);

		cvReleaseMat(&temp_add1);

	}
	else{// if not chi^2
		for (int i=0;i<ntex;i++) {
			cvCmpS(&tmap,i+1,temp,CV_CMP_EQ); 
			cvConvertScale(temp,im,1.0/255);

			cvCopyMakeBorder(tgL,tgL_pad,cvPoint((rlmask->cols-1)/2,(rlmask->rows-1)/2),IPL_BORDER_CONSTANT);
			cvCopyMakeBorder(tgR,tgR_pad,cvPoint((rlmask->cols-1)/2,(rlmask->rows-1)/2),IPL_BORDER_CONSTANT);
			cvCopyMakeBorder(im,im_pad,cvPoint((rlmask->cols-1)/2,(rlmask->rows-1)/2),IPL_BORDER_CONSTANT);

			cvFilter2D(im_pad,tgL_pad,rlmask,cvPoint((rlmask->cols-1)/2,(rlmask->rows-1)/2));
			cvFilter2D(im_pad,tgR_pad,rrmask,cvPoint((rlmask->cols-1)/2,(rlmask->rows-1)/2));

			cvGetSubRect(tgL_pad,tgL,cvRect((rlmask->cols-1)/2,(rlmask->rows-1)/2,tgL->cols,tgL->rows));
			cvGetSubRect(tgR_pad,tgR,cvRect((rlmask->cols-1)/2,(rlmask->rows-1)/2,tgR->cols,tgR->rows));

			cvSub(tgL,tgR,sub2);
			cvAbs(sub2,sub2);
			cvTranspose(sub2,sub2t);
			reshape=cvReshape(sub2t,&reshapehdr,0,h*w);
			cvGetCol(d,coltemp,i);
			cvCopy(reshape,coltemp);
		}

		cvMatMul(d,&tsim,prod);
		cvMul(prod,d,prod);


		CvMat *sumcols=cvCreateMat(h*w,1,CV_32FC1);
		cvSetZero(sumcols);
		for (int i=0;i<prod->cols;i++) {
			cvGetCol(prod,coltemp,i);
			cvAdd(sumcols,coltemp,sumcols);
		}

		reshape=cvReshape(sumcols,&reshapehdr,0,w);
		cvTranspose(reshape,tg);

		cvReleaseMat(&sumcols);
	}


	//Smooth the gradient now!!
	tg=fitparab(*tg,sigma,sigma/4,theta);
	cvMaxS(tg,0,tg); 

	
	cvReleaseMat(&im_pad);
	cvReleaseMat(&tgL_pad);
	cvReleaseMat(&tgR_pad);
	cvReleaseMat(&rlmask);
	cvReleaseMat(&rrmask);
	cvReleaseMat(&im);
	cvReleaseMat(&tgL);
	cvReleaseMat(&tgR);
	cvReleaseMat(&temp);
	cvReleaseMat(&coltemp);
	cvReleaseMat(&sub2);
	cvReleaseMat(&sub2t);
	cvReleaseMat(&d);
	cvReleaseMat(&prod);

	return tg;

}
 /* Bhattacharyya coefficient between the candidate and model histograms:
    sum_i sqrt(p_i * q_i) / sqrt(|p| * |q|). m_HistTemp is scratch and is
    overwritten. */
 double calcBhattacharyya()
 {
     cvMul(m_HistCandidate,m_HistModel,m_HistTemp);
     cvPow(m_HistTemp,m_HistTemp,0.5);
     return cvSum(m_HistTemp).val[0] / sqrt(m_HistCandidateVolume*m_HistModelVolume);
 }   /* calcBhattacharyyaCoefficient */
Example #15
0
/**
 * @brief Forward-DFT the grayscale image `im` into a dft_M x dft_N complex
 *        matrix, display log(1 + magnitude) normalized to [0,1] in a window
 *        named "DFT -<src>", and return the DFT matrix.
 * @return newly allocated CV_64FC2 spectrum; caller owns it.
 */
CvMat* cvShowDFT1(IplImage* im, int dft_M, int dft_N,char* src)
{
    IplImage* realInput;
    IplImage* imaginaryInput;
    IplImage* complexInput;

    CvMat* dft_A, tmp;

    IplImage* image_Re;
    IplImage* image_Im;

    char str[80];

    double m, M;

    realInput = cvCreateImage( cvGetSize(im), IPL_DEPTH_64F, 1);
    imaginaryInput = cvCreateImage( cvGetSize(im), IPL_DEPTH_64F, 1);
    complexInput = cvCreateImage( cvGetSize(im), IPL_DEPTH_64F, 2);

    // Build a complex input: real part = image, imaginary part = 0.
    cvScale(im, realInput, 1.0, 0.0);
    cvZero(imaginaryInput);
    cvMerge(realInput, imaginaryInput, NULL, NULL, complexInput);

    dft_A = cvCreateMat( dft_M, dft_N, CV_64FC2 );
    image_Re = cvCreateImage( cvSize(dft_N, dft_M), IPL_DEPTH_64F, 1);
    image_Im = cvCreateImage( cvSize(dft_N, dft_M), IPL_DEPTH_64F, 1);

    // copy A to dft_A and pad dft_A with zeros
    cvGetSubRect( dft_A, &tmp, cvRect(0,0, im->width, im->height));
    cvCopy( complexInput, &tmp, NULL );
    if( dft_A->cols > im->width )
    {
        cvGetSubRect( dft_A, &tmp, cvRect(im->width,0, dft_A->cols - im->width, im->height));
        cvZero( &tmp );
    }

    // no need to pad bottom part of dft_A with zeros because of
    // use nonzero_rows parameter in cvDFT() call below

    cvDFT( dft_A, dft_A, CV_DXT_FORWARD, complexInput->height );

    // snprintf cannot overflow the 80-byte buffer, unlike the original
    // strcpy/strcat pair with an arbitrary-length `src`.
    snprintf(str, sizeof(str), "DFT -%s", src);
    cvNamedWindow(str, 0);

    // Split Fourier in real and imaginary parts
    cvSplit( dft_A, image_Re, image_Im, 0, 0 );

    // Compute the magnitude of the spectrum Mag = sqrt(Re^2 + Im^2)
    cvPow( image_Re, image_Re, 2.0);
    cvPow( image_Im, image_Im, 2.0);
    cvAdd( image_Re, image_Im, image_Re, NULL);
    cvPow( image_Re, image_Re, 0.5 );

    // Compute log(1 + Mag)
    cvAddS( image_Re, cvScalarAll(1.0), image_Re, NULL ); // 1 + Mag
    cvLog( image_Re, image_Re ); // log(1 + Mag)

    cvMinMaxLoc(image_Re, &m, &M, NULL, NULL, NULL);
    cvScale(image_Re, image_Re, 1.0/(M-m), 1.0*(-m)/(M-m));
    cvShowImage(str, image_Re);

    // cvShowImage copies the data into the window, so everything except the
    // returned dft_A can be released; the original leaked all five images.
    cvReleaseImage(&realInput);
    cvReleaseImage(&imaginaryInput);
    cvReleaseImage(&complexInput);
    cvReleaseImage(&image_Re);
    cvReleaseImage(&image_Im);

    return(dft_A);
}
Example #16
0
/**
 * @brief Demo: load an image, add random noise and a Gaussian blur, then
 *        Wiener-deconvolve it in the frequency domain (via cvShowDFT1 /
 *        cvShowInvDFT1) and display every intermediate stage.
 */
int main(int argc, char ** argv)
{
    int height,width,step,channels,depth;
    uchar* data1;

    CvMat *dft_A;
    CvMat *dft_B;

    CvMat *dft_C;
    IplImage* im;
    IplImage* im1;

    IplImage* image_ReB;
    IplImage* image_ImB;

    IplImage* image_ReC;
    IplImage* image_ImC;
    IplImage* complex_ImC;
    CvScalar val;

    IplImage* k_image_hdr;
    int i,j,k;

    FILE *fp;
    fp = fopen("test.txt","w+");

    int dft_M,dft_N;
    int dft_M1,dft_N1;

    CvMat* cvShowDFT1(IplImage*, int, int,char*);
    void cvShowInvDFT1(IplImage*, CvMat*, int, int,char*);

    im1 = cvLoadImage( "../homer.jpg",1 );

    cvNamedWindow("Original-color", 0);
    cvShowImage("Original-color", im1);

    im = cvLoadImage( "../homer.jpg", CV_LOAD_IMAGE_GRAYSCALE );

    if( !im )
        return -1;

    // val_noise is statically sized for (at most) a 357x383 image and indexed
    // as 383*i+j, so a larger image would overrun the buffer — bail out.
    if( im->height > 357 || im->width > 383 )
    {
        printf("image larger than the 357x383 noise buffer\n");
        return -1;
    }

    cvNamedWindow("Original-gray", 0);
    cvShowImage("Original-gray", im);

    // Create a random noise matrix.
    // (The original re-opened "test.txt" here, leaking the FILE* opened
    // above; made static to keep ~547 KB off the stack.)
    static int val_noise[357*383];
    for(i=0; i <im->height;i++){
        for(j=0;j<im->width;j++){
            fprintf(fp, "%d ",(383*i+j));
            val_noise[383*i+j] = rand() % 128;
        }
        fprintf(fp, "\n");   // was "/n" — a literal slash-n, not a newline
    }
    fclose(fp);   // the original never closed the file

    // NOTE(review): val_noise is an int array but the header declares CV_8UC1,
    // so the matrix reads the ints byte-wise; kept as-is to preserve the
    // demo's behavior, but this looks unintended — confirm.
    CvMat noise = cvMat(im->height,im->width, CV_8UC1,val_noise);

    // Add the random noise matrix to the image
    cvAdd(im,&noise,im, 0);

    cvNamedWindow("Original + Noise", 0);
    cvShowImage("Original + Noise", im);

    cvSmooth( im, im, CV_GAUSSIAN, 7, 7, 0.5, 0.5 );
    cvNamedWindow("Gaussian Smooth", 0);
    cvShowImage("Gaussian Smooth", im);

    // Create a circular (disc) blur kernel of radius `rad`.
    IplImage* k_image;
    float r = rad;
    float radius=((int)(r)*2+1)/2.0;

    int rowLength=(int)(2*radius);
    printf("rowlength %d\n",rowLength);
    float kernels[rowLength*rowLength];   // NOTE: VLA — a GCC/Clang extension in C++
    printf("rowl: %i",rowLength);
    int norm=0; //Normalization factor
    int x,y;
    CvMat kernel;
    // Count the in-disc cells so the kernel sums to 1.
    for(x = 0; x < rowLength; x++)
        for (y = 0; y < rowLength; y++)
            if (sqrt((x - (int)(radius) ) * (x - (int)(radius) ) + (y - (int)(radius))* (y - (int)(radius))) <= (int)(radius))
                norm++;
    // Populate matrix
    for (y = 0; y < rowLength; y++) //populate array with values
    {
        for (x = 0; x < rowLength; x++) {
            if (sqrt((x - (int)(radius) ) * (x - (int)(radius) ) + (y - (int)(radius))
                     * (y - (int)(radius))) <= (int)(radius)) {
                //kernels[y * rowLength + x] = 255;
                kernels[y * rowLength + x] =1.0/norm;
                printf("%f ",1.0/norm);
            }
            else{
                kernels[y * rowLength + x] =0;
            }
        }
    }

    kernel= cvMat(rowLength, // number of rows
                  rowLength, // number of columns
                  CV_32FC1, // matrix data type
                  kernels); // data pointer (the original passed &kernels — same
                            // address, wrong type)
    k_image_hdr = cvCreateImageHeader( cvSize(rowLength,rowLength), IPL_DEPTH_32F,1);
    k_image = cvGetImage(&kernel,k_image_hdr);

    height = k_image->height;
    width = k_image->width;
    step = k_image->widthStep/sizeof(float);
    depth = k_image->depth;

    channels = k_image->nChannels;
    //data1 = (float *)(k_image->imageData);
    data1 = (uchar *)(k_image->imageData);
    cvNamedWindow("blur kernel", 0);
    cvShowImage("blur kernel", k_image);

    dft_M = cvGetOptimalDFTSize( im->height - 1 );
    dft_N = cvGetOptimalDFTSize( im->width - 1 );

    //dft_M1 = cvGetOptimalDFTSize( im->height+99 - 1 );
    //dft_N1 = cvGetOptimalDFTSize( im->width+99 - 1 );

    dft_M1 = cvGetOptimalDFTSize( im->height+3 - 1 );
    dft_N1 = cvGetOptimalDFTSize( im->width+3 - 1 );

    printf("dft_N1=%d,dft_M1=%d\n",dft_N1,dft_M1);

    // Perform DFT of original image
    dft_A = cvShowDFT1(im, dft_M1, dft_N1,"original");
    //Perform inverse (check)
    //cvShowInvDFT1(im,dft_A,dft_M1,dft_N1, "original"); - Commented as it overwrites the DFT

    // Perform DFT of kernel
    dft_B = cvShowDFT1(k_image,dft_M1,dft_N1,"kernel");
    //Perform inverse of kernel (check)
    //cvShowInvDFT1(k_image,dft_B,dft_M1,dft_N1, "kernel");- Commented as it overwrites the DFT

    // Multiply numerator with complex conjugate
    dft_C = cvCreateMat( dft_M1, dft_N1, CV_64FC2 );

    printf("%d %d %d %d\n",dft_M,dft_N,dft_M1,dft_N1);

    // Multiply DFT(blurred image) * complex conjugate of blur kernel
    cvMulSpectrums(dft_A,dft_B,dft_C,CV_DXT_MUL_CONJ);
    //cvShowInvDFT1(im,dft_C,dft_M1,dft_N1,"blur1");

    // Split Fourier in real and imaginary parts
    image_ReC = cvCreateImage( cvSize(dft_N1, dft_M1), IPL_DEPTH_64F, 1);
    image_ImC = cvCreateImage( cvSize(dft_N1, dft_M1), IPL_DEPTH_64F, 1);
    complex_ImC = cvCreateImage( cvSize(dft_N1, dft_M1), IPL_DEPTH_64F, 2);

    printf("%d %d %d %d\n",dft_M,dft_N,dft_M1,dft_N1);

    cvSplit( dft_C, image_ReC, image_ImC, 0, 0 );

    // Compute A^2 + B^2 of denominator or blur kernel
    image_ReB = cvCreateImage( cvSize(dft_N1, dft_M1), IPL_DEPTH_64F, 1);
    image_ImB = cvCreateImage( cvSize(dft_N1, dft_M1), IPL_DEPTH_64F, 1);

    // Split Real and imaginary parts
    cvSplit( dft_B, image_ReB, image_ImB, 0, 0 );
    cvPow( image_ReB, image_ReB, 2.0);
    cvPow( image_ImB, image_ImB, 2.0);
    cvAdd(image_ReB, image_ImB, image_ReB,0);

    // Wiener regularization constant.
    val = cvScalarAll(kappa);
    cvAddS(image_ReB,val,image_ReB,0);

    //Divide Numerator/A^2 + B^2
    cvDiv(image_ReC, image_ReB, image_ReC, 1.0);
    cvDiv(image_ImC, image_ReB, image_ImC, 1.0);

    // Merge Real and complex parts
    cvMerge(image_ReC, image_ImC, NULL, NULL, complex_ImC);

    // Perform Inverse
    cvShowInvDFT1(im, (CvMat *)complex_ImC,dft_M1,dft_N1,"O/p Wiener k=1 rad=2");

    cvWaitKey(-1);
    return 0;
}
Example #17
0
int main(int argc, char **argv)
{
  bool isStop = false;
  const int INIT_TIME = 50;
  const double BG_RATIO = 0.02; // 背景領域更新レート
  const double OBJ_RATIO = 0.005; // 物体領域更新レート
  const double Zeta = 10.0;
  IplImage *img = NULL;

  CvCapture *capture = NULL;
  capture = cvCreateCameraCapture(0);
  //capture = cvCaptureFromAVI("test.avi");
  if(capture == NULL){
    printf("capture device not found!!");
    return -1;
  }

  img = cvQueryFrame(capture);
  int w = img->width;
  int h = img->height;

  IplImage *imgAverage = cvCreateImage(cvSize(w, h), IPL_DEPTH_32F, 3);
  IplImage *imgSgm = cvCreateImage(cvSize(w, h), IPL_DEPTH_32F, 3);
  IplImage *imgTmp = cvCreateImage(cvSize(w, h), IPL_DEPTH_32F, 3);
  IplImage *img_lower = cvCreateImage(cvSize(w, h), IPL_DEPTH_32F, 3);
  IplImage *img_upper = cvCreateImage(cvSize(w, h), IPL_DEPTH_32F, 3);
  IplImage *imgSilhouette = cvCreateImage(cvSize(w, h), IPL_DEPTH_8U, 1);
  IplImage *imgSilhouetteInv = cvCreateImage(cvSize(w, h), IPL_DEPTH_8U, 1);
  IplImage *imgResult = cvCreateImage(cvSize(w, h), IPL_DEPTH_8U, 3);

  printf("背景初期化中...\n");
  cvSetZero(imgAverage);
  for(int i = 0; i < INIT_TIME; i++){
    img = cvQueryFrame(capture);
    cvAcc(img, imgAverage);
    printf("輝度平均 %d/%d\n", i, INIT_TIME);
  }
  cvConvertScale(imgAverage, imgAverage, 1.0 / INIT_TIME);
  cvSetZero(imgSgm);
  for(int i = 0; i < INIT_TIME; i++){
    img = cvQueryFrame(capture);
    cvConvert(img, imgTmp);
    cvSub(imgTmp, imgAverage, imgTmp);
    cvPow(imgTmp, imgTmp, 2.0);
    cvConvertScale(imgTmp, imgTmp, 2.0);
    cvPow(imgTmp, imgTmp, 0.5);
    cvAcc(imgTmp, imgSgm);
    printf("輝度振幅 %d/%d\n", i, INIT_TIME);
  }
  cvConvertScale(imgSgm, imgSgm, 1.0 / INIT_TIME);
  printf("背景初期化完了\n");

  char winNameCapture[] = "Capture";
  char winNameSilhouette[] = "Silhouette";
  cvNamedWindow(winNameCapture, CV_WINDOW_AUTOSIZE);
  cvNamedWindow(winNameSilhouette, CV_WINDOW_AUTOSIZE);

  while(1){
    if(!isStop){
      img = cvQueryFrame(capture);
      if(img == NULL) break;
      cvConvert(img, imgTmp);

      // 輝度範囲
      cvSub(imgAverage, imgSgm, img_lower);
      cvSubS(img_lower, cvScalarAll(Zeta), img_lower);
      cvAdd(imgAverage, imgSgm, img_upper);
      cvAddS(img_upper, cvScalarAll(Zeta), img_upper);
      cvInRange(imgTmp, img_lower, img_upper, imgSilhouette);

      // 輝度振幅
      cvSub(imgTmp, imgAverage, imgTmp);
      cvPow(imgTmp, imgTmp, 2.0);
      cvConvertScale(imgTmp, imgTmp, 2.0);
      cvPow(imgTmp, imgTmp, 0.5);

      // 背景領域を更新
      cvRunningAvg(img, imgAverage, BG_RATIO, imgSilhouette);
      cvRunningAvg(imgTmp, imgSgm, BG_RATIO, imgSilhouette);

      // 物体領域を更新
      cvNot(imgSilhouette, imgSilhouetteInv);
      cvRunningAvg(imgTmp, imgSgm, OBJ_RATIO, imgSilhouetteInv);

      cvErode(imgSilhouette, imgSilhouette, NULL, 1); // 収縮
      cvDilate(imgSilhouette, imgSilhouette, NULL, 2); // 膨張
      cvErode(imgSilhouette, imgSilhouette, NULL, 1); // 収縮

      cvMerge(imgSilhouette, imgSilhouette, imgSilhouette, NULL, imgResult);
      cvShowImage(winNameCapture, img);
      cvShowImage(winNameSilhouette, imgResult);
    }
    int waitKey = cvWaitKey(33);
    if(waitKey == 'q') break;
    if(waitKey == ' '){
      isStop = !isStop;
      if(isStop) printf("stop\n");
      else printf("start\n");
    }
  }

  cvReleaseCapture(&capture);
  cvDestroyWindow(winNameCapture);
  cvDestroyWindow(winNameSilhouette);

  return 0;
}
Example #18
0
//function definitions
//function definitions
// Computes BRISQUE no-reference image-quality features: for each of 2 scales,
// fits an (A)GGD to the MSCN coefficients and to 4 directional paired
// products, appending the fitted parameters to `featurevector`
// (2 + 4*4 = 18 values per scale).
// NOTE(review): AGGDfit and BwImage are project-level helpers not visible
// here; confirm their contracts against their definitions.
void ComputeBrisqueFeature(IplImage *orig, vector<double>& featurevector)
{
    // Grayscale, normalized to [0,1] as 64F.
    IplImage *orig_bw_int = cvCreateImage(cvGetSize(orig), orig->depth, 1); 
    cvCvtColor(orig, orig_bw_int, CV_RGB2GRAY);
    IplImage *orig_bw = cvCreateImage(cvGetSize(orig_bw_int), IPL_DEPTH_64F, 1);
    cvConvertScale(orig_bw_int, orig_bw, 1.0/255);
    cvReleaseImage(&orig_bw_int);
    
    //orig_bw now contains the grayscale image normalized to the range 0,1
    
    int scalenum = 2;
    for (int itr_scale = 1; itr_scale<=scalenum; itr_scale++)
	{
		// Downscale by 2^(scale-1) with cubic interpolation.
		IplImage *imdist_scaled = cvCreateImage(cvSize(orig_bw->width/pow((double)2,itr_scale-1), orig_bw->height/pow((double)2,itr_scale-1)), IPL_DEPTH_64F, 1);
		cvResize(orig_bw, imdist_scaled,CV_INTER_CUBIC); 
		
		//compute mu and mu squared
		IplImage* mu = cvCreateImage(cvGetSize(imdist_scaled), IPL_DEPTH_64F, 1);
		cvSmooth( imdist_scaled, mu, CV_GAUSSIAN, 7, 7, 1.16666 );
		IplImage* mu_sq = cvCreateImage(cvGetSize(imdist_scaled), IPL_DEPTH_64F, 1);
		cvMul(mu, mu, mu_sq);

		//compute sigma: sigma = sqrt(E[x^2] - mu^2)
		IplImage* sigma = cvCreateImage(cvGetSize(imdist_scaled), IPL_DEPTH_64F, 1);
		cvMul(imdist_scaled, imdist_scaled, sigma);
		cvSmooth(sigma, sigma, CV_GAUSSIAN, 7, 7, 1.16666 );
		cvSub(sigma, mu_sq, sigma);
		cvPow(sigma, sigma, 0.5);

		//compute structdis = (x-mu)/sigma  (MSCN coefficients; the small
		// constant keeps the division stable where sigma ~ 0)
		cvAddS(sigma, cvScalar(1.0/255), sigma);
		IplImage* structdis = cvCreateImage(cvGetSize(imdist_scaled), IPL_DEPTH_64F, 1);
		cvSub(imdist_scaled, mu, structdis);
		cvDiv(structdis, sigma, structdis);

		//Compute AGGD fit
                double lsigma_best, rsigma_best, gamma_best;
                AGGDfit(structdis, lsigma_best, rsigma_best, gamma_best);
		featurevector.push_back(gamma_best);
		featurevector.push_back((lsigma_best*lsigma_best + rsigma_best*rsigma_best)/2);
		
		//Compute paired product images: H, V and the two diagonal shifts
		int shifts[4][2]={{0,1},{1,0},{1,1},{-1,1}};
		for(int itr_shift=1; itr_shift<=4; itr_shift++)
		{
			int* reqshift = shifts[itr_shift-1];

                        IplImage* shifted_structdis = cvCreateImage(cvGetSize(imdist_scaled), IPL_DEPTH_64F, 1);
			BwImage OrigArr(structdis);
			BwImage ShiftArr(shifted_structdis);
			// Shift structdis by (reqshift[0], reqshift[1]), zero-padding
			// pixels that fall outside the image.
			for(int i=0; i<structdis->height; i++)
			{
				for(int j=0; j<structdis->width; j++)
				{
					if(i+reqshift[0]>=0 && i+reqshift[0]<structdis->height && j+reqshift[1]>=0 && j+reqshift[1]<structdis->width)
					{
						ShiftArr[i][j]=OrigArr[i+reqshift[0]][j+reqshift[1]];
					}
					else
					{
						ShiftArr[i][j]=0;
					}
				}
			}
		
			//computing correlation
			cvMul(structdis, shifted_structdis, shifted_structdis);
			AGGDfit(shifted_structdis, lsigma_best, rsigma_best, gamma_best);
		
			double constant = sqrt(tgamma(1/gamma_best))/sqrt(tgamma(3/gamma_best));
			double meanparam = (rsigma_best-lsigma_best)*(tgamma(2/gamma_best)/tgamma(1/gamma_best))*constant;
			
			featurevector.push_back(gamma_best);
			featurevector.push_back(meanparam);
			featurevector.push_back(pow(lsigma_best,2));
			featurevector.push_back(pow(rsigma_best,2));

                        cvReleaseImage(&shifted_structdis);	
		}

                cvReleaseImage(&mu);
		cvReleaseImage(&mu_sq);
		cvReleaseImage(&sigma);
		cvReleaseImage(&structdis);
		cvReleaseImage(&imdist_scaled);
	}
	
}
Example #19
0
/*
 * For each pixel, computes the eigenvalues and eigenvectors of the 2x2
 * covariance matrix packed as 3 floats per pixel in `cov`
 * ([[cov[0], cov[1]], [cov[1], cov[2]]]) and writes 6 floats per pixel to
 * `dst`: (l1, l2, x1, y1, x2, y2) -- both eigenvalues followed by the two
 * normalized eigenvectors.  `buffer` supplies scratch space of at least
 * 2*size.width floats; cov_step/dst_step are byte strides.
 */
static void
icvCalcEigenValsVecs( const float* cov, int cov_step, float* dst,
                      int dst_step, CvSize size, CvMat* buffer )
{
    // NOTE(review): incremented once per processed row but never read --
    // looks like leftover debugging state; confirm before removing.
    static int y0 = 0;

    int j;
    float* buf = buffer->data.fl;
    // convert byte strides to element strides
    cov_step /= sizeof(cov[0]);
    dst_step /= sizeof(dst[0]);

    for( ; size.height--; cov += cov_step, dst += dst_step )
    {
        // Pass 1: eigenvalues are (a+c) +- sqrt((a-c)^2 + b^2) with
        // a = cov0/2, c = cov2/2.  Stash the half-trace in buf[j+width]
        // and the squared discriminant in buf[j].
        for( j = 0; j < size.width; j++ )
        {
            double a = cov[j*3]*0.5;
            double b = cov[j*3+1];
            double c = cov[j*3+2]*0.5;

            buf[j + size.width] = (float)(a + c);
            buf[j] = (float)((a - c)*(a - c) + b*b);
        }

        // take sqrt of all discriminants (first buffer row only)
        buffer->rows = 1;
        cvPow( buffer, buffer, 0.5 );

        for( j = 0; j < size.width; j++ )
        {
            double a = cov[j*3];
            double b = cov[j*3+1];
            double c = cov[j*3+2];

            // l1/l2: larger/smaller eigenvalue of the (un-halved) matrix
            double l1 = buf[j + size.width] + buf[j];
            double l2 = buf[j + size.width] - buf[j];

            // eigenvector for l1: (b, l1 - a); if degenerate, fall back to
            // the equivalent form (l1 - c, b), rescaling tiny vectors to
            // avoid underflow before squaring
            double x = b;
            double y = l1 - a;
            double e = fabs(x);

            if( e + fabs(y) < 1e-4 )
            {
                y = b;
                x = l1 - c;
                e = fabs(x);
                if( e + fabs(y) < 1e-4 )
                {
                    e = 1./(e + fabs(y) + FLT_EPSILON);
                    x *= e, y *= e;
                }
            }

            // squared norm goes to buf[j] for the normalization pass below
            buf[j] = (float)(x*x + y*y + DBL_EPSILON);
            dst[6*j] = (float)l1;
            dst[6*j + 2] = (float)x;
            dst[6*j + 3] = (float)y;

            // eigenvector for l2, same fallback scheme
            x = b;
            y = l2 - a;
            e = fabs(x);

            if( e + fabs(y) < 1e-4 )
            {
                y = b;
                x = l2 - c;
                e = fabs(x);
                if( e + fabs(y) < 1e-4 )
                {
                    e = 1./(e + fabs(y) + FLT_EPSILON);
                    x *= e, y *= e;
                }
            }

            buf[j + size.width] = (float)(x*x + y*y + DBL_EPSILON);
            dst[6*j + 1] = (float)l2;
            dst[6*j + 4] = (float)x;
            dst[6*j + 5] = (float)y;
        }

        // Pass 2: buf now holds squared norms in both rows; raise to -0.5
        // to get reciprocal norms, then scale the eigenvector components.
        buffer->rows = 2;
        cvPow( buffer, buffer, -0.5 );

        for( j = 0; j < size.width; j++ )
        {
            double t0 = buf[j]*dst[6*j + 2];
            double t1 = buf[j]*dst[6*j + 3];

            dst[6*j + 2] = (float)t0;
            dst[6*j + 3] = (float)t1;

            t0 = buf[j + size.width]*dst[6*j + 4];
            t1 = buf[j + size.width]*dst[6*j + 5];

            dst[6*j + 4] = (float)t0;
            dst[6*j + 5] = (float)t1;
        }

        y0++;
    }
}
Example #20
0
/*
 * Builds the log-magnitude Fourier spectrum of a grayscale image.
 * The input is packed into a two-channel (real, imaginary) image, a
 * forward DFT is run on an optimally-sized matrix, and the result is
 * log(1 + |F|) with the DC term shifted to the image centre.
 * The caller owns the returned IPL_DEPTH_64F single-channel image.
 */
IplImage*
create_fourier_image(const IplImage *im)
{
  IplImage *re_in  = rb_cvCreateImage( cvGetSize(im), IPL_DEPTH_64F, 1);
  IplImage *im_in  = rb_cvCreateImage( cvGetSize(im), IPL_DEPTH_64F, 1);
  IplImage *cplx   = rb_cvCreateImage( cvGetSize(im), IPL_DEPTH_64F, 2);

  // real part = input pixels, imaginary part = 0
  cvScale(im, re_in, 1.0, 0.0);
  cvZero(im_in);
  cvMerge(re_in, im_in, NULL, NULL, cplx);

  int opt_h = cvGetOptimalDFTSize( im->height - 1 );
  int opt_w = cvGetOptimalDFTSize( im->width - 1 );

  CvMat *spectrum = rb_cvCreateMat( opt_h, opt_w, CV_64FC2 );
  IplImage *mag_re = rb_cvCreateImage( cvSize(opt_w, opt_h), IPL_DEPTH_64F, 1);
  IplImage *mag_im = rb_cvCreateImage( cvSize(opt_w, opt_h), IPL_DEPTH_64F, 1);

  // place the complex input in the top-left corner and zero any padding
  // columns; padded rows are skipped via cvDFT's nonzero_rows argument
  CvMat roi;
  cvGetSubRect( spectrum, &roi, cvRect(0, 0, im->width, im->height));
  cvCopy( cplx, &roi, NULL );
  if( spectrum->cols > im->width )
  {
    cvGetSubRect( spectrum, &roi, cvRect(im->width, 0, spectrum->cols - im->width, im->height));
    cvZero( &roi );
  }

  cvDFT( spectrum, spectrum, CV_DXT_FORWARD, cplx->height );

  // magnitude: sqrt(Re^2 + Im^2), accumulated in-place into mag_re
  cvSplit( spectrum, mag_re, mag_im, 0, 0 );
  cvPow( mag_re, mag_re, 2.0);
  cvPow( mag_im, mag_im, 2.0);
  cvAdd( mag_re, mag_im, mag_re, NULL);
  cvPow( mag_re, mag_re, 0.5 );

  // compress dynamic range: log(1 + Mag)
  cvAddS( mag_re, cvScalarAll(1.0), mag_re, NULL );
  cvLog( mag_re, mag_re );

  // move the origin (DC component) to the image centre
  cvShiftDFT( mag_re, mag_re );

  cvReleaseImage(&re_in);
  cvReleaseImage(&im_in);
  cvReleaseImage(&cplx);
  cvReleaseImage(&mag_im);

  cvReleaseMat(&spectrum);

  return mag_re;
}
Example #21
0
/*
 * Computes the mean SSIM (structural similarity) between source1 and
 * source2, after converting both through colorspaceConversion().
 * Uses member parameters K1, K2, L, gaussian_window and gaussian_sigma.
 * Side effects: allocates the member maps ssim_map and cs_map and stores
 * cvAvg() of each in mssim_value / mean_cs_value.
 * Returns the per-channel mean SSIM as a CvScalar.
 *
 * NOTE(review): ssim_map and cs_map are allocated on every call without
 * releasing a previous allocation -- this leaks if compare() is invoked
 * more than once, unless the maps are freed elsewhere in the class;
 * confirm before relying on repeated calls.
 */
CvScalar calcSSIM :: compare(IplImage *source1, IplImage *source2, Colorspace space)
{
  IplImage *src1, *src2;
  src1 = colorspaceConversion(source1, space);
  src2 = colorspaceConversion(source2, space);
  // assumes colorspaceConversion preserves the input dimensions -- the
  // size is taken from source1, not src1; TODO confirm
  int x = source1->width, y = source1->height;
  // default settings: C1 = (K1*L)^2, C2 = (K2*L)^2
  const double C1 = (K1 * L) * (K1 * L); 
  const double C2 = (K2 * L) * (K2 * L);

  int nChan = src1->nChannels;
  int d = IPL_DEPTH_32F;
  CvSize size = cvSize(x, y);
  
  //creating FLOAT type images of src1 and src2 
  IplImage *img1 = cvCreateImage(size, d, nChan);
  IplImage *img2 = cvCreateImage(size, d, nChan);

  //Image squares
  IplImage *img1_sq = cvCreateImage(size, d, nChan);
  IplImage *img2_sq = cvCreateImage(size, d, nChan);
  IplImage *img1_img2 = cvCreateImage(size, d, nChan);
  
  cvConvert(src1, img1);
  cvConvert(src2, img2);
  
  //Squaring the images thus created
  cvPow(img1, img1_sq, 2);
  cvPow(img2, img2_sq, 2);
  cvMul(img1, img2, img1_img2, 1);

  // local means and their products
  IplImage *mu1 = cvCreateImage(size, d, nChan);
  IplImage *mu2 = cvCreateImage(size, d, nChan);
  IplImage *mu1_sq = cvCreateImage(size, d, nChan);
  IplImage *mu2_sq = cvCreateImage(size, d, nChan);
  IplImage *mu1_mu2 = cvCreateImage(size, d, nChan);

  // local variances and covariance
  IplImage *sigma1_sq = cvCreateImage(size, d, nChan);
  IplImage *sigma2_sq = cvCreateImage(size, d, nChan);
  IplImage *sigma12 = cvCreateImage(size, d, nChan);

  //PRELIMINARY COMPUTING
  //gaussian smoothing is performed (local mean mu = G * img)
  cvSmooth(img1, mu1, CV_GAUSSIAN, gaussian_window, gaussian_window, gaussian_sigma);
  cvSmooth(img2, mu2, CV_GAUSSIAN, gaussian_window, gaussian_window, gaussian_sigma);

  //gettting mu, mu_sq, mu1_mu2
  cvPow(mu1, mu1_sq, 2);
  cvPow(mu2, mu2_sq, 2);
  cvMul(mu1, mu2, mu1_mu2, 1);

  //calculating sigma1, sigma2, sigma12: sigma^2 = G*img^2 - mu^2
  cvSmooth(img1_sq, sigma1_sq, CV_GAUSSIAN, gaussian_window, gaussian_window, gaussian_sigma);
  cvSub(sigma1_sq, mu1_sq, sigma1_sq);

  cvSmooth(img2_sq, sigma2_sq, CV_GAUSSIAN, gaussian_window, gaussian_window, gaussian_sigma);
  cvSub(sigma2_sq, mu2_sq, sigma2_sq);

  cvSmooth(img1_img2, sigma12, CV_GAUSSIAN, gaussian_window, gaussian_window, gaussian_sigma);
  cvSub(sigma12, mu1_mu2, sigma12);
  
  //releasing some junk buffers
  cvReleaseImage(&img1);
  cvReleaseImage(&img2);
  cvReleaseImage(&img1_sq);
  cvReleaseImage(&img2_sq);
  cvReleaseImage(&img1_img2);
  cvReleaseImage(&mu1);
  cvReleaseImage(&mu2);
  
  // creating buffers for numerator and denominator 
  IplImage *numerator1 = cvCreateImage(size, d, nChan);
  IplImage *numerator2 = cvCreateImage(size, d, nChan);
  IplImage *numerator = cvCreateImage(size, d, nChan);
  IplImage *denominator1 = cvCreateImage(size, d, nChan);
  IplImage *denominator2 = cvCreateImage(size, d, nChan);
  IplImage *denominator = cvCreateImage(size, d, nChan);

  // FORMULA to calculate SSIM
  // (2*mu1_mu2 + C1)
  cvScale(mu1_mu2, numerator1, 2);
  cvAddS(numerator1, cvScalarAll(C1), numerator1);
  // (2*sigma12 + C2) 
  cvScale(sigma12, numerator2, 2);
  cvAddS(numerator2, cvScalarAll(C2), numerator2);
  // ((2*mu1_mu2 + C1).*(2*sigma12 + C2))
  cvMul(numerator1, numerator2, numerator, 1);

  // (mu1_sq + mu2_sq + C1)
  cvAdd(mu1_sq, mu2_sq, denominator1);
  cvAddS(denominator1, cvScalarAll(C1), denominator1);
  // (sigma1_sq + sigma2_sq + C2) >>>
  cvAdd(sigma1_sq, sigma2_sq, denominator2);
  cvAddS(denominator2, cvScalarAll(C2),denominator2);
  // ((mu1_sq + mu2_sq + C1).*(sigma1_sq + sigma2_sq + C2))
  cvMul(denominator1, denominator2, denominator, 1);

  //Release some junk buffers 
  cvReleaseImage(&numerator1);
  cvReleaseImage(&denominator1);
  cvReleaseImage(&mu1_sq);
  cvReleaseImage(&mu2_sq);
  cvReleaseImage(&mu1_mu2);
  cvReleaseImage(&sigma1_sq);
  cvReleaseImage(&sigma2_sq);
  cvReleaseImage(&sigma12);

  //ssim map and cs_map (member images; see leak note in the header comment)
  ssim_map = cvCreateImage(size, d, nChan);
  cs_map = cvCreateImage(size, d, nChan);
  // SSIM_INDEX map 
  // ((2*mu1_mu2 + C1).*(2*sigma12 + C2))./((mu1_sq + mu2_sq + C1).*(sigma1_sq + sigma2_sq + C2))
  cvDiv(numerator, denominator, ssim_map, 1);
  // Contrast Structure CS_index map
  // (2*sigma12 + C2)./(sigma1_sq + sigma2_sq + C2)
  cvDiv(numerator2, denominator2, cs_map, 1);

  // average is taken for both SSIM_map and CS_map 
  mssim_value = cvAvg(ssim_map);
  mean_cs_value = cvAvg(cs_map);

  //Release images
  cvReleaseImage(&numerator);
  cvReleaseImage(&denominator);
  cvReleaseImage(&numerator2);
  cvReleaseImage(&denominator2);
  cvReleaseImage(&src1);
  cvReleaseImage(&src2);

  return mssim_value;
}
Example #22
0
/*
 * Computes the Universal Image Quality Index (Wang & Bovik) between
 * source1 and source2, after converting both through
 * colorspaceConversion().  Local statistics use a BxB box filter
 * (member B).  Side effects: allocates the member image
 * image_quality_map and stores cvAvg() of it in image_quality_value.
 * Returns the per-channel mean quality index as a CvScalar.
 *
 * NOTE(review): image_quality_map is allocated on every call without
 * releasing a previous allocation -- leaks on repeated calls unless it
 * is freed elsewhere in the class; confirm.
 */
CvScalar calcQualityIndex :: compare(IplImage *source1, IplImage *source2, Colorspace space)
{
    IplImage *src1,* src2;
    src1 = colorspaceConversion(source1, space);
    src2 = colorspaceConversion(source2, space);

    int x = src1->width, y = src1->height;
    int nChan = src1->nChannels, d = IPL_DEPTH_32F;
    CvSize size = cvSize(x, y);

    //creating FLOAT type images of src1 and src2
    IplImage *img1 = cvCreateImage(size, d, nChan);
    IplImage *img2 = cvCreateImage(size, d, nChan);

    //Image squares
    IplImage *img1_sq = cvCreateImage(size, d, nChan);
    IplImage *img2_sq = cvCreateImage(size, d, nChan);
    IplImage *img1_img2 = cvCreateImage(size, d, nChan);

    cvConvert(src1, img1);
    cvConvert(src2, img2);

    //Squaring the images thus created
    cvPow(img1, img1_sq, 2);
    cvPow(img2, img2_sq, 2);
    cvMul(img1, img2, img1_img2, 1);

    // local means and their products
    IplImage *mu1 = cvCreateImage(size, d, nChan);
    IplImage *mu2 = cvCreateImage(size, d, nChan);
    IplImage *mu1_sq = cvCreateImage(size, d, nChan);
    IplImage *mu2_sq = cvCreateImage(size, d, nChan);
    IplImage *mu1_mu2 = cvCreateImage(size, d, nChan);

    // local variances and covariance
    IplImage *sigma1_sq = cvCreateImage(size, d, nChan);
    IplImage *sigma2_sq = cvCreateImage(size, d, nChan);
    IplImage *sigma12 = cvCreateImage(size, d, nChan);

    //PRELIMINARY COMPUTING

    //average smoothing is performed (local mean over a BxB window)
    cvSmooth(img1, mu1, CV_BLUR, B, B);
    cvSmooth(img2, mu2, CV_BLUR, B, B);

    //gettting mu, mu_sq, mu1_mu2
    cvPow(mu1, mu1_sq, 2);
    cvPow(mu2, mu2_sq, 2);
    cvMul(mu1, mu2, mu1_mu2, 1);

    //calculating sigma1, sigma2, sigma12: sigma^2 = blur(img^2) - mu^2
    cvSmooth(img1_sq, sigma1_sq, CV_BLUR, B, B);
    cvSub(sigma1_sq, mu1_sq, sigma1_sq);

    cvSmooth(img2_sq, sigma2_sq, CV_BLUR, B, B);
    cvSub(sigma2_sq, mu2_sq, sigma2_sq);

    cvSmooth(img1_img2, sigma12, CV_BLUR, B, B);
    cvSub(sigma12, mu1_mu2, sigma12);

    //Releasing unused images
    cvReleaseImage(&img1);
    cvReleaseImage(&img2);
    cvReleaseImage(&img1_sq);
    cvReleaseImage(&img2_sq);
    cvReleaseImage(&img1_img2);

    // creating buffers for numerator and denominator
    IplImage *numerator1 = cvCreateImage(size, d, nChan);
    IplImage *numerator = cvCreateImage(size, d, nChan);
    IplImage *denominator1 = cvCreateImage(size, d, nChan);
    IplImage *denominator2 = cvCreateImage(size, d, nChan);
    IplImage *denominator = cvCreateImage(size, d, nChan);

    // FORMULA to calculate Image Quality Index

    // (4*sigma12)
    cvScale(sigma12, numerator1, 4);

    // (4*sigma12).*(mu1*mu2)
    cvMul(numerator1, mu1_mu2, numerator, 1);

    // (mu1_sq + mu2_sq)
    cvAdd(mu1_sq, mu2_sq, denominator1);

    // (sigma1_sq + sigma2_sq)
    cvAdd(sigma1_sq, sigma2_sq, denominator2);

    //Release images
    cvReleaseImage(&mu1);
    cvReleaseImage(&mu2);
    cvReleaseImage(&mu1_sq);
    cvReleaseImage(&mu2_sq);
    cvReleaseImage(&mu1_mu2);
    cvReleaseImage(&sigma1_sq);
    cvReleaseImage(&sigma2_sq);
    cvReleaseImage(&sigma12);
    cvReleaseImage(&numerator1);

    // ((mu1_sq + mu2_sq).*(sigma1_sq + sigma2_sq))
    cvMul(denominator1, denominator2, denominator, 1);

    //image_quality map (member image; see leak note in the header comment)
    image_quality_map = cvCreateImage(size, d, nChan);
    float *num, *den, *res;
    num = (float*)(numerator->imageData);
    den = (float*)(denominator->imageData);
    res = (float*)(image_quality_map->imageData);

    // dividing by hand, mapping 0/0 to 1 (identical flat regions)
    // ((4*sigma12).*(mu1_mu2))./((mu1_sq + mu2_sq).*(sigma1_sq + sigma2_sq))
    for (int i=0; i<(x*y*nChan); i++) {
        if (den[i] == 0)
        {
            num[i] = (float)(1.0);
            den[i] = (float)(1.0);
        }
        res[i] = 1.0*(num[i]/den[i]);
    }

    // cvDiv doesnt work
    //cvDiv(numerator, denominator, image_quality_map, 1);

    // image_quality_map created in image_quality_map
    // average is taken
    image_quality_value = cvAvg(image_quality_map);

    //Release images
    cvReleaseImage(&numerator);
    cvReleaseImage(&denominator);
    cvReleaseImage(&denominator1);
    cvReleaseImage(&denominator2);
    cvReleaseImage(&src1);
    cvReleaseImage(&src2);

    return image_quality_value;
}
Example #23
0
/*
 * Exact Euclidean distance transform: for an 8uC1 source image `src`,
 * writes into the 32fC1 image `dst` the distance from each pixel to the
 * nearest zero pixel.  Two-pass algorithm: stage 1 computes per-column
 * 1-D distances, stage 2 builds the lower envelope of parabolas per row
 * (Felzenszwalb & Huttenlocher style); the final cvPow takes the square
 * root of the accumulated squared distances.  Both stages may run under
 * OpenMP, with per-thread scratch areas carved out of `buffer`.
 */
static void
icvTrueDistTrans( const CvMat* src, CvMat* dst )
{
    cv::Ptr<CvMat> buffer = 0;

    int i, m, n;
    int sstep, dstep;
    const float inf = 1e6f;  // "infinity": larger than any real distance
    int thread_count = cvGetNumThreads();
    int pass1_sz, pass2_sz;

    if( !CV_ARE_SIZES_EQ( src, dst ))
        CV_Error( CV_StsUnmatchedSizes, "" );

    if( CV_MAT_TYPE(src->type) != CV_8UC1 ||
        CV_MAT_TYPE(dst->type) != CV_32FC1 )
        CV_Error( CV_StsUnsupportedFormat,
        "The input image must have 8uC1 type and the output one must have 32fC1 type" );

    m = src->rows;
    n = src->cols;

    // (see stage 1 below):
    // sqr_tab: 2*m, sat_tab: 3*m + 1, d: m*thread_count,
    pass1_sz = src->rows*(5 + thread_count) + 1;
    // (see stage 2):
    // sqr_tab & inv_tab: n each; f & v: n*thread_count each; z: (n+1)*thread_count
    pass2_sz = src->cols*(2 + thread_count*3) + thread_count;
    buffer = cvCreateMat( 1, MAX(pass1_sz, pass2_sz), CV_32FC1 );

    sstep = src->step;                     // byte stride (uchar data)
    dstep = dst->step / sizeof(float);     // element stride (float data)

    // stage 1: compute 1d distance transform of each column
    float* sqr_tab = buffer->data.fl;      // i -> i*i (capped at inf)
    int* sat_tab = (int*)(sqr_tab + m*2);  // saturating subtraction table
    const int shift = m*2;

    for( i = 0; i < m; i++ )
        sqr_tab[i] = (float)(i*i);
    for( i = m; i < m*2; i++ )
        sqr_tab[i] = inf;
    for( i = 0; i < shift; i++ )
        sat_tab[i] = 0;
    for( ; i <= m*3; i++ )
        sat_tab[i] = i - shift;

#ifdef _OPENMP
    #pragma omp parallel for num_threads(thread_count)
#endif
    for( i = 0; i < n; i++ )
    {
        const uchar* sptr = src->data.ptr + i + (m-1)*sstep;
        float* dptr = dst->data.fl + i;
        // per-thread scratch column behind the shared tables
        int* d = (int*)(sat_tab + m*3+1+m*cvGetThreadNum());
        int j, dist = m-1;

        // bottom-up sweep: distance to nearest zero below (branch-free
        // reset via masking when the source pixel is zero)
        for( j = m-1; j >= 0; j--, sptr -= sstep )
        {
            dist = (dist + 1) & (sptr[0] == 0 ? 0 : -1);
            d[j] = dist;
        }

        // top-down sweep combines with the upward distances; store squares
        dist = m-1;
        for( j = 0; j < m; j++, dptr += dstep )
        {
            dist = dist + 1 - sat_tab[dist + 1 - d[j] + shift];
            d[j] = dist;
            dptr[0] = sqr_tab[dist];
        }
    }

    // stage 2: compute modified distance transform for each row
    float* inv_tab = buffer->data.fl;      // i -> 0.5/i
    sqr_tab = inv_tab + n;                 // i -> i*i

    inv_tab[0] = sqr_tab[0] = 0.f;
    for( i = 1; i < n; i++ )
    {
        inv_tab[i] = (float)(0.5/i);
        sqr_tab[i] = (float)(i*i);
    }

#ifdef _OPENMP
    #pragma omp parallel for num_threads(thread_count) schedule(dynamic)
#endif
    for( i = 0; i < m; i++ )
    {
        float* d = (float*)(dst->data.ptr + i*dst->step);
        // per-thread scratch: f = parabola heights, z = envelope boundaries,
        // v = parabola vertex positions
        float* f = sqr_tab + n + (n*3+1)*cvGetThreadNum();
        float* z = f + n;
        int* v = (int*)(z + n + 1);
        int p, q, k;

        v[0] = 0;
        z[0] = -inf;
        z[1] = inf;
        f[0] = d[0];

        // build the lower envelope of parabolas y = (x-q)^2 + f[q]
        for( q = 1, k = 0; q < n; q++ )
        {
            float fq = d[q];
            f[q] = fq;

            for(;;k--)
            {
                p = v[k];
                // intersection of parabolas rooted at p and q
                float s = (fq + sqr_tab[q] - d[p] - sqr_tab[p])*inv_tab[q - p];
                if( s > z[k] )
                {
                    k++;
                    v[k] = q;
                    z[k] = s;
                    z[k+1] = inf;
                    break;
                }
            }
        }

        // evaluate the envelope at every column
        for( q = 0, k = 0; q < n; q++ )
        {
            while( z[k+1] < q )
                k++;
            p = v[k];
            d[q] = sqr_tab[abs(q - p)] + f[p];
        }
    }

    // squared distances -> distances
    cvPow( dst, dst, 0.5 );
}
Example #24
0
/*
 * Parameters : complete path to the two image to be compared
 * The file format must be supported by your OpenCV build
 */
/*
 * Computes the per-channel SSIM index between the two images given on the
 * command line and prints the R, G and B indices as percentages.
 * Returns 0 on success, -1 on bad usage or when either image fails to load.
 */
int main(int argc, char** argv)
{
	if(argc!=3)
		return -1;
	
	// default SSIM constants: C1 = (0.01*255)^2, C2 = (0.03*255)^2
	double C1 = 6.5025, C2 = 58.5225;

	IplImage
		*img1=NULL, *img2=NULL, *img1_img2=NULL,
		*img1_temp=NULL, *img2_temp=NULL,
		*img1_sq=NULL, *img2_sq=NULL,
		*mu1=NULL, *mu2=NULL,
		*mu1_sq=NULL, *mu2_sq=NULL, *mu1_mu2=NULL,
		*sigma1_sq=NULL, *sigma2_sq=NULL, *sigma12=NULL,
		*ssim_map=NULL, *temp1=NULL, *temp2=NULL, *temp3=NULL;
	

	/***************************** INITS **********************************/
	img1_temp = cvLoadImage(argv[1]);
	img2_temp = cvLoadImage(argv[2]);

	if(img1_temp==NULL || img2_temp==NULL)
	{
		// release whichever image did load (cvReleaseImage ignores NULL)
		cvReleaseImage(&img1_temp);
		cvReleaseImage(&img2_temp);
		return -1;
	}

	int x=img1_temp->width, y=img1_temp->height;
	int nChan=img1_temp->nChannels, d=IPL_DEPTH_32F;
	CvSize size = cvSize(x, y);

	img1 = cvCreateImage( size, d, nChan);
	img2 = cvCreateImage( size, d, nChan);

	cvConvert(img1_temp, img1);
	cvConvert(img2_temp, img2);
	cvReleaseImage(&img1_temp);
	cvReleaseImage(&img2_temp);

	
	img1_sq = cvCreateImage( size, d, nChan);
	img2_sq = cvCreateImage( size, d, nChan);
	img1_img2 = cvCreateImage( size, d, nChan);
	
	cvPow( img1, img1_sq, 2 );
	cvPow( img2, img2_sq, 2 );
	cvMul( img1, img2, img1_img2, 1 );

	// local means and their products
	mu1 = cvCreateImage( size, d, nChan);
	mu2 = cvCreateImage( size, d, nChan);

	mu1_sq = cvCreateImage( size, d, nChan);
	mu2_sq = cvCreateImage( size, d, nChan);
	mu1_mu2 = cvCreateImage( size, d, nChan);
	
	// local variances and covariance
	sigma1_sq = cvCreateImage( size, d, nChan);
	sigma2_sq = cvCreateImage( size, d, nChan);
	sigma12 = cvCreateImage( size, d, nChan);

	temp1 = cvCreateImage( size, d, nChan);
	temp2 = cvCreateImage( size, d, nChan);
	temp3 = cvCreateImage( size, d, nChan);

	ssim_map = cvCreateImage( size, d, nChan);
	/*************************** END INITS **********************************/


	//////////////////////////////////////////////////////////////////////////
	// PRELIMINARY COMPUTING: 11x11 Gaussian (sigma 1.5) local statistics
	cvSmooth( img1, mu1, CV_GAUSSIAN, 11, 11, 1.5 );
	cvSmooth( img2, mu2, CV_GAUSSIAN, 11, 11, 1.5 );
	
	cvPow( mu1, mu1_sq, 2 );
	cvPow( mu2, mu2_sq, 2 );
	cvMul( mu1, mu2, mu1_mu2, 1 );

	// sigma^2 = G*img^2 - mu^2 (likewise for the covariance)
	cvSmooth( img1_sq, sigma1_sq, CV_GAUSSIAN, 11, 11, 1.5 );
	cvAddWeighted( sigma1_sq, 1, mu1_sq, -1, 0, sigma1_sq );
	
	cvSmooth( img2_sq, sigma2_sq, CV_GAUSSIAN, 11, 11, 1.5 );
	cvAddWeighted( sigma2_sq, 1, mu2_sq, -1, 0, sigma2_sq );

	cvSmooth( img1_img2, sigma12, CV_GAUSSIAN, 11, 11, 1.5 );
	cvAddWeighted( sigma12, 1, mu1_mu2, -1, 0, sigma12 );
	

	//////////////////////////////////////////////////////////////////////////
	// FORMULA

	// (2*mu1_mu2 + C1)
	cvScale( mu1_mu2, temp1, 2 );
	cvAddS( temp1, cvScalarAll(C1), temp1 );

	// (2*sigma12 + C2)
	cvScale( sigma12, temp2, 2 );
	cvAddS( temp2, cvScalarAll(C2), temp2 );

	// ((2*mu1_mu2 + C1).*(2*sigma12 + C2))
	cvMul( temp1, temp2, temp3, 1 );

	// (mu1_sq + mu2_sq + C1)
	cvAdd( mu1_sq, mu2_sq, temp1 );
	cvAddS( temp1, cvScalarAll(C1), temp1 );

	// (sigma1_sq + sigma2_sq + C2)
	cvAdd( sigma1_sq, sigma2_sq, temp2 );
	cvAddS( temp2, cvScalarAll(C2), temp2 );

	// ((mu1_sq + mu2_sq + C1).*(sigma1_sq + sigma2_sq + C2))
	cvMul( temp1, temp2, temp1, 1 );

	// ((2*mu1_mu2 + C1).*(2*sigma12 + C2))./((mu1_sq + mu2_sq + C1).*(sigma1_sq + sigma2_sq + C2))
	cvDiv( temp3, temp1, ssim_map, 1 );


	CvScalar index_scalar = cvAvg( ssim_map );
	
	// through observation, there is approximately 
	// 1% error max with the original matlab program

	cout << "(R, G & B SSIM index)" << endl ;
	cout << index_scalar.val[2] * 100 << "%" << endl ;
	cout << index_scalar.val[1] * 100 << "%" << endl ;
	cout << index_scalar.val[0] * 100 << "%" << endl ;

	// release every remaining IplImage (the original version warned about
	// this in a comment but never actually did it)
	cvReleaseImage(&img1);
	cvReleaseImage(&img2);
	cvReleaseImage(&img1_sq);
	cvReleaseImage(&img2_sq);
	cvReleaseImage(&img1_img2);
	cvReleaseImage(&mu1);
	cvReleaseImage(&mu2);
	cvReleaseImage(&mu1_sq);
	cvReleaseImage(&mu2_sq);
	cvReleaseImage(&mu1_mu2);
	cvReleaseImage(&sigma1_sq);
	cvReleaseImage(&sigma2_sq);
	cvReleaseImage(&sigma12);
	cvReleaseImage(&temp1);
	cvReleaseImage(&temp2);
	cvReleaseImage(&temp3);
	cvReleaseImage(&ssim_map);

	return 0;
}
Example #25
0
int main(int argc, char ** argv)
{
int height,width,step,channels;
uchar* data;
uchar* data1;
int i,j,k;
float s;

CvMat *dft_A;
CvMat *dft_B;
CvMat *dft_C;
IplImage* im;
IplImage* im1;

IplImage* image_ReB;
IplImage* image_ImB;

IplImage* image_ReC;
IplImage* image_ImC;
IplImage* complex_ImC;

IplImage* image_ReDen;
IplImage* image_ImDen;

FILE *fp;
fp = fopen("test.txt","w+");

int dft_M,dft_N;
int dft_M1,dft_N1;
CvMat* cvShowDFT();
void cvShowInvDFT();

im1 = cvLoadImage( "kutty-1.jpg",1 );
cvNamedWindow("original-color", 0);
cvShowImage("original-color", im1);
im = cvLoadImage( "kutty-1.jpg", CV_LOAD_IMAGE_GRAYSCALE );
if( !im )
return -1;

cvNamedWindow("original-gray", 0);
cvShowImage("original-gray", im);
// Create blur kernel (non-blind)
//float vals[]={.000625,.000625,.000625,.003125,.003125,.003125,.000625,.000625,.000625};
//float vals[]={-0.167,0.333,0.167,-0.167,.333,.167,-0.167,.333,.167};

float vals[]={.055,.055,.055,.222,.222,.222,.055,.055,.055};
CvMat kernel = cvMat(3, // number of rows
3, // number of columns
CV_32FC1, // matrix data type
vals);
IplImage* k_image_hdr;
IplImage* k_image;

k_image_hdr = cvCreateImageHeader(cvSize(3,3),IPL_DEPTH_64F,2);
k_image = cvCreateImage(cvSize(3,3),IPL_DEPTH_64F,1);
k_image = cvGetImage(&kernel,k_image_hdr);

/*IplImage* k_image;
k_image = cvLoadImage( "kernel4.bmp",0 );*/
cvNamedWindow("blur kernel", 0);

height = k_image->height;
width = k_image->width;
step = k_image->widthStep;

channels = k_image->nChannels;
//data1 = (float *)(k_image->imageData);
data1 = (uchar *)(k_image->imageData);

cvShowImage("blur kernel", k_image);

dft_M = cvGetOptimalDFTSize( im->height - 1 );
dft_N = cvGetOptimalDFTSize( im->width - 1 );

//dft_M1 = cvGetOptimalDFTSize( im->height+99 - 1 );
//dft_N1 = cvGetOptimalDFTSize( im->width+99 - 1 );

dft_M1 = cvGetOptimalDFTSize( im->height+3 - 1 );
dft_N1 = cvGetOptimalDFTSize( im->width+3 - 1 );

// Perform DFT of original image
dft_A = cvShowDFT(im, dft_M1, dft_N1,"original");
//Perform inverse (check & comment out) - Commented as it overwrites dft_A
//cvShowInvDFT(im,dft_A,dft_M1,dft_N1,fp, "original");

// Perform DFT of kernel
dft_B = cvShowDFT(k_image,dft_M1,dft_N1,"kernel");
//Perform inverse of kernel (check & comment out) - commented as it overwrites dft_B
//cvShowInvDFT(k_image,dft_B,dft_M1,dft_N1,fp, "kernel");

// Multiply numerator with complex conjugate
dft_C = cvCreateMat( dft_M1, dft_N1, CV_64FC2 );

printf("%d %d %d %d\n",dft_M,dft_N,dft_M1,dft_N1);

// Multiply DFT(blurred image) * complex conjugate of blur kernel
cvMulSpectrums(dft_A,dft_B,dft_C,CV_DXT_MUL_CONJ);

// Split Fourier in real and imaginary parts
image_ReC = cvCreateImage( cvSize(dft_N1, dft_M1), IPL_DEPTH_64F, 1);
image_ImC = cvCreateImage( cvSize(dft_N1, dft_M1), IPL_DEPTH_64F, 1);
complex_ImC = cvCreateImage( cvSize(dft_N1, dft_M1), IPL_DEPTH_64F, 2);

printf("%d %d %d %d\n",dft_M,dft_N,dft_M1,dft_N1);

//cvSplit( dft_C, image_ReC, image_ImC, 0, 0 );
cvSplit( dft_C, image_ReC, image_ImC, 0, 0 );

// Compute A^2 + B^2 of denominator or blur kernel
image_ReB = cvCreateImage( cvSize(dft_N1, dft_M1), IPL_DEPTH_64F, 1);
image_ImB = cvCreateImage( cvSize(dft_N1, dft_M1), IPL_DEPTH_64F, 1);

// Split Real and imaginary parts
cvSplit( dft_B, image_ReB, image_ImB, 0, 0 );
cvPow( image_ReB, image_ReB, 2.0);
cvPow( image_ImB, image_ImB, 2.0);
cvAdd(image_ReB, image_ImB, image_ReB,0);

//Divide Numerator/A^2 + B^2
cvDiv(image_ReC, image_ReB, image_ReC, 1.0);
cvDiv(image_ImC, image_ReB, image_ImC, 1.0);

// Merge Real and complex parts
cvMerge(image_ReC, image_ImC, NULL, NULL, complex_ImC);

// Perform Inverse
cvShowInvDFT(im, complex_ImC,dft_M1,dft_N1,fp,"deblur");
cvWaitKey(-1);
return 0;
}
Example #26
0
int main(int argc, char ** argv)
{
    const char* filename = argc >=2 ? argv[1] : "lena.jpg";
    IplImage * im;

    IplImage * realInput;
    IplImage * imaginaryInput;
    IplImage * complexInput;
    int dft_M, dft_N;
    CvMat* dft_A, tmp;
    IplImage * image_Re;
    IplImage * image_Im;
    double m, M;

    im = cvLoadImage( filename, CV_LOAD_IMAGE_GRAYSCALE );
    if( !im )
        return -1;

    realInput = cvCreateImage( cvGetSize(im), IPL_DEPTH_64F, 1);
    imaginaryInput = cvCreateImage( cvGetSize(im), IPL_DEPTH_64F, 1);
    complexInput = cvCreateImage( cvGetSize(im), IPL_DEPTH_64F, 2);

    cvScale(im, realInput, 1.0, 0.0);
    cvZero(imaginaryInput);
    cvMerge(realInput, imaginaryInput, NULL, NULL, complexInput);

    dft_M = cvGetOptimalDFTSize( im->height - 1 );
    dft_N = cvGetOptimalDFTSize( im->width - 1 );

    dft_A = cvCreateMat( dft_M, dft_N, CV_64FC2 );
    image_Re = cvCreateImage( cvSize(dft_N, dft_M), IPL_DEPTH_64F, 1);
    image_Im = cvCreateImage( cvSize(dft_N, dft_M), IPL_DEPTH_64F, 1);

    // copy A to dft_A and pad dft_A with zeros
    cvGetSubRect( dft_A, &tmp, cvRect(0,0, im->width, im->height));
    cvCopy( complexInput, &tmp, NULL );
    if( dft_A->cols > im->width )
    {
        cvGetSubRect( dft_A, &tmp, cvRect(im->width,0, dft_A->cols - im->width, im->height));
        cvZero( &tmp );
    }

    // no need to pad bottom part of dft_A with zeros because of
    // use nonzero_rows parameter in cvDFT() call below

    cvDFT( dft_A, dft_A, CV_DXT_FORWARD, complexInput->height );

    cvNamedWindow("win", 0);
    cvNamedWindow("magnitude", 0);
    cvShowImage("win", im);

    // Split Fourier in real and imaginary parts
    cvSplit( dft_A, image_Re, image_Im, 0, 0 );

    // Compute the magnitude of the spectrum Mag = sqrt(Re^2 + Im^2)
    cvPow( image_Re, image_Re, 2.0);
    cvPow( image_Im, image_Im, 2.0);
    cvAdd( image_Re, image_Im, image_Re, NULL);
    cvPow( image_Re, image_Re, 0.5 );

    // Compute log(1 + Mag)
    cvAddS( image_Re, cvScalarAll(1.0), image_Re, NULL ); // 1 + Mag
    cvLog( image_Re, image_Re ); // log(1 + Mag)


    // Rearrange the quadrants of Fourier image so that the origin is at
    // the image center
    cvShiftDFT( image_Re, image_Re );

    cvMinMaxLoc(image_Re, &m, &M, NULL, NULL, NULL);
    cvScale(image_Re, image_Re, 1.0/(M-m), 1.0*(-m)/(M-m));
    cvShowImage("magnitude", image_Re);

    cvWaitKey(-1);
    return 0;
}
/*
 * Computes the normalized log-magnitude DFT spectrum of the member image
 * srcImage and stores it in the member image image_Re_save.
 */
void CMagicCabsineUniversalProperty_feature_texture_spectral_DFT::ComputeProperty()
{

	IplImage * im;// the input to the DFT must be a grayscale image
	
	
	IplImage * realInput;
	IplImage * imaginaryInput;
	IplImage * complexInput;
	int dft_M, dft_N;
	CvMat* dft_A, tmp;
	IplImage * image_Im;
	IplImage * image_Re;

	double m, M;
	
	// convert the source image to grayscale
	IplImage *srcimg_cvt = cvCreateImage(cvGetSize(srcImage),IPL_DEPTH_8U,3);// formatted copy of the source image
	im = cvCreateImage(cvGetSize(srcImage),IPL_DEPTH_8U,1);
	cvConvertScale(srcImage,srcimg_cvt);// format the source image (NOTE(review): the old comment claimed 32F, but this buffer is IPL_DEPTH_8U / 3 channels)
	cvCvtColor(srcImage,im,CV_BGR2GRAY);// source image to grayscale
	cvReleaseImage(&srcimg_cvt);

	realInput = cvCreateImage( cvGetSize(srcImage), IPL_DEPTH_64F, 1);// grayscale plane
	imaginaryInput = cvCreateImage( cvGetSize(srcImage), IPL_DEPTH_64F, 1);// grayscale plane
	complexInput = cvCreateImage( cvGetSize(srcImage), IPL_DEPTH_64F, 2);

	cvScale(im, realInput, 1.0, 0.0);// widen from 8u to floating point for precision
	cvZero(imaginaryInput);
	// merge the single-channel images into one multi-channel image
	// (realInput + imaginaryInput -> complexInput)
	cvMerge(realInput, imaginaryInput, NULL, NULL, complexInput);

	/*dft_M = cvGetOptimalDFTSize( im->height - 1 );
	dft_N = cvGetOptimalDFTSize( im->width - 1 );*/

	dft_M = im->height;
	dft_N = im->width;
	dft_A = cvCreateMat( dft_M, dft_N, CV_64FC2 );
	image_Re = cvCreateImage( cvSize(dft_N, dft_M), IPL_DEPTH_64F, 1);// image at the chosen DFT size
	image_Im = cvCreateImage( cvSize(dft_N, dft_M), IPL_DEPTH_64F, 1);

	// copy A to dft_A and pad dft_A with zeros
	cvGetSubRect( dft_A, &tmp, cvRect(0,0, im->width, im->height));
	cvCopy( complexInput, &tmp, NULL );
	if( dft_A->cols > im->width )
	{
		cvGetSubRect( dft_A, &tmp, cvRect(im->width,0, dft_A->cols - im->width, im->height));
		cvZero( &tmp );
	}

	// no need to pad bottom part of dft_A with zeros because of
	// use nonzero_rows parameter in cvDFT() call below

	cvDFT( dft_A, dft_A, CV_DXT_FORWARD, complexInput->height );
	

	// Split Fourier in real and imaginary parts
	cvSplit( dft_A, image_Re, image_Im, 0, 0 );
	
	// Compute the magnitude of the spectrum Mag = sqrt(Re^2 + Im^2)
	cvPow( image_Re, image_Re, 2.0);
	cvPow( image_Im, image_Im, 2.0);
	cvAdd( image_Re, image_Im, image_Re, NULL);
	cvPow( image_Re, image_Re, 0.5 );

	// Compute log(1 + Mag)
	cvAddS( image_Re, cvScalarAll(1.0), image_Re, NULL ); // 1 + Mag
	cvLog( image_Re, image_Re ); // log(1 + Mag)
		
	// Rearrange the quadrants of Fourier image so that the origin is at
	// the image center
	cvShiftDFT( image_Re, image_Re );
	cvMinMaxLoc(image_Re, &m, &M, NULL, NULL, NULL);
	cvScale(image_Re, image_Re, 1.0/(M-m), 1.0*(-m)/(M-m));
	//cvShowImage("magnitude", image_Re);

	// normalize the Fourier image so it can be stored
	image_Re_save = cvCreateImage(cvSize(dft_N,dft_M),IPL_DEPTH_64F,1);// NOTE: would ideally be created at initialization, but the size is unknown there, so it is created here and must be released in the destructor
	cvNormalize(image_Re,image_Re_save,255,0,CV_MINMAX,NULL);



	cvReleaseImage(&im);
	cvReleaseImage(&realInput);
	cvReleaseImage(&imaginaryInput);
	cvReleaseImage(&complexInput);
	cvReleaseImage(&image_Re);
	cvReleaseImage(&image_Im);

	//// normalize the Fourier coefficients with the L2 (Euclidean) norm to obtain
	//// NFP (normalized Fourier coefficients), used for 3 Fourier features; no absolute value applied
	//cvNormalize(image_Re,image_Re,1,0,CV_L2,NULL);

	////test
	//cvMinMaxLoc(image_Re, &m, &M, NULL, NULL, NULL);
	//// take the absolute value of the NFP
	//cvAbs(image_Re,image_Re);

	////test
	//cvMinMaxLoc(image_Re, &m, &M, NULL, NULL, NULL);



	//// compute the entropy of the Fourier coefficients
	//double h = FT_Entropy(image_Re, dft_N, dft_M);

	//double e = FT_Energy(image_Re, dft_N, dft_M);

	//double i = FT_Inertia(image_Re, dft_N, dft_M);


	//// save the features
	//featurefilename+=".txt";
	//
	//FILE* file = fopen(featurefilename.c_str(),"w");
	//fprintf_s(file,"%.6f\n",h);
	//fprintf_s(file,"%.6f\n",e);
	//fprintf_s(file,"%.6f\n",i);
	//fclose(file);
	//return 0;
}
Example #28
0
// threshold trackbar callback
void on_trackbar( int dummy )
{
    static const uchar colors[][3] = 
    {
        {0,0,0},
        {255,0,0},
        {255,128,0},
        {255,255,0},
        {0,255,0},
        {0,128,255},
        {0,255,255},
        {0,0,255},
        {255,0,255}
    };
    
    int msize = mask_size;
    int _dist_type = build_voronoi ? CV_DIST_L2 : dist_type;

    cvThreshold( gray, edge, (float)edge_thresh, (float)edge_thresh, CV_THRESH_BINARY );

    if( build_voronoi )
        msize = CV_DIST_MASK_5;

    if( _dist_type == CV_DIST_L1 )
    {
        cvDistTransform( edge, edge, _dist_type, msize, NULL, NULL );
        cvConvert( edge, dist );
    }
    else
        cvDistTransform( edge, dist, _dist_type, msize, NULL, build_voronoi ? labels : NULL );

    if( !build_voronoi )
    {
        // begin "painting" the distance transform result
        cvConvertScale( dist, dist, 5000.0, 0 );
        cvPow( dist, dist, 0.5 );
    
        cvConvertScale( dist, dist32s, 1.0, 0.5 );
        cvAndS( dist32s, cvScalarAll(255), dist32s, 0 );
        cvConvertScale( dist32s, dist8u1, 1, 0 );
        cvConvertScale( dist32s, dist32s, -1, 0 );
        cvAddS( dist32s, cvScalarAll(255), dist32s, 0 );
        cvConvertScale( dist32s, dist8u2, 1, 0 );
        cvMerge( dist8u1, dist8u2, dist8u2, 0, dist8u );
        // end "painting" the distance transform result
    }
    else
    {
        int i, j;
        for( i = 0; i < labels->height; i++ )
        {
            int* ll = (int*)(labels->imageData + i*labels->widthStep);
            float* dd = (float*)(dist->imageData + i*dist->widthStep);
            uchar* d = (uchar*)(dist8u->imageData + i*dist8u->widthStep);
            for( j = 0; j < labels->width; j++ )
            {
                int idx = ll[j] == 0 || dd[j] == 0 ? 0 : (ll[j]-1)%8 + 1;
                int b = cvRound(colors[idx][0]);
                int g = cvRound(colors[idx][1]);
                int r = cvRound(colors[idx][2]);
                d[j*3] = (uchar)b;
                d[j*3+1] = (uchar)g;
                d[j*3+2] = (uchar)r;
            }
        }
    }
    
    cvShowImage( wndname, dist8u );
}
Example #29
0
/* calculates length of a curve (e.g. contour perimeter) */
CV_IMPL  double
cvArcLength( const void *array, CvSlice slice, int is_closed )
{
    double perimeter = 0;

    int i, j = 0, count;
    // squared segment lengths are batched N at a time so the sqrt
    // can be done with a single vectorized cvPow call per batch
    const int N = 16;
    float buf[N];
    CvMat buffer = cvMat( 1, N, CV_32F, buf );
    CvSeqReader reader;
    CvContour contour_header;
    CvSeq* contour = 0;
    CvSeqBlock block;

    // accept either a polyline sequence or a point matrix;
    // a matrix is wrapped into a temporary sequence header
    if( CV_IS_SEQ( array ))
    {
        contour = (CvSeq*)array;
        if( !CV_IS_SEQ_POLYLINE( contour ))
            CV_Error( CV_StsBadArg, "Unsupported sequence type" );
        // is_closed < 0 means "take closedness from the sequence flags"
        if( is_closed < 0 )
            is_closed = CV_IS_SEQ_CLOSED( contour );
    }
    else
    {
        is_closed = is_closed > 0;
        contour = cvPointSeqFromMat(
                                    CV_SEQ_KIND_CURVE | (is_closed ? CV_SEQ_FLAG_CLOSED : 0),
                                    array, &contour_header, &block );
    }

    // a curve with fewer than 2 points has zero length
    if( contour->total > 1 )
    {
        int is_float = CV_SEQ_ELTYPE( contour ) == CV_32FC2;

        cvStartReadSeq( contour, &reader, 0 );
        cvSetSeqReaderPos( &reader, slice.start_index );
        count = cvSliceLength( slice, contour );

        // an open full-length curve has one segment fewer than points
        count -= !is_closed && count == contour->total;

        // scroll the reader by 1 point so prev_elem/ptr form the first segment
        reader.prev_elem = reader.ptr;
        CV_NEXT_SEQ_ELEM( sizeof(CvPoint), reader );

        for( i = 0; i < count; i++ )
        {
            float dx, dy;

            if( !is_float )
            {
                CvPoint* pt = (CvPoint*)reader.ptr;
                CvPoint* prev_pt = (CvPoint*)reader.prev_elem;

                dx = (float)pt->x - (float)prev_pt->x;
                dy = (float)pt->y - (float)prev_pt->y;
            }
            else
            {
                CvPoint2D32f* pt = (CvPoint2D32f*)reader.ptr;
                CvPoint2D32f* prev_pt = (CvPoint2D32f*)reader.prev_elem;

                dx = pt->x - prev_pt->x;
                dy = pt->y - prev_pt->y;
            }

            reader.prev_elem = reader.ptr;
            CV_NEXT_SEQ_ELEM( contour->elem_size, reader );
            // Bugfix by Axel at rubico.com 2010-03-22, affects closed slices only
            // wraparound not handled by CV_NEXT_SEQ_ELEM
            if( is_closed && i == count - 2 )
                cvSetSeqReaderPos( &reader, slice.start_index );

            // accumulate dx^2+dy^2; flush a batch whenever the buffer fills
            // or the last segment has been reached
            buffer.data.fl[j] = dx * dx + dy * dy;
            if( ++j == N || i == count - 1 )
            {
                // shrink the matrix view to the valid prefix before cvPow
                buffer.cols = j;
                cvPow( &buffer, &buffer, 0.5 );
                for( ; j > 0; j-- )
                    perimeter += buffer.data.fl[j-1];
            }
        }
    }

    return perimeter;
}
Example #30
0
// Pops one frame from the input port, applies exactly one frequency-domain
// filter (ideal/butterworth/gaussian, low- or high-pass) via forward DFT,
// filtering, and inverse DFT, and pushes the filtered (or, when no single
// filter is selected, the original) frame to the output port.
// Returns OPROS_BAD_INPUT_PARAMETER for non-power-of-two or non-square input.
ReturnType FrequencyFilter::onExecute()
{
    // Pop one frame from the input port
    opros_any *pData = ImageIn.pop();
    RawImage result;

    if(pData != NULL) {

        // Retrieve the image delivered through the port
        RawImage Image = ImageIn.getContent(*pData);
        RawImageData *RawImage = Image.getImage();

        // Current frame dimensions
        m_in_width = RawImage->getWidth();
        m_in_height = RawImage->getHeight();

        // The DFT below requires power-of-two dimensions
        if(!Check2Square(m_in_width) || !Check2Square(m_in_height)) {

            std::cout << "This image is not a multifplier of 2" << std::endl;

            delete pData; // BUGFIX: popped data was leaked on this early-return path
            return OPROS_BAD_INPUT_PARAMETER;

        }

        // Width and height must be equal (square image)
        if(m_in_width != m_in_height) {

            std::cout << "Size(width and height) of Image is not equal" << std::endl;

            delete pData; // BUGFIX: popped data was leaked on this early-return path
            return OPROS_BAD_INPUT_PARAMETER;

        }

        // Lazily allocate working images on the first frame
        // (sizes are unknown before the first frame arrives).
        // Original input image (3-channel)
        if(m_orig_img == NULL) {
            m_orig_img = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_8U, 3);
        }
        // Grayscale copy
        if(m_gray_img == NULL) {
            m_gray_img = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_8U, 1);
        }
        // Filtering result (single channel)
        if(m_result_img == NULL) {
            m_result_img = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_8U, 1);
        }
        // Final output image (3-channel)
        if(m_final_img == NULL) {
            m_final_img = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_8U, 3);
        }
        // Real part of the spectrum (float)
        if(m_image_Re == NULL) {
            m_image_Re = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_32F, 1);
        }
        // Imaginary part of the spectrum (float)
        if(m_image_Im == NULL) {
            m_image_Im = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_32F, 1);
        }
        // Two-channel (complex) DFT buffer
        if(m_pDFT_A == NULL) {
            m_pDFT_A = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_32F, 2);
        }

        // Copy the raw frame data into the working image
        memcpy(m_orig_img->imageData, RawImage->getData(), RawImage->getSize());

        // Proceed only when exactly one of the two filters is enabled
        // (at least one is not "none" AND at least one is "none")
        if((m_low_Pass_Filtering != "none" || m_high_Pass_Filtering != "none") &&
                (m_low_Pass_Filtering == "none" || m_high_Pass_Filtering == "none")) {

            // Convert the input image to grayscale
            cvCvtColor( m_orig_img, m_gray_img, CV_BGR2GRAY );

            // Promote depth for frequency-domain processing
            cvConvertScale(m_gray_img, m_image_Re);		// 8U -> 32F
            // Imaginary part starts at zero
            cvZero(m_image_Im);

            // shift center
            // Flip the sign of odd-indexed pixels in the real plane so the
            // origin of the frequency domain lands at the image center
            // after the forward transform
            ChangePosition(m_image_Re);

            cvMerge(m_image_Re, m_image_Im, NULL, NULL, m_pDFT_A);

            // Forward Fourier transform of m_pDFT_A
            cvDFT(m_pDFT_A, m_pDFT_A, CV_DXT_FORWARD);

            // Ideal low-pass filtering
            if(m_low_Pass_Filtering == "ideal" && m_high_Pass_Filtering == "none") {

                IdealLowPassFiltering(m_pDFT_A, m_cutoff_Frequency);

            }
            // Butterworth low-pass filtering (order 2)
            else if(m_low_Pass_Filtering == "butterworth" && m_high_Pass_Filtering == "none") {

                ButterworthLowPassFiltering(m_pDFT_A, m_cutoff_Frequency, 2);

            }
            // Gaussian low-pass filtering
            else if(m_low_Pass_Filtering == "gaussian" && m_high_Pass_Filtering == "none") {

                GaussianLowPassFiltering(m_pDFT_A, m_cutoff_Frequency);

            }
            // Ideal high-pass filtering
            else if(m_high_Pass_Filtering == "ideal" && m_low_Pass_Filtering == "none") {

                IdealHighPassFiltering(m_pDFT_A, m_cutoff_Frequency);

            }
            // Butterworth high-pass filtering (order 2)
            else if(m_high_Pass_Filtering == "butterworth" && m_low_Pass_Filtering == "none") {

                ButterworthHighPassFiltering(m_pDFT_A, m_cutoff_Frequency, 2);

            }
            // Gaussian high-pass filtering
            else if(m_high_Pass_Filtering == "gaussian" && m_low_Pass_Filtering == "none") {

                GaussianHighpassFiltering(m_pDFT_A, m_cutoff_Frequency);

            }
            else {
                //none
            }

            // Inverse Fourier transform (with scaling)
            cvDFT(m_pDFT_A, m_pDFT_A, CV_DXT_INV_SCALE);

            // Split the multi-channel matrix back into Re and Im planes
            cvSplit(m_pDFT_A, m_image_Re, m_image_Im, NULL, NULL);

            // Low-pass only: undo the center shift and take the real part
            if((m_low_Pass_Filtering == "ideal" || m_low_Pass_Filtering == "butterworth" || m_low_Pass_Filtering == "gaussian")
                    && m_high_Pass_Filtering == "none") {
                ChangePosition(m_image_Re);
                cvScale(m_image_Re, m_result_img, 1);
            }

            // High-pass only: output the spectrum magnitude
            if((m_high_Pass_Filtering == "ideal" || m_high_Pass_Filtering == "butterworth" || m_high_Pass_Filtering == "gaussian")
                    && m_low_Pass_Filtering == "none") {

                // Magnitude of the spectrum: Mag = sqrt(Re^2 + Im^2)
                cvPow(m_image_Re, m_image_Re, 2.0);
                cvPow(m_image_Im, m_image_Im, 2.0);
                // BUGFIX: was cvAdd(m_image_Re, m_image_Re, m_image_Re), which
                // doubled the real part instead of adding the imaginary part
                cvAdd(m_image_Re, m_image_Im, m_image_Re);
                cvPow(m_image_Re, m_image_Re, 0.5);

                // Scale magnitude pixels into the displayable 0..255 range
                double min_val, max_val;
                cvMinMaxLoc(m_image_Re, &min_val, &max_val, NULL, NULL);
                cvScale(m_image_Re, m_result_img, 255.0/max_val);
            }

            // Expand the 1-channel result into a 3-channel image
            cvMerge(m_result_img, m_result_img, m_result_img, NULL, m_final_img);

            // Push the output:
            // get the RawImage's image pointer
            RawImageData *pimage = result.getImage();

            // resize to the processed image's size and channel count
            pimage->resize(m_final_img->width, m_final_img->height, m_final_img->nChannels);

            // total image size in bytes (pixels * channels)
            int size = m_final_img->width * m_final_img->height * m_final_img->nChannels;

            // destination buffer for the pixel data
            unsigned char *ptrdata = pimage->getData();

            // copy the current frame into the output buffer
            memcpy(ptrdata, m_final_img->imageData, size);

            // send through the output port
            opros_any mdata = result;
            ImageOut.push(result);

            delete pData;

        } else {

            // No single filter selected: pass the original frame through.
            // get the RawImage's image pointer
            RawImageData *pimage = result.getImage();

            // resize to the original image's size and channel count
            pimage->resize(m_orig_img->width, m_orig_img->height, m_orig_img->nChannels);

            // total image size in bytes (pixels * channels)
            int size = m_orig_img->width * m_orig_img->height * m_orig_img->nChannels;

            // destination buffer for the pixel data
            unsigned char *ptrdata = pimage->getData();

            // copy the current frame into the output buffer
            memcpy(ptrdata, m_orig_img->imageData, size);

            // send through the output port
            opros_any mdata = result;
            ImageOut.push(result);

            delete pData;

        }

    }

    return OPROS_SUCCESS;
}