IplImage* cvTestSeqQueryFrame(CvTestSeq* pTestSeq)
{
    CvTestSeq_*     pTS = (CvTestSeq_*)pTestSeq;
    CvTestSeqElem*  p = pTS->pElemList;
    IplImage*       pImg = pTS->pImg;
    IplImage*       pImgAdd = cvCloneImage(pTS->pImg);
    IplImage*       pImgAddG = cvCreateImage(cvSize(pImgAdd->width,pImgAdd->height),IPL_DEPTH_8U,1);
    IplImage*       pImgMask = pTS->pImgMask;
    IplImage*       pImgMaskAdd = cvCloneImage(pTS->pImgMask);
    CvMat*          pT = cvCreateMat(2,3,CV_32F);

    if(pTS->CurFrame >= pTS->FrameNum) return NULL;
    cvZero(pImg);
    cvZero(pImgMask);

    for(p=pTS->pElemList; p; p=p->next)
    {
        int             DirectCopy = FALSE;
        int             frame = pTS->CurFrame - p->FrameBegin;
        //float           t = p->FrameNum>1?((float)frame/(p->FrameNum-1)):0;
        CvTSTrans*      pTrans = p->pTrans + frame%p->TransNum;

        assert(pTrans);

        if( p->FrameNum > 0 && (frame < 0 || frame >= p->FrameNum) )
        {   /* Current frame is out of range: */
            //if(p->pAVI)cvReleaseCapture(&p->pAVI);
            p->pAVI = NULL;
            continue;
        }

        cvZero(pImgAdd);
        cvZero(pImgAddG);
        cvZero(pImgMaskAdd);

        if(p->noise_type == CV_NOISE_NONE)
        {   /* For not noise:  */
            /* Get next frame: */
            icvTestSeqQureyFrameElem(p, frame);
            if(p->pImg == NULL) continue;

#if 1 /* transform using T filed in Trans */
            {   /* Calculate transform matrix: */
                float   W = (float)(pImgAdd->width-1);
                float   H = (float)(pImgAdd->height-1);
                float   W0 = (float)(p->pImg->width-1);
                float   H0 = (float)(p->pImg->height-1);
                cvZero(pT);
                {   /* Calcualte inverse matrix: */
                    CvMat   mat = cvMat(2,3,CV_32F, pTrans->T);
                    mat.width--;
                    pT->width--;
                    cvInvert(&mat, pT);
                    pT->width++;
                }

                CV_MAT_ELEM(pT[0], float, 0, 2) =
                    CV_MAT_ELEM(pT[0], float, 0, 0)*(W0/2-pTrans->T[2])+
                    CV_MAT_ELEM(pT[0], float, 0, 1)*(H0/2-pTrans->T[5]);

                CV_MAT_ELEM(pT[0], float, 1, 2) =
                    CV_MAT_ELEM(pT[0], float, 1, 0)*(W0/2-pTrans->T[2])+
                    CV_MAT_ELEM(pT[0], float, 1, 1)*(H0/2-pTrans->T[5]);

                CV_MAT_ELEM(pT[0], float, 0, 0) *= W0/W;
                CV_MAT_ELEM(pT[0], float, 0, 1) *= H0/H;
                CV_MAT_ELEM(pT[0], float, 1, 0) *= W0/W;
                CV_MAT_ELEM(pT[0], float, 1, 1) *= H0/H;

            }   /* Calculate transform matrix. */
#else
            {   /* Calculate transform matrix: */
                float   SX = (float)(p->pImg->width-1)/((pImgAdd->width-1)*pTrans->Scale.x);
                float   SY = (float)(p->pImg->height-1)/((pImgAdd->height-1)*pTrans->Scale.y);
                float   DX = pTrans->Shift.x;
                float   DY = pTrans->Shift.y;;
                cvZero(pT);
                ((float*)(pT->data.ptr+pT->step*0))[0]=SX;
                ((float*)(pT->data.ptr+pT->step*1))[1]=SY;
                ((float*)(pT->data.ptr+pT->step*0))[2]=SX*(pImgAdd->width-1)*(0.5f-DX);
                ((float*)(pT->data.ptr+pT->step*1))[2]=SY*(pImgAdd->height-1)*(0.5f-DY);
            }   /* Calculate transform matrix. */
#endif


            {   /* Check for direct copy: */
                DirectCopy = TRUE;
                if( fabs(CV_MAT_ELEM(pT[0],float,0,0)-1) > 0.00001) DirectCopy = FALSE;
                if( fabs(CV_MAT_ELEM(pT[0],float,1,0)) > 0.00001) DirectCopy = FALSE;
                if( fabs(CV_MAT_ELEM(pT[0],float,0,1)) > 0.00001) DirectCopy = FALSE;
                if( fabs(CV_MAT_ELEM(pT[0],float,0,1)) > 0.00001) DirectCopy = FALSE;
                if( fabs(CV_MAT_ELEM(pT[0],float,0,2)-(pImg->width-1)*0.5) > 0.5) DirectCopy = FALSE;
                if( fabs(CV_MAT_ELEM(pT[0],float,1,2)-(pImg->height-1)*0.5) > 0.5) DirectCopy = FALSE;
            }

            /* Extract image and mask: */
            if(p->pImg->nChannels == 1)
            {
                if(DirectCopy)
                {
                    cvCvtColor( p->pImg,pImgAdd,CV_GRAY2BGR);
                }
                else
                {
                    cvGetQuadrangleSubPix( p->pImg, pImgAddG, pT);
                    cvCvtColor( pImgAddG,pImgAdd,CV_GRAY2BGR);
                }
            }

            if(p->pImg->nChannels == 3)
            {
                if(DirectCopy)
                    cvCopyImage(p->pImg, pImgAdd);
                else
                    cvGetQuadrangleSubPix( p->pImg, pImgAdd, pT);
            }

            if(p->pImgMask)
            {
                if(DirectCopy)
                    cvCopyImage(p->pImgMask, pImgMaskAdd);
                else
                    cvGetQuadrangleSubPix( p->pImgMask, pImgMaskAdd, pT);

                cvThreshold(pImgMaskAdd,pImgMaskAdd,128,255,CV_THRESH_BINARY);
            }

            if(pTrans->C != 1 || pTrans->I != 0)
            {   /* Intensity transformation: */
                cvScale(pImgAdd, pImgAdd, pTrans->C,pTrans->I);
            }   /* Intensity transformation: */

            if(pTrans->GN > 0)
            {   /* Add noise: */
                IplImage* pImgN = cvCloneImage(pImgAdd);
                cvRandSetRange( &p->rnd_state, pTrans->GN, 0, -1 );
                cvRand(&p->rnd_state, pImgN);
                cvAdd(pImgN,pImgAdd,pImgAdd);
                cvReleaseImage(&pImgN);
            }   /* Add noise. */

            if(p->Mask)
            {   /* Update only mask: */
                cvOr(pImgMaskAdd, pImgMask, pImgMask);
            }
            else
            {   /* Add image and mask to exist main image and mask: */
                if(p->BG)
                {   /* If image is background: */
                    cvCopy( pImgAdd, pImg, NULL);
                }
                else
                {   /* If image is foreground: */
                    cvCopy( pImgAdd, pImg, pImgMaskAdd);
                    if(p->ObjID>=0)
                        cvOr(pImgMaskAdd, pImgMask, pImgMask);
                }
            }   /* Not mask. */
        }   /*  For not noise. */
        else
        {   /* Process noise video: */

            if( p->noise_type == CV_NOISE_GAUSSIAN ||
示例#2
0
/* Build the log-magnitude Fourier spectrum of `im`, quadrant-shifted so the
 * DC component sits at the image center.
 *
 * Returns a newly allocated IPL_DEPTH_64F single-channel image of size
 * (dft_N x dft_M); ownership transfers to the caller, who must release it.
 *
 * NOTE(review): rb_cvCreateImage / rb_cvCreateMat are presumably project
 * wrappers around cvCreateImage / cvCreateMat (Ruby-binding allocation
 * tracking?) — confirm they have the same ownership semantics, since the
 * intermediates are released with plain cvReleaseImage/cvReleaseMat below.
 */
IplImage*
create_fourier_image(const IplImage *im)
{

  IplImage *realInput;
  IplImage *imaginaryInput;
  IplImage *complexInput;
  int dft_M, dft_N;
  CvMat *dft_A, tmp;
  IplImage *image_Re;
  IplImage *image_Im;

  /* Promote the input to 64F and pack it as the real plane of a
   * 2-channel (complex) image with a zero imaginary plane. */
  realInput = rb_cvCreateImage( cvGetSize(im), IPL_DEPTH_64F, 1);
  imaginaryInput = rb_cvCreateImage( cvGetSize(im), IPL_DEPTH_64F, 1);
  complexInput = rb_cvCreateImage( cvGetSize(im), IPL_DEPTH_64F, 2);

  cvScale(im, realInput, 1.0, 0.0);
  cvZero(imaginaryInput);
  cvMerge(realInput, imaginaryInput, NULL, NULL, complexInput);

  /* Pad to DFT-friendly sizes (products of small primes).
   * NOTE(review): the `- 1` follows OpenCV's classic dft sample, but
   * cvGetOptimalDFTSize(h - 1) can return h - 1 itself, which would be
   * smaller than the image and make the cvGetSubRect below fail — confirm
   * input sizes make this impossible here. */
  dft_M = cvGetOptimalDFTSize( im->height - 1 );
  dft_N = cvGetOptimalDFTSize( im->width - 1 );

  dft_A = rb_cvCreateMat( dft_M, dft_N, CV_64FC2 );
  image_Re = rb_cvCreateImage( cvSize(dft_N, dft_M), IPL_DEPTH_64F, 1);
  image_Im = rb_cvCreateImage( cvSize(dft_N, dft_M), IPL_DEPTH_64F, 1);

  // copy A to dft_A and pad dft_A with zeros
  cvGetSubRect( dft_A, &tmp, cvRect(0,0, im->width, im->height));
  cvCopy( complexInput, &tmp, NULL );
  if( dft_A->cols > im->width )
  {
    cvGetSubRect( dft_A, &tmp, cvRect(im->width,0, dft_A->cols - im->width, im->height));
    cvZero( &tmp );
  }

  // no need to pad bottom part of dft_A with zeros because of
  // use nonzero_rows parameter in cvDFT() call below

  cvDFT( dft_A, dft_A, CV_DXT_FORWARD, complexInput->height );

  // Split Fourier in real and imaginary parts
  cvSplit( dft_A, image_Re, image_Im, 0, 0 );

  // Compute the magnitude of the spectrum Mag = sqrt(Re^2 + Im^2)
  cvPow( image_Re, image_Re, 2.0);
  cvPow( image_Im, image_Im, 2.0);
  cvAdd( image_Re, image_Im, image_Re, NULL);
  cvPow( image_Re, image_Re, 0.5 );

  // Compute log(1 + Mag) to compress the spectrum's dynamic range
  cvAddS( image_Re, cvScalarAll(1.0), image_Re, NULL ); // 1 + Mag
  cvLog( image_Re, image_Re ); // log(1 + Mag)

  // Rearrange the quadrants of Fourier image so that the origin is at
  // the image center
  cvShiftDFT( image_Re, image_Re );

  /* Release everything except image_Re, which is the return value. */
  cvReleaseImage(&realInput);
  cvReleaseImage(&imaginaryInput);
  cvReleaseImage(&complexInput);
  cvReleaseImage(&image_Im);

  cvReleaseMat(&dft_A);

  return image_Re;

}
示例#3
0
/*
 * Pop one frame from the input port, optionally apply exactly one
 * frequency-domain filter (ideal / Butterworth / Gaussian, low- or
 * high-pass) via a forward/inverse DFT, and push the result to the
 * output port.
 *
 * Returns OPROS_SUCCESS, or OPROS_BAD_INPUT_PARAMETER when the frame is
 * not square or its sides are not powers of two (the DFT path requires
 * power-of-two, square input).
 *
 * Fixes vs. the previous revision:
 *  - magnitude computation added Re^2 to itself instead of Re^2 + Im^2;
 *  - `pData` leaked on both early-return error paths;
 *  - garbled error message; removed unused local `mdata`.
 */
ReturnType FrequencyFilter::onExecute()
{
    // Fetch the next frame from the input port; NULL means nothing pending.
    opros_any *pData = ImageIn.pop();
    RawImage result;

    if(pData != NULL) {

        // Unpack the raw image carried by the port payload.
        RawImage Image = ImageIn.getContent(*pData);
        RawImageData *RawImage = Image.getImage();

        // Dimensions of the current frame.
        m_in_width = RawImage->getWidth();
        m_in_height = RawImage->getHeight();

        // The DFT pipeline below requires power-of-two dimensions.
        if(!Check2Square(m_in_width) || !Check2Square(m_in_height)) {

            std::cout << "This image's sides are not powers of 2" << std::endl;

            delete pData;   // was leaked on this early-return path
            return OPROS_BAD_INPUT_PARAMETER;

        }

        // ...and a square frame.
        if(m_in_width != m_in_height) {

            std::cout << "Size(width and height) of Image is not equal" << std::endl;

            delete pData;   // was leaked on this early-return path
            return OPROS_BAD_INPUT_PARAMETER;

        }

        // Lazily allocate all working buffers on the first frame.
        // Original BGR frame:
        if(m_orig_img == NULL) {
            m_orig_img = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_8U, 3);
        }
        // Grayscale copy:
        if(m_gray_img == NULL) {
            m_gray_img = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_8U, 1);
        }
        // Filtered (single-channel) result:
        if(m_result_img == NULL) {
            m_result_img = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_8U, 1);
        }
        // 3-channel output frame:
        if(m_final_img == NULL) {
            m_final_img = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_8U, 3);
        }
        // Real plane of the spectrum:
        if(m_image_Re == NULL) {
            m_image_Re = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_32F, 1);
        }
        // Imaginary plane of the spectrum:
        if(m_image_Im == NULL) {
            m_image_Im = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_32F, 1);
        }
        // Packed 2-channel complex image for cvDFT:
        if(m_pDFT_A == NULL) {
            m_pDFT_A = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_32F, 2);
        }

        // Copy the incoming pixels into the working frame.
        memcpy(m_orig_img->imageData, RawImage->getData(), RawImage->getSize());

        // Filter only when exactly one of low-pass / high-pass is selected
        // (at least one is not "none", and at least one IS "none").
        if((m_low_Pass_Filtering != "none" || m_high_Pass_Filtering != "none") &&
                (m_low_Pass_Filtering == "none" || m_high_Pass_Filtering == "none")) {

            // Grayscale, then promote to 32F for the frequency domain.
            cvCvtColor( m_orig_img, m_gray_img, CV_BGR2GRAY );
            cvConvertScale(m_gray_img, m_image_Re);		// 8U -> 32F
            cvZero(m_image_Im);

            // Flip the sign of odd pixels so the DFT origin lands at the
            // image center (standard spectrum-centering trick).
            ChangePosition(m_image_Re);

            cvMerge(m_image_Re, m_image_Im, NULL, NULL, m_pDFT_A);

            // Forward DFT.
            cvDFT(m_pDFT_A, m_pDFT_A, CV_DXT_FORWARD);

            // Apply the single selected filter to the spectrum in place.
            if(m_low_Pass_Filtering == "ideal" && m_high_Pass_Filtering == "none") {

                IdealLowPassFiltering(m_pDFT_A, m_cutoff_Frequency);

            }
            else if(m_low_Pass_Filtering == "butterworth" && m_high_Pass_Filtering == "none") {

                ButterworthLowPassFiltering(m_pDFT_A, m_cutoff_Frequency, 2);

            }
            else if(m_low_Pass_Filtering == "gaussian" && m_high_Pass_Filtering == "none") {

                GaussianLowPassFiltering(m_pDFT_A, m_cutoff_Frequency);

            }
            else if(m_high_Pass_Filtering == "ideal" && m_low_Pass_Filtering == "none") {

                IdealHighPassFiltering(m_pDFT_A, m_cutoff_Frequency);

            }
            else if(m_high_Pass_Filtering == "butterworth" && m_low_Pass_Filtering == "none") {

                ButterworthHighPassFiltering(m_pDFT_A, m_cutoff_Frequency, 2);

            }
            else if(m_high_Pass_Filtering == "gaussian" && m_low_Pass_Filtering == "none") {

                GaussianHighpassFiltering(m_pDFT_A, m_cutoff_Frequency);

            }
            else {
                // both "none"-combinations handled by the outer condition
            }

            // Inverse DFT (scaled).
            cvDFT(m_pDFT_A, m_pDFT_A, CV_DXT_INV_SCALE);

            // Split the complex result back into Re / Im planes.
            cvSplit(m_pDFT_A, m_image_Re, m_image_Im, NULL, NULL);

            // Low-pass: undo the centering sign flip and take Re directly.
            if((m_low_Pass_Filtering == "ideal" || m_low_Pass_Filtering == "butterworth" || m_low_Pass_Filtering == "gaussian")
                    && m_high_Pass_Filtering == "none") {
                ChangePosition(m_image_Re);
                cvScale(m_image_Re, m_result_img, 1);
            }

            // High-pass: visualize the magnitude Mag = sqrt(Re^2 + Im^2),
            // scaled into [0, 255].
            if((m_high_Pass_Filtering == "ideal" || m_high_Pass_Filtering == "butterworth" || m_high_Pass_Filtering == "gaussian")
                    && m_low_Pass_Filtering == "none") {

                cvPow(m_image_Re, m_image_Re, 2.0);
                cvPow(m_image_Im, m_image_Im, 2.0);
                // BUG FIX: previously added m_image_Re to itself, discarding
                // the imaginary part; magnitude requires Re^2 + Im^2.
                cvAdd(m_image_Re, m_image_Im, m_image_Re);
                cvPow(m_image_Re, m_image_Re, 0.5);

                double min_val, max_val;
                cvMinMaxLoc(m_image_Re, &min_val, &max_val, NULL, NULL);
                cvScale(m_image_Re, m_result_img, 255.0/max_val);
            }

            // Expand the 1-channel result into the 3-channel output frame.
            cvMerge(m_result_img, m_result_img, m_result_img, NULL, m_final_img);

            // Push the filtered frame to the output port.
            RawImageData *pimage = result.getImage();

            // Resize the payload to the output frame's geometry.
            pimage->resize(m_final_img->width, m_final_img->height, m_final_img->nChannels);

            // Total byte count of the output frame.
            int size = m_final_img->width * m_final_img->height * m_final_img->nChannels;

            // Copy the pixels into the payload buffer.
            unsigned char *ptrdata = pimage->getData();
            memcpy(ptrdata, m_final_img->imageData, size);

            ImageOut.push(result);

            delete pData;

        } else {

            // No (or ambiguous) filter selection: pass the original frame
            // through unchanged.
            RawImageData *pimage = result.getImage();

            pimage->resize(m_orig_img->width, m_orig_img->height, m_orig_img->nChannels);

            int size = m_orig_img->width * m_orig_img->height * m_orig_img->nChannels;

            unsigned char *ptrdata = pimage->getData();
            memcpy(ptrdata, m_orig_img->imageData, size);

            ImageOut.push(result);

            delete pData;

        }

    }

    return OPROS_SUCCESS;
}
示例#4
0
/* Reduce a 2D array to a single row (dim == 0) or single column (dim == 1)
 * by summing, taking the maximum, or taking the minimum along that
 * dimension (op = CV_REDUCE_SUM / MAX / MIN / AVG).
 *
 * dim < 0 auto-detects the reduced dimension from the dst shape.
 * CV_REDUCE_AVG is implemented as CV_REDUCE_SUM into a temporary of a
 * wider type (32S for 8U input), then scaled by 1/N into the caller's dst.
 * Errors are reported through the CV_ERROR/__BEGIN__/__END__ macro
 * machinery; the temporary is released after __END__ on every path.
 */
CV_IMPL void
cvReduce( const CvArr* srcarr, CvArr* dstarr, int dim, int op )
{
    CvMat* temp = 0;
    
    CV_FUNCNAME( "cvReduce" );

    __BEGIN__;

    CvMat sstub, *src = (CvMat*)srcarr;
    CvMat dstub, *dst = (CvMat*)dstarr, *dst0;
    int sdepth, ddepth, cn, op0 = op;
    CvSize size;

    /* Accept IplImage/CvMatND inputs by wrapping them as CvMat headers. */
    if( !CV_IS_MAT(src) )
        CV_CALL( src = cvGetMat( src, &sstub ));

    if( !CV_IS_MAT(dst) )
        CV_CALL( dst = cvGetMat( dst, &dstub ));

    if( !CV_ARE_CNS_EQ(src, dst) )
        CV_ERROR( CV_StsUnmatchedFormats, "Input and output arrays must have the same number of channels" );

    sdepth = CV_MAT_DEPTH(src->type);
    ddepth = CV_MAT_DEPTH(dst->type);
    cn = CV_MAT_CN(src->type);
    dst0 = dst;  /* remember the caller's dst; dst may be retargeted to temp below */

    size = cvGetMatSize(src);

    /* Auto-detect the reduced dimension: whichever extent shrinks;
     * when shapes tie, a single-column dst means dim 1, else dim 0. */
    if( dim < 0 )
        dim = src->rows > dst->rows ? 0 : src->cols > dst->cols ? 1 : dst->cols == 1;

    if( dim > 1 )
        CV_ERROR( CV_StsOutOfRange, "The reduced dimensionality index is out of range" );

    /* dst must be exactly one row (dim 0) or one column (dim 1) of
     * matching extent.  (Relies on && binding tighter than ||.) */
    if( dim == 0 && (dst->cols != src->cols || dst->rows != 1) ||
        dim == 1 && (dst->rows != src->rows || dst->cols != 1) )
        CV_ERROR( CV_StsBadSize, "The output array size is incorrect" );

    /* AVG = SUM into a sufficiently wide accumulator, then scale at the end. */
    if( op == CV_REDUCE_AVG )
    {
        int ttype = sdepth == CV_8U ? CV_MAKETYPE(CV_32S,cn) : dst->type;
        if( ttype != dst->type )
            CV_CALL( dst = temp = cvCreateMat( dst->rows, dst->cols, ttype ));
        op = CV_REDUCE_SUM;
        ddepth = CV_MAT_DEPTH(ttype);
    }

    if( op != CV_REDUCE_SUM && op != CV_REDUCE_MAX && op != CV_REDUCE_MIN )
        CV_ERROR( CV_StsBadArg, "Unknown reduce operation index, must be one of CV_REDUCE_*" );

    if( dim == 0 )
    {
        /* Reduce to a single row: pick the low-level kernel for the
         * (op, sdepth, ddepth) combination; 0 means unsupported. */
        CvReduceToRowFunc rfunc =
            op == CV_REDUCE_SUM ?
            (sdepth == CV_8U && ddepth == CV_32S ? (CvReduceToRowFunc)icvSumRows_8u32s_C1R :
             sdepth == CV_8U && ddepth == CV_32F ? (CvReduceToRowFunc)icvSumRows_8u32f_C1R :
             sdepth == CV_16U && ddepth == CV_32F ? (CvReduceToRowFunc)icvSumRows_16u32f_C1R :
             sdepth == CV_16U && ddepth == CV_64F ? (CvReduceToRowFunc)icvSumRows_16u64f_C1R :
             sdepth == CV_16S && ddepth == CV_32F ? (CvReduceToRowFunc)icvSumRows_16s32f_C1R :
             sdepth == CV_16S && ddepth == CV_64F ? (CvReduceToRowFunc)icvSumRows_16s64f_C1R :
             sdepth == CV_32F && ddepth == CV_32F ? (CvReduceToRowFunc)icvSumRows_32f_C1R :
             sdepth == CV_32F && ddepth == CV_64F ? (CvReduceToRowFunc)icvSumRows_32f64f_C1R :        
             sdepth == CV_64F && ddepth == CV_64F ? (CvReduceToRowFunc)icvSumRows_64f_C1R : 0) :
            op == CV_REDUCE_MAX ?
            (sdepth == CV_8U && ddepth == CV_8U ? (CvReduceToRowFunc)icvMaxRows_8u_C1R :
             sdepth == CV_32F && ddepth == CV_32F ? (CvReduceToRowFunc)icvMaxRows_32f_C1R :
             sdepth == CV_64F && ddepth == CV_64F ? (CvReduceToRowFunc)icvMaxRows_64f_C1R : 0) :

            (sdepth == CV_8U && ddepth == CV_8U ? (CvReduceToRowFunc)icvMinRows_8u_C1R :
             sdepth == CV_32F && ddepth == CV_32F ? (CvReduceToRowFunc)icvMinRows_32f_C1R :
             sdepth == CV_64F && ddepth == CV_64F ? (CvReduceToRowFunc)icvMinRows_64f_C1R : 0);

        if( !rfunc )
            CV_ERROR( CV_StsUnsupportedFormat,
            "Unsupported combination of input and output array formats" );

        /* Row kernels are C1R: treat each channel as a separate column. */
        size.width *= cn;
        IPPI_CALL( rfunc( src->data.ptr, src->step ? src->step : CV_STUB_STEP,
                          dst->data.ptr, size ));
    }
    else
    {
        /* Reduce to a single column: per-(op, depth, cn) kernel dispatch;
         * MAX/MIN support single-channel only. */
        CvReduceToColFunc cfunc =
            op == CV_REDUCE_SUM ?
            (sdepth == CV_8U && ddepth == CV_32S ?
            (CvReduceToColFunc)(cn == 1 ? icvSumCols_8u32s_C1R :
                                cn == 3 ? icvSumCols_8u32s_C3R :
                                cn == 4 ? icvSumCols_8u32s_C4R : 0) :
             sdepth == CV_8U && ddepth == CV_32F ?
            (CvReduceToColFunc)(cn == 1 ? icvSumCols_8u32f_C1R :
                                cn == 3 ? icvSumCols_8u32f_C3R :
                                cn == 4 ? icvSumCols_8u32f_C4R : 0) :
             sdepth == CV_16U && ddepth == CV_32F ?
            (CvReduceToColFunc)(cn == 1 ? icvSumCols_16u32f_C1R : 0) :
             sdepth == CV_16U && ddepth == CV_64F ?
            (CvReduceToColFunc)(cn == 1 ? icvSumCols_16u64f_C1R : 0) :
             sdepth == CV_16S && ddepth == CV_32F ?
            (CvReduceToColFunc)(cn == 1 ? icvSumCols_16s32f_C1R : 0) :
             sdepth == CV_16S && ddepth == CV_64F ?
            (CvReduceToColFunc)(cn == 1 ? icvSumCols_16s64f_C1R : 0) :
             sdepth == CV_32F && ddepth == CV_32F ?
            (CvReduceToColFunc)(cn == 1 ? icvSumCols_32f_C1R :
                                cn == 3 ? icvSumCols_32f_C3R :
                                cn == 4 ? icvSumCols_32f_C4R : 0) :
             sdepth == CV_32F && ddepth == CV_64F ?
            (CvReduceToColFunc)(cn == 1 ? icvSumCols_32f64f_C1R : 0) :
             sdepth == CV_64F && ddepth == CV_64F ?
            (CvReduceToColFunc)(cn == 1 ? icvSumCols_64f_C1R :
                                cn == 3 ? icvSumCols_64f_C3R :
                                cn == 4 ? icvSumCols_64f_C4R : 0) : 0) :
             op == CV_REDUCE_MAX && cn == 1 ?
             (sdepth == CV_8U && ddepth == CV_8U ? (CvReduceToColFunc)icvMaxCols_8u_C1R :
              sdepth == CV_32F && ddepth == CV_32F ? (CvReduceToColFunc)icvMaxCols_32f_C1R :
              sdepth == CV_64F && ddepth == CV_64F ? (CvReduceToColFunc)icvMaxCols_64f_C1R : 0) :
             op == CV_REDUCE_MIN && cn == 1 ?
             (sdepth == CV_8U && ddepth == CV_8U ? (CvReduceToColFunc)icvMinCols_8u_C1R :
              sdepth == CV_32F && ddepth == CV_32F ? (CvReduceToColFunc)icvMinCols_32f_C1R :
              sdepth == CV_64F && ddepth == CV_64F ? (CvReduceToColFunc)icvMinCols_64f_C1R : 0) : 0;

        if( !cfunc )
            CV_ERROR( CV_StsUnsupportedFormat,
            "Unsupported combination of input and output array formats" );

        IPPI_CALL( cfunc( src->data.ptr, src->step ? src->step : CV_STUB_STEP,
                          dst->data.ptr, dst->step ? dst->step : CV_STUB_STEP, size ));
    }

    /* AVG: divide the accumulated sums by the number of reduced elements,
     * writing into the caller's original dst. */
    if( op0 == CV_REDUCE_AVG )
        cvScale( dst, dst0, 1./(dim == 0 ? src->rows : src->cols) );

    __END__;

    if( temp )
        cvReleaseMat( &temp );
}
示例#5
0
/*
 * Iris-recognition demo driver: capture a frame, locate and normalize the
 * iris, apply a Gabor wavelet pair, generate an iris code, and compare it
 * against a stored code via Hamming distance.
 *
 * Relies on file-scope globals declared elsewhere in this file (src, dst,
 * smooth, normalized, code, kernel_real, ...).
 *
 * Fix: `void main()` is not a standard signature — the C/C++ standards
 * require `int main`; now returns 0 on success.
 */
int main(void)
{
    Gabor gabor;

    // Grab the working frame (fills the global `src`).
    run_camera();

    cvCvtColor( src, grayscale, CV_RGB2GRAY );
    cvCopy(src,dst);
    Smooth(src,smooth);
    //cvShowImage( "smooth",smooth);

    // Locate pupil/iris boundaries (fills xp, yp, rp, xi, yi, ri).
    process_image();

    // NOTE(review): the documented prototype is
    //   normalize(float xp, float xi, float yp, float yi, float rp, float ri, IplImage *src)
    // but the call passes (xp, yp, rp, xi, yi, ri) — the argument order does
    // not match the comment; confirm against the actual declaration.
    normalize(xp, yp, rp, xi, yi,ri ,src);

    cvCvtColor(normalized,greynormalized,CV_RGB2GRAY);
    //cvShowImage("greynormalized",greynormalized);

    cvEqualizeHist(greynormalized,eqnormalized);
    //cvShowImage("eqnormalized",eqnormalized);

    // Alternative Gabor-kernel constructors kept for experimentation:
    //1- create_gabor_kernel_v1( float sig, float thet, float lm, float gamma, float ps, float bw)
    //gabor.create_gabor_kernel_v1(0,45,8,1,0,1);   //sigma = 1.6 , gamma = 1 for kernel of size 9
    //2- create_gabor_kernel_v2(int kernel_size, float sig, float thet, float lm, float gamma, float ps);
    //gabor.create_gabor_kernel_v2(21,5,45,10,1,90);
    //3- create_gabor_kernel_v3(int kernel_size, float sig, float thet, float freq, float gamma, float ps)
    //gabor.create_gabor_kernel_v3(9,2,0,0.2,1,0);
    //4- create_gabor_kernel_v4(int kernel_size, double sigma, double theta, double lambd, double gamma, double psi, int ktype);
    //gabor.create_gabor_kernel_v4(9,2,0,35,1,0,CV_32F);
    //5- create_gabor_kernel_v5(int kernel_size, double sigma, double theta, double freq, double gamma)
    //gabor.create_gabor_kernel_v5(9,2,0,0.25,1);

    // Convolve the normalized iris strip with the real/imaginary Gabor
    // kernels (kernels prepared elsewhere).
    cvFilter2D(eqnormalized,real_gabor, kernel_real);
    cvShowImage("real wavelet",real_gabor);

    cvFilter2D(eqnormalized,imag_gabor, kernel_imag);
    cvShowImage("imag wavelet",imag_gabor);

    //cvFilter2D(eqnormalized,mag_gabor, kernel_mag);
    //cvShowImage("mag wavelet",mag_gabor);

    //quantize_gabor_image_real();
    //quantize_gabor_image_imag();

    // Build the binary iris code from the filter responses.
    generate_code();

    //save_image_code("code_4.bmp",code);       //scales code before saving
    //display_image_values(0,kernel_real);     // FOR DEBUGING PURPOSES
    //display_image_values(code,0);            // FOR DEBUGING PURPOSES

    // Match against the stored reference code.
    float HD =compare_code(code,"code_4.bmp");
    printf("Hamming Distance = %1.2f",HD);

    // Scale the binary code to [0,255] for display.
    cvScale(code,scaled_code,255);
    cvShowImage("code",scaled_code);

    cvNamedWindow( "Source");
    cvShowImage( "Source",dst);

    //printf ( "xp:%f ,xi:%f, yp:%f, yi:%f, rp:%f, ri:%f",xp,xi,yp,yi,rp,ri);

    cvWaitKey();

    // Release every global image allocated by the pipeline.
    cvReleaseImage(&dst);
    cvReleaseImage(&smooth);
    cvReleaseImage(&pupil);
    cvReleaseImage(&iris);
    cvReleaseImage(&pedge);
    cvReleaseImage(&iedge);
    cvReleaseImage(&src);

    cvReleaseImage(&eqnormalized);
    cvReleaseImage(&greynormalized);
    cvReleaseImage(&normalized);
    cvReleaseImage(&real_gabor);
    cvReleaseImage(&imag_gabor);
    cvReleaseImage(&mag_gabor);
    //cvReleaseImage(&phase_gabor);
    cvReleaseImage(&quantized_real);
    cvReleaseImage(&quantized_imag);
    cvReleaseImage(&code);

    return 0;
}
/* Content-aware ("conformal") resize of img8U3C to (w x h) via a constrained
 * least-squares mesh deformation.
 *
 * img8U3C is passed by reference and is MODIFIED in place (normalized to a
 * multiple of meshQuadSize).  Returns a newly allocated (w x h) 8U 3-channel
 * image; ownership transfers to the caller.
 *
 * meshQuadSize is the side length, in pixels, of each mesh quad.
 */
IplImage* ConformalResizing::Resize(IplImage*& img8U3C, int h, int w, int meshQuadSize /* = 10 */)
{
	//fix me
	// Round the target width down to a multiple of 4 (presumably an image
	// row-alignment requirement in Warpping — TODO confirm).
	if(w%4!=0)
		w = w-(w%4);
	// Normalize image: build a grayscale 32F copy scaled to ~[0,1] and pad
	// both images to multiples of meshQuadSize.
	// NOTE(review): the 1.0/250.0 scale looks like it was meant to be
	// 1.0/255.0 — confirm whether the slight over-unity range is intended.
	IplImage* srcImg32F = cvCreateImage(cvGetSize(img8U3C), IPL_DEPTH_32F, 1);
	{
		IplImage* imgG = cvCreateImage(cvGetSize(img8U3C), img8U3C->depth, 1);
		cvCvtColor(img8U3C, imgG, CV_BGR2GRAY);
		cvScale(imgG, srcImg32F, 1.0/250.0);
		cvReleaseImage(&imgG);
		NormalizeImg(srcImg32F, meshQuadSize);
		NormalizeImg(img8U3C, meshQuadSize);
	}

	// Get constrain units: quads, 5-point quads and edge units, plus the
	// initial mesh-node positions (ppos) and the equation-system size (szA).
	vector<double> ppos;
	CvSize szGrid = cvSize(srcImg32F->width / meshQuadSize, srcImg32F->height / meshQuadSize); // Number of quads 
	vector<ConstrainUnits> quads; // Quads units
	vector<ConstrainUnits> qaud5s;  // Constrain unit of qaud5s
	vector<ConstrainUnits> edges; // Constrain unit of edges
	CvSize szA = GetConstrainUnits(srcImg32F, img8U3C, szGrid, quads, qaud5s, edges, ppos, meshQuadSize);

	// Least square problem of min |A*X|^2 with boundary conditions X(ind) = val
	SparseMat A(0, szA.width);
	vector<double> val;   // boundary-condition values
	vector<int> ind;      // indices of constrained variables
	vector<double> X;     // solution: deformed node positions
	ind.reserve((szA.width+szA.height)*3);
	val.reserve(ind.size());

	//for(int i=0;i<10;i++)
	//{
	//	A.Clear();
	//	A.n=szA.width;
	//	ind.clear();
	//	val.clear();
	//	BuildConstrainsEquation(quads, qaud5s, edges, szGrid, cvSize(w, h), A, ind, val);
	//}
	BuildConstrainsEquation(quads, qaud5s, edges, szGrid, cvSize(w, h), A, ind, val);
	//init values: scale the original node positions to the target size as
	// the solver's starting point (ppos interleaves x,y per node).
	double xratio=double(w)/srcImg32F->width;
	double yratio=double(h)/srcImg32F->height;
	for(int i=0;i<szA.width;i+=2)
	{
		ppos[i] *= xratio;
		ppos[i+1] *= yratio;
	}
	CmAssert(A.m == szA.height);

	cout << "quads.size=" << quads.size() << endl;
	cout << "quad5s.size=" << qaud5s.size() << endl;
	cout << "edges.size=" << edges.size() << endl;
	cout << "A.m=" << A.m << " A.n=" <<A.n<< endl;

	// Solve least square problem with boundary condition
	
	SolveConstrainedLeastSquare(A, ind, val, X, ppos);

	// Re-attach the solved positions to copies of the constraint units so
	// the deformed mesh can be visualized.
	vector<ConstrainUnits> newQuads = quads;
	vector<ConstrainUnits> newqaud5s = qaud5s;
	vector<ConstrainUnits> newEdges = edges;

	RefreshPositions(newQuads, X);
	RefreshPositions(newqaud5s, X);
	RefreshPositions(newEdges, X);

	//SaveConstrainUnits(newQuads, "quads.txt");
	//SaveConstrainUnits(newQuads, "qaud5s.txt");
	//SaveConstrainUnits(newQuads, "edges.txt");

	// Warp the source through the deformed mesh; fall back to an empty
	// image if warping fails so the caller always gets a valid image.
	IplImage* dstImg = Warpping(img8U3C, meshQuadSize, X, cvSize(w, h));
	if (dstImg == NULL)
	{
		dstImg = cvCreateImage(cvSize(w, h), IPL_DEPTH_8U, 3);
	}
	ShowConstrains(dstImg, newQuads, newqaud5s, newEdges, "Reshaped constrains", 1, FileNames::dstMeshName);
	cvReleaseImage(&srcImg32F);
	return dstImg;
}
// Builds the three kinds of constraint units used by the conformal-resizing
// solver and returns the size of the linear constraint system:
//   quads  - ordinary 4-node mesh quads,
//   qaud5s - mesh quads that also contain one salient edge point (5 nodes),
//   edges  - groups of edge points belonging to the same detected line
//            (only lines covering >= 3 grid cells are kept).
// ppos receives the initial (x, y) coordinates of all variables: first the
// (szGrid.height+1)*(szGrid.width+1) regular mesh nodes, then one pair per
// accepted edge point.  The returned CvSize is
//   width  = 2 * number of variables,
//   height = 8 * #quads + 10 * #qaud5s + 2 * #edge points.
CvSize ConformalResizing::GetConstrainUnits(const IplImage* srcImg32F,  
										 const IplImage* img8U3C,
										 const CvSize szGrid, 
										 vector<ConstrainUnits>& quads, 
										 vector<ConstrainUnits>& qaud5s,
										 vector<ConstrainUnits>& edges,
										 vector<double>& ppos,/*added 2009.08.16*/
										 int meshQuadSize)
{

	// Get importance map
	//IplImage* impImg32F = cvCreateImage(cvGetSize(srcImg32F), IPL_DEPTH_32F, 1);
	//cvScale(srcImg32F, impImg32F);

	// Prefer an importance map loaded from disk when a file name is set.
	IplImage* impImg32F = NULL;
	if (strlen(FileNames::impName) > 0)
	{
		IplImage* impMap = cvLoadImage(FileNames::impName, CV_LOAD_IMAGE_GRAYSCALE);
		//if (impMap != NULL)
		{
			NormalizeImg(impMap, meshQuadSize);
			impImg32F = cvCreateImage(cvGetSize(impMap), IPL_DEPTH_32F, 1);
			cvScale(impMap, impImg32F, 1/255.0);	// map 8-bit weights to [0,1]
			cvReleaseImage(&impMap);
			cvNamedWindow("Importance");
			cvShowImage("Importance", impImg32F);
			cvAddS(impImg32F, cvScalarAll(gSet("minWeight")), impImg32F);
		}
	}

	// Otherwise compute the importance (energy) map from the color image.
	CmImportance imp;
	if (impImg32F == NULL)
	{
		double weights[5];
		weights[0] = gSet("edgeWeight");
		weights[1] = gSet("faceWeight");
		weights[2] = gSet("motionWeight");
		weights[3] = gSet("contrastWeight"); 
		weights[4] = gSet("minWeight");

		// NOTE(review): the returned buffer appears to be owned by `imp`
		// (the release at the end of this function is commented out) —
		// confirm ownership before adding a cvReleaseImage for this branch.
		impImg32F = imp.calcEnergy(img8U3C, weights);
		imp.showEnergy();
	}

	// 8-bit copy of the importance map (file saving currently disabled).
	{
		IplImage* impSave = cvCreateImage(cvGetSize(impImg32F), IPL_DEPTH_8U, 1);
		cvScale(impImg32F, impSave, 255);
		//cvSaveImage(FileNames::outImp, impSave);
		cvReleaseImage(&impSave);
	}

//#ifdef _DEBUG
//	cvSave("impd.xml", img8U3C, "impImg32F");
//#else
//	cvSave("imp.xml", img8U3C, "impImg32F");
//#endif // _DEBUG

	// Per-grid working arrays:
	//   pGridNode{X,Y}64F - coordinates of the regular mesh nodes,
	//   pGridPos          - chosen edge-point position inside each grid cell,
	//   pGridIdx          - variable index of each cell's edge point (-1 if none),
	//   pGridIdxE         - line/edge index of each cell's edge point (-1 if none).
	IplImage *pGridNodeX64F, *pGridNodeY64F;
	CmCvHelper::MeshGrid(pGridNodeX64F, pGridNodeY64F, 0, srcImg32F->width, 0, srcImg32F->height, meshQuadSize, meshQuadSize);
	double (*pGridPos)[2] = new double[szGrid.width * szGrid.height][2]; //Original edge point position within each grid. (x, y)
	int *pGridIdx = new int[szGrid.width * szGrid.height];  // Index of grid point variable
	int *pGridIdxE = new int[szGrid.width * szGrid.height]; // Index of edge contain this grid point
	typedef vector<pair<int, int>> EdgePos;  // Position of edge point in grid
	vector<EdgePos> edgePntPos;
	int varaInd = (szGrid.height + 1) * (szGrid.width + 1);
	// Seed ppos with the initial positions of the regular mesh nodes.
	/*added 2009.08.16*/
	{
		ppos.reserve(varaInd*2);
		for(int y=0;y<=szGrid.height;y++)
			for(int x=0;x<=szGrid.width;x++)
			{
				ppos.push_back(x*meshQuadSize);//x
				ppos.push_back(y*meshQuadSize);//y
			}
	}
	{
		//Get Edges
		const IplImage* pLineInd;
		vector<CEdge> edge;
		CDetectEdge detEdge(edge, gSet("Sigma"));
		detEdge.Initial(srcImg32F);
		detEdge.CalFirDer();
		detEdge.NoneMaximalSuppress((float)gSet("LinkEndBound"), (float)gSet("LinkStartBound"));
		// NOTE(review): every other settings access in this function uses
		// gSet("...") — confirm gSet["ShortRemoveBound"] (operator[]) is
		// intentional and equivalent.
		detEdge.Link(gSet["ShortRemoveBound"]);
		pLineInd = detEdge.LineIdx();

		int* pTmp = pGridIdx;  // Borrow memory inside
		// 0xff bytes == -1 for every int entry.
		memset(pTmp, 0xff, szGrid.width * szGrid.height * sizeof(int));
		memset(pGridIdxE, 0xff, szGrid.width * szGrid.height * sizeof(int));

		// For each grid cell keep the edge pixel farthest from the cell
		// borders (dx = min distance to any border), provided that distance
		// exceeds minEdgeRatio * meshQuadSize.
		for (int y = 0; y < srcImg32F->height; y++)
		{
			int* lineIdx = (int*)(pLineInd->imageData + pLineInd->widthStep * y); 
			for (int x = 0; x < srcImg32F->width; x++)
			{
				if (lineIdx[x] > 0) // it's an edge point
				{
					int dx = x % meshQuadSize;
					dx = min(dx, meshQuadSize - dx);
					int dy = y % meshQuadSize;
					dy = min(dy, meshQuadSize - dy);
					dx = min(dx, dy);
					int gridPos = y / meshQuadSize * szGrid.width + x / meshQuadSize;

					if (dx > pTmp[gridPos] && dx > gSet("minEdgeRatio") * meshQuadSize)
					{
						pGridPos[gridPos][0] = x;
						pGridPos[gridPos][1] = y;
						pGridIdxE[gridPos] = lineIdx[x];
						pTmp[gridPos] = dx;
					}
				}
			}
		}

		// Group the selected cells by the line they belong to.
		map<int, EdgePos> edgePntPosMap;
		for (int y = 0; y < szGrid.height; y++)
		{
			for (int x = 0; x < szGrid.width; x++)
			{
				int gridPos = y * szGrid.width + x;
				int idx = pGridIdxE[gridPos];
				if (idx > 0) // an edge point within grid
				{
					edgePntPosMap[idx].push_back(pair<int, int>(x, y));
				}
			}
		}

		// Keep only lines that span at least 3 grid cells; drop the rest.
		for (map<int, EdgePos>::iterator it = edgePntPosMap.begin(); it != edgePntPosMap.end(); it++)
		{
			EdgePos& edPos = it->second;
			if (edPos.size() >= 3)
				edgePntPos.push_back(edPos);
			else
			{
				for (size_t i = 0; i < edPos.size(); i++)
					pGridIdxE[edPos[i].first + edPos[i].second * szGrid.width] = -1;
			}
		}

		// Assign a variable index to every surviving edge point and append
		// its position to ppos (after the mesh-node variables).
		for (int y = 0; y < szGrid.height; y++)
		{
			for (int x = 0; x < szGrid.width; x++)
			{
				int gridPos = y * szGrid.width + x;
				int idx = pGridIdxE[gridPos];
				if (idx > 0) // an edge point within grid
				{
					pGridIdx[gridPos] = varaInd;
					varaInd++;
					ppos.push_back(pGridPos[gridPos][0]);
					ppos.push_back(pGridPos[gridPos][1]);
				}
				else
					pGridIdx[gridPos] = -1;
			}
		}

		// Remap pGridIdxE from raw line index to index into edgePntPos.
		for (size_t i = 0; i < edgePntPos.size(); i++)
		{
			for (size_t j = 0; j < edgePntPos[i].size(); j++)
			{
				int gridPos = edgePntPos[i][j].first + szGrid.width * edgePntPos[i][j].second;
				pGridIdxE[gridPos] = i;
			}
		}

		CmShow::Labels(pLineInd, "Labels", 1); // Show Line Idx
		//CmShow::MixedMesh(img8U3C, pGridNodeX64F, pGridNodeY64F, szGrid, pGridPos, pGridIdxE, "Mixed", 1);
	}

	// height is accumulated below; width is the variable count (doubled at
	// the end for the x and y components).
	CvSize szConstrainA = cvSize(varaInd, 0);

	// Get constrain units
	{
		// Per-cell importance, averaged down to grid resolution.
		IplImage* gridImp32F = cvCreateImage(szGrid, IPL_DEPTH_32F, 1);
		cvResize(impImg32F, gridImp32F, CV_INTER_AREA/*added 2009-7-27*/);

		double* pNodeX = (double*)(pGridNodeX64F->imageData);
		double* pNodeY = (double*)(pGridNodeY64F->imageData);

		// Quads constrains and qaud5 constrains
		for (int y = 0; y < szGrid.height; y++)
		{
			for (int x = 0; x < szGrid.width; x++)
			{
				int gridpos = szGrid.width * y + x;

				ConstrainUnits unit;
				unit.SetNumber(pGridIdxE[gridpos] >= 0 ? 5 : 4);

				// The four corner nodes of the quad (row-major node grid).
				unit.ind[0] = y * (szGrid.width + 1) + x;
				unit.ind[1] = unit.ind[0] + szGrid.width + 1;
				unit.ind[2] = unit.ind[0] + 1;
				unit.ind[3] = unit.ind[0] + szGrid.width + 2;

				// Fifth node: the edge point inside this cell, if any.
				if (pGridIdxE[gridpos] >= 0)
				{
					unit.ind[4] = pGridIdx[gridpos];
					unit.pnts[4].x = pGridPos[gridpos][0];
					unit.pnts[4].y = pGridPos[gridpos][1];
				}

				for (int i = 0; i < 4; i++)
				{
					unit.pnts[i].x = pNodeX[unit.ind[i]];
					unit.pnts[i].y = pNodeY[unit.ind[i]];
				}

				unit.imp = CV_IMAGE_ELEM(gridImp32F, float, y, x);
				if (pGridIdxE[gridpos] >= 0)
				{
					//unit.imp *=1.2;
					qaud5s.push_back(unit);					
				}
				else
					quads.push_back(unit);
			}
		}

		szConstrainA.height = quads.size() * 8 + qaud5s.size() * 10;

		// Edge constrains: one unit per detected line, weighted by the mean
		// importance of its cells times EdgeConstrainRation.
		for (size_t i = 0; i < edgePntPos.size(); i++)
		{
			ConstrainUnits unit;
			unit.SetNumber(edgePntPos[i].size());
			double imp = 0;
			for (size_t j = 0; j < edgePntPos[i].size(); j++)
			{
				int gridPos = edgePntPos[i][j].first + edgePntPos[i][j].second * szGrid.width;
				unit.ind[j] = pGridIdx[gridPos];
				unit.pnts[j].x = pGridPos[gridPos][0];
				unit.pnts[j].y = pGridPos[gridPos][1];
				imp += ((float*)(gridImp32F->imageData))[gridPos];
			}
			unit.imp = imp/unit.n * gSet("EdgeConstrainRation");
			edges.push_back(unit);
			szConstrainA.height += unit.n * 2;
		}

		cvReleaseImage(&gridImp32F);
		ShowConstrains(img8U3C, quads, qaud5s, edges, "Constrains", 1, FileNames::srcMeshName);
	}
	delete []pGridIdxE;
	delete []pGridIdx;
	delete []pGridPos;
	cvReleaseImage(&pGridNodeY64F);
	cvReleaseImage(&pGridNodeX64F);
	// NOTE(review): impImg32F is not released on either branch — verify the
	// calcEnergy buffer is owned by `imp`, and whether the cvLoadImage
	// branch leaks its locally created image.
	//cvReleaseImage(&impImg32F);

	szConstrainA.width *= 2;	// two unknowns (x, y) per variable
	return szConstrainA;
}
/* Computes a 3x3 homography induced by the plane orthogonal to the rotated
 * z axis built from `line`.
 *
 * line        - 3-vector used both as the new x axis (first rotation row)
 *               and to derive the new z axis from its xy/xz direction
 *               cosines.
 * _center     - 3D point whose distance to the plane normalizes the
 *               translation component.
 * _intrinsic  - 3x3 camera intrinsic matrix (row-major float).
 * _homography - output 3x3 homography (row-major float), computed as
 *               intrinsic * (R' + t*rz'/plane_dist) * intrinsic^-1.
 */
CV_IMPL void
cvCalcImageHomography( float* line, CvPoint3D32f* _center,
                       float* _intrinsic, float* _homography )
{
    double norm_xy, norm_xz, xy_sina, xy_cosa, xz_sina, xz_cosa, nx1, plane_dist;
    float _ry[3], _rz[3], _r_trans[9];

    /* Validate every pointer argument up front, before any of them is
       wrapped into a CvMat header (the original checked only after the
       headers were constructed). */
    if( !line || !_center || !_intrinsic || !_homography )
        CV_Error( CV_StsNullPtr, "" );

    CvMat rx = cvMat( 1, 3, CV_32F, line );
    CvMat ry = cvMat( 1, 3, CV_32F, _ry );
    CvMat rz = cvMat( 1, 3, CV_32F, _rz );
    CvMat r_trans = cvMat( 3, 3, CV_32F, _r_trans );
    CvMat center = cvMat( 3, 1, CV_32F, _center );

    float _sub[9];
    CvMat sub = cvMat( 3, 3, CV_32F, _sub );
    float _t_trans[3];
    CvMat t_trans = cvMat( 3, 1, CV_32F, _t_trans );

    CvMat intrinsic = cvMat( 3, 3, CV_32F, _intrinsic );
    CvMat homography = cvMat( 3, 3, CV_32F, _homography );

    /* Direction cosines of `line` projected onto the xy and xz planes. */
    norm_xy = cvSqrt( line[0] * line[0] + line[1] * line[1] );
    xy_cosa = line[0] / norm_xy;
    xy_sina = line[1] / norm_xy;

    norm_xz = cvSqrt( line[0] * line[0] + line[2] * line[2] );
    xz_cosa = line[0] / norm_xz;
    xz_sina = line[2] / norm_xz;

    nx1 = -xz_sina;

    /* New z axis, then normalized to unit length. */
    _rz[0] = (float)(xy_cosa * nx1);
    _rz[1] = (float)(xy_sina * nx1);
    _rz[2] = (float)xz_cosa;
    cvScale( &rz, &rz, 1./cvNorm(&rz,0,CV_L2) );

    /*  new axe  y = rz x rx, normalized  */
    cvCrossProduct( &rz, &rx, &ry );
    cvScale( &ry, &ry, 1./cvNorm( &ry, 0, CV_L2 ) );

    /*  transposed rotation matrix: rows are rx (= line), ry, rz  */
    memcpy( &_r_trans[0], line, 3*sizeof(float));
    memcpy( &_r_trans[3], _ry, 3*sizeof(float));
    memcpy( &_r_trans[6], _rz, 3*sizeof(float));

    /*  calculate center distance from arm plane  */
    plane_dist = cvDotProduct( &center, &rz );

    /* calculate t = (I - r_trans)*center */
    cvSetIdentity( &sub );
    cvSub( &sub, &r_trans, &sub );
    cvMatMul( &sub, &center, &t_trans );

    /* sub = r_trans + (t * rz') / plane_dist  (outer product 3x1 * 1x3) */
    cvMatMul( &t_trans, &rz, &sub );
    cvScaleAdd( &sub, cvRealScalar(1./plane_dist), &r_trans, &sub ); /* ? */

    /* homography = intrinsic * sub * intrinsic^-1 */
    cvMatMul( &intrinsic, &sub, &r_trans );
    cvInvert( &intrinsic, &sub, CV_SVD );
    cvMatMul( &r_trans, &sub, &homography );
}
void CMagicCabsineUniversalProperty_feature_texture_spectral_DFT::ComputeProperty()
{

	IplImage * im;//输入图像必须是灰度图像
	
	
	IplImage * realInput;
	IplImage * imaginaryInput;
	IplImage * complexInput;
	int dft_M, dft_N;
	CvMat* dft_A, tmp;
	IplImage * image_Im;
	IplImage * image_Re;

	double m, M;
	
	//灰度图转化
	IplImage *srcimg_cvt = cvCreateImage(cvGetSize(srcImage),IPL_DEPTH_8U,3);//源图像格式化图像
	im = cvCreateImage(cvGetSize(srcImage),IPL_DEPTH_8U,1);
	cvConvertScale(srcImage,srcimg_cvt);//源图像格式化 转换成 IPL_DEPTH_32F 3通道
	cvCvtColor(srcImage,im,CV_BGR2GRAY);//源图像转换成灰度图
	cvReleaseImage(&srcimg_cvt);

	realInput = cvCreateImage( cvGetSize(srcImage), IPL_DEPTH_64F, 1);//灰度图像
	imaginaryInput = cvCreateImage( cvGetSize(srcImage), IPL_DEPTH_64F, 1);//灰度图像
	complexInput = cvCreateImage( cvGetSize(srcImage), IPL_DEPTH_64F, 2);

	cvScale(im, realInput, 1.0, 0.0);//从8u转换成32f 提高精度
	cvZero(imaginaryInput);
	//将单通道图像合并成多通道图像(将realInput和imageinaryImput合并到双通道图像complexInput)
	cvMerge(realInput, imaginaryInput, NULL, NULL, complexInput);

	/*dft_M = cvGetOptimalDFTSize( im->height - 1 );
	dft_N = cvGetOptimalDFTSize( im->width - 1 );*/

	dft_M = im->height;
	dft_N = im->width;
	dft_A = cvCreateMat( dft_M, dft_N, CV_64FC2 );
	image_Re = cvCreateImage( cvSize(dft_N, dft_M), IPL_DEPTH_64F, 1);//DFT最佳尺寸的图像
	image_Im = cvCreateImage( cvSize(dft_N, dft_M), IPL_DEPTH_64F, 1);

	// copy A to dft_A and pad dft_A with zeros
	cvGetSubRect( dft_A, &tmp, cvRect(0,0, im->width, im->height));
	cvCopy( complexInput, &tmp, NULL );
	if( dft_A->cols > im->width )
	{
		cvGetSubRect( dft_A, &tmp, cvRect(im->width,0, dft_A->cols - im->width, im->height));
		cvZero( &tmp );
	}

	// no need to pad bottom part of dft_A with zeros because of
	// use nonzero_rows parameter in cvDFT() call below

	cvDFT( dft_A, dft_A, CV_DXT_FORWARD, complexInput->height );
	

	// Split Fourier in real and imaginary parts
	cvSplit( dft_A, image_Re, image_Im, 0, 0 );
	
	// Compute the magnitude of the spectrum Mag = sqrt(Re^2 + Im^2)
	cvPow( image_Re, image_Re, 2.0);
	cvPow( image_Im, image_Im, 2.0);
	cvAdd( image_Re, image_Im, image_Re, NULL);
	cvPow( image_Re, image_Re, 0.5 );

	// Compute log(1 + Mag)
	cvAddS( image_Re, cvScalarAll(1.0), image_Re, NULL ); // 1 + Mag
	cvLog( image_Re, image_Re ); // log(1 + Mag)
		
	// Rearrange the quadrants of Fourier image so that the origin is at
	// the image center
	cvShiftDFT( image_Re, image_Re );
	cvMinMaxLoc(image_Re, &m, &M, NULL, NULL, NULL);
	cvScale(image_Re, image_Re, 1.0/(M-m), 1.0*(-m)/(M-m));
	//cvShowImage("magnitude", image_Re);

	//正规化傅里叶变换图像使其可以被存储
	image_Re_save = cvCreateImage(cvSize(dft_N,dft_M),IPL_DEPTH_64F,1);//注意本来应该在初始化中创建,但是由于大小参数无法估计故在此创建需要在析构函数中release
	cvNormalize(image_Re,image_Re_save,255,0,CV_MINMAX,NULL);



	cvReleaseImage(&im);
	cvReleaseImage(&realInput);
	cvReleaseImage(&imaginaryInput);
	cvReleaseImage(&complexInput);
	cvReleaseImage(&image_Re);
	cvReleaseImage(&image_Im);

	////对傅里叶进行正规化使用L2(欧几里得)范式获取正规化傅里叶系数NFP,以计算3个傅里叶系数,未加绝对值
	//cvNormalize(image_Re,image_Re,1,0,CV_L2,NULL);

	////test
	//cvMinMaxLoc(image_Re, &m, &M, NULL, NULL, NULL);
	////对傅里叶NFP取绝对值
	//cvAbs(image_Re,image_Re);

	////test
	//cvMinMaxLoc(image_Re, &m, &M, NULL, NULL, NULL);



	////计算傅里叶系数熵
	//double h = FT_Entropy(image_Re, dft_N, dft_M);

	//double e = FT_Energy(image_Re, dft_N, dft_M);

	//double i = FT_Inertia(image_Re, dft_N, dft_M);


	////存储特征
	//featurefilename+=".txt";
	//
	//FILE* file = fopen(featurefilename.c_str(),"w");
	//fprintf_s(file,"%.6f\n",h);
	//fprintf_s(file,"%.6f\n",e);
	//fprintf_s(file,"%.6f\n",i);
	//fclose(file);
	//return 0;
}
示例#10
0
// Advances the spatio-temporal Harris interest-point detector by one frame:
// normalizes the input to [0,1], updates the original/spatially-filtered/
// temporally-filtered circular buffers, accumulates the six smoothed
// second-moment products (cxx, cxy, cxt, cyy, cyt, ctt), evaluates the
// Harris function and runs interest-point detection.  Increments iFrame.
void HarrisBuffer::ProcessFrame(IplImage* frm)
{
	gray=frm;
	//todo:scale depending on input type and IMGTYPE 
	//cvNormalize(gray,frame,1,0,CV_MINMAX);
	/*double m, M;
	cvMinMaxLoc(gray, &m, &M, NULL, NULL, NULL);*/
	cvScale(gray, frame, 1.0/255.0, 0.0);// map 8-bit input to [0,1]
	/*double m, M;
	cvMinMaxLoc(frame, &m, &M, NULL, NULL, NULL); 
	std::cout<<m<<"\t"<<M<<endl;*/

	// Keep an unfiltered copy of the normalized frame.
	original.Update(frame);

	//spatial filtering
	CVUtil::GaussianSmooth(frame,tmp,sig2,FFT);
	databuffer.Update(tmp);

	//temporal filtering: smooth temporally, then take the temporal
	//derivative of the smoothed sequence.
	int tstamp1=databuffer.TemporalConvolve(tmp1, TemporalMask1);
	convbuffer.Update(tmp1,tstamp1);

	int tstamp1d=convbuffer.TemporalConvolve(Lt,DerivMask);
	cvScale(Lt,Lt, sqrt(tau2) , 0);	// scale-normalize Lt by sqrt(tau2)

	// Spatial gradient of the frame matching the derivative timestamp.
	convbuffer.GetFrame(tstamp1d,L);
	CVUtil::ImageGradient(L,Lx,Ly);//prb: a possible scale
	cvScale(Lx,Lx, sqrt(sig2)*0.5 , 0);	// scale-normalize by sqrt(sig2)/2
	cvScale(Ly,Ly, sqrt(sig2)*0.5 , 0);

	//update second-moment matrix: smoothed pairwise products of Lx, Ly, Lt
	GaussianSmoothingMul(Lx,Lx, tmp1,2*sig2);
	cxxbuffer.Update(tmp1,tstamp1d);
	GaussianSmoothingMul(Lx,Ly, tmp1,2*sig2);
	cxybuffer.Update(tmp1,tstamp1d);
	GaussianSmoothingMul(Lx,Lt, tmp1,2*sig2);
	cxtbuffer.Update(tmp1,tstamp1d);
	GaussianSmoothingMul(Ly,Ly, tmp1,2*sig2);
	cyybuffer.Update(tmp1,tstamp1d);
	GaussianSmoothingMul(Ly,Lt, tmp1,2*sig2);
	cytbuffer.Update(tmp1,tstamp1d);
	GaussianSmoothingMul(Lt,Lt, tmp1,2*sig2);
	cttbuffer.Update(tmp1,tstamp1d);

	//update Harris buffer: temporally convolve each second-moment product
	//(all calls share the same resulting timestamp).
	int tstamp2=0;
	tstamp2=cxxbuffer.TemporalConvolve(cxx, TemporalMask2);
	tstamp2=cxybuffer.TemporalConvolve(cxy, TemporalMask2);
	tstamp2=cxtbuffer.TemporalConvolve(cxt, TemporalMask2);
	tstamp2=cyybuffer.TemporalConvolve(cyy, TemporalMask2);
	tstamp2=cytbuffer.TemporalConvolve(cyt, TemporalMask2);
	tstamp2=cttbuffer.TemporalConvolve(ctt, TemporalMask2);

	HarrisFunction(kparam, tmp);
	Hbuffer.Update(tmp,tstamp2);

	//LogMinMax(Hbuffer.Buffer,logfile);
	//databuffer.FrameIndices.print(std::cout);
	//databuffer.FrameIndices.print(logfile);
	//convbuffer.FrameIndices.print(logfile);
	//Hbuffer.FrameIndices.print(logfile);
	//std::cout<<iFrame<<std::endl;
	DetectInterestPoints(Border);

	iFrame++;
	return;
}
示例#11
0
文件: VarFlow.cpp 项目: horsewin/ARD
/**
   Calculates the optical flow between two images.

   @param[in] imgA   First input image
   @param[in] imgB   Second input image, the flow is calculated from imgA to imgB
   @param[out] imgU   Horizontal flow field
   @param[out] imgV   Vertical flow field
   @param[in] saved_data   Flag indicates previous imgB is now imgA (ie subsequent frames), save some time in calculation
   
   @return   Flag to indicate successful completion

*/
// Computes variational optical flow from imgA to imgB with a coarse-to-fine
// multigrid Gauss-Seidel solver; results go to imgU (horizontal) and imgV
// (vertical).  Returns 1 on success, 0 if the object is not initialized.
// NOTE(review): the default argument `= false` in this out-of-line member
// definition is ill-formed if the declaration in the header also specifies
// one — confirm the header omits it.
int VarFlow::CalcFlow(IplImage* imgA, IplImage* imgB, IplImage* imgU, IplImage* imgV, bool saved_data = false){
    
    if(!initialized)
      return 0;
      
    IplImage* swap_img;
      
    //Don't recalculate imgAfloat, just swap with imgBfloat
    if(saved_data){
        
       CV_SWAP(imgAfloat, imgBfloat, swap_img);
       
       cvResize(imgB, imgBsmall, CV_INTER_LINEAR);
       cvConvert(imgBsmall, imgBfloat);  // Calculate new imgBfloat
       cvSmooth(imgBfloat, imgBfloat, CV_GAUSSIAN, 0, 0, sigma, 0 );
       
    }
    
    //Calculate imgAfloat as well as imgBfloat
    else{
    
        cvResize(imgA, imgAsmall, CV_INTER_LINEAR);
        cvResize(imgB, imgBsmall, CV_INTER_LINEAR);
    
        cvConvert(imgAsmall, imgAfloat);
        cvConvert(imgBsmall, imgBfloat);
    
        cvSmooth(imgAfloat, imgAfloat, CV_GAUSSIAN, 0, 0, sigma, 0 );
        cvSmooth(imgBfloat, imgBfloat, CV_GAUSSIAN, 0, 0, sigma, 0 );
        
    }
    
    cvFilter2D(imgAfloat, imgAfx, &fx_mask, cvPoint(-1,-1));  // X spatial derivative
    cvFilter2D(imgAfloat, imgAfy, &fy_mask, cvPoint(-1,-1));  // Y spatial derivative
    
    cvSub(imgBfloat, imgAfloat, imgAft, NULL);  // Temporal derivative
    
    // Entries of the (smoothed) structure tensor: fx*fx, fx*fy, fx*ft, ...
    cvMul(imgAfx,imgAfx,imgAfxfx_array[0], 1);
    cvMul(imgAfx,imgAfy,imgAfxfy_array[0], 1);
    cvMul(imgAfx,imgAft,imgAfxft_array[0], 1);
    cvMul(imgAfy,imgAfy,imgAfyfy_array[0], 1);
    cvMul(imgAfy,imgAft,imgAfyft_array[0], 1);
    
    cvSmooth(imgAfxfx_array[0], imgAfxfx_array[0], CV_GAUSSIAN, 0, 0, rho, 0 );
    cvSmooth(imgAfxfy_array[0], imgAfxfy_array[0], CV_GAUSSIAN, 0, 0, rho, 0 );
    cvSmooth(imgAfxft_array[0], imgAfxft_array[0], CV_GAUSSIAN, 0, 0, rho, 0 );
    cvSmooth(imgAfyfy_array[0], imgAfyfy_array[0], CV_GAUSSIAN, 0, 0, rho, 0 );
    cvSmooth(imgAfyft_array[0], imgAfyft_array[0], CV_GAUSSIAN, 0, 0, rho, 0 );
    
    int i;
    
    //Fill all the levels of the multigrid algorithm with resized images
    for(i = 1; i < (max_level - start_level + 1); i++){
        
        cvResize(imgAfxfx_array[i-1], imgAfxfx_array[i], CV_INTER_LINEAR);
        cvResize(imgAfxfy_array[i-1], imgAfxfy_array[i], CV_INTER_LINEAR);
        cvResize(imgAfxft_array[i-1], imgAfxft_array[i], CV_INTER_LINEAR);
        cvResize(imgAfyfy_array[i-1], imgAfyfy_array[i], CV_INTER_LINEAR);
        cvResize(imgAfyft_array[i-1], imgAfyft_array[i], CV_INTER_LINEAR);
        
    }
    
    // Solve from the coarsest level (k) down to the finest (0).
    int k = (max_level - start_level);

    while(1)
		{    
        gauss_seidel_recursive(k, (max_level - start_level), k, pow((float)2.0,(float)(k)), imgAfxft_array, imgAfyft_array);
            
        if(k > 0){            
            // Transfer velocity from coarse to fine                           
            cvResize(imgU_array[k], imgU_array[k-1], CV_INTER_LINEAR);
            cvResize(imgV_array[k], imgV_array[k-1], CV_INTER_LINEAR);
            
            k--;            
        }else{
          break;
				}            
    }
    
    // Transfer to output image, resize if necessary
    cvResize(imgU_array[0], imgU, CV_INTER_LINEAR);
    cvResize(imgV_array[0], imgV, CV_INTER_LINEAR);
    
    // If started algorithm with smaller image than original, scale the flow field
    if(start_level > 0){
	
		cvScale(imgU, imgU, pow(2.0, start_level));
		cvScale(imgV, imgV, pow(2.0, start_level));
		
	}
    
    return 1;
}
示例#12
0
/* Parses the Videre stereo calibration text blob `info` and initializes the
 * left/right undistort-rectify maps ml[2] / mr[2].
 *
 * For each [left_camera]/[right_camera] section the intrinsics (f, fy, Cx,
 * Cy) and distortion coefficients (kappa1..3, tau1..2) are read, the
 * principal point is sanity-checked (the calibration may correspond to 2x
 * resolution), and cvInitUndistortRectifyMap() + cvConvertMaps() produce
 * the final maps.
 *
 * Returns true only when both camera sections were parsed completely.
 */
bool CvCaptureCAM_DC1394_v2_CPP::initVidereRectifyMaps( const char* info,
    IplImage* ml[2], IplImage* mr[2] )
{
    float identity_data[] = {1, 0, 0, 0, 1, 0, 0, 0, 1};
    CvMat l_rect = cvMat(3, 3, CV_32F, identity_data), r_rect = l_rect;
    float l_intrinsic_data[] = {1, 0, 0, 0, 1, 0, 0, 0, 1};
    float r_intrinsic_data[] = {1, 0, 0, 0, 1, 0, 0, 0, 1};
    CvMat l_intrinsic = cvMat(3, 3, CV_32F, l_intrinsic_data);
    CvMat r_intrinsic = cvMat(3, 3, CV_32F, r_intrinsic_data);
    float l_distortion_data[] = {0,0,0,0,0}, r_distortion_data[] = {0,0,0,0,0};
    CvMat l_distortion = cvMat(1, 5, CV_32F, l_distortion_data);
    CvMat r_distortion = cvMat(1, 5, CV_32F, r_distortion_data);
    IplImage* mx = cvCreateImage(cvGetSize(ml[0]), IPL_DEPTH_32F, 1);
    IplImage* my = cvCreateImage(cvGetSize(ml[0]), IPL_DEPTH_32F, 1);
    int k, j;

    for( k = 0; k < 2; k++ )
    {
        const char* section_name = k == 0 ? "[left_camera]" : "[right_camera]";
        /* BUGFIX: a missing comma between "Cy" and "kappa1" concatenated the
           two literals into "Cykappa1", so Cy was never parsed and every
           following parameter was shifted by one slot. */
        static const char* param_names[] = { "f ", "fy", "Cx", "Cy", "kappa1", "kappa2", "tau1", "tau2", "kappa3", 0 };
        const char* section_start = strstr( info, section_name );
        CvMat* intrinsic = k == 0 ? &l_intrinsic : &r_intrinsic;
        CvMat* distortion = k == 0 ? &l_distortion : &r_distortion;
        CvMat* rectification = k == 0 ? &l_rect : &r_rect;
        IplImage** dst = k == 0 ? ml : mr;
        if( !section_start )
            break;
        section_start += strlen(section_name);
        for( j = 0; param_names[j] != 0; j++ )
        {
            const char* param_value_start = strstr(section_start, param_names[j]);
            float val=0;
            if(!param_value_start)
                break;
            sscanf(param_value_start + strlen(param_names[j]), "%f", &val);
            if( j < 4 )
                /* f -> (0,0), fy -> (1,1), Cx -> (0,2), Cy -> (1,2) */
                intrinsic->data.fl[j == 0 ? 0 : j == 1 ? 4 : j == 2 ? 2 : 5] = val;
            else
                distortion->data.fl[j - 4] = val;
        }
        if( param_names[j] != 0 )
            break;  /* section ended before all parameters were found */

        // some sanity check for the principal point
        if( fabs(mx->width*0.5 - intrinsic->data.fl[2]) > mx->width*0.1 ||
            fabs(my->height*0.5 - intrinsic->data.fl[5]) > my->height*0.1 )
        {
            /* BUGFIX: cvScale was called with &intrinsic (a CvMat**); pass
               the CvMat* itself so the matrix data is actually scaled. */
            cvScale( intrinsic, intrinsic, 0.5 ); // try the corrected intrinsic matrix for 2x lower resolution
            if( fabs(mx->width*0.5 - intrinsic->data.fl[2]) > mx->width*0.05 ||
                fabs(my->height*0.5 - intrinsic->data.fl[5]) > my->height*0.05 )
                cvScale( intrinsic, intrinsic, 2 ); // revert it back if the new variant is not much better
            intrinsic->data.fl[8] = 1;
        }

        cvInitUndistortRectifyMap( intrinsic, distortion,
                    rectification, intrinsic, mx, my );
        cvConvertMaps( mx, my, dst[0], dst[1] );
    }

    cvReleaseImage( &mx );
    cvReleaseImage( &my );
    return k >= 2;   /* true only if the loop completed both iterations */
}
示例#13
0
// Computes an image-quality-index map between source1 and source2 after
// converting both into the requested colorspace:
//   Q = (4*sigma12 .* mu1*mu2) ./ ((mu1^2 + mu2^2) .* (sigma1^2 + sigma2^2))
// where means/variances are taken over a BxB box filter.  The map is stored
// in the member image_quality_map; the per-channel mean is stored in
// image_quality_value and returned.
// NOTE(review): image_quality_map is allocated on every call — confirm the
// previous buffer is released elsewhere before repeated calls.
CvScalar calcQualityIndex :: compare(IplImage *source1, IplImage *source2, Colorspace space)
{
    IplImage *src1,* src2;
    src1 = colorspaceConversion(source1, space);
    src2 = colorspaceConversion(source2, space);

    int x = src1->width, y = src1->height;
    int nChan = src1->nChannels, d = IPL_DEPTH_32F;
    CvSize size = cvSize(x, y);

    //creating FLOAT type images of src1 and src2
    IplImage *img1 = cvCreateImage(size, d, nChan);
    IplImage *img2 = cvCreateImage(size, d, nChan);

    //Image squares
    IplImage *img1_sq = cvCreateImage(size, d, nChan);
    IplImage *img2_sq = cvCreateImage(size, d, nChan);
    IplImage *img1_img2 = cvCreateImage(size, d, nChan);

    cvConvert(src1, img1);
    cvConvert(src2, img2);

    //Squaring the images thus created
    cvPow(img1, img1_sq, 2);
    cvPow(img2, img2_sq, 2);
    cvMul(img1, img2, img1_img2, 1);

    // Local means (mu), their squares, and local second moments (sigma).
    IplImage *mu1 = cvCreateImage(size, d, nChan);
    IplImage *mu2 = cvCreateImage(size, d, nChan);
    IplImage *mu1_sq = cvCreateImage(size, d, nChan);
    IplImage *mu2_sq = cvCreateImage(size, d, nChan);
    IplImage *mu1_mu2 = cvCreateImage(size, d, nChan);

    IplImage *sigma1_sq = cvCreateImage(size, d, nChan);
    IplImage *sigma2_sq = cvCreateImage(size, d, nChan);
    IplImage *sigma12 = cvCreateImage(size, d, nChan);

    //PRELIMINARY COMPUTING

    //average smoothing is performed (BxB box filter)
    cvSmooth(img1, mu1, CV_BLUR, B, B);
    cvSmooth(img2, mu2, CV_BLUR, B, B);

    //gettting mu, mu_sq, mu1_mu2
    cvPow(mu1, mu1_sq, 2);
    cvPow(mu2, mu2_sq, 2);
    cvMul(mu1, mu2, mu1_mu2, 1);

    //calculating sigma1, sigma2, sigma12: E[x^2] - E[x]^2 form
    cvSmooth(img1_sq, sigma1_sq, CV_BLUR, B, B);
    cvSub(sigma1_sq, mu1_sq, sigma1_sq);

    cvSmooth(img2_sq, sigma2_sq, CV_BLUR, B, B);
    cvSub(sigma2_sq, mu2_sq, sigma2_sq);

    cvSmooth(img1_img2, sigma12, CV_BLUR, B, B);
    cvSub(sigma12, mu1_mu2, sigma12);

    //Releasing unused images
    cvReleaseImage(&img1);
    cvReleaseImage(&img2);
    cvReleaseImage(&img1_sq);
    cvReleaseImage(&img2_sq);
    cvReleaseImage(&img1_img2);

    // creating buffers for numerator and denominator
    IplImage *numerator1 = cvCreateImage(size, d, nChan);
    IplImage *numerator = cvCreateImage(size, d, nChan);
    IplImage *denominator1 = cvCreateImage(size, d, nChan);
    IplImage *denominator2 = cvCreateImage(size, d, nChan);
    IplImage *denominator = cvCreateImage(size, d, nChan);

    // FORMULA to calculate Image Quality Index

    // (4*sigma12)
    cvScale(sigma12, numerator1, 4);

    // (4*sigma12).*(mu1*mu2)
    cvMul(numerator1, mu1_mu2, numerator, 1);

    // (mu1_sq + mu2_sq)
    cvAdd(mu1_sq, mu2_sq, denominator1);

    // (sigma1_sq + sigma2_sq)
    cvAdd(sigma1_sq, sigma2_sq, denominator2);

    //Release images
    cvReleaseImage(&mu1);
    cvReleaseImage(&mu2);
    cvReleaseImage(&mu1_sq);
    cvReleaseImage(&mu2_sq);
    cvReleaseImage(&mu1_mu2);
    cvReleaseImage(&sigma1_sq);
    cvReleaseImage(&sigma2_sq);
    cvReleaseImage(&sigma12);
    cvReleaseImage(&numerator1);

    // ((mu1_sq + mu2_sq).*(sigma1_sq + sigma2_sq))
    cvMul(denominator1, denominator2, denominator, 1);

    //image_quality map
    image_quality_map = cvCreateImage(size, d, nChan);
    float *num, *den, *res;
    num = (float*)(numerator->imageData);
    den = (float*)(denominator->imageData);
    res = (float*)(image_quality_map->imageData);

    // dividing by hand, mapping 0/0 cells to 1 (identical flat regions)
    // ((4*sigma12).*(mu1_mu2))./((mu1_sq + mu2_sq).*(sigma1_sq + sigma2_sq))
    for (int i=0; i<(x*y*nChan); i++) {
        if (den[i] == 0)
        {
            num[i] = (float)(1.0);
            den[i] = (float)(1.0);
        }
        res[i] = 1.0*(num[i]/den[i]);
    }

    // cvDiv doesnt work
    //cvDiv(numerator, denominator, image_quality_map, 1);

    // image_quality_map created in image_quality_map
    // average is taken
    image_quality_value = cvAvg(image_quality_map);

    //Release images
    cvReleaseImage(&numerator);
    cvReleaseImage(&denominator);
    cvReleaseImage(&denominator1);
    cvReleaseImage(&denominator2);
    cvReleaseImage(&src1);
    cvReleaseImage(&src2);

    return image_quality_value;
}
// Background worker thread that repeatedly grabs the shared chessboard
// image, extracts X/Y line responses with DoG filters, normalizes them,
// applies non-maximum suppression and runs cvHoughLines2 on both edge
// images, publishing the resulting line sequences back to the owning
// ChessRecognition instance.  Runs until _EnableThread is cleared.
UINT WINAPI
//DWORD WINAPI
#elif defined(POSIX_SYS)
// using pthread
void *
#endif
	ChessRecognition::HoughLineThread(
#if defined(WINDOWS_SYS)
	LPVOID
#elif defined(POSIX_SYS)
	void *
#endif
	Param) {
	// Worker thread body (Windows variant).
	// The owning class instance is passed in as the thread parameter.
	ChessRecognition *_TChessRecognition = (ChessRecognition *)Param;
	_TChessRecognition->_HoughLineBased = new HoughLineBased();

	CvSeq *_TLineX, *_TLineY;
	// 1-D derivative-of-Gaussian kernel; transposed below for the Y axis.
	double _TH[] = { -1, -7, -15, 0, 15, 7, 1 };

	CvMat _TDoGX = cvMat(1, 7, CV_64FC1, _TH);
	CvMat* _TDoGY = cvCreateMat(7, 1, CV_64FC1);
	cvTranspose(&_TDoGX, _TDoGY); // transpose(&DoGx) -> DoGy

	double _TMinValX, _TMaxValX, _TMinValY, _TMaxValY, _TMinValT, _TMaxValT;
	int _TKernel = 1;

	// Initialize the images used by the Hough processing.
	IplImage *iplTemp = cvCreateImage(cvSize(_TChessRecognition->_Width, _TChessRecognition->_Height), IPL_DEPTH_32F, 1);                   
	IplImage *iplDoGx = cvCreateImage(cvGetSize(iplTemp), IPL_DEPTH_32F, 1);  
	IplImage *iplDoGy = cvCreateImage(cvGetSize(iplTemp), IPL_DEPTH_32F, 1);  
	IplImage *iplDoGyClone = cvCloneImage(iplDoGy);
	IplImage *iplDoGxClone = cvCloneImage(iplDoGx);
	IplImage *iplEdgeX = cvCreateImage(cvGetSize(iplTemp), 8, 1);
	IplImage *iplEdgeY = cvCreateImage(cvGetSize(iplTemp), 8, 1);

	CvMemStorage* _TStorageX = cvCreateMemStorage(0), *_TStorageY = cvCreateMemStorage(0);

	while (_TChessRecognition->_EnableThread != false) {
		// Fetch the shared image; the mutex keeps this in sync with the
		// main loop (replaces the earlier critical-section code).
		_TChessRecognition->_ChessBoardDetectionInternalImageProtectMutex.lock();
		//EnterCriticalSection(&(_TChessRecognition->cs));
		cvConvert(_TChessRecognition->_ChessBoardDetectionInternalImage, iplTemp);
		//LeaveCriticalSection(&_TChessRecognition->cs);
		_TChessRecognition->_ChessBoardDetectionInternalImageProtectMutex.unlock();

		// Apply derivative filters to extract X- and Y-direction line responses.
		cvFilter2D(iplTemp, iplDoGx, &_TDoGX); // extract line responses
		cvFilter2D(iplTemp, iplDoGy, _TDoGY);
		cvAbs(iplDoGx, iplDoGx);
		cvAbs(iplDoGy, iplDoGy);

		// Find the min/max of each image and normalize by the maximum.
		cvMinMaxLoc(iplDoGx, &_TMinValX, &_TMaxValX);
		cvMinMaxLoc(iplDoGy, &_TMinValY, &_TMaxValY);
		cvMinMaxLoc(iplTemp, &_TMinValT, &_TMaxValT);
		cvScale(iplDoGx, iplDoGx, 2.0 / _TMaxValX); // normalize
		cvScale(iplDoGy, iplDoGy, 2.0 / _TMaxValY);
		cvScale(iplTemp, iplTemp, 2.0 / _TMaxValT);

		cvCopy(iplDoGy, iplDoGyClone);
		cvCopy(iplDoGx, iplDoGxClone);

		// Non-maximum suppression, then follow-up processing.
		_TChessRecognition->_HoughLineBased->NonMaximumSuppression(iplDoGx, iplDoGyClone, _TKernel);
		_TChessRecognition->_HoughLineBased->NonMaximumSuppression(iplDoGy, iplDoGxClone, _TKernel);

		cvConvert(iplDoGx, iplEdgeY); // convert back to IPL_DEPTH_8U
		cvConvert(iplDoGy, iplEdgeX);

		double rho = 1.0; // distance resolution in pixel-related units.
		double theta = 1.0; // angle resolution measured in radians.
		int threshold = 20;

		if (threshold == 0)
			threshold = 1;

		// Detect Hough lines on both extracted edge images.
		_TLineX = cvHoughLines2(iplEdgeX, _TStorageX, CV_HOUGH_STANDARD, 1.0 * rho, CV_PI / 180 * theta, threshold, 0, 0);
		_TLineY = cvHoughLines2(iplEdgeY, _TStorageY, CV_HOUGH_STANDARD, 1.0 * rho, CV_PI / 180 * theta, threshold, 0, 0);

		// Convert the CvSeq results into vectors under the vector mutex.
		_TChessRecognition->_Vec_ProtectionMutex.lock();
		_TChessRecognition->_HoughLineBased->CastSequence(_TLineX, _TLineY);
		_TChessRecognition->_Vec_ProtectionMutex.unlock();

		// NOTE(review): Sleep() is a Windows API but this function is also
		// compiled for POSIX_SYS — confirm a portable sleep is used there.
		Sleep(2);
	}

	// Release the DoG kernel matrix.
	cvReleaseMat(&_TDoGY);

	// Release the images used by the internal computations.
	cvReleaseImage(&iplTemp);
	cvReleaseImage(&iplDoGx);
	cvReleaseImage(&iplDoGy);
	cvReleaseImage(&iplDoGyClone);
	cvReleaseImage(&iplDoGxClone);
	cvReleaseImage(&iplEdgeX);
	cvReleaseImage(&iplEdgeY);

	// Release the OpenCV memory storages used by cvHoughLines2.
	cvReleaseMemStorage(&_TStorageX);
	cvReleaseMemStorage(&_TStorageY);

	delete _TChessRecognition->_HoughLineBased;
#if defined(WINDOWS_SYS)
	_endthread();
#elif defined(POSIX_SYS)

#endif
	_TChessRecognition->_EndThread = true;
	return 0;
}