Example #1
0
void asef_locate_eyes(AsefEyeLocator *asef){
	asef->face_image.cols = asef->face_rect.width;
	asef->face_image.rows = asef->face_rect.height;
	asef->face_image.type = CV_8UC1;
	asef->face_image.step = asef->face_rect.width;

	cvGetSubRect(asef->input_image, &asef->face_image, asef->face_rect);

	double xscale = ((double)asef->scaled_face_image_8uc1->cols)/((double)asef->face_image.cols);
	double yscale = ((double)asef->scaled_face_image_8uc1->rows)/((double)asef->face_image.rows);

	cvResize(&asef->face_image, asef->scaled_face_image_8uc1, CV_INTER_LINEAR);

	cvLUT(asef->scaled_face_image_8uc1, asef->scaled_face_image_32fc1, asef->lut);

	cvDFT(asef->scaled_face_image_32fc1, asef->scaled_face_image_32fc1, CV_DXT_FORWARD, 0);
	cvMulSpectrums(asef->scaled_face_image_32fc1, asef->lfilter_dft, asef->lcorr, CV_DXT_MUL_CONJ);
	cvMulSpectrums(asef->scaled_face_image_32fc1, asef->rfilter_dft, asef->rcorr, CV_DXT_MUL_CONJ);

	cvDFT(asef->lcorr, asef->lcorr, CV_DXT_INV_SCALE, 0);
	cvDFT(asef->rcorr, asef->rcorr, CV_DXT_INV_SCALE, 0);

	cvMinMaxLoc(asef->lroi, NULL, NULL, NULL, &asef->left_eye, NULL);
	cvMinMaxLoc(asef->rroi, NULL, NULL, NULL, &asef->right_eye, NULL);

	asef->left_eye.x = (asef->lrect.x + asef->left_eye.x)/xscale + asef->face_rect.x;
	asef->left_eye.y = (asef->lrect.y + asef->left_eye.y)/yscale + asef->face_rect.y;
	asef->right_eye.x = (asef->rrect.x + asef->right_eye.x)/xscale + asef->face_rect.x;
	asef->right_eye.y = (asef->rrect.y + asef->right_eye.y)/yscale + asef->face_rect.y;
}
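
A note on the final coordinate mapping (my reading of the arithmetic above, not text from the original project): the correlation peak found inside each eye ROI is offset by the ROI origin, mapped back from the fixed-size filter grid to the face crop, and then shifted into full-image coordinates:

\[
x_{\text{eye}} = \frac{lrect.x + x_{\text{peak}}}{xscale} + face\_rect.x,
\qquad
y_{\text{eye}} = \frac{lrect.y + y_{\text{peak}}}{yscale} + face\_rect.y,
\]

with xscale and yscale as computed at the top of the function.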
Example #2
0
File: asef.c Project: o2co2/ASEF
void asef_initialze(AsefEyeLocator *asef, const char *file_name){

  load_asef_filters(file_name, &asef->n_rows, &asef->n_cols, &asef->lrect, 
    &asef->rrect, &asef->lfilter, &asef->rfilter);


  asef->lfilter_dft = cvCreateMat(asef->n_rows, asef->n_cols, CV_32FC1);
  asef->rfilter_dft = cvCreateMat(asef->n_rows, asef->n_cols, CV_32FC1);

  asef->image = cvCreateMat(asef->n_rows, asef->n_cols, CV_32FC1);
  asef->image_tile = cvCreateMat(asef->n_rows, asef->n_cols, CV_8UC1);

  asef->lcorr = cvCreateMat(asef->n_rows, asef->n_cols, CV_32FC1);
  asef->rcorr = cvCreateMat(asef->n_rows, asef->n_cols, CV_32FC1);

  asef->lroi = cvCreateMatHeader(asef->n_rows, asef->n_cols, CV_32FC1);
  asef->rroi = cvCreateMatHeader(asef->n_rows, asef->n_cols, CV_32FC1);

  cvDFT(asef->lfilter, asef->lfilter_dft, CV_DXT_FORWARD, 0);
  cvDFT(asef->rfilter, asef->rfilter_dft, CV_DXT_FORWARD, 0);

  cvGetSubRect(asef->lcorr, asef->lroi, asef->lrect);
  cvGetSubRect(asef->rcorr, asef->rroi, asef->rrect);

  asef->lut = cvCreateMat(256, 1, CV_32FC1);
  for (int i = 0; i<256; i++){
    cvmSet(asef->lut, i, 0, 1.0 + i);
  }
  cvLog(asef->lut, asef->lut);
}
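
A minimal usage sketch (not part of the o2co2/ASEF sources): it wires the void asef_initialze() above to the asef_locate_eyes() variant from Example #3. The filter file name and the whole-frame face rectangle are assumptions; a real caller would pass a rectangle obtained from a face detector.

#include <opencv/cv.h>
#include <opencv/highgui.h>
#include <stdio.h>

/* assumes AsefEyeLocator and the functions above are declared,
 * e.g. via the project's asef.h */
int main(void){
  AsefEyeLocator asef;
  /* hypothetical filter file name -- the real path depends on the project data */
  asef_initialze(&asef, "EyeLocatorASEF128x128.fel");

  IplImage *frame = cvLoadImage("face.png", CV_LOAD_IMAGE_GRAYSCALE);
  if (!frame) return -1;

  /* stand-in: treat the whole frame as the detected face */
  CvRect face_rect = cvRect(0, 0, frame->width, frame->height);
  CvPoint leye, reye;
  asef_locate_eyes(&asef, frame, face_rect, &leye, &reye);

  printf("left eye (%d,%d), right eye (%d,%d)\n", leye.x, leye.y, reye.x, reye.y);
  cvReleaseImage(&frame);
  return 0;
}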
Example #3
0
File: asef.c Project: o2co2/ASEF
void asef_locate_eyes(AsefEyeLocator *asef, IplImage *image, CvRect face_rect, CvPoint *leye, CvPoint *reye){
  asef->face_img.cols = face_rect.width;
  asef->face_img.rows = face_rect.height;
  asef->face_img.type = CV_8UC1;
  asef->face_img.step = face_rect.width;

  cvGetSubRect(image, &asef->face_img, face_rect);

  double xscale = ((double)asef->image_tile->cols)/((double)asef->face_img.cols);
  double yscale = ((double)asef->image_tile->rows)/((double)asef->face_img.rows);

  cvResize(&asef->face_img, asef->image_tile, CV_INTER_LINEAR);

  cvLUT(asef->image_tile, asef->image, asef->lut);

  cvDFT(asef->image, asef->image, CV_DXT_FORWARD, 0);
  cvMulSpectrums(asef->image, asef->lfilter_dft, asef->lcorr, CV_DXT_MUL_CONJ);
  cvMulSpectrums(asef->image, asef->rfilter_dft, asef->rcorr, CV_DXT_MUL_CONJ);

  cvDFT(asef->lcorr, asef->lcorr, CV_DXT_INV_SCALE, 0);
  cvDFT(asef->rcorr, asef->rcorr, CV_DXT_INV_SCALE, 0);

  cvMinMaxLoc(asef->lroi, NULL, NULL, NULL, leye, NULL);
  cvMinMaxLoc(asef->rroi, NULL, NULL, NULL, reye, NULL);

  leye->x = (asef->lrect.x + leye->x)/xscale + face_rect.x;
  leye->y = (asef->lrect.y + leye->y)/yscale + face_rect.y;
  reye->x = (asef->rrect.x + reye->x)/xscale + face_rect.x;
  reye->y = (asef->rrect.y + reye->y)/yscale + face_rect.y;
}
Example #4
0
int main(int argc, char** argv) {
	int M1 = 2;
	int M2 = 2;
	int N1 = 2;
	int N2 = 2;
	// initialize A and B
	//
	CvMat* A = cvCreateMat(M1, N1, CV_32F);
	CvMat* B = cvCreateMat(M2, N2, A->type);

	// it is also possible to compute only the abs(M2-M1)+1 × abs(N2-N1)+1
	// central part of the full convolution result
	CvMat* conv = cvCreateMat(A->rows + B->rows - 1, A->cols + B->cols - 1,
			A->type);

	int dft_M = cvGetOptimalDFTSize(A->rows + B->rows - 1);
	int dft_N = cvGetOptimalDFTSize(A->cols + B->cols - 1);

	CvMat* dft_A = cvCreateMat(dft_M, dft_N, A->type);
	CvMat* dft_B = cvCreateMat(dft_M, dft_N, B->type);
	CvMat tmp;

	// copy A to dft_A and pad dft_A with zeros
	//
	cvGetSubRect(dft_A, &tmp, cvRect(0, 0, A->cols, A->rows));
	cvCopy(A, &tmp);
	cvGetSubRect(dft_A, &tmp,
			cvRect(A->cols, 0, dft_A->cols - A->cols, A->rows));
	cvZero(&tmp);

	// no need to pad the bottom part of dft_A with zeros because of
	// the nonzero_rows parameter in the cvDFT() call below
	//
	cvDFT(dft_A, dft_A, CV_DXT_FORWARD, A->rows);

	// repeat the same with the second array
	//
	cvGetSubRect(dft_B, &tmp, cvRect(0, 0, B->cols, B->rows));
	cvCopy(B, &tmp);
	cvGetSubRect(dft_B, &tmp,
			cvRect(B->cols, 0, dft_B->cols - B->cols, B->rows));
	cvZero(&tmp);

	// no need to pad the bottom part of dft_B with zeros because of
	// the nonzero_rows parameter in the cvDFT() call below
	//
	cvDFT(dft_B, dft_B, CV_DXT_FORWARD, B->rows);

	// or CV_DXT_MUL_CONJ to get correlation rather than convolution 
	//
	cvMulSpectrums(dft_A, dft_B, dft_A, 0);

	// calculate only the top part
	//
	cvDFT(dft_A, dft_A, CV_DXT_INV_SCALE, conv->rows);
	cvGetSubRect(dft_A, &tmp, cvRect(0, 0, conv->cols, conv->rows));

	cvCopy(&tmp, conv);
	return 0;
}
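
For reference, the identity this sample exercises (the standard convolution theorem, not text from the sample): with A and B zero-padded to at least (M1+M2-1) × (N1+N2-1),

\[
A * B \;=\; \mathrm{DFT}^{-1}\!\big(\mathrm{DFT}(A_0)\cdot \mathrm{DFT}(B_0)\big),
\]

where A_0 and B_0 are the padded copies; passing CV_DXT_MUL_CONJ to cvMulSpectrums instead yields cross-correlation, as the comment in the code notes.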
Example #5
0
void ASEF_Algorithm::detecteyes(Mat face_image) {
    
    if (isInitialized == false)
        this->initialize();
    
    double xscale = ((double) scaled_face_image_8uc1->cols) / ((double) face_image.cols);
    double yscale = ((double) scaled_face_image_8uc1->rows) / ((double) face_image.rows);
    
    CvMat temp = face_image;
    cvResize(&temp, scaled_face_image_8uc1, CV_INTER_LINEAR);
    cvLUT(scaled_face_image_8uc1, scaled_face_image_32fc1, lut);
    cvDFT(scaled_face_image_32fc1, scaled_face_image_32fc1, CV_DXT_FORWARD, 0);
    cvMulSpectrums(scaled_face_image_32fc1, lfilter_dft, lcorr, CV_DXT_MUL_CONJ);
    cvMulSpectrums(scaled_face_image_32fc1, rfilter_dft, rcorr, CV_DXT_MUL_CONJ);
    
    cvDFT(lcorr, lcorr, CV_DXT_INV_SCALE, 0);
    
    cvDFT(rcorr, rcorr, CV_DXT_INV_SCALE, 0);
    
    minMaxLoc(Mat(lroi), &MinValuel, NULL, NULL, &left_eye);
    minMaxLoc(Mat(rroi), &MinValuer, NULL, NULL, &right_eye);
    left_eye.x = (lrect.x + left_eye.x) / xscale;
    left_eye.y = (lrect.y + left_eye.y) / yscale;
    right_eye.x = (rrect.x + right_eye.x) / xscale;
    right_eye.y = (rrect.y + right_eye.y) / yscale;
}
Example #6
0
int asef_initialze(AsefEyeLocator *asef, const char *asef_file_name, const char *fd_file_name){

	if ( !asef || !asef_file_name || !fd_file_name || 
		strlen(asef_file_name)==0 || strlen(fd_file_name)==0)
		return -1;

  // For face detection:
	asef->face_detection_buffer = cvCreateMemStorage(0);
	asef->face_detection_classifier = fd_load_detector( fd_file_name );

	if ( !asef->face_detection_classifier )
		return -1;

  // For asef eye locator:

	if ( load_asef_filters(asef_file_name, &asef->n_rows, &asef->n_cols, &asef->lrect, 
		&asef->rrect, &asef->lfilter, &asef->rfilter) )
		return -1;

	asef->lfilter_dft = cvCreateMat(asef->n_rows, asef->n_cols, CV_32FC1);
	asef->rfilter_dft = cvCreateMat(asef->n_rows, asef->n_cols, CV_32FC1);

	asef->scaled_face_image_32fc1 = cvCreateMat(asef->n_rows, asef->n_cols, CV_32FC1);
	asef->scaled_face_image_8uc1 = cvCreateMat(asef->n_rows, asef->n_cols, CV_8UC1);

	asef->lcorr = cvCreateMat(asef->n_rows, asef->n_cols, CV_32FC1);
	asef->rcorr = cvCreateMat(asef->n_rows, asef->n_cols, CV_32FC1);

	asef->lroi = cvCreateMatHeader(asef->n_rows, asef->n_cols, CV_32FC1);
	asef->rroi = cvCreateMatHeader(asef->n_rows, asef->n_cols, CV_32FC1);

	asef->lut = cvCreateMat(256, 1, CV_32FC1);

	if ( !(asef->lfilter_dft && asef->rfilter_dft && asef->scaled_face_image_32fc1 && 
		asef->scaled_face_image_8uc1 && asef->lcorr && asef->rcorr && asef->lroi && 
		asef->rroi && asef->lut) ){
		return -1;
	}

	cvDFT(asef->lfilter, asef->lfilter_dft, CV_DXT_FORWARD, 0);
	cvDFT(asef->rfilter, asef->rfilter_dft, CV_DXT_FORWARD, 0);

	cvGetSubRect(asef->lcorr, asef->lroi, asef->lrect);
	cvGetSubRect(asef->rcorr, asef->rroi, asef->rrect);

	for (int i = 0; i < 256; i++){
		cvmSet(asef->lut, i, 0, 1.0 + i);
	}
	cvLog(asef->lut, asef->lut);

	return 0;
}
Example #7
0
void BModel::wiener_filter_chanel(IplImage *channel, IplImage *kernel, const double sigma)
{
    IplImage *fKernel  = cvCreateImage(cvGetSize(kernel), IPL_DEPTH_64F, 2);
    IplImage *fChannel = cvCreateImage(cvGetSize(channel), IPL_DEPTH_64F, 2);
    IplImage *answ = cvCreateImage(cvGetSize(channel), IPL_DEPTH_64F, 2);
    IplImage *reFKernel = cvCreateImage(cvGetSize(kernel), IPL_DEPTH_64F, 1);
    IplImage *imFKernel = cvCreateImage(cvGetSize(kernel), IPL_DEPTH_64F, 1);
    IplImage *reFChannel = cvCreateImage(cvGetSize(kernel), IPL_DEPTH_64F, 1);
    IplImage *imFChannel = cvCreateImage(cvGetSize(kernel), IPL_DEPTH_64F, 1);
    IplImage *reAnsw = cvCreateImage(cvGetSize(channel), IPL_DEPTH_64F, 1);
    IplImage *imAnsw = cvCreateImage(cvGetSize(channel), IPL_DEPTH_64F, 1);

    cvDFT(kernel, fKernel, CV_DXT_FORWARD, channel->height);
    cvDFT(channel, fChannel, CV_DXT_FORWARD, channel->height);

    cvMulSpectrums(fChannel, fKernel, answ, CV_DXT_MUL_CONJ);
    cvSplit(answ, reAnsw, imAnsw, 0, 0 );
    cvSplit(fKernel, reFKernel, imFKernel, 0, 0 );

    cvPow(reFKernel, reFKernel, 2);
    cvPow(imFKernel, imFKernel, 2);
    cvAdd(reFKernel, imFKernel, reFKernel, 0);
    cvAddS(reFKernel, cvScalarAll(sigma), reFKernel, 0);

    cvDiv(reAnsw, reFKernel, reAnsw, 1);
    cvDiv(imAnsw, reFKernel, imAnsw, 1);

    cvMerge(reAnsw, imAnsw, NULL, NULL, answ);

    cvDFT(answ, answ, CV_DXT_INV_SCALE, channel->height);
    cvCopy(answ, channel);

    cvReleaseImage(&fKernel);
    cvReleaseImage(&fChannel);
    cvReleaseImage(&answ);
    cvReleaseImage(&reFKernel);
    cvReleaseImage(&imFKernel);
    cvReleaseImage(&reFChannel);
    cvReleaseImage(&imFChannel);
    cvReleaseImage(&reAnsw);
    cvReleaseImage(&imAnsw);
}
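
Written out, the spectral arithmetic above is the classic Wiener deconvolution with a constant noise-to-signal term (my summary, with sigma playing the regularizer role):

\[
\hat{F}(u,v) \;=\; \frac{H^{*}(u,v)\,G(u,v)}{|H(u,v)|^{2} + \sigma},
\]

where G is the channel's spectrum and H the kernel's: cvMulSpectrums(..., CV_DXT_MUL_CONJ) produces the H*G numerator, and the cvPow/cvAdd/cvAddS sequence builds the |H|^2 + sigma denominator.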
Example #8
0
int ASEF_Algorithm::initialize() {
    
    if (load_asef_filters(haar_cascade_path, &n_rows, &n_cols, &lrect,
                          &rrect, &lfilter, &rfilter))
        return -1;
    
    
    lfilter_dft = cvCreateMat(n_rows, n_cols, CV_32FC1);
    rfilter_dft = cvCreateMat(n_rows, n_cols, CV_32FC1);
    
    scaled_face_image_32fc1 = cvCreateMat(n_rows, n_cols, CV_32FC1);
    scaled_face_image_8uc1 = cvCreateMat(n_rows, n_cols, CV_8UC1);
    
    lcorr = cvCreateMat(n_rows, n_cols, CV_32FC1);
    rcorr = cvCreateMat(n_rows, n_cols, CV_32FC1);
    
    lroi = cvCreateMatHeader(n_rows, n_cols, CV_32FC1);
    rroi = cvCreateMatHeader(n_rows, n_cols, CV_32FC1);
    
    lut = cvCreateMat(256, 1, CV_32FC1);
    point = cvCreateMat(1, 2, CV_32FC1);
    if (!(lfilter_dft && rfilter_dft && scaled_face_image_32fc1 &&
          scaled_face_image_8uc1 && lcorr && rcorr && lroi &&
          rroi && lut)) {
        return -1;
    }
    
    cvDFT(lfilter, lfilter_dft, CV_DXT_FORWARD, 0);
    cvDFT(rfilter, rfilter_dft, CV_DXT_FORWARD, 0);
    
    cvGetSubRect(lcorr, lroi, lrect);
    cvGetSubRect(rcorr, rroi, rrect);
    
    for (int i = 0; i < 256; i++) {
        cvmSet(lut, i, 0, 1.0 + i);
    }
    cvLog(lut, lut);
    isInitialized = true;
    return 0;
}
Example #9
0
//////////////////////// perform Fourier transform ////////////////////////
// fft2
// code comes from http://www.opencv.org.cn/
void CLightSet::fft2(IplImage *src,CvMat *dst)
{
	IplImage * realInput;
	IplImage * imaginaryInput;
	IplImage * complexInput;
	int dft_M, dft_N;
	CvMat* dft_A, tmp;
	IplImage * image_Re;
	IplImage * image_Im;

	realInput = cvCreateImage( cvGetSize(src), IPL_DEPTH_32F, 1);
	imaginaryInput = cvCreateImage( cvGetSize(src), IPL_DEPTH_32F, 1);
	complexInput = cvCreateImage( cvGetSize(src), IPL_DEPTH_32F, 2);

	cvScale(src, realInput, 1.0, 0.0);
	cvZero(imaginaryInput);
	cvMerge(realInput, imaginaryInput, NULL, NULL, complexInput);

// 	dft_M = cvGetOptimalDFTSize( src->height - 1 );
// 	dft_N = cvGetOptimalDFTSize( src->width - 1 );

	dft_M =src->height;
	dft_N =src->width ;

	dft_A = cvCreateMat( dft_M, dft_N, CV_32FC2 );
	image_Re = cvCreateImage( cvSize(dft_N, dft_M), IPL_DEPTH_32F, 1);
	image_Im = cvCreateImage( cvSize(dft_N, dft_M), IPL_DEPTH_32F, 1);

	// copy A to dft_A and pad dft_A with zeros
	cvGetSubRect( dft_A, &tmp, cvRect(0,0, src->width, src->height));
	cvCopy( complexInput, &tmp, NULL );
	if( dft_A->cols > src->width )
	{
		cvGetSubRect( dft_A, &tmp, cvRect(src->width,0, dft_A->cols - src->width, src->height));
		cvZero( &tmp );
	}

	// no need to pad the bottom part of dft_A with zeros because of
	// the nonzero_rows parameter in the cvDFT() call below

	cvDFT( dft_A, dft_A, CV_DXT_FORWARD, complexInput->height );

	cvCopy(dft_A,dst);

	cvReleaseImage(&realInput);
	cvReleaseImage(&imaginaryInput);
	cvReleaseImage(&complexInput);
	cvReleaseImage(&image_Re);
	cvReleaseImage(&image_Im);

}
Example #10
0
void cvShowInvDFT1(IplImage* im, CvMat* dft_A, int dft_M, int dft_N,char* src)
{

    IplImage* realInput;
    IplImage* imaginaryInput;
    IplImage* complexInput;

    IplImage * image_Re;
    IplImage * image_Im;

    double m, M;
    char str[80];

    realInput = cvCreateImage( cvGetSize(im), IPL_DEPTH_64F, 1);
    imaginaryInput = cvCreateImage( cvGetSize(im), IPL_DEPTH_64F, 1);
    complexInput = cvCreateImage( cvGetSize(im), IPL_DEPTH_64F, 2);

    image_Re = cvCreateImage( cvSize(dft_N, dft_M), IPL_DEPTH_64F, 1);
    image_Im = cvCreateImage( cvSize(dft_N, dft_M), IPL_DEPTH_64F, 1);

    //cvDFT( dft_A, dft_A, CV_DXT_INV_SCALE, complexInput->height );
    cvDFT( dft_A, dft_A, CV_DXT_INV_SCALE, dft_M);
    strcpy(str,"DFT INVERSE - ");
    strcat(str,src);
    cvNamedWindow(str, 0);

    // Split Fourier in real and imaginary parts
    cvSplit( dft_A, image_Re, image_Im, 0, 0 );

    // Compute the magnitude of the spectrum Mag = sqrt(Re^2 + Im^2)
    cvPow( image_Re, image_Re, 2.0);
    cvPow( image_Im, image_Im, 2.0);
    cvAdd( image_Re, image_Im, image_Re, NULL);
    cvPow( image_Re, image_Re, 0.5 );

    // Compute log(1 + Mag)
    cvAddS( image_Re, cvScalarAll(1.0), image_Re, NULL ); // 1 + Mag
    cvLog( image_Re, image_Re ); // log(1 + Mag)

    cvMinMaxLoc(image_Re, &m, &M, NULL, NULL, NULL);
    cvScale(image_Re, image_Re, 1.0/(M-m), 1.0*(-m)/(M-m));
    //cvCvtColor(image_Re, image_Re, CV_GRAY2RGBA);

    cvShowImage(str, image_Re);

}
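
The display math in this and the later show-DFT snippets is the usual log-magnitude spectrum (a restatement of the code, not original text):

\[
M(u,v) \;=\; \log\!\Big(1 + \sqrt{\operatorname{Re}^2 F(u,v) + \operatorname{Im}^2 F(u,v)}\Big),
\]

followed by a linear rescale of the result onto [0,1] from its minimum and maximum via cvScale.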
Example #11
0
IplImage *fftImageInv(IplImage *img)
{
	IplImage *ret;
	CvMat *ft;
	CvMat tmp;

	ft = cvCreateMat(img->height, img->width, CV_64FC2);
	origin_center(img);

	// copy img into ft and pad ft with zeros
	cvGetSubRect(ft, &tmp, cvRect(0, 0, img->width, img->height));  // tmp points to sub of ft
	cvCopy(img, &tmp, NULL);                                        // copy complex image into sub of ft
	cvDFT(ft, ft, (CV_DXT_SCALE | CV_DXT_INVERSE), img->height);

	ret = cvMatToImage(ft);
	cvReleaseMat(&ft);
	printf("INVFFT Return image is: Depth=%d channels=%d\n", ret->depth, ret->nChannels);
	return ret;
}
Example #12
0
IplImage* fftImage(IplImage *img)
{
	IplImage *realpart, *imgpart, *complexpart, *ret;
	CvMat *ft;
	int sizeM, sizeN;
	CvMat tmp;

	realpart = cvCreateImage(cvGetSize(img), IPL_DEPTH_64F, 1);
	imgpart = cvCreateImage(cvGetSize(img), IPL_DEPTH_64F, 1);
	complexpart = cvCreateImage(cvGetSize(img), IPL_DEPTH_64F, 2);
	cvScale(img, realpart, 1.0, 0.0);              // copy grey input image to realpart
	cvZero(imgpart);                                   // Set imaginary part to 0
	cvMerge(realpart, imgpart, NULL, NULL, complexpart); // real+imag to complex

	/* Messy bit - cvDFT is fastest when the sizes factor into small primes
	   (see cvGetOptimalDFTSize), so the image has to be embedded into a
	   padded background of 0 pixels. */
	sizeM = cvGetOptimalDFTSize(img->height - 1);
	sizeN = cvGetOptimalDFTSize(img->width - 1);
	printf("Size of image to be transformed is %dx%d\n", sizeM, sizeN);

	ft = cvCreateMat(sizeM, sizeN, CV_64FC2);
	origin_center(complexpart);

	// copy img into ft and pad ft with zeros
	cvGetSubRect(ft, &tmp, cvRect(0, 0, img->width, img->height));  // tmp points to sub of ft
	cvCopy(complexpart, &tmp, NULL);                                // copy complex image into sub of ft
	cvGetSubRect(ft, &tmp, cvRect(img->width, 0,
		ft->cols - img->width, img->height));                   // get sub of ft on the right side
	if ((ft->cols - img->width) > 0) cvZero(&tmp);                  // set right margin to zero


	cvDFT(ft, ft, CV_DXT_FORWARD, complexpart->height);

	ret = cvMatToImage(ft);
	cvReleaseMat(&ft);
	cvReleaseImage(&realpart);
	cvReleaseImage(&imgpart);
	cvReleaseImage(&complexpart);
	return ret;
}
Example #13
0
CvMat* cvShowDFT1(IplImage* im, int dft_M, int dft_N,char* src)
{
    IplImage* realInput;
    IplImage* imaginaryInput;
    IplImage* complexInput;

    CvMat* dft_A, tmp;

    IplImage* image_Re;
    IplImage* image_Im;

    char str[80];

    double m, M;

    realInput = cvCreateImage( cvGetSize(im), IPL_DEPTH_64F, 1);
    imaginaryInput = cvCreateImage( cvGetSize(im), IPL_DEPTH_64F, 1);
    complexInput = cvCreateImage( cvGetSize(im), IPL_DEPTH_64F, 2);

    cvScale(im, realInput, 1.0, 0.0);
    cvZero(imaginaryInput);
    cvMerge(realInput, imaginaryInput, NULL, NULL, complexInput);

    dft_A = cvCreateMat( dft_M, dft_N, CV_64FC2 );
    image_Re = cvCreateImage( cvSize(dft_N, dft_M), IPL_DEPTH_64F, 1);
    image_Im = cvCreateImage( cvSize(dft_N, dft_M), IPL_DEPTH_64F, 1);

    // copy A to dft_A and pad dft_A with zeros
    cvGetSubRect( dft_A, &tmp, cvRect(0,0, im->width, im->height));
    cvCopy( complexInput, &tmp, NULL );
    if( dft_A->cols > im->width )
    {
        cvGetSubRect( dft_A, &tmp, cvRect(im->width,0, dft_A->cols - im->width, im->height));
        cvZero( &tmp );
    }

    // no need to pad the bottom part of dft_A with zeros because of
    // the nonzero_rows parameter in the cvDFT() call below

    cvDFT( dft_A, dft_A, CV_DXT_FORWARD, complexInput->height );

    strcpy(str,"DFT -");
    strcat(str,src);
    cvNamedWindow(str, 0);

    // Split Fourier in real and imaginary parts
    cvSplit( dft_A, image_Re, image_Im, 0, 0 );

    // Compute the magnitude of the spectrum Mag = sqrt(Re^2 + Im^2)
    cvPow( image_Re, image_Re, 2.0);
    cvPow( image_Im, image_Im, 2.0);
    cvAdd( image_Re, image_Im, image_Re, NULL);
    cvPow( image_Re, image_Re, 0.5 );

    // Compute log(1 + Mag)
    cvAddS( image_Re, cvScalarAll(1.0), image_Re, NULL ); // 1 + Mag
    cvLog( image_Re, image_Re ); // log(1 + Mag)

    cvMinMaxLoc(image_Re, &m, &M, NULL, NULL, NULL);
    cvScale(image_Re, image_Re, 1.0/(M-m), 1.0*(-m)/(M-m));
    cvShowImage(str, image_Re);
    return(dft_A);
}
Example #14
0
int main(int argc, char ** argv)
{
    const char* filename = argc >=2 ? argv[1] : "lena.jpg";
    IplImage * im;

    IplImage * realInput;
    IplImage * imaginaryInput;
    IplImage * complexInput;
    int dft_M, dft_N;
    CvMat* dft_A, tmp;
    IplImage * image_Re;
    IplImage * image_Im;
    double m, M;

    im = cvLoadImage( filename, CV_LOAD_IMAGE_GRAYSCALE );
    if( !im )
        return -1;

    realInput = cvCreateImage( cvGetSize(im), IPL_DEPTH_64F, 1);
    imaginaryInput = cvCreateImage( cvGetSize(im), IPL_DEPTH_64F, 1);
    complexInput = cvCreateImage( cvGetSize(im), IPL_DEPTH_64F, 2);

    cvScale(im, realInput, 1.0, 0.0);
    cvZero(imaginaryInput);
    cvMerge(realInput, imaginaryInput, NULL, NULL, complexInput);

    dft_M = cvGetOptimalDFTSize( im->height - 1 );
    dft_N = cvGetOptimalDFTSize( im->width - 1 );

    dft_A = cvCreateMat( dft_M, dft_N, CV_64FC2 );
    image_Re = cvCreateImage( cvSize(dft_N, dft_M), IPL_DEPTH_64F, 1);
    image_Im = cvCreateImage( cvSize(dft_N, dft_M), IPL_DEPTH_64F, 1);

    // copy A to dft_A and pad dft_A with zeros
    cvGetSubRect( dft_A, &tmp, cvRect(0,0, im->width, im->height));
    cvCopy( complexInput, &tmp, NULL );
    if( dft_A->cols > im->width )
    {
        cvGetSubRect( dft_A, &tmp, cvRect(im->width,0, dft_A->cols - im->width, im->height));
        cvZero( &tmp );
    }

    // no need to pad the bottom part of dft_A with zeros because of
    // the nonzero_rows parameter in the cvDFT() call below

    cvDFT( dft_A, dft_A, CV_DXT_FORWARD, complexInput->height );

    cvNamedWindow("win", 0);
    cvNamedWindow("magnitude", 0);
    cvShowImage("win", im);

    // Split Fourier in real and imaginary parts
    cvSplit( dft_A, image_Re, image_Im, 0, 0 );

    // Compute the magnitude of the spectrum Mag = sqrt(Re^2 + Im^2)
    cvPow( image_Re, image_Re, 2.0);
    cvPow( image_Im, image_Im, 2.0);
    cvAdd( image_Re, image_Im, image_Re, NULL);
    cvPow( image_Re, image_Re, 0.5 );

    // Compute log(1 + Mag)
    cvAddS( image_Re, cvScalarAll(1.0), image_Re, NULL ); // 1 + Mag
    cvLog( image_Re, image_Re ); // log(1 + Mag)


    // Rearrange the quadrants of Fourier image so that the origin is at
    // the image center
    cvShiftDFT( image_Re, image_Re );

    cvMinMaxLoc(image_Re, &m, &M, NULL, NULL, NULL);
    cvScale(image_Re, image_Re, 1.0/(M-m), 1.0*(-m)/(M-m));
    cvShowImage("magnitude", image_Re);

    cvWaitKey(-1);
    return 0;
}
Example #15
0
void CMagicCabsineUniversalProperty_feature_texture_spectral_DFT::ComputeProperty()
{

	IplImage * im; // the input image must be a grayscale image
	
	
	IplImage * realInput;
	IplImage * imaginaryInput;
	IplImage * complexInput;
	int dft_M, dft_N;
	CvMat* dft_A, tmp;
	IplImage * image_Im;
	IplImage * image_Re;

	double m, M;
	
	// grayscale conversion
	IplImage *srcimg_cvt = cvCreateImage(cvGetSize(srcImage),IPL_DEPTH_8U,3); // formatted copy of the source image
	im = cvCreateImage(cvGetSize(srcImage),IPL_DEPTH_8U,1);
	cvConvertScale(srcImage,srcimg_cvt); // copy the source image into a 3-channel 8-bit buffer
	cvCvtColor(srcImage,im,CV_BGR2GRAY); // convert the source image to grayscale
	cvReleaseImage(&srcimg_cvt);

	realInput = cvCreateImage( cvGetSize(srcImage), IPL_DEPTH_64F, 1); // grayscale plane
	imaginaryInput = cvCreateImage( cvGetSize(srcImage), IPL_DEPTH_64F, 1); // grayscale plane
	complexInput = cvCreateImage( cvGetSize(srcImage), IPL_DEPTH_64F, 2);

	cvScale(im, realInput, 1.0, 0.0); // convert from 8u to 64f for higher precision
	cvZero(imaginaryInput);
	// merge the single-channel images into one multi-channel image (realInput and imaginaryInput into the two-channel complexInput)
	cvMerge(realInput, imaginaryInput, NULL, NULL, complexInput);

	/*dft_M = cvGetOptimalDFTSize( im->height - 1 );
	dft_N = cvGetOptimalDFTSize( im->width - 1 );*/

	dft_M = im->height;
	dft_N = im->width;
	dft_A = cvCreateMat( dft_M, dft_N, CV_64FC2 );
	image_Re = cvCreateImage( cvSize(dft_N, dft_M), IPL_DEPTH_64F, 1); // image at the DFT size
	image_Im = cvCreateImage( cvSize(dft_N, dft_M), IPL_DEPTH_64F, 1);

	// copy A to dft_A and pad dft_A with zeros
	cvGetSubRect( dft_A, &tmp, cvRect(0,0, im->width, im->height));
	cvCopy( complexInput, &tmp, NULL );
	if( dft_A->cols > im->width )
	{
		cvGetSubRect( dft_A, &tmp, cvRect(im->width,0, dft_A->cols - im->width, im->height));
		cvZero( &tmp );
	}

	// no need to pad the bottom part of dft_A with zeros because of
	// the nonzero_rows parameter in the cvDFT() call below

	cvDFT( dft_A, dft_A, CV_DXT_FORWARD, complexInput->height );
	

	// Split Fourier in real and imaginary parts
	cvSplit( dft_A, image_Re, image_Im, 0, 0 );
	
	// Compute the magnitude of the spectrum Mag = sqrt(Re^2 + Im^2)
	cvPow( image_Re, image_Re, 2.0);
	cvPow( image_Im, image_Im, 2.0);
	cvAdd( image_Re, image_Im, image_Re, NULL);
	cvPow( image_Re, image_Re, 0.5 );

	// Compute log(1 + Mag)
	cvAddS( image_Re, cvScalarAll(1.0), image_Re, NULL ); // 1 + Mag
	cvLog( image_Re, image_Re ); // log(1 + Mag)
		
	// Rearrange the quadrants of Fourier image so that the origin is at
	// the image center
	cvShiftDFT( image_Re, image_Re );
	cvMinMaxLoc(image_Re, &m, &M, NULL, NULL, NULL);
	cvScale(image_Re, image_Re, 1.0/(M-m), 1.0*(-m)/(M-m));
	//cvShowImage("magnitude", image_Re);

	// normalize the Fourier image so that it can be stored
	image_Re_save = cvCreateImage(cvSize(dft_N,dft_M),IPL_DEPTH_64F,1); // note: this should be created during initialization, but since its size cannot be known in advance it is created here and must be released in the destructor
	cvNormalize(image_Re,image_Re_save,255,0,CV_MINMAX,NULL);



	cvReleaseImage(&im);
	cvReleaseImage(&realInput);
	cvReleaseImage(&imaginaryInput);
	cvReleaseImage(&complexInput);
	cvReleaseImage(&image_Re);
	cvReleaseImage(&image_Im);

	//// normalize the Fourier image with the L2 (Euclidean) norm to obtain normalized Fourier coefficients (NFP) for computing the three Fourier features; no absolute value applied yet
	//cvNormalize(image_Re,image_Re,1,0,CV_L2,NULL);

	////test
	//cvMinMaxLoc(image_Re, &m, &M, NULL, NULL, NULL);
	//// take the absolute value of the Fourier NFP
	//cvAbs(image_Re,image_Re);

	////test
	//cvMinMaxLoc(image_Re, &m, &M, NULL, NULL, NULL);



	//// compute the entropy of the Fourier coefficients
	//double h = FT_Entropy(image_Re, dft_N, dft_M);

	//double e = FT_Energy(image_Re, dft_N, dft_M);

	//double i = FT_Inertia(image_Re, dft_N, dft_M);


	//// save the features
	//featurefilename+=".txt";
	//
	//FILE* file = fopen(featurefilename.c_str(),"w");
	//fprintf_s(file,"%.6f\n",h);
	//fprintf_s(file,"%.6f\n",e);
	//fprintf_s(file,"%.6f\n",i);
	//fclose(file);
	//return 0;
}
Example #16
0
IplImage*
create_fourier_image(const IplImage *im)
{

  IplImage *realInput;
  IplImage *imaginaryInput;
  IplImage *complexInput;
  int dft_M, dft_N;
  CvMat *dft_A, tmp;
  IplImage *image_Re;
  IplImage *image_Im;

  realInput = rb_cvCreateImage( cvGetSize(im), IPL_DEPTH_64F, 1);
  imaginaryInput = rb_cvCreateImage( cvGetSize(im), IPL_DEPTH_64F, 1);
  complexInput = rb_cvCreateImage( cvGetSize(im), IPL_DEPTH_64F, 2);

  cvScale(im, realInput, 1.0, 0.0);
  cvZero(imaginaryInput);
  cvMerge(realInput, imaginaryInput, NULL, NULL, complexInput);

  dft_M = cvGetOptimalDFTSize( im->height - 1 );
  dft_N = cvGetOptimalDFTSize( im->width - 1 );

  dft_A = rb_cvCreateMat( dft_M, dft_N, CV_64FC2 );
  image_Re = rb_cvCreateImage( cvSize(dft_N, dft_M), IPL_DEPTH_64F, 1);
  image_Im = rb_cvCreateImage( cvSize(dft_N, dft_M), IPL_DEPTH_64F, 1);

  // copy A to dft_A and pad dft_A with zeros
  cvGetSubRect( dft_A, &tmp, cvRect(0,0, im->width, im->height));
  cvCopy( complexInput, &tmp, NULL );
  if( dft_A->cols > im->width )
  {
    cvGetSubRect( dft_A, &tmp, cvRect(im->width,0, dft_A->cols - im->width, im->height));
    cvZero( &tmp );
  }

  // no need to pad the bottom part of dft_A with zeros because of
  // the nonzero_rows parameter in the cvDFT() call below

  cvDFT( dft_A, dft_A, CV_DXT_FORWARD, complexInput->height );

  // Split Fourier in real and imaginary parts
  cvSplit( dft_A, image_Re, image_Im, 0, 0 );

  // Compute the magnitude of the spectrum Mag = sqrt(Re^2 + Im^2)
  cvPow( image_Re, image_Re, 2.0);
  cvPow( image_Im, image_Im, 2.0);
  cvAdd( image_Re, image_Im, image_Re, NULL);
  cvPow( image_Re, image_Re, 0.5 );

  // Compute log(1 + Mag)
  cvAddS( image_Re, cvScalarAll(1.0), image_Re, NULL ); // 1 + Mag
  cvLog( image_Re, image_Re ); // log(1 + Mag)

  // Rearrange the quadrants of Fourier image so that the origin is at
  // the image center
  cvShiftDFT( image_Re, image_Re );

  cvReleaseImage(&realInput);
  cvReleaseImage(&imaginaryInput);
  cvReleaseImage(&complexInput);
  cvReleaseImage(&image_Im);

  cvReleaseMat(&dft_A);

  return image_Re;

}
Example #17
0
void prefilt(const IplImage* img,IplImage* prefedImg,int pad,IplImage* filter2D)
{
	int imagesize=img->height;
	int padImgSize=imagesize+2*pad;
	
	IplImage* temp1=cvCreateImage(cvSize(imagesize,imagesize),IPL_DEPTH_32F,3);
	
	/* put the 8-bit image into a 32-bit float image */
	cvConvertScale(img,temp1,1,1);

	int i;
	// use ptr to access the pixel data; each pixel holds three float values
	IplImage* temp2=cvCreateImage(cvSize(imagesize,imagesize),IPL_DEPTH_32F,3);
	float* ptr=NULL;

	cvLog(temp1,temp2);
	cvReleaseImage(&temp1);

	/*pad the image to reduce boundary artifacts*/
	IplImage* temp5=cvCreateImage(cvSize(padImgSize,padImgSize),IPL_DEPTH_32F,3);
	IplImage* pad1=cvCreateImage(cvSize(imagesize,imagesize),IPL_DEPTH_32F,1);
	IplImage* pad2=cvCreateImage(cvSize(imagesize,imagesize),IPL_DEPTH_32F,1);
	IplImage* pad3=cvCreateImage(cvSize(imagesize,imagesize),IPL_DEPTH_32F,1);
	cvSplit(temp2,pad1,pad2,pad3,NULL);
	IplImage* C1=cvCreateImage(cvSize(padImgSize,padImgSize),IPL_DEPTH_32F,1);
	IplImage* C2=cvCreateImage(cvSize(padImgSize,padImgSize),IPL_DEPTH_32F,1);
	IplImage* C3=cvCreateImage(cvSize(padImgSize,padImgSize),IPL_DEPTH_32F,1);
	padarraySym(pad1,C1,pad);/* pad each channel */
	padarraySym(pad2,C2,pad);
	padarraySym(pad3,C3,pad);
	cvMerge(C1,C2,C3,NULL,temp5);

	cvReleaseImage(&pad1);
	cvReleaseImage(&pad2);
	cvReleaseImage(&pad3);
	cvReleaseImage(&C1);
	cvReleaseImage(&C2);
	cvReleaseImage(&C3);



	/* FFT */
	/* split 3-channel image to single channel images */
	IplImage* splitImgs[3];
	IplImage* dftOutput[3];
	IplImage* splitImgsC2[3];
	for(i=0;i<3;i++)
	{
		splitImgs[i]=cvCreateImage(cvSize(padImgSize,padImgSize),IPL_DEPTH_32F,1);
		splitImgsC2[i]=cvCreateImage(cvSize(padImgSize,padImgSize),IPL_DEPTH_32F,2);
		dftOutput[i]=cvCreateImage(cvSize(padImgSize,padImgSize),IPL_DEPTH_32F,2);
		cvSetZero(splitImgs[i]);
		cvSetZero(splitImgsC2[i]);
		cvSetZero(dftOutput[i]);
	}
	cvSplit(temp5,splitImgs[0],splitImgs[1],splitImgs[2],NULL);
	IplImage* mergSrc=cvCreateImage(cvSize(padImgSize,padImgSize),IPL_DEPTH_32F,1);
	cvSetZero(mergSrc);
	/* merge to double channel images for input of FFT */
	for(i=0;i<3;i++)
	{
		cvMerge(splitImgs[i],mergSrc,NULL,NULL,splitImgsC2[i]);
		cvReleaseImage(&splitImgs[i]);
	}
	/* FFT */
	for(i=0;i<3;i++)
	{
		cvDFT(splitImgsC2[i],dftOutput[i],CV_DXT_FORWARD);
		cvReleaseImage(&splitImgsC2[i]);
	}

	/* apply prefilt to three channels */
	IplImage* output_temp[3];
	IplImage* outputReal[3];
	IplImage* outputImaginary[3];
	IplImage* ifftOutput[3];
	float temp=0;
	int j=0;
	float* ptr1=NULL;
	for(i=0;i<3;i++)
	{
		output_temp[i]=cvCreateImage(cvSize(padImgSize,padImgSize),IPL_DEPTH_32F,2);
		cvSetZero(output_temp[i]);
		cvMul(filter2D,dftOutput[i],output_temp[i]);
		cvReleaseImage(&dftOutput[i]);

		outputReal[i]=cvCreateImage(cvSize(padImgSize,padImgSize),IPL_DEPTH_32F,1);
		cvSetZero(outputReal[i]);
		outputImaginary[i]=cvCreateImage(cvSize(padImgSize,padImgSize),IPL_DEPTH_32F,1);
		cvSetZero(outputImaginary[i]);
		ifftOutput[i]=cvCreateImage(cvSize(padImgSize,padImgSize),IPL_DEPTH_32F,2);
		cvSetZero(ifftOutput[i]);
		cvDFT(output_temp[i],ifftOutput[i],CV_DXT_INVERSE_SCALE);
		cvSplit(ifftOutput[i],outputReal[i],outputImaginary[i],NULL,NULL);
		cvReleaseImage(&outputImaginary[i]);
		cvReleaseImage(&ifftOutput[i]);
	}

	/* merge real parts to a 3-channel image */
	IplImage* temp3=cvCreateImage(cvSize(padImgSize,padImgSize),IPL_DEPTH_32F,3);
	cvMerge(outputReal[0],outputReal[1],outputReal[2],NULL,temp3);
	cvReleaseImage(&outputReal[0]);
	cvReleaseImage(&outputReal[1]);
	cvReleaseImage(&outputReal[2]);

	IplImage* prefiltOutput=cvCreateImage(cvSize(padImgSize,padImgSize),IPL_DEPTH_32F,3);
	cvSub(temp5,temp3,prefiltOutput);
	cvReleaseImage(&temp3);
	cvReleaseImage(&temp5);



	/* local contrast normalization */
	/* mean of values in different channels of each pixel */
	IplImage* mean=cvCreateImage(cvSize(padImgSize,padImgSize),IPL_DEPTH_32F,1);
	IplImage* mean2D=cvCreateImage(cvSize(padImgSize,padImgSize),IPL_DEPTH_32F,2);
	ptr=(float*) mean->imageData;
	ptr1=(float*) prefiltOutput->imageData;
	for(i=0;i<(padImgSize)*(padImgSize)*3;i++)
	{
		if((i+1)%3==0)
		{
			temp=(*(ptr1+i)+*(ptr1+i-1)+*(ptr1+i-2))/3;
			*(ptr+(i+1)/3-1)=temp*temp;
		}

	}


	/* FFT */
	IplImage* fftRes=cvCreateImage(cvSize(padImgSize,padImgSize),IPL_DEPTH_32F,2);
	cvMerge(mean,mergSrc,NULL,NULL,mean2D);
	cvReleaseImage(&mergSrc);
	cvDFT(mean2D,fftRes,CV_DXT_FORWARD);
	cvReleaseImage(&mean);
	IplImage* temp4=cvCreateImage(cvSize(padImgSize,padImgSize),IPL_DEPTH_32F,2);
	cvMul(filter2D,fftRes,temp4);

	/* Inverse FFT */
	IplImage* ifftRes=cvCreateImage(cvSize(padImgSize,padImgSize),IPL_DEPTH_32F,2);
	IplImage* lccstTemp=cvCreateImage(cvSize(padImgSize,padImgSize),IPL_DEPTH_32F,1);
	ptr1=(float*) lccstTemp->imageData;
	cvFFT(temp4,ifftRes,CV_DXT_INVERSE_SCALE);
	/* calculate the abs of the complex number matrix */
	ptr=(float*)ifftRes->imageData;
	float realV=0;
	float imgV=0;
	for(i=0;i<(padImgSize)*(padImgSize);i++)
	{
		realV=*(ptr+2*i);
		imgV=*(ptr+2*i+1);
		*(ptr1+i)=sqrt(sqrt(realV*realV+fabs(imgV*imgV)))+0.2;
	}
	IplImage* lccst=cvCreateImage(cvSize(padImgSize,padImgSize),IPL_DEPTH_32F,3);

	cvMerge(lccstTemp,lccstTemp,lccstTemp,NULL,lccst);
	IplImage* outputTemp=cvCreateImage(cvSize(padImgSize,padImgSize),IPL_DEPTH_32F,3);
	cvDiv(prefiltOutput,lccst,outputTemp);

	cvReleaseImage(&mean2D);
	cvReleaseImage(&fftRes);
	cvReleaseImage(&ifftRes);
	cvReleaseImage(&lccstTemp);
	cvReleaseImage(&lccst);
	cvReleaseImage(&prefiltOutput);
	cvReleaseImage(&temp2);
	cvReleaseImage(&temp4);
	cvReleaseImage(&output_temp[0]);
	cvReleaseImage(&output_temp[1]);
	cvReleaseImage(&output_temp[2]);


	IplImage* temp6 = cvGetSubImage(outputTemp,cvRect(pad,pad,imagesize,imagesize));
	cvCopy(temp6,prefedImg);
	cvReleaseImage(&outputTemp);
	cvReleaseImage(&temp6);
}
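
The tail of prefilt looks like GIST-style local contrast normalization (this reading is mine; filter2D is assumed to hold a low-pass transfer function in packed complex form):

\[
I'(x,y) \;=\; \frac{I(x,y)}{0.2 + \sqrt{\big|\,\mathcal{F}^{-1}\big[\hat{h}\cdot\mathcal{F}[m^{2}]\big](x,y)\,\big|}},
\]

where m(x,y) is the per-pixel mean of the three channels and \hat{h} the filter: the code squares the mean before filtering, and sqrt(sqrt(re^2 + im^2)) is exactly the square root of the complex magnitude.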
Example #18
0
void ASEF::ComputeEyeLocations(IplImage *img, CvRect bb){


    assert(img->nChannels==1);


    IplImage *roi = cvCreateImage(cvSize(this->nCols, this->nRows), img->depth, 1);

    cvSetImageROI(img, bb);
    cvResize(img, roi, CV_INTER_LINEAR);
    cvResetImageROI(img);


    IplImage *roi_64 = cvCreateImage(cvGetSize(roi), IPL_DEPTH_64F, 1);

    cvConvertScale(roi, roi_64, 1./255., 0.);

    FaceNormIllu::do_NormIlluRETINA(roi_64, this->face_real, 5.0);


    cvMerge(this->face_real, this->face_im, 0, 0, this->complex_data);

    // do DFT
    cvDFT(this->complex_data, this->F, CV_DXT_FORWARD, this->complex_data->height);


    // G left
    cvMulSpectrums(this->F, this->LeftEyeDetector, this->Gl, CV_DXT_ROWS);

    cvDFT(this->Gl, this->Gl, CV_DXT_INV_SCALE, this->Gl->height);
    cvSplit(this->Gl, this->gl, 0, 0, 0);

    // G right
    cvMulSpectrums(this->F, this->RightEyeDetector, this->Gr, CV_DXT_ROWS);

    cvDFT(this->Gr, this->Gr, CV_DXT_INV_SCALE, this->Gl->height);
    cvSplit(this->Gr, this->gr,0,0,0);


    // add both responses
    double minV, maxV;
    cvMinMaxLoc(this->gl, &minV, &maxV);
    cvConvertScale(this->gl, this->gl, 1./(maxV-minV), -minV/(maxV-minV));

    cvMinMaxLoc(this->gr, &minV, &maxV);
    cvConvertScale(this->gr, this->gr, 1./(maxV-minV), -minV/(maxV-minV));

    cvAdd(this->gl, this->gr, this->g);

    cvMul(this->g, this->LeftEyeMask, this->gl);

    cvMul(this->g, this->RightEyeMask, this->gr);


    ///////////////////////////////////////////////////
    //  Compute Eye Locations
    ///////////////////////////////////////////////////
    float scale;

    cvSetImageROI(this->gl, cvRect(0,0, this->nCols>>1, this->nRows>>1));
    cvMinMaxLoc(this->gl, 0,0,0, &this->pEyeL);
    cvResetImageROI(this->gl);

    scale = (float)bb.width/(float)this->nCols;
    this->pEyeL.x=cvRound((float)this->pEyeL.x * scale + bb.x);
    this->pEyeL.y=cvRound((float)this->pEyeL.y * scale + bb.y);


    cvSetImageROI(this->gr, cvRect(this->nCols>>1, 0, this->nCols>>1, this->nRows>>1));
    cvMinMaxLoc(this->gr, 0,0,0, &this->pEyeR);
    cvResetImageROI(this->gr);

    scale = (float)bb.height/(float)this->nRows;
    this->pEyeR.x=cvRound((float)(this->pEyeR.x + this->nCols*0.5)* scale + bb.x);
    this->pEyeR.y=cvRound((float)this->pEyeR.y * scale + bb.y);


    cvReleaseImage(&roi);
    cvReleaseImage(&roi_64);
}
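
Restating the back-mapping at the end (nothing new, just the arithmetic): with scale = bb.width/nCols for the left eye,

\[
x_{\text{img}} = \operatorname{round}(x_{\text{roi}}\cdot scale + bb.x),
\]

and for the right eye the half-width offset nCols/2 is added before scaling. Note that the right-eye branch computes its scale from bb.height/nRows and applies it to x as well, which only coincides with the left-eye mapping when the bounding box is square; whether that is intentional is not clear from the snippet.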
Example #19
0
int main(int argc, char *argv[]) 
{   
    DSP_cvStartDSP();
    CvCapture * capture;
    IplImage *videoFrame, *convFrame, *convOpencvFrame, *unsignedFrame; 
    IplImage *dataImage, *integralImage;
    int key = 0;  /* initialized: key is tested in the while loops below before any assignment */
    
    int *ptr;
    float *flPtr;

    int i,j;
    
    float tempFloat=0.0;
    float *floatDataPtr;
    float *floatOutPtr;

    /* Data to test cvIntegral() */
    unsigned char intdata[] = { 151,  57, 116, 170,   9, 247, 208, 140, 150,  60,  88,  77,   4,   6, 162,   6, 
			    	31, 143, 178,   3, 135,  91,  54, 154, 193, 161,  20, 162, 137, 150, 128, 224, 
			    	214, 113,   9,  28,  53, 211,  98, 217, 149, 233, 231, 127, 115, 203, 177,  42, 
			    	62, 155,   3, 103, 127,  16, 135, 131, 211, 158,   9,   2, 106, 227, 249, 255 }; //16 x 4
        
    if ( argc < 2 ) {
       printf( "Usage: ./remote_ti_platforms_evm3530_opencv.xv5T [option] \n");
       printf("option:\ni. integral\ns. sobel\nd. dft\n");
       printf("Following are the all usage:\n");
       printf("./remote_ti_platforms_evm3530_opencv.xv5T i dsp(To test integral-image algorithm using DSP. Input is from webcam). Note: You need to install VLIB to test this.\n");
       printf("./remote_ti_platforms_evm3530_opencv.xv5T i arm(To test integral-image algorithm using ARM. Input is from webcam). \n");
       printf("./remote_ti_platforms_evm3530_opencv.xv5T i test(To test integral-image algorithm using test data given in APP. Input is from webcam). \n");
       printf("./remote_ti_platforms_evm3530_opencv.xv5T d dsp (To test DFT algorithm using DSP) \n");
       printf("./remote_ti_platforms_evm3530_opencv.xv5T d arm (To test DFT algorithm using ARM) \n");
       printf("./remote_ti_platforms_evm3530_opencv.xv5T s tree.avi dsp (To test sobel algorithm for movie clip tree.avi using DSP) \n");
       printf("./remote_ti_platforms_evm3530_opencv.xv5T s tree.avi arm (To test sobel algorithm for movie clip tree.avi using ARM) \n");
       printf("./remote_ti_platforms_evm3530_opencv.xv5T s webcam dsp (To test sobel algorithm for image from webcam using DSP) \n");
       printf("./remote_ti_platforms_evm3530_opencv.xv5T s webcam arm (To test sobel algorithm for image from webcam using ARM) \n");
       printf("./remote_ti_platforms_evm3530_opencv.xv5T rgb2gray (To test RGB to Gray for image from webcam.) \n");
       return (-1);
    }

    if (*argv[1] == 's' && argc < 3) {
       printf( "Usage: ./remote_ti_platforms_evm3530_opencv.xv5T s tree.avi \n");
       printf( "Usage: ./remote_ti_platforms_evm3530_opencv.xv5T s webcam \n");

       return (-1);
    }
	
    


    switch (*argv[1]) {
 
       	case 'i': 
	    switch (*argv[2]) {
                case 'd': { //'d' for DSP accelerated

    			cvNamedWindow( "video", CV_WINDOW_AUTOSIZE );
    
    
    			capture = cvCaptureFromCAM(-1);

    			if ( !capture) {
      	   	   	   printf("Error: Video capture initialization failed.\n");
      	   	   	   break;
    			}
        		videoFrame = cvQueryFrame ( capture );
     			if ( !videoFrame) {
      	   	   	   printf("**Error reading from webcam\n");
      		   	   break;
    			}
    			
    			/* create new image for the grayscale version */
    			convFrame = cvCreateImage( cvSize( videoFrame->width, videoFrame->height ), IPL_DEPTH_8U, 1 );

    			/* create sobel filtered image */
    			convOpencvFrame = cvCreateImage( cvSize( convFrame->width+1, convFrame->height+1 ), IPL_DEPTH_32S, 1 );
			
			/* Process the first frame outside the loop*/
			DSP_cvCvtColor(videoFrame,convFrame,CV_RGB2GRAY);
			DSP_cvSyncDSP();			    
    			while ( key != 'q') { 
			          			 
	 		      /* Time to test and benchmark DSP based sobel filter */
			      Time_reset(&sTime);
			      Time_delta(&sTime,&time);
			      /*Find integral image */
			      DSP_cvIntegral(convFrame,convOpencvFrame,NULL,NULL);			      
			      
			      /* get next frame */
			      videoFrame = cvQueryFrame( capture); 
			      if ( !videoFrame) {
	         	         printf("***The End***\n");
	           	         break;
      	      	              }
			      DSP_cvSyncDSP();			      		      
			      /* Do color conversion */
			      DSP_cvCvtColor(videoFrame,convFrame,CV_RGB2GRAY);
			      /* show image */ // since VLIB is used for the integral image, its output width and height are the same as the source image's
			      convOpencvFrame->width -= 1; convOpencvFrame->height -= 1;
			      convOpencvFrame->widthStep -= sizeof(int) * convOpencvFrame->nChannels;
			      cvShowImage("video", convOpencvFrame);
			      convOpencvFrame->width += 1; convOpencvFrame->height += 1;
			      convOpencvFrame->widthStep += sizeof(int) * convOpencvFrame->nChannels;
			      /* Sync with DSP */					  
			      DSP_cvSyncDSP();
			      Time_delta(&sTime,&time);         
 		              printf("Total execution time = %dus\n",(unsigned int)time);

	      	              key = cvWaitKey( 15 );
       	       		}
			
	       		cvDestroyWindow("video");
    	       		cvReleaseImage(&videoFrame);
    	       		cvReleaseImage(&convFrame);
               		cvReleaseImage(&convOpencvFrame);
			cvReleaseCapture(&capture); 
			}
       	       		break; /* End of sobel test */

		case 'a': { // 'a' for ARM side

    			cvNamedWindow( "video", CV_WINDOW_AUTOSIZE );
    
    
    			capture = cvCaptureFromCAM(-1);

    			if ( !capture) {
      	   	   	   printf("Error: Video capture initialization failed.\n");
      	   	   	   break;
    			}
        		videoFrame = cvQueryFrame ( capture );
     			if ( !videoFrame) {
      	   	   	   printf("**Error reading from webcam\n");
      		   	   break;
    			}

    			
    			/* create new image for the grayscale version */
    			convFrame = cvCreateImage( cvSize( videoFrame->width, videoFrame->height ), IPL_DEPTH_8U, 1 );

    			/* create sobel filtered image */
    			convOpencvFrame = cvCreateImage( cvSize( convFrame->width+1, convFrame->height+1 ), IPL_DEPTH_32S, 1 );
			
			/* Process the first frame outside the loop*/
			cvCvtColor(videoFrame,convFrame,CV_RGB2GRAY);
			    		    
    			while ( key != 'q') { 
			          			 
	 		      /* Time to test and benchmark DSP based sobel filter */
			      Time_reset(&sTime);
			      Time_delta(&sTime,&time);
			      /*Find integral image */
			      cvIntegral(convFrame,convOpencvFrame,NULL,NULL);
			      
			      /* get next frame */
			      videoFrame = cvQueryFrame( capture); 
			      if ( !videoFrame) {
	         	         printf("***The End***\n");
	           	         break;
      	      	              }
				  
			      /* Do color conversion */
			      cvCvtColor(videoFrame,convFrame,CV_RGB2GRAY);
			      /* show Image */
			      cvShowImage("video", convOpencvFrame);

			      Time_delta(&sTime,&time); 			         
			      printf("Total execution time = %dus\n",(unsigned int)time);

	      	              key = cvWaitKey( 15 );
       	       		}
			
	       		cvDestroyWindow("video");
    	       		cvReleaseImage(&videoFrame);
    	       		cvReleaseImage(&convFrame);
               		cvReleaseImage(&convOpencvFrame); 
			cvReleaseCapture(&capture);
			}
       	       		break; /* End of sobel test */
			
		case 't': { /* This to test the consistency of algorithm */
			/* Start of integral image test */
			dataImage = cvCreateImageHeader(cvSize(16, 4), IPL_DEPTH_8U, 1);
			integralImage = cvCreateImageHeader(cvSize(17, 5), IPL_DEPTH_32S, 1); 
			unsigned char *data = (unsigned char*)cvAlloc(16*4*sizeof(char));
 			unsigned int *sum = (unsigned int *)cvAlloc(17*5*sizeof(int)); 
			memcpy(data, &intdata[0], 16*4*sizeof(char));
			memset(sum, 0, 17*5*sizeof(int));
			dataImage->imageData = ( char *)data;
    			integralImage->imageData = ( char *)sum;
	    		/* DSP based integral */
    			DSP_cvIntegral(dataImage,integralImage,NULL,NULL);
  		    	
    			ptr = (int *)integralImage->imageData;
    			printf(" The integral image is:\n");
    			for (i=0;i<4;i++){
            	    	    for (j=0;j<16;j++){
                        	printf("%d \t ", ptr[i* 16 + j]);
            	    	    }
            	   	    printf("\n");
    			}

			/* Arm based cvIntegral() */
			cvIntegral(dataImage, integralImage,NULL,NULL);
			
			ptr = (int *)integralImage->imageData;
    			printf(" The integral image is:\n");
    			for (i=0;i<5;i++){
            	    	    for (j=0;j<17;j++){
                        	printf("%d \t ", ptr[i* 17 + j]);
            	    	    }
            	    	    printf("\n");
    			}
			cvFree(&dataImage);
			cvFree(&integralImage);
			}
    			break;   /* End of integral image test */

		default: 
		    printf("Input argument Error:\n Usage :\n./remote_ti_platforms_evm3530_opencv.xv5T i d \n./remote_ti_platforms_evm3530_opencv.xv5T i a\n./remote_ti_platforms_evm3530_opencv.xv5T i t \n"); 
		
		    break;
		}
		break;                

        /* Start RGB to Y test */
	case 'r': {
		cvNamedWindow( "video", CV_WINDOW_AUTOSIZE );
    
    
    	 	capture = cvCaptureFromCAM(-1);

    		if ( !capture) {
      	   	   printf("Error: Video capture initialization failed.\n");
      	   	   break;
    		}
        	videoFrame = cvQueryFrame ( capture );
     		if ( !videoFrame) {
      	   	   printf("**Error reading from webcam\n");
      		   break;
    		}
    			
    		/* create new image for the grayscale version */
    		convFrame = cvCreateImage( cvSize( videoFrame->width, videoFrame->height ), IPL_DEPTH_8U, 1 );

 		unsignedFrame = cvCreateImage( cvSize( videoFrame->width, videoFrame->height ), IPL_DEPTH_8U, 1 );
		 		
    		while ( key != 'q') { 
		    /* Time to test and benchmark DSP based sobel filter */
	            Time_reset(&sTime);
		    Time_delta(&sTime,&time); 
		    /* Process the first frame outside the loop*/			
		    DSP_cvCvtColor(videoFrame,convFrame,CV_RGB2GRAY);
		    	 
		    videoFrame = cvQueryFrame( capture);
		    if ( !videoFrame) {
	               printf("***The End***\n");
	               break;
      	      	    }
		      
		    DSP_cvSyncDSP();
		    cvShowImage("video",convFrame); 		    
		    
		    Time_delta(&sTime,&time); 
		    printf("Total execution time1 = %dus\n",(unsigned int)time);
					      
	      	    key = cvWaitKey( 15 );
			      
       	       	}
			
		
	       	cvDestroyWindow("video");
    	       	cvReleaseImage(&videoFrame);
    	       	cvReleaseImage(&convFrame);
               	}	
       	       	break; 
	        
    	
    	case 's': /* Start of sobel test */
		switch (*argv[2]) {
		    
                    case 't':
		      switch(*argv[3]) {
			case 'd': { //'d' for DSP accelerated

    			    cvNamedWindow( "video", CV_WINDOW_AUTOSIZE );
    
    
    			    capture = cvCreateFileCapture(argv[2]);

    			    if ( !capture) {
      	   	   	       printf("Error: Video not found\n");
      	   	   	       break;
    			    }
        		    videoFrame = cvQueryFrame ( capture );
     			    if ( !videoFrame) {
      	   	   	       printf("**Error reading video\n");
      		   	    break;
    			    }
    			
    			    /* create new image for the grayscale version */
    			    convFrame = cvCreateImage( cvSize( videoFrame->width, videoFrame->height ), IPL_DEPTH_8U, 1 );

    			    /* create sobel filtered image */
    			    convOpencvFrame = cvCreateImage( cvSize( convFrame->width, convFrame->height ), IPL_DEPTH_16S, 1 );
			
			    DSP_cvCvtColor(videoFrame,convFrame,CV_RGB2GRAY);
			    DSP_cvSyncDSP();    		
    			    while ( key != 'q') { 
			          			 
	 		          /* Time to test and benchmark DSP based sobel filter */
			          Time_reset(&sTime);
			          Time_delta(&sTime,&time);
				  DSP_cvSobel(convFrame,convOpencvFrame,1,1,3);	

				  /* get next frame */
			          videoFrame = cvQueryFrame( capture); 
			          if ( !videoFrame) {
	         	             printf("***The End***\n");
	           	             break;
      	      	                  }
				  /* Sync with DSP*/		       	      	  
			          DSP_cvSyncDSP();
				  /* Start RGB To Y conversion of next frame */
			          DSP_cvCvtColor(videoFrame,convFrame,CV_RGB2GRAY);
				  
				  /* Display Filtered Image */
				  cvShowImage("video", convOpencvFrame);
 				  /* Sync DSP */	      	       
			          DSP_cvSyncDSP();
				        	                  
			          Time_delta(&sTime,&time);
			          printf("Total execution time = %dus\n",(unsigned int)time);			          

	      	                  key = cvWaitKey( 15 );
       	       		    }
			
	       		cvDestroyWindow("video");
    	       		cvReleaseImage(&videoFrame);
    	       		cvReleaseImage(&convFrame);
               		cvReleaseImage(&convOpencvFrame); 
			}
       	       		break; /* End of sobel test */

			case 'a': { // 'a' for ARM side

    			    cvNamedWindow( "video", CV_WINDOW_AUTOSIZE );
    
    
    			    capture = cvCreateFileCapture(argv[2]);

    			    if ( !capture) {
      	   	   	       printf("Error: Video not found\n");
      	   	   	       break;
    			    }
        		    videoFrame = cvQueryFrame ( capture );
     			    if ( !videoFrame) {
      	   	   	       printf("**Error reading video\n");
      		   	    break;
    			    }

    			
    			    /* create new image for the grayscale version */
    			    convFrame = cvCreateImage( cvSize( videoFrame->width, videoFrame->height ), IPL_DEPTH_8U, 1 );

    			    /* create sobel filtered image */
    			    convOpencvFrame = cvCreateImage( cvSize( convFrame->width, convFrame->height ), IPL_DEPTH_16S, 1 );
			
			    cvCvtColor(videoFrame,convFrame,CV_RGB2GRAY);   		
    			    while ( key != 'q') { 
			          /* Time to test and benchmark DSP based sobel filter */
			          Time_reset(&sTime);
			          Time_delta(&sTime,&time);
				  cvSobel(convFrame,convOpencvFrame,1,1,3);	

				  /* get next frame */
			          videoFrame = cvQueryFrame( capture); 
			          if ( !videoFrame) {
	         	             printf("***The End***\n");
	           	             break;
      	      	                  }
				  
				  /* Start RGB To Y conversion of next frame */
			          cvCvtColor(videoFrame,convFrame,CV_RGB2GRAY);
				  
				  /* Display Filtered Image */
				  cvShowImage("video", convOpencvFrame);
 				  				        	                  
			          Time_delta(&sTime,&time);
			          printf("Total execution time = %dus\n",(unsigned int)time);			          

	      	                  key = cvWaitKey( 15 );
       	       		    }
			
	       		cvDestroyWindow("video");
    	       		cvReleaseImage(&videoFrame);
    	       		cvReleaseImage(&convFrame);
               		cvReleaseImage(&convOpencvFrame); 
			}
       	       		break; /* End of sobel test */

			default: 
			    printf("Input argument Error:\n Usage :\n./remote_ti_platforms_evm3530_opencv.xv5T s tree.avi d \n./remote_ti_platforms_evm3530_opencv.xv5T s tree.avi a\n"); 
		      }
		      break;
			
		    case 'w':
		      switch(*argv[3]) {
			case 'd': { //'d' for DSP accelerated

    			    cvNamedWindow( "video", CV_WINDOW_AUTOSIZE );
    
    
    			    capture = cvCaptureFromCAM(-1);

    			    if ( !capture) {
      	   	   	       printf("Error: Video capture initialization failed.\n");
      	   	   	       break;
    			    }
        		    videoFrame = cvQueryFrame ( capture );
     			    if ( !videoFrame) {
      	   	   	       printf("**Error reading from webcam\n");
      		   	       break;
    			    }
    			
    			    /* create new image for the grayscale version */
    			    convFrame = cvCreateImage( cvSize( videoFrame->width, videoFrame->height ), IPL_DEPTH_8U, 1 );

    			    /* create sobel filtered image */
    			    convOpencvFrame = cvCreateImage( cvSize( convFrame->width, convFrame->height ), IPL_DEPTH_16S, 1 );
			    /* Create unsigned image */
 			    unsignedFrame = cvCreateImage( cvSize( videoFrame->width, videoFrame->height ), IPL_DEPTH_8U, 1 );
			
			    /* Process the first frame outside the loop*/			
			    DSP_cvCvtColor(videoFrame,convFrame,CV_RGB2GRAY);
			    DSP_cvSyncDSP();    		
    			    while ( key != 'q') { 
			          			 
	 		          /* Time to test and benchmark DSP based sobel filter */
			          Time_reset(&sTime);
			          Time_delta(&sTime,&time);
				  DSP_cvSobel(convFrame,convOpencvFrame,1,1,3);	
				 			 
				  /* get next frame */
			          videoFrame = cvQueryFrame( capture); 
			          if ( !videoFrame) {
	         	             printf("***The End***\n");
	           	             break;
      	      	                  }
				 
				  /* Sync with DSP*/		       	      	  
			          DSP_cvSyncDSP();				  
				  	
				  /* Start RGB To Y conversion of next frame */
			          DSP_cvCvtColor(videoFrame,convFrame,CV_RGB2GRAY);
				  
				  /* Convert signed image to unsign image for better clarity */
				  cvConvert(convOpencvFrame,unsignedFrame);
				  /* Display Filtered Image */
				  cvShowImage("video", unsignedFrame);

 				  /* Sync DSP */	      	       
			          DSP_cvSyncDSP();
				        	                  
			          Time_delta(&sTime,&time);
			          printf("Total execution time = %dus\n",(unsigned int)time);			          

	      	                  key = cvWaitKey( 15 );
       	       		    }
			
	       		cvDestroyWindow("video");
    	       		cvReleaseImage(&videoFrame);
    	       		cvReleaseImage(&convFrame);
               		cvReleaseImage(&convOpencvFrame); 
			}
       	       		break; /* End of sobel test */

			case 'a': { // 'a' for ARM side

    			    cvNamedWindow( "video", CV_WINDOW_AUTOSIZE );
    
    
    			    capture = cvCaptureFromCAM(-1);

    			    if ( !capture) {
      	   	   	       printf("Error: Video capture initialization failed.\n");
      	   	   	       break;
    			    }
        		    videoFrame = cvQueryFrame ( capture );
     			    if ( !videoFrame) {
      	   	   	       printf("**Error reading from webcam\n");
      		   	       break;
    			    }

    			
    			    /* create new image for the grayscale version */
    			    convFrame = cvCreateImage( cvSize( videoFrame->width, videoFrame->height ), IPL_DEPTH_8U, 1 );

    			    /* create sobel filtered image */
    			    convOpencvFrame = cvCreateImage( cvSize( convFrame->width, convFrame->height ), IPL_DEPTH_16S, 1 );
			    /* Create unsigned image */
 			    unsignedFrame = cvCreateImage( cvSize( videoFrame->width, videoFrame->height ), IPL_DEPTH_8U, 1 );
			
			    /* Process the first frame outside the loop*/
			    cvCvtColor(videoFrame,convFrame,CV_RGB2GRAY);
   		
    			    while ( key != 'q') { 
			          /* Time to test and benchmark DSP based sobel filter */
			          Time_reset(&sTime);
			          Time_delta(&sTime,&time);
				  cvSobel(convFrame,convOpencvFrame,1,1,3);	
				 		  
				  /* get next frame */
			          videoFrame = cvQueryFrame( capture); 
			          if ( !videoFrame) {
	         	             printf("***The End***\n");
	           	             break;
      	      	                  }
				  
				  /* Start RGB To Y conversion of next frame */
			          cvCvtColor(videoFrame,convFrame,CV_RGB2GRAY);
				  
				  /* Convert signed image to unsign image for better clarity */
				  cvConvert(convOpencvFrame,unsignedFrame);

				  /* Display Filtered Image */
				  cvShowImage("video", unsignedFrame);
 				  				        	                  
			          Time_delta(&sTime,&time);
			          printf("Total execution time = %dus\n",(unsigned int)time);			          

	      	                  key = cvWaitKey( 15 );
       	       		    }
			
	       		cvDestroyWindow("video");
    	       		cvReleaseImage(&videoFrame);
    	       		cvReleaseImage(&convFrame);
               		cvReleaseImage(&convOpencvFrame); 
			}
       	       		break; /* End of sobel test */

			default: 
			    printf("Input argument Error:\n Usage :\n./remote_ti_platforms_evm3530_opencv.xv5T s webcam d \n./remote_ti_platforms_evm3530_opencv.xv5T s webcam a\n"); 
		      }
		      break;
  		    
		    default:
		    printf("Input argument Error:\n Usage :\n./remote_ti_platforms_evm3530_opencv.xv5T s tree.avi \n./remote_ti_platforms_evm3530_opencv.xv5T s webcam\n"); 
		    break;
		}
	        break;			
		

	case 'd':
	  switch(*argv[2]) {
	    case 'd': { //'d' for DSP accelerated
		int row =63;
		int col =63; 
	        floatDataPtr = (float *)cvAlloc(sizeof(float)*row*col* 2);
		floatOutPtr = (float *)cvAlloc(sizeof(float)*row*col * 2);
		dataImage = cvCreateImageHeader(cvSize(col, row), IPL_DEPTH_32F, 2);
		dataImage->imageData = (char *)floatDataPtr;
		integralImage = cvCreateImageHeader(cvSize(col, row), IPL_DEPTH_32F, 2);
		integralImage->imageData = (char *)floatOutPtr;

		for (i=0; i< row * col * 2; i+=2) {
		    tempFloat += 1.0;
		    floatDataPtr[i] = tempFloat;
		    floatDataPtr[i+1] = 0.0;
	        } 
		
		Time_reset(&sTime);
		Time_delta(&sTime,&time);		
    		DSP_cvDFT(dataImage,integralImage,CV_DXT_FORWARD,0);
		Time_delta(&sTime,&time);
	  	DSP_cvSyncDSP();
				    		
		/* Print the output data for DFT */ 		
    		flPtr = (float *)integralImage->imageData;
    		printf(" The DFT output is:\n");
    		for (i=0;i<integralImage->height;i++){
		    key = 0;
            	    for (j=0;j<integralImage->width * 2;j+=2){
			key++;
                        printf("%f + i%f\t", flPtr[(i*integralImage->width * 2)+j], flPtr[(i*integralImage->width * 2) + j + 1]);
			if ((key % 5) == 0) printf("\n");
            	    }
            	    printf("\n");
    		}
		printf("DSP_cvDFT Total execution time = %dus\n",(unsigned int)time);    
 		cvFree(&floatDataPtr);
		cvFree(&floatOutPtr);
		}
		break;
	    case 'a': { // 'a' for the ARM-side cvDFT reference run
		int row = 63;
		int col = 63;
		floatDataPtr = (float *)cvAlloc(sizeof(float)*row*col*2);
		floatOutPtr = (float *)cvAlloc(sizeof(float)*row*col*2);
		dataImage = cvCreateImageHeader(cvSize(col, row), IPL_DEPTH_32F, 2);
		dataImage->imageData = (char *)floatDataPtr;
		integralImage = cvCreateImageHeader(cvSize(col, row), IPL_DEPTH_32F, 2);
		integralImage->imageData = (char *)floatOutPtr;

		/* fill the real plane with a ramp; the imaginary plane stays zero */
		for (i = 0; i < row * col * 2; i += 2) {
		    tempFloat += 1.0;
		    floatDataPtr[i] = tempFloat;
		    floatDataPtr[i+1] = 0.0;
		}
	
		Time_reset(&sTime);
		Time_delta(&sTime,&time);		
    		cvDFT(dataImage,integralImage,CV_DXT_FORWARD,0);
				
		Time_delta(&sTime,&time);
				  
    		
		/* Print the output data for DFT */ 		
    		flPtr = (float *)integralImage->imageData;
    		printf(" The DFT output is:\n");
    		for (i=0;i<integralImage->height;i++){
		    key = 0;
            	    for (j=0;j<integralImage->width * 2;j+=2){
			key++;
                        printf("%f + i%f\t", flPtr[(i*integralImage->width * 2)+j], flPtr[(i*integralImage->width * 2) + j + 1]);
			if ((key % 5) == 0) printf("\n");
            	    }
            	    printf("\n");
    		}
		printf("cvDFT Total execution time = %dus\n",(unsigned int)time);    
 		cvFree(&floatDataPtr);
		cvFree(&floatOutPtr);
		}
		break;
	    default:
	       printf("Input argument Error:\n Usage :\n./remote_ti_platforms_evm3530_opencv.xv5T d d \n./remote_ti_platforms_evm3530_opencv.xv5T d a\n");
         } // end of switch(*argv[2])
         break;
	default:
	      return -1;
    }
       
    DSP_cvEndDSP();
    return 0;
Пример #20
0
void CLightSet::RunLightPrep(IplImage* src,IplImage* dest)
{
	int M,N;
	M=0;
	N=0;
	if (src->roi)
	{
		 M = src->roi->width;
		 N = src->roi->height;
	}
	else
	{
		 M = src->width;
		 N = src->height;
	}

	CvMat *matD; // create mat for meshgrid frequency matrices
	matD = cvCreateMat(M,N,CV_32FC1);

	CDM(M,N,matD);

	CvMat *matH;
	matH = cvCreateMat(M,N,CV_32FC1); // mat for lowpass filter

	float D0 = 10.0;
	float rH,rL,c;
	rH = 2.0;
	rL = 0.5;
	c  = 1.0;
	lpfilter(matD,matH,D0,rH,rL,c);

	IplImage *srcshift; // shift center
	srcshift = cvCloneImage(src);
	cvShiftDFT(srcshift,srcshift);

	IplImage *log, *temp;
	log = cvCreateImage(cvGetSize(src),IPL_DEPTH_32F,1);
	temp = cvCreateImage(cvGetSize(src),IPL_DEPTH_32F,1);

	cvCvtScale(srcshift,temp,1.0,0);
	cvLog(temp,log);
	cvCvtScale(log,log,-1.0,0);

	CvMat *Fourier;
	Fourier = cvCreateMat( M, N, CV_32FC2 );

	fft2(log,Fourier);
	IplImage* image_im;
	image_im = cvCreateImage(cvGetSize(src),IPL_DEPTH_32F,1);

	cvSplit(Fourier,dest,image_im,0,0);

	cvMul(dest,matH,dest); 
	cvMul(image_im,matH,image_im);

	IplImage *dst;
	dst  = cvCreateImage(cvGetSize(src),IPL_DEPTH_32F,2);

	cvMerge(dest,image_im,0,0,dst);
	cvDFT(dst,dst,CV_DXT_INV_SCALE); 

	cvExp(dst,dst);

	cvZero(dest);
	cvZero(image_im);

	cvSplit(dst,dest,image_im,0,0); 
	// shift back so the image is displayed in its original order
	cvShiftDFT(dest,dest);

	double max, min; // find the output range for normalization
	cvMinMaxLoc(dest, &min, &max, NULL, NULL);

	cvReleaseImage(&image_im);
	cvReleaseImage(&srcshift);
 	cvReleaseImage(&dst);	
	cvReleaseImage(&log);
	cvReleaseImage(&temp);
	cvReleaseMat(&matD);
	cvReleaseMat(&matH);
}
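The helpers CDM and lpfilter are not shown in this example. Judging by the parameters (D0, rH, rL, c), lpfilter most likely builds the classic Gaussian homomorphic transfer function over the distance matrix produced by CDM; a minimal sketch under that assumption (the body below is hypothetical, not the author's code):

/* Hypothetical sketch of lpfilter, assuming the standard homomorphic form:
   H(u,v) = (rH - rL) * (1 - exp(-c * D(u,v)^2 / D0^2)) + rL,
   which damps low frequencies toward rL and lifts high ones toward rH. */
#include <math.h>

void lpfilter(const CvMat* matD, CvMat* matH, float D0, float rH, float rL, float c)
{
	for (int i = 0; i < matD->rows; i++) {
		for (int j = 0; j < matD->cols; j++) {
			float d = (float)cvmGet(matD, i, j); /* distance from DC */
			float h = (rH - rL) * (1.0f - expf(-c * (d * d) / (D0 * D0))) + rL;
			cvmSet(matH, i, j, h);
		}
	}
}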
void
icvCrossCorr( const CvArr* _img, const CvArr* _templ, CvArr* _corr,
              CvPoint anchor, double delta, int borderType )
{
    // disable OpenMP in the case of Visual Studio,
    // otherwise the performance drops significantly
#undef USE_OPENMP
#if !defined _MSC_VER || defined CV_ICC
    #define USE_OPENMP 1
#endif

    const double block_scale = 4.5;
    const int min_block_size = 256;
    cv::Ptr<CvMat> dft_img[CV_MAX_THREADS];
    cv::Ptr<CvMat> dft_templ;
    std::vector<uchar> buf[CV_MAX_THREADS];
    int k, num_threads = 0;
    
    CvMat istub, *img = (CvMat*)_img;
    CvMat tstub, *templ = (CvMat*)_templ;
    CvMat cstub, *corr = (CvMat*)_corr;
    CvSize dftsize, blocksize;
    int depth, templ_depth, corr_depth, max_depth = CV_32F,
        cn, templ_cn, corr_cn, buf_size = 0,
        tile_count_x, tile_count_y, tile_count;

    img = cvGetMat( img, &istub );
    templ = cvGetMat( templ, &tstub );
    corr = cvGetMat( corr, &cstub );

    if( CV_MAT_DEPTH( img->type ) != CV_8U &&
        CV_MAT_DEPTH( img->type ) != CV_16U &&
        CV_MAT_DEPTH( img->type ) != CV_32F &&
        CV_MAT_DEPTH( img->type ) != CV_64F )
        CV_Error( CV_StsUnsupportedFormat,
        "The function supports only 8u, 16u and 32f data types" );

    if( !CV_ARE_DEPTHS_EQ( img, templ ) && CV_MAT_DEPTH( templ->type ) != CV_32F )
        CV_Error( CV_StsUnsupportedFormat,
        "Template (kernel) must be of the same depth as the input image, or be 32f" );
    
    if( !CV_ARE_DEPTHS_EQ( img, corr ) && CV_MAT_DEPTH( corr->type ) != CV_32F &&
        CV_MAT_DEPTH( corr->type ) != CV_64F )
        CV_Error( CV_StsUnsupportedFormat,
        "The output image must have the same depth as the input image, or be 32f/64f" );

    if( (!CV_ARE_CNS_EQ( img, corr ) || CV_MAT_CN(templ->type) > 1) &&
        (CV_MAT_CN( corr->type ) > 1 || !CV_ARE_CNS_EQ( img, templ)) )
        CV_Error( CV_StsUnsupportedFormat,
        "The output must have the same number of channels as the input (when the template has 1 channel), "
        "or the output must have 1 channel when the input and the template have the same number of channels" );

    depth = CV_MAT_DEPTH(img->type);
    cn = CV_MAT_CN(img->type);
    templ_depth = CV_MAT_DEPTH(templ->type);
    templ_cn = CV_MAT_CN(templ->type);
    corr_depth = CV_MAT_DEPTH(corr->type);
    corr_cn = CV_MAT_CN(corr->type);

    CV_Assert( corr_cn == 1 || delta == 0 );

    max_depth = MAX( max_depth, templ_depth );
    max_depth = MAX( max_depth, depth );
    max_depth = MAX( max_depth, corr_depth );
    if( depth > CV_8U )
        max_depth = CV_64F;

    /*if( img->cols < templ->cols || img->rows < templ->rows )
        CV_Error( CV_StsUnmatchedSizes,
        "Such a combination of image and template/filter size is not supported" );*/

    if( corr->rows > img->rows + templ->rows - 1 ||
        corr->cols > img->cols + templ->cols - 1 )
        CV_Error( CV_StsUnmatchedSizes,
        "output image should not be greater than (W + w - 1)x(H + h - 1)" );

    blocksize.width = cvRound(templ->cols*block_scale);
    blocksize.width = MAX( blocksize.width, min_block_size - templ->cols + 1 );
    blocksize.width = MIN( blocksize.width, corr->cols );
    blocksize.height = cvRound(templ->rows*block_scale);
    blocksize.height = MAX( blocksize.height, min_block_size - templ->rows + 1 );
    blocksize.height = MIN( blocksize.height, corr->rows );

    dftsize.width = cvGetOptimalDFTSize(blocksize.width + templ->cols - 1);
    if( dftsize.width == 1 )
        dftsize.width = 2;
    dftsize.height = cvGetOptimalDFTSize(blocksize.height + templ->rows - 1);
    if( dftsize.width <= 0 || dftsize.height <= 0 )
        CV_Error( CV_StsOutOfRange, "the input arrays are too big" );

    // recompute block size
    blocksize.width = dftsize.width - templ->cols + 1;
    blocksize.width = MIN( blocksize.width, corr->cols );
    blocksize.height = dftsize.height - templ->rows + 1;
    blocksize.height = MIN( blocksize.height, corr->rows );

    dft_templ = cvCreateMat( dftsize.height*templ_cn, dftsize.width, max_depth );

#ifdef USE_OPENMP
    num_threads = cvGetNumThreads();
#else
    num_threads = 1;
#endif

    for( k = 0; k < num_threads; k++ )
        dft_img[k] = cvCreateMat( dftsize.height, dftsize.width, max_depth );

    if( templ_cn > 1 && templ_depth != max_depth )
        buf_size = templ->cols*templ->rows*CV_ELEM_SIZE(templ_depth);

    if( cn > 1 && depth != max_depth )
        buf_size = MAX( buf_size, (blocksize.width + templ->cols - 1)*
            (blocksize.height + templ->rows - 1)*CV_ELEM_SIZE(depth));

    if( (corr_cn > 1 || cn > 1) && corr_depth != max_depth )
        buf_size = MAX( buf_size, blocksize.width*blocksize.height*CV_ELEM_SIZE(corr_depth));

    if( buf_size > 0 )
    {
        for( k = 0; k < num_threads; k++ )
            buf[k].resize(buf_size);
    }

    // compute DFT of each template plane
    for( k = 0; k < templ_cn; k++ )
    {
        CvMat dstub, *src, *dst, temp;
        CvMat* planes[] = { 0, 0, 0, 0 };
        int yofs = k*dftsize.height;

        src = templ;
        dst = cvGetSubRect( dft_templ, &dstub, cvRect(0,yofs,templ->cols,templ->rows));
    
        if( templ_cn > 1 )
        {
            planes[k] = templ_depth == max_depth ? dst :
                cvInitMatHeader( &temp, templ->rows, templ->cols, templ_depth, &buf[0][0] );
            cvSplit( templ, planes[0], planes[1], planes[2], planes[3] );
            src = planes[k];
            planes[k] = 0;
        }

        if( dst != src )
            cvConvert( src, dst );

        if( dft_templ->cols > templ->cols )
        {
            cvGetSubRect( dft_templ, dst, cvRect(templ->cols, yofs,
                          dft_templ->cols - templ->cols, templ->rows) );
            cvZero( dst );
        }
        cvGetSubRect( dft_templ, dst, cvRect(0,yofs,dftsize.width,dftsize.height) );
        cvDFT( dst, dst, CV_DXT_FORWARD + CV_DXT_SCALE, templ->rows );
    }

    tile_count_x = (corr->cols + blocksize.width - 1)/blocksize.width;
    tile_count_y = (corr->rows + blocksize.height - 1)/blocksize.height;
    tile_count = tile_count_x*tile_count_y;

#if defined _OPENMP && defined USE_OPENMP
    #pragma omp parallel for num_threads(num_threads) schedule(dynamic)
#endif
    // calculate correlation by blocks
    for( k = 0; k < tile_count; k++ )
    {
#ifdef USE_OPENMP
        int thread_idx = cvGetThreadNum();
#else
        int thread_idx = 0;
#endif
        int x = (k%tile_count_x)*blocksize.width;
        int y = (k/tile_count_x)*blocksize.height;
        int i, yofs;
        CvMat sstub, dstub, *src, *dst, temp;
        CvMat* planes[] = { 0, 0, 0, 0 };
        CvMat* _dft_img = dft_img[thread_idx];
        uchar* _buf = buf_size > 0 ? &buf[thread_idx][0] : 0;
        CvSize csz = { blocksize.width, blocksize.height }, isz;
        int x0 = x - anchor.x, y0 = y - anchor.y;
        int x1 = MAX( 0, x0 ), y1 = MAX( 0, y0 ), x2, y2;
        csz.width = MIN( csz.width, corr->cols - x );
        csz.height = MIN( csz.height, corr->rows - y );
        isz.width = csz.width + templ->cols - 1;
        isz.height = csz.height + templ->rows - 1;
        x2 = MIN( img->cols, x0 + isz.width );
        y2 = MIN( img->rows, y0 + isz.height );
        
        for( i = 0; i < cn; i++ )
        {
            CvMat dstub1, *dst1;
            yofs = i*dftsize.height;

            src = cvGetSubRect( img, &sstub, cvRect(x1,y1,x2-x1,y2-y1) );
            dst = cvGetSubRect( _dft_img, &dstub,
                cvRect(0,0,isz.width,isz.height) );
            dst1 = dst;
            
            if( x2 - x1 < isz.width || y2 - y1 < isz.height )
                dst1 = cvGetSubRect( _dft_img, &dstub1,
                    cvRect( x1 - x0, y1 - y0, x2 - x1, y2 - y1 ));

            if( cn > 1 )
            {
                planes[i] = dst1;
                if( depth != max_depth )
                    planes[i] = cvInitMatHeader( &temp, y2 - y1, x2 - x1, depth, _buf );
                cvSplit( src, planes[0], planes[1], planes[2], planes[3] );
                src = planes[i];
                planes[i] = 0;
            }

            if( dst1 != src )
                cvConvert( src, dst1 );

            if( dst != dst1 )
                cvCopyMakeBorder( dst1, dst, cvPoint(x1 - x0, y1 - y0), borderType );

            if( dftsize.width > isz.width )
            {
                cvGetSubRect( _dft_img, dst, cvRect(isz.width, 0,
                      dftsize.width - isz.width,dftsize.height) );
                cvZero( dst );
            }

            cvDFT( _dft_img, _dft_img, CV_DXT_FORWARD, isz.height );
            cvGetSubRect( dft_templ, dst,
                cvRect(0,(templ_cn>1?yofs:0),dftsize.width,dftsize.height) );

            cvMulSpectrums( _dft_img, dst, _dft_img, CV_DXT_MUL_CONJ );
            cvDFT( _dft_img, _dft_img, CV_DXT_INVERSE, csz.height );

            src = cvGetSubRect( _dft_img, &sstub, cvRect(0,0,csz.width,csz.height) );
            dst = cvGetSubRect( corr, &dstub, cvRect(x,y,csz.width,csz.height) );

            if( corr_cn > 1 )
            {
                planes[i] = src;
                if( corr_depth != max_depth )
                {
                    planes[i] = cvInitMatHeader( &temp, csz.height, csz.width,
                                                 corr_depth, _buf );
                    cvConvertScale( src, planes[i], 1, delta );
                }
                cvMerge( planes[0], planes[1], planes[2], planes[3], dst );
                planes[i] = 0;                    
            }
            else
            {
                if( i == 0 )
                    cvConvertScale( src, dst, 1, delta );
                else
                {
                    if( max_depth > corr_depth )
                    {
                        cvInitMatHeader( &temp, csz.height, csz.width,
                                         corr_depth, _buf );
                        cvConvert( src, &temp );
                        src = &temp;
                    }
                    cvAcc( src, dst );
                }
            }
        }
    }
}
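Stripped of the tiling, threading, and depth-conversion logic, the core of icvCrossCorr is the convolution theorem: spatial correlation becomes an element-wise conjugate product in the frequency domain. A self-contained single-channel illustration (a simplified sketch, not the OpenCV internals):

/* Full-image cross-correlation via the convolution theorem:
   corr = IDFT( DFT(img) .* conj(DFT(templ)) ), with both inputs
   zero-padded to a common optimal DFT size. corr must be
   (img->rows + templ->rows - 1) x (img->cols + templ->cols - 1), CV_32FC1. */
static void crosscorr_sketch( const CvMat* img, const CvMat* templ, CvMat* corr )
{
    CvSize dftsize;
    CvMat tmp;
    dftsize.width  = cvGetOptimalDFTSize( img->cols + templ->cols - 1 );
    dftsize.height = cvGetOptimalDFTSize( img->rows + templ->rows - 1 );

    CvMat* dft_img   = cvCreateMat( dftsize.height, dftsize.width, CV_32FC1 );
    CvMat* dft_templ = cvCreateMat( dftsize.height, dftsize.width, CV_32FC1 );

    cvZero( dft_img );
    cvGetSubRect( dft_img, &tmp, cvRect(0, 0, img->cols, img->rows) );
    cvConvert( img, &tmp );
    cvZero( dft_templ );
    cvGetSubRect( dft_templ, &tmp, cvRect(0, 0, templ->cols, templ->rows) );
    cvConvert( templ, &tmp );

    /* nonzero_rows lets cvDFT skip the zero padding during the row passes */
    cvDFT( dft_img, dft_img, CV_DXT_FORWARD, img->rows );
    cvDFT( dft_templ, dft_templ, CV_DXT_FORWARD, templ->rows );

    /* conjugate product == correlation; a plain product would be convolution */
    cvMulSpectrums( dft_img, dft_templ, dft_img, CV_DXT_MUL_CONJ );
    cvDFT( dft_img, dft_img, CV_DXT_INV_SCALE, corr->rows );

    cvGetSubRect( dft_img, &tmp, cvRect(0, 0, corr->cols, corr->rows) );
    cvCopy( &tmp, corr, NULL );

    cvReleaseMat( &dft_img );
    cvReleaseMat( &dft_templ );
}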
Пример #22
0
void tips(IplImage *img)
{
	FILE *tipgraph = fopen("tipgraph.dat","w");
	//FILE *freq = fopen("freq.dat", "w");

	int i, j, curr_pixel, step, x[640];
	tipcount=0;
	uchar *data = (uchar*)img->imageData;
	step = img->widthStep;
	CvMat *dst = cvCreateMat(1, 640, CV_32FC1 );
	CvMat *src = cvCreateMat(1, 640, CV_32FC1 );
	
	for(i=0; i<width; i++)
	{
		x[i] = 0;
		for(j=0; j<height; j++)
		{
			curr_pixel = data[j*step + i];
			if((curr_pixel == 255) )
			{
				//data[x[i]*step + i] = 0;
				x[i] = j;
			}
			//else data[j*step + i] = 0;
		}
		cvSetReal1D(src, i, x[i]);
		fprintf(tipgraph, "%d\n", x[i]);
			//printf("hi");
	}
	
    cvDFT(src, dst, CV_DXT_SCALE, 0);
    for(i=0; i<width; i++)
    {
		//fprintf(freq, "%f\n", abs(cvGetReal1D(dst, i)));
		if(i>20) cvSetReal1D(dst, i, 0);
	}
    cvDFT(dst, src, CV_DXT_INVERSE, 0);
    for(i=1; i<width-1; i++)
    {
		if((cvGetReal1D(src, i+1) - cvGetReal1D(src, i) <= 0 && cvGetReal1D(src, i) - cvGetReal1D(src, i-1) >= 0) 
		//|| (cvGetReal1D(src, i+1) - cvGetReal1D(src, i) <= 0 && cvGetReal1D(src, i+1) - cvGetReal1D(src, i) >=-5 && cvGetReal1D(src, i) - cvGetReal1D(src, i-1) <= 0 && cvGetReal1D(src, i) - cvGetReal1D(src, i-1) >= -5)
		)
		{
			tips_position[tipcount][0] = 0;
			for(j=-3; j<=3; j++)
			{
				if((i+j)<640 && (i+j)>0)
				{
					tips_position[tipcount][0] +=  x[i+j];
				}
			}
			tips_position[tipcount][0] = (tips_position[tipcount][0])/7;
				
			if(tips_position[tipcount][0] > 40)
			{
				if((tipcount > 1) && (i-tips_position[tipcount-1][1]) < 55)
				{
					tips_position[tipcount-1][0] = (tips_position[tipcount-1][0] + tips_position[tipcount][0]) / 2;
					tips_position[tipcount-1][1] = (tips_position[tipcount-1][1] + i) / 2;
				}
				else
				{
					tips_position[tipcount++][1] = i;
				}
			}
		}
		
		fprintf(tipgraph, "%f\n", cvGetReal1D(src, i));
	}
    //printf("%d\t",tipcount);
	fclose(tipgraph);
	correlated_tips();
	//fclose(freq);		
}
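The DFT round trip in tips() is a reusable trick: keep only the leading entries of a real 1-D signal's spectrum and transform back, and you get a smoothed curve whose local maxima are far more stable than those of the raw profile. As a standalone helper (the cutoff is a parameter here rather than the hard-coded 20 above):

/* Low-pass a real 1-D signal in place by truncating its spectrum.
   The forward DFT of a 1xN CV_32FC1 array is CCS-packed, with frequency
   increasing along the array, so zeroing the tail removes high frequencies. */
static void dft_lowpass_1d( CvMat* signal /* 1 x N, CV_32FC1 */, int cutoff )
{
	int i;
	CvMat* spec = cvCreateMat( 1, signal->cols, CV_32FC1 );
	cvDFT( signal, spec, CV_DXT_FORWARD, 0 );
	for ( i = cutoff; i < spec->cols; i++ )
		cvSetReal1D( spec, i, 0 );
	cvDFT( spec, signal, CV_DXT_INV_SCALE, 0 );
	cvReleaseMat( &spec );
}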
Пример #23
0
void defense::ImgToContours(IplImage* TheInput,int PlaneNumber, int CMode){
    
    
    CvMemStorage* G_storage = cvCreateMemStorage(0);
    CvSeq* contour = 0;
    IplImage * Maska;
    Maska = cvCreateImage( cvGetSize(TheInput),IPL_DEPTH_8U,1); 
    int TotalEllip=0;
    CvMat* TempMat;
    TempMat = cvCreateMat(FFTSize, 1, CV_32FC2);
    vector <ofVec2f> TempVec;
    ArrayOfContours.clear();
    ContourGray.clear();
    for (int k=0;k<PlaneNumber;k++){
        if (CMode==0) {
            cvInRangeS(TheInput,cvScalarAll((k-1)*255/(float)PlaneNumber),cvScalarAll(k*255/(float)PlaneNumber),Maska);
        }
        else{  
        cvThreshold(TheInput, Maska, k*255/(float)PlaneNumber, 255, CV_THRESH_BINARY_INV);
        }
        cvSmooth(Maska,Maska,CV_MEDIAN,3);  
        int NC=cvFindContours( Maska, G_storage, &contour, sizeof(CvContour), 
                              CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
  //                            CV_RETR_EXTERNAL, CV_CHAIN_APPROX_TC89_L1 );
        for( ; contour != 0; contour = contour->h_next )
        {
            
            if ((contour->total > 10 )&&(TotalEllip<MaxEllip)){
                
                CvMat* CountArray;
                CvBox2D Ellipdesc;
                CvPoint2D32f * OtroArray;
                OtroArray = new CvPoint2D32f[contour->total];
                for(int q=0;q<contour->total;q++){
                    CvPoint* p = (CvPoint*)cvGetSeqElem( contour, q );
                    OtroArray[q].x = (float)p->x;
                    OtroArray[q].y=(float)p->y;
                }
                CountArray=cvCreateMatHeader(contour->total,1,CV_32FC2);
                cvInitMatHeader(CountArray,contour->total,1,CV_32FC2,OtroArray);
                cvResize(CountArray, TempMat);	
                cvDFT(TempMat, TempMat, CV_DXT_FORWARD);
                int TheLimit = (int)((FFTSize/2)*Slider1/127.0) -1;
                
                for (int q =0; q < TheLimit; q++) { 
                    ((float*)(TempMat->data.ptr + TempMat->step*((FFTSize/2)-q)))[0] = 0.0;
                    ((float*)(TempMat->data.ptr + TempMat->step*((FFTSize/2)-q)))[1] = 0.0;
                    ((float*)(TempMat->data.ptr + TempMat->step*((FFTSize/2)+q)))[0] = 0.0;
                    ((float*)(TempMat->data.ptr + TempMat->step*((FFTSize/2)+q)))[1] = 0.0;                 
                 }
                 cvDFT(TempMat, TempMat, CV_DXT_INV_SCALE);
                TempVec.clear();
                for (int q=0; q < FFTSize;q++){
                    TempVec.push_back(ofVec2f (((const float*)(TempMat->data.ptr + TempMat->step*q))[0],
                    ((const float*)(TempMat->data.ptr + TempMat->step*q))[1]));
                }
                ArrayOfContours.push_back(TempVec);
                ContourGray.push_back(k*255/(float)PlaneNumber);
                
                TotalEllip++;
                delete [] OtroArray;
                cvReleaseMat(&CountArray);  
            } // end of if contour-> total
            
            
        } // end of for contours
        
        
    } // end for planenumber
    
    cvReleaseMat(&TempMat);
    cvReleaseImage(&Maska);
    
    // releasing mem storages
    if (contour!=NULL){cvClearSeq(contour);}
    //cvClearMemStorage(storage);
    if (G_storage!=NULL){cvReleaseMemStorage(&G_storage);}
  
}
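A note on the band being blanked in the loop above: cvDFT leaves the spectrum unshifted, so for a length-N transform the coefficient at index k represents frequency k for k <= N/2 and the negative frequency k - N for k > N/2. The highest frequencies therefore sit around index N/2, in the middle of the array, which is why zeroing a symmetric window around FFTSize/2 acts as a low-pass filter on the complex contour: classic Fourier-descriptor smoothing, with Slider1 controlling how much of the band survives.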
Пример #24
0
ReturnType FrequencyFilter::onExecute()
{
    // grab an image from the in-port
    opros_any *pData = ImageIn.pop();
    RawImage result;

    if(pData != NULL) {

        // fetch the image from the port
        RawImage Image = ImageIn.getContent(*pData);
        RawImageData *RawImage = Image.getImage();

        // get the size of the current image
        m_in_width = RawImage->getWidth();
        m_in_height = RawImage->getHeight();

        // check that the image dimensions are powers of two
        if(!Check2Square(m_in_width) || !Check2Square(m_in_height)) {

            std::cout << "Image dimensions are not a power of 2" << std::endl;

            return OPROS_BAD_INPUT_PARAMETER;

        }

        // check that the image is square (width == height)
        if(m_in_width != m_in_height) {

            std::cout << "Width and height of the image are not equal" << std::endl;

            return OPROS_BAD_INPUT_PARAMETER;

        }

        // allocate the original-image buffer
        if(m_orig_img == NULL) {
            m_orig_img = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_8U, 3);
        }
        // allocate the grayscale buffer
        if(m_gray_img == NULL) {
            m_gray_img = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_8U, 1);
        }
        // allocate the result buffer
        if(m_result_img == NULL) {
            m_result_img = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_8U, 1);
        }
        // allocate the final output buffer
        if(m_final_img == NULL) {
            m_final_img = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_8U, 3);
        }
        // allocate the Re (real) plane
        if(m_image_Re == NULL) {
            m_image_Re = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_32F, 1);
        }
        // allocate the Im (imaginary) plane
        if(m_image_Im == NULL) {
            m_image_Im = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_32F, 1);
        }
        // allocate the 2-channel frequency-domain buffer
        if(m_pDFT_A == NULL) {
            m_pDFT_A = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_32F, 2);
        }

        // copy the incoming frame into the working buffer
        memcpy(m_orig_img->imageData, RawImage->getData(), RawImage->getSize());

        // run only when exactly one of the low-/high-pass modes is selected
        if((m_low_Pass_Filtering != "none" || m_high_Pass_Filtering != "none") &&
                (m_low_Pass_Filtering == "none" || m_high_Pass_Filtering == "none")) {

            // convert the input image to grayscale
            cvCvtColor( m_orig_img, m_gray_img, CV_BGR2GRAY );

            // change depth for frequency-domain processing
            cvConvertScale(m_gray_img, m_image_Re);		// 8U -> 32F
            // clear m_image_Im
            cvZero(m_image_Im);

            // shift center:
            // flip the sign of odd-indexed pixels of the real plane so that
            // the origin of the Fourier spectrum lands at the image center
            ChangePosition(m_image_Re);

            cvMerge(m_image_Re, m_image_Im, NULL, NULL, m_pDFT_A);

            // forward Fourier transform of m_pDFT_A
            cvDFT(m_pDFT_A, m_pDFT_A, CV_DXT_FORWARD);

            // ideal low-pass filtering
            if(m_low_Pass_Filtering == "ideal" && m_high_Pass_Filtering == "none") {

                IdealLowPassFiltering(m_pDFT_A, m_cutoff_Frequency);

            }
            // Butterworth low-pass filtering
            else if(m_low_Pass_Filtering == "butterworth" && m_high_Pass_Filtering == "none") {

                ButterworthLowPassFiltering(m_pDFT_A, m_cutoff_Frequency, 2);

            }
            // Gaussian low-pass filtering
            else if(m_low_Pass_Filtering == "gaussian" && m_high_Pass_Filtering == "none") {

                GaussianLowPassFiltering(m_pDFT_A, m_cutoff_Frequency);

            }
            // ideal high-pass filtering
            else if(m_high_Pass_Filtering == "ideal" && m_low_Pass_Filtering == "none") {

                IdealHighPassFiltering(m_pDFT_A, m_cutoff_Frequency);

            }
            // Butterworth high-pass filtering
            else if(m_high_Pass_Filtering == "butterworth" && m_low_Pass_Filtering == "none") {

                ButterworthHighPassFiltering(m_pDFT_A, m_cutoff_Frequency, 2);

            }
            // Gaussian high-pass filtering
            else if(m_high_Pass_Filtering == "gaussian" && m_low_Pass_Filtering == "none") {

                GaussianHighpassFiltering(m_pDFT_A, m_cutoff_Frequency);


            }
            else {
                //none
            }

            // inverse Fourier transform
            cvDFT(m_pDFT_A, m_pDFT_A, CV_DXT_INV_SCALE);

            // split the multi-channel matrix into single-channel Re and Im planes
            cvSplit(m_pDFT_A, m_image_Re, m_image_Im, NULL, NULL);

            // low-pass only
            if((m_low_Pass_Filtering == "ideal" || m_low_Pass_Filtering == "butterworth" || m_low_Pass_Filtering == "gaussian")
                    && m_high_Pass_Filtering == "none") {
                ChangePosition(m_image_Re);
                cvScale(m_image_Re, m_result_img, 1);
            }

            // high-pass only
            if((m_high_Pass_Filtering == "ideal" || m_high_Pass_Filtering == "butterworth" || m_high_Pass_Filtering == "gaussian")
                    && m_low_Pass_Filtering == "none") {

                // compute the spectrum magnitude: Mag = sqrt(Re^2 + Im^2)
                cvPow(m_image_Re, m_image_Re, 2.0);
                cvPow(m_image_Im, m_image_Im, 2.0);
                cvAdd(m_image_Re, m_image_Im, m_image_Re);
                cvPow(m_image_Re, m_image_Re, 0.5);

                // scale the magnitude pixels into the displayable 0..255 range
                double min_val, max_val;
                cvMinMaxLoc(m_image_Re, &min_val, &max_val, NULL, NULL);
                cvScale(m_image_Re, m_result_img, 255.0/max_val);
            }

            // convert the 1-channel result into a 3-channel image
            cvMerge(m_result_img, m_result_img, m_result_img, NULL, m_final_img);

            // push the output:
            // get the RawImage data pointer
            RawImageData *pimage = result.getImage();

            // resize to the processed image's size and channel count
            pimage->resize(m_final_img->width, m_final_img->height, m_final_img->nChannels);

            // total image size in bytes (pixels * channels)
            int size = m_final_img->width * m_final_img->height * m_final_img->nChannels;

            // pointer to the raw pixel data
            unsigned char *ptrdata = pimage->getData();

            // copy the current frame
            memcpy(ptrdata, m_final_img->imageData, size);

            // send through the out-port
            opros_any mdata = result;
            ImageOut.push(result); // forward downstream

            delete pData;

        } else {

            // no filtering selected: push the original image through unchanged
            // get the RawImage data pointer
            RawImageData *pimage = result.getImage();

            // resize to the original image's size and channel count
            pimage->resize(m_orig_img->width, m_orig_img->height, m_orig_img->nChannels);

            // total image size in bytes (pixels * channels)
            int size = m_orig_img->width * m_orig_img->height * m_orig_img->nChannels;

            // pointer to the raw pixel data
            unsigned char *ptrdata = pimage->getData();

            // copy the current frame
            memcpy(ptrdata, m_orig_img->imageData, size);

            // send through the out-port
            opros_any mdata = result;
            ImageOut.push(result); // forward downstream

            delete pData;

        }

    }

    return OPROS_SUCCESS;
}
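The filter helpers (IdealLowPassFiltering and friends) are not part of this listing. Because ChangePosition has already centered the spectrum, an ideal low-pass reduces to zeroing every coefficient farther than the cutoff radius from the image center; a sketch under that assumption (the body below is hypothetical):

#include <math.h>

// Hypothetical body for IdealLowPassFiltering: dft is the centered
// 2-channel (Re, Im) IPL_DEPTH_32F spectrum allocated as m_pDFT_A above.
static void IdealLowPassFiltering_sketch(IplImage* dft, double cutoff)
{
    int cy = dft->height / 2, cx = dft->width / 2;
    for (int y = 0; y < dft->height; y++) {
        float* row = (float*)(dft->imageData + y * dft->widthStep);
        for (int x = 0; x < dft->width; x++) {
            double d = sqrt((double)((y - cy)*(y - cy) + (x - cx)*(x - cx)));
            if (d > cutoff) {
                row[2*x]     = 0.0f; // Re
                row[2*x + 1] = 0.0f; // Im
            }
        }
    }
}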
Пример #25
0
static CvPoint alignImages( const CvMat* img, const CvMat* pano )
{
  CvSize img_size  = cvGetSize(img);
  CvSize pano_size = cvGetSize(pano);

  // to handle wraparound correctly, I set the DFT size to be at least
  // double the smaller of the two dimensions in each direction
#define max(a,b) ( (a) > (b) ? (a) : (b) )
#define min(a,b) ( (a) < (b) ? (a) : (b) )
  CvSize dft_size =
    cvSize( max( 2*min(img_size.width,  pano_size.width),  max(img_size.width,  pano_size.width)  ),
            max( 2*min(img_size.height, pano_size.height), max(img_size.height, pano_size.height) ) );
#undef min
#undef max

#define DoDft(mat)                                                      \
  CvMat* mat ## _float = cvCreateMat( dft_size.height, dft_size.width, CV_32FC1); \
  assert( mat ## _float );                                              \
                                                                        \
  CvMat* mat ## _dft = cvCreateMat( dft_size.height, dft_size.width, CV_32FC1); \
  assert( mat ## _dft );                                                \
                                                                        \
  cvSet( mat ## _float,                                                 \
         cvAvg(mat, NULL),                                              \
         NULL );                                                        \
                                                                        \
  CvMat mat ## _float_origsize;                                         \
  cvGetSubRect( mat ## _float, &mat ## _float_origsize,                 \
                cvRect(0,0, mat ## _size.width, mat ## _size.height ) ); \
  cvConvert( mat,  &mat ## _float_origsize );                           \
  cvDFT(mat ## _float,  mat ## _dft,  CV_DXT_FORWARD, 0)



  DoDft(img);
  DoDft(pano);

  CvMat* correlation            = cvCreateMat( dft_size.height, dft_size.width, CV_32FC1);
  CvMat* correlation_freqdomain = cvCreateMat( dft_size.height, dft_size.width, CV_32FC1);
  assert(correlation_freqdomain);

  cvMulSpectrums(pano_dft, img_dft, correlation_freqdomain, CV_DXT_MUL_CONJ);
  cvDFT(correlation_freqdomain, correlation, CV_DXT_INVERSE, 0);


  double minval, maxval;
  CvPoint minpoint, maxpoint;
  cvMinMaxLoc( correlation, &minval, &maxval, &minpoint, &maxpoint, NULL );


  printf("right answer: (644,86)\n");


  cvSaveImage( "iedges.png", img_float ,0 );
  cvSaveImage( "pedges.png", pano_float,0 );

  CvMat* correlation_img = cvCreateMat( dft_size.height, dft_size.width, CV_8UC1);
  cvConvertScale( correlation, correlation_img,
                  255.0/(maxval - minval),
                  -minval*255.0/(maxval - minval) );
  cvSaveImage( "corr.png", correlation_img ,0 );
  cvReleaseMat( &correlation_img );



  cvReleaseMat( &correlation );
  cvReleaseMat( &correlation_freqdomain );
  cvReleaseMat( &img_float );
  cvReleaseMat( &pano_float );
  cvReleaseMat( &img_dft );
  cvReleaseMat( &pano_dft );

  return maxpoint;
}
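Two details are worth noting in alignImages: the DFT buffers are pre-filled with the image mean (cvSet with cvAvg) before the pixels are copied in, so the zero-padded border does not bias the correlation, and the location of the maximum of the inverse-transformed conjugate product is the estimated translation. A hypothetical call site (img and pano are single-channel CvMat* prepared by the caller):

/* img and pano are hypothetical single-channel inputs loaded elsewhere */
CvPoint shift = alignImages( img, pano );
printf( "correlation peak at (%d, %d)\n", shift.x, shift.y );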
Пример #26
0
IplImage* 
ComputeSaliency(IplImage* image, int thresh, int scale) {
  //given a one channel image
  unsigned int size = floor(pow(2,scale)); //the size to do the saliency at

  IplImage* bw_im = cvCreateImage(cvSize(size,size), 
				  IPL_DEPTH_8U,1);
  cvResize(image, bw_im);
  IplImage* realInput = cvCreateImage( cvGetSize(bw_im), IPL_DEPTH_32F, 1);
  
  IplImage* imaginaryInput = cvCreateImage( cvGetSize(bw_im), IPL_DEPTH_32F, 1);
  IplImage* complexInput = cvCreateImage( cvGetSize(bw_im), IPL_DEPTH_32F, 2);

  cvScale(bw_im, realInput, 1.0/255.0);
  cvZero(imaginaryInput);
  cvMerge(realInput, imaginaryInput, NULL, NULL, complexInput);
  CvMat* dft_A = cvCreateMat( size, size, CV_32FC2 );

  // copy A to dft_A and pad dft_A with zeros
  CvMat tmp;
  cvGetSubRect( dft_A, &tmp, cvRect(0,0, size,size));
  cvCopy( complexInput, &tmp );
  //  cvZero(&tmp);

  cvDFT( dft_A, dft_A, CV_DXT_FORWARD, size );
  cvSplit( dft_A, realInput, imaginaryInput, NULL, NULL );
  // Compute the phase angle 
  IplImage* image_Mag = cvCreateImage(cvSize(size, size), IPL_DEPTH_32F, 1);
  IplImage* image_Phase = cvCreateImage(cvSize(size, size), IPL_DEPTH_32F, 1);
    

  //compute the phase of the spectrum
  cvCartToPolar(realInput, imaginaryInput, image_Mag, image_Phase, 0);
  
  IplImage* log_mag = cvCreateImage(cvSize(size, size), IPL_DEPTH_32F, 1);
  cvLog(image_Mag, log_mag);
  //Box filter the magnitude, then take the difference

  IplImage* log_mag_Filt = cvCreateImage(cvSize(size, size), 
					   IPL_DEPTH_32F, 1);
  CvMat* filt = cvCreateMat(3,3, CV_32FC1);
  cvSet(filt,cvScalarAll(1.0/9.0));
  cvFilter2D(log_mag, log_mag_Filt, filt);
  cvReleaseMat(&filt);

  cvSub(log_mag, log_mag_Filt, log_mag);
  
  cvExp(log_mag, image_Mag);
   
  cvPolarToCart(image_Mag, image_Phase, realInput, imaginaryInput,0);

  cvMerge(realInput, imaginaryInput, NULL, NULL, dft_A);
  cvDFT( dft_A, dft_A, CV_DXT_INV_SCALE, size);

  cvAbs(dft_A, dft_A);
  cvMul(dft_A,dft_A, dft_A);
  cvGetSubRect( dft_A, &tmp,  cvRect(0,0, size,size));
  cvCopy( &tmp, complexInput);
  cvSplit(complexInput, realInput, imaginaryInput, NULL,NULL);

  IplImage* result_image = cvCreateImage(cvGetSize(image),IPL_DEPTH_32F, 1);
  double minv, maxv;
  CvPoint minl, maxl;
  cvSmooth(realInput,realInput);
  cvSmooth(realInput,realInput);
  cvMinMaxLoc(realInput,&minv,&maxv,&minl,&maxl);
  printf("Max value %lf, min %lf\n", maxv,minv);
  cvScale(realInput, realInput, 1.0/(maxv-minv), 1.0*(-minv)/(maxv-minv));
  cvResize(realInput, result_image);
  double threshold = thresh/100.0*cvAvg(realInput).val[0];
  cvThreshold(result_image, result_image, threshold, 1.0, CV_THRESH_BINARY);
  IplImage* final_result = cvCreateImage(cvGetSize(image),IPL_DEPTH_8U, 1);
  cvScale(result_image, final_result, 255.0, 0.0);
  cvReleaseImage(&result_image);
  //cvReleaseImage(&realInput);
  cvReleaseImage(&imaginaryInput);
  cvReleaseImage(&complexInput);
  cvReleaseMat(&dft_A);
  cvReleaseImage(&bw_im);

  cvReleaseImage(&image_Mag);
  cvReleaseImage(&image_Phase);

  cvReleaseImage(&log_mag);
  cvReleaseImage(&log_mag_Filt);
  cvReleaseImage(&bw_im);
  return final_result;
  //return bw_im;
}
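ComputeSaliency is the spectral-residual saliency detector of Hou & Zhang (CVPR 2007): the log-magnitude spectrum minus its 3x3 box-filtered version is the "residual", which is recombined with the original phase, transformed back, squared, and smoothed to form the saliency map, then thresholded at a percentage of its mean. A hypothetical call:

/* hypothetical usage: threshold at 60% of mean saliency on a 64x64 (2^6) grid */
IplImage* gray = cvLoadImage( "frame.png", CV_LOAD_IMAGE_GRAYSCALE );
IplImage* sal  = ComputeSaliency( gray, 60, 6 );
cvSaveImage( "saliency.png", sal, 0 );
cvReleaseImage( &sal );
cvReleaseImage( &gray );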
Пример #27
0
static IplImage*
get_convolution (const IplImage *image,
                 const IplImage *filter)
{
  CvSize dft_size;
  IplImage *reversed_image, *reversed_filter;
  IplImage *dft_image, *dft_filter, *dft_res;
  IplImage *res;

  dft_size.height = cvGetOptimalDFTSize(image->height + filter->height - 1);
  dft_size.width = cvGetOptimalDFTSize(image->width + filter->width - 1);

  res = cvCreateImage(cvSize(image->width,
                             image->height),
                      IPL_DEPTH_32F,
                      N_CHANNELS_GRAY);
  reversed_image = cvCreateImage(cvGetSize(image),
                                 IPL_DEPTH_8U,
                                 N_CHANNELS_GRAY);
  reversed_filter = cvCreateImage(cvGetSize(filter),
                                  IPL_DEPTH_8U,
                                  N_CHANNELS_GRAY);

  cvNot(image, reversed_image);
  cvNot(filter, reversed_filter);

  dft_image = cvCreateImage(dft_size,
                            IPL_DEPTH_32F,
                            N_CHANNELS_GRAY);
  cvSet(dft_image, cvScalar(0, 0, 0, 0), NULL);
  dft_filter = cvCreateImage(dft_size,
                             IPL_DEPTH_32F,
                             N_CHANNELS_GRAY);
  cvSet(dft_filter, cvScalar(0, 0, 0, 0), NULL);

  cvSetImageROI(dft_image, cvRect(0, 0,
                                  reversed_image->width,
                                  reversed_image->height));
  cvSetImageROI(dft_filter, cvRect(0, 0,
                                   reversed_filter->width,
                                   reversed_filter->height));
  double scaling_factor = 1.0/255;
  cvConvertScale(reversed_image, dft_image, scaling_factor, 0);
  cvConvertScale(reversed_filter, dft_filter, scaling_factor, 0);
  cvResetImageROI(dft_image);
  cvResetImageROI(dft_filter);


  cvDFT(dft_image, dft_image, CV_DXT_FORWARD, image->height);
  cvDFT(dft_filter, dft_filter, CV_DXT_FORWARD, filter->height);

  dft_res = cvCreateImage(dft_size,
                          IPL_DEPTH_32F,
                          N_CHANNELS_GRAY);

  cvMulSpectrums(dft_image, dft_filter, dft_res, 0);

  cvDFT(dft_res, dft_res, CV_DXT_INVERSE, res->height);
  cvSetImageROI(dft_res, cvRect(0, 0, res->width, res->height));
  cvCopy(dft_res, res, NULL);
  cvResetImageROI(dft_res);

  cvReleaseImage(&reversed_filter);
  cvReleaseImage(&reversed_image);
  cvReleaseImage(&dft_image);
  cvReleaseImage(&dft_filter);
  cvReleaseImage(&dft_res);

  return res;
}
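Two things distinguish this convolution helper from the correlation-based examples above: cvMulSpectrums is called with flags 0 (a plain complex product, hence convolution rather than conjugate-product correlation), and the fourth argument to cvDFT (image->height, filter->height, res->height) tells OpenCV how many rows are actually nonzero or needed, so it can skip work on the zero padding. A hypothetical call:

/* page and glyph are hypothetical 8-bit single-channel images; the inputs
   are inverted internally, so dark strokes produce strong responses */
IplImage* response = get_convolution( page, glyph );
/* ... scan response for maxima ... */
cvReleaseImage( &response );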
Пример #28
0
IplImage *wiener_filter(IplImage* input_file, double snr, IplImage *filter_file){
	int height = input_file->height;
	int width = input_file->width;
	int h, w;

	// convert to CvMat
	CvMat *input_real = cvCreateMat( height, width, CV_64FC1 );
	CvMat *input_imag = cvCreateMat( height, width, CV_64FC1 );

	CvMat *output_real = cvCreateMat( height, width, CV_64FC1 );
	CvMat *output_imag = cvCreateMat( height, width, CV_64FC1 );

	CvMat *filter_real = cvCreateMat( height, width, CV_64FC1 );
	CvMat *filter_imag = cvCreateMat( height, width, CV_64FC1 );

	// data layout for cvDFT:
	// cvDFT here operates on a 2-channel, double-precision CvMat
	CvMat *fft_mat = cvCreateMat( height, width, CV_64FC2 );

	// input file
	cvConvert( input_file, input_real ); // IplImage -> input_real
	cvSetZero( input_imag ); // input_imag = 0
	cvMerge( input_real, input_imag, NULL, NULL, fft_mat ); // ( input_real, input_imag ) -> fft_mat
	cvDFT( fft_mat, fft_mat, CV_DXT_FORWARD , 0);// FFT
	cvSplit( fft_mat, input_real, input_imag, NULL, NULL ); // fft_mat -> ( input_real, input_imag )

	// filter file
	cvSetZero( filter_real );

	// convert filter_file to filter_real
	for( h = 0; h < filter_file->height; ++h ){
		for( w = 0; w < filter_file->width; ++w ){

			int y = h - filter_file->height/2;
			int x = w - filter_file->width/2;
			y += (y<0) ? height : 0;
			x += (x<0) ? width : 0;

			CV_MAT_ELEM( *filter_real, double, y, x ) = CV_IMAGE_ELEM( filter_file, uchar, h, w );
		}
	}
	cvNormalize( filter_real, filter_real, 1.0, 0.0, CV_L1, NULL );
	cvSetZero( filter_imag ); // filter_imag = 0
	cvMerge( filter_real, filter_imag, NULL, NULL, fft_mat ); // ( filter_real, filter_imag ) -> fft_mat
	cvDFT( fft_mat, fft_mat, CV_DXT_FORWARD , 0);// FFT
	cvSplit( fft_mat, filter_real, filter_imag, NULL, NULL ); // fft_mat -> ( filter_real, filter_imag )

	// Wiener calculation
	for( h = 0; h < height; ++h ){
		for( w = 0; w < width; ++w ){
			double a = CV_MAT_ELEM( *input_real, double, h, w );
			double b = CV_MAT_ELEM( *input_imag, double, h, w );
			double c = CV_MAT_ELEM( *filter_real, double, h, w );
			double d = CV_MAT_ELEM( *filter_imag, double, h, w );

			CV_MAT_ELEM( *output_real, double, h, w ) = ( a*c + b*d ) / ( c*c + d*d + snr );
			CV_MAT_ELEM( *output_imag, double, h, w ) = ( b*c - a*d ) / ( c*c + d*d + snr );

		}
	}

	// IFT
	cvMerge( output_real, output_imag, NULL, NULL, fft_mat);
	cvDFT( fft_mat, fft_mat, CV_DXT_INV_SCALE, 0 );
	cvSplit( fft_mat, output_real, output_imag, NULL, NULL );

	// convert the restored real part back to an 8-bit image
	IplImage *output_file = cvCreateImage( cvSize( width, height), IPL_DEPTH_8U, 1 );
	cvConvert( output_real, output_file);

	cvReleaseMat( &input_real );
	cvReleaseMat( &input_imag );
	cvReleaseMat( &output_real );
	cvReleaseMat( &output_imag );
	cvReleaseMat( &filter_real );
	cvReleaseMat( &filter_imag );
	cvReleaseMat( &fft_mat );

	return output_file;
}
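The inner double loop writes out Wiener deconvolution in real arithmetic: with G = a + bi the observed spectrum and H = c + di the blur spectrum, F_hat = G * conj(H) / (|H|^2 + K), so Re = (ac + bd)/(c^2 + d^2 + K) and Im = (bc - ad)/(c^2 + d^2 + K). Despite its name, the snr parameter is that regularizing constant K (a noise-to-signal power ratio), not a signal-to-noise ratio. A hypothetical call:

/* hypothetical usage: a blurred photo plus an image of the point-spread function */
IplImage* blurred  = cvLoadImage( "blurred.png", CV_LOAD_IMAGE_GRAYSCALE );
IplImage* psf      = cvLoadImage( "psf.png", CV_LOAD_IMAGE_GRAYSCALE );
IplImage* restored = wiener_filter( blurred, 0.01, psf );
cvSaveImage( "restored.png", restored, 0 );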