Code Example #1
File: GaborImage.cpp Project: Juxi/icVision
Img GaborImage::GaborTransform(Img Image, int Frequency, int Orientation) {
	orientation = Orientation;
	CalculateKernel(Orientation, Frequency);

	Img retImg  = (IplImage*) cvClone(Image);
	
	Img gabor_real = (IplImage*) cvClone(Image);
	Img gabor_img  = (IplImage*) cvClone(Image);
	cvFilter2D(Image, gabor_real, KernelRealData);	//image.Convolution(this.KernelRealData);
	cvFilter2D(Image, gabor_img , KernelImgData);	//image.Convolution(this.KernelImgData);
	
	cvPow(gabor_real, gabor_real, 2);
	cvPow(gabor_img,  gabor_img,  2);
	
	// Img gabor = (gabor_real + gabor_img).Pow(0.5);
	cvAdd(gabor_real, gabor_img, retImg);
	
	cv::Mat in = retImg;
	cv::Mat out;
	cv::sqrt(in, out); 
	
	IplImage dst_img = out;	
	
	cvReleaseImage(&gabor_real);
	cvReleaseImage(&gabor_img);
	cvReleaseImage(&retImg);	// release the intermediate sum image before re-cloning
	
	retImg = (IplImage*) cvClone(&dst_img);
	
	return retImg;
}
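For comparison, here is a minimal sketch of the same magnitude computation written against the C++ cv:: API, which avoids the IplImage/cv::Mat round-trips above. kernelReal and kernelImag are hypothetical stand-ins for the KernelRealData/KernelImgData members, and the CV_32F output sidesteps 8-bit saturation:

#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>

// Sketch only, not the project's code: Gabor magnitude via cv::filter2D.
cv::Mat gaborMagnitude(const cv::Mat &image, const cv::Mat &kernelReal, const cv::Mat &kernelImag)
{
    cv::Mat re, im, mag;
    cv::filter2D(image, re, CV_32F, kernelReal);  // real response
    cv::filter2D(image, im, CV_32F, kernelImag);  // imaginary response
    cv::magnitude(re, im, mag);                   // element-wise sqrt(re^2 + im^2)
    return mag;
}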
Code Example #2
File: cvgabor.cpp Project: ToMadoRe/v4r
/**
 * @brief CvGabor::conv_img(IplImage *src, IplImage *dst, int Type)
 * @param src
 * @param dst
 * @param Type
 */
void CvGabor::conv_img(IplImage *src, IplImage *dst, int Type)   // function name: conv_img
{
// printf("CvGabor::conv_img 1\n");
  double ve; //, re,im;
  
  CvMat *mat = cvCreateMat(src->width, src->height, CV_32FC1);
  for (int i = 0; i < src->width; i++) {
    for (int j = 0; j < src->height; j++) {
      ve = CV_IMAGE_ELEM(src, uchar, j, i);   // CV_IMAGE_ELEM reads the pixel value at (j,i)
      CV_MAT_ELEM(*mat, float, i, j) = (float)ve;  // convert to float
    }
  }
  
// printf("CvGabor::conv_img 2\n");
  CvMat *rmat = cvCreateMat(src->width, src->height, CV_32FC1);
  CvMat *imat = cvCreateMat(src->width, src->height, CV_32FC1);
  
  switch (Type)
  {
    case CV_GABOR_REAL:
      cvFilter2D( (CvMat*)mat, (CvMat*)mat, (CvMat*)Real, cvPoint( (Width-1)/2, (Width-1)/2));
      break;
    case CV_GABOR_IMAG:
      cvFilter2D( (CvMat*)mat, (CvMat*)mat, (CvMat*)Imag, cvPoint( (Width-1)/2, (Width-1)/2));
      break;
    case CV_GABOR_MAG:
      cvFilter2D( (CvMat*)mat, (CvMat*)rmat, (CvMat*)Real, cvPoint( (Width-1)/2, (Width-1)/2));
      cvFilter2D( (CvMat*)mat, (CvMat*)imat, (CvMat*)Imag, cvPoint( (Width-1)/2, (Width-1)/2));
      
      cvPow(rmat,rmat,2); 
      cvPow(imat,imat,2);
      cvAdd(imat,rmat,mat); 
      cvPow(mat,mat,0.5); 
      break;
    case CV_GABOR_PHASE:
      break;
  }
  
// printf("CvGabor::conv_img 3\n");
  if (dst->depth == IPL_DEPTH_8U)
  {
    cvNormalize((CvMat*)mat, (CvMat*)mat, 0, 255, CV_MINMAX);
    for (int i = 0; i < mat->rows; i++)
    {
      for (int j = 0; j < mat->cols; j++)
      {
        ve = CV_MAT_ELEM(*mat, float, i, j);
        CV_IMAGE_ELEM(dst, uchar, j, i) = (uchar)cvRound(ve);
      }
    }
  }

  cvReleaseMat(&imat);
  cvReleaseMat(&rmat);
  cvReleaseMat(&mat);
}
Code Example #3
File: Filters.cpp Project: plsang/visualrhythm
void Filters::lowPass(VRFrame* frame, int size)
{
    IplImage* imgDst = 0;
    IplImage* imgAux = 0;
    IplImage* imgNew = 0;

    VRFrame* frameAux;

    Log::writeLog("%s :: param: frame[%x] size[%d]", __FUNCTION__, frame, size);

    //Clamp the matrix size.
    if (size > 9)
        size = 9;

    int cols_i = size;
    int rows_i = size;
    int total_size = 0;
    CvMat *filter = 0;

    total_size=(int)pow((double)size,2);

    // Mask used to perform the convolution.
    ///double convMask[total_size];
    double * convMask = new double[total_size];

    // Create an image with the same parameters as the original.
    frameAux = new VRFrame(frame);

    imgDst = VRFrame::imgAlloc(frameAux);
    imgAux = VRFrame::imgAlloc(frameAux);
    imgNew = VRFrame::imgAlloc(frameAux);

    // Build the mask at the size passed as a parameter.
    for (int i=0; i<total_size; i++)	
        convMask[i] = (double)1/(double)total_size;

    imgAux->imageData = frameAux->data->imageData;
    imgAux->widthStep = frameAux->data->width;

    imgDst->imageData = imgAux->imageData;
    imgDst->widthStep = imgAux->width;

    filter = cvCreateMatHeader(rows_i, cols_i, CV_64FC1);

    cvSetData(filter, convMask, cols_i*8);

    cvFilter2D(imgAux, imgDst, filter, cvPoint(-1,-1));

    VRFrame::imgCopy(imgDst, imgNew);

    frame->setImage(imgNew);

    // Free the temporaries
    VRFrame::imgDealloc(imgAux);
    VRFrame::imgDealloc(imgDst);

    delete[] convMask;
    delete frameAux;

}
Code Example #4
void Process(int pos)   
{
	//CvFileStorage *fs = cvOpenFileStorage("kernel.xml",NULL,CV_STORAGE_WRITE);

	int x,y;
	float kernel_val;
	var = (float)pos_var;
	w = (float)pos_w;
	phase = (float) pos_phase*CV_PI/180;

	cvZero(kernel);
	for (x = -kernel_size/2+1;x<=kernel_size/2; x++) {
		for (y = (-kernel_size/2+1);y<=(kernel_size/2); y++) {

			kernel_val = 1/(2*CV_PI*var)*exp( -((x*x)+(y*y))/(2*var))*cos( w*x*cos(phase)+w*y*sin(phase));

			cvSet2D(kernel,y+kernel_size/2-1,x+kernel_size/2-1,cvScalar(kernel_val));
			}
		}
	//cvWrite( fs, "kernel", kernel, cvAttrList(0,0) );
	//cvReleaseFileStorage(&fs);

	cvFilter2D(src, dest,kernel);
    cvShowImage("Process window",dest);
}   
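For reference, kernel_val above evaluates the real (cosine) part of a Gabor kernel, G(x, y) = 1/(2*pi*var) * exp(-(x^2 + y^2) / (2*var)) * cos(w*x*cos(phase) + w*y*sin(phase)): an isotropic Gaussian envelope (var plays the role of sigma^2) modulated by a cosine wave of frequency w oriented along the angle phase.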
Code Example #5
void ImgAli_common::gradient(IplImage *img,CvMat *g_x,CvMat *g_y,CvMat *margnin)
{
	
	cvFilter2D(img,g_x,x_kernel,cvPoint(-1,-1));  
	cvFilter2D(img,g_y,y_kernel,cvPoint(-1,-1));  


	for(int i=0;i<cvGetSize(img).height;i++)
	{
		CV_MAT_ELEM(*g_x,double,i,0)*=2;
		CV_MAT_ELEM(*g_x,double,i,cvGetSize(img).width-1)*=2;
	}

	for(int i=0;i<cvGetSize(img).width;i++)
	{
		CV_MAT_ELEM(*g_y,double,0,i)*=2;
		CV_MAT_ELEM(*g_y,double,cvGetSize(img).height-1,i)*=2;
	}
}
Code Example #6
void ImgAli_common::gradient(CvMat *img,CvMat *g_x,CvMat *g_y,CvMat *margnin)
{

	//for (int i=0;i<width;i++)
	//{
	//	for (int j=0;j<height;j++)
	//	{
	//		if(i>0&&i<width-1&&j>0&&j<height-1)
	//		{
	//			cvmSet(g_x,i,j)=((double)cvGet2D(img,i,j+1)-(double)cvGet2D(img,i,j-1))/2;
	//		}
	//	}
	//}

//	CvMat *imgSat=cvCreateMat(width,)
	cvFilter2D(img,g_x,x_kernel,cvPoint(-1,-1));  
	cvFilter2D(img,g_y,y_kernel,cvPoint(-1,-1));  

	//for (int i=0;i<width;i++)
	//{
	//	for (int j=0;j<height;j++)
	//	{
	//		if (CV_MAT_ELEM(*margnin,double,j,i)==1)
	//		{
	//			CV_MAT_ELEM(*g_x,double,j,i)=0;
	//			CV_MAT_ELEM(*g_y,double,j,i)=0;
	//		}
	//	}
	//}

	for(int i=0;i<cvGetSize(img).height;i++)
	{
		CV_MAT_ELEM(*g_x,double,i,0)*=2;
		CV_MAT_ELEM(*g_x,double,i,cvGetSize(img).width-1)*=2;
	}

	for(int i=0;i<cvGetSize(img).width;i++)
	{
		CV_MAT_ELEM(*g_y,double,0,i)*=2;
		CV_MAT_ELEM(*g_y,double,cvGetSize(img).height-1,i)*=2;
	}
}
Code Example #7
File: harris.cpp Project: cherip/Harris
IplImage *derivateY(const IplImage *src) {
    CvMat matrix;
    matrix = cvMat(5, 1, CV_32F, mat);

    IplImage *dst = cvCloneImage(src);
    cvFilter2D(src, dst, &matrix);

//  cvReleaseImage(&img);

    return dst;
}
Code Example #8
File: harris.cpp Project: cherip/Harris
IplImage *derivateX(const IplImage *src) {
    CvMat matrix;
    matrix = cvMat(1, 5, CV_32F, mat);

//    IplImage *img = get_gray(src);
    IplImage *dst = cvCloneImage(src);
    cvFilter2D(src, dst, &matrix);

//    cvReleaseImage(&img);

    return dst;
}
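These two helpers produce the image derivatives a Harris detector starts from; a minimal sketch of the typical follow-up step (hypothetical, assuming gray is a single-channel CV_32F image so the products do not saturate):

// Products of first derivatives: the raw entries of the Harris structure tensor.
IplImage *Ix  = derivateX(gray);
IplImage *Iy  = derivateY(gray);
IplImage *Ixy = cvCloneImage(Ix);
cvMul(Ix, Iy, Ixy, 1);   // Ix*Iy
cvMul(Ix, Ix, Ix, 1);    // Ix^2, in place
cvMul(Iy, Iy, Iy, 1);    // Iy^2, in place
// These are then smoothed and combined into the response det(M) - k*trace(M)^2.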
Code Example #9
IplImage* lowPassFilter(IplImage *image){
	IplImage* filteredImage = cvCreateImage(cvSize(image->width, image->height), image->depth, image->nChannels);
	double K[] = { 1, 1, 1, 1, 1, 1, 1, 1, 1 };
	float t = 0;
	for (int i = 0; i< (3 * 3); ++i)
		t = t + K[i];
	for (int i = 0; i< (3 * 3); ++i)
		K[i] = K[i] / t;
	CvMat Kernel = cvMat(3, 3, CV_64FC1, K);
	cvFilter2D(image, filteredImage, &Kernel);
	
	return filteredImage;
}
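Since the hand-built kernel is just a normalized 3x3 box filter, the same result could also come from the built-in blur; a minimal equivalent sketch:

IplImage* boxFiltered = cvCreateImage(cvGetSize(image), image->depth, image->nChannels);
cvSmooth(image, boxFiltered, CV_BLUR, 3, 3, 0, 0);  // normalized 3x3 box filter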
Code Example #10
File: Morphology1.cpp Project: CareShaw/OCR
//Morphological rank filter (binary; the default SE is a 3x3 rectangle)
void lhMorpRankFilterB(const IplImage* src, IplImage* dst, IplConvKernel* se = NULL, unsigned int rank = 0)
{
	assert(src != NULL  &&  dst != NULL && src != dst );

	bool defaultse = false;
	int card;
	if (se == NULL)
	{
		card = 3*3;
		assert(rank >= 0 && rank <= card);
		se = cvCreateStructuringElementEx(3, 3, 1, 1, CV_SHAPE_RECT);
		defaultse = true;
	}
	else
	{
		card = lhStructuringElementCard(se);
		assert(rank >= 0 && rank <= card);
	}

	//default rank is median
	if (rank == 0)
		rank = card/2+1;

	IplConvKernel* semap =	lhStructuringElementMap(se);

	CvMat *semat = cvCreateMat(semap->nRows, semap->nCols, CV_32FC1);

	int i;
	for (i=0; i<semap->nRows*semap->nCols; i++)
	{
		semat->data.fl[i] = semap->values[i];
	}

	cvThreshold(src, dst, 0, 1, CV_THRESH_BINARY);
	IplImage *temp = cvCreateImage(cvGetSize(dst), 8, 1);

	cvFilter2D(dst, temp, semat, cvPoint(semap->anchorX, semap->anchorY));

	cvThreshold(temp, dst, card-rank, 255, CV_THRESH_BINARY);

	cvReleaseMat(&semat);
	cvReleaseStructuringElement(&semap);

	if (defaultse)
		cvReleaseStructuringElement(&se);	
	
	cvReleaseImage(&temp);

}
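A minimal usage sketch (the file name is hypothetical; with se = NULL and rank = 0 the call degenerates to a 3x3 binary median filter):

IplImage *bin = cvLoadImage("mask.png", CV_LOAD_IMAGE_GRAYSCALE);  // hypothetical input
IplImage *out = cvCreateImage(cvGetSize(bin), 8, 1);
lhMorpRankFilterB(bin, out, NULL, 0);  // default SE, default rank = median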
Code Example #11
File: QR_Reader_C.c Project: ahliao/QR_Reader
int main(int argc, char* argv[])
{
	// Capture handle for frames from the camera
	CvCapture* capture = cvCaptureFromCAM( CV_CAP_ANY );

	// Infinite loop where our scanning is done on each camera frame
	while (1) 
	{
		IplImage* frame = cvQueryFrame(capture);
		if (!frame) {
			fprintf(stderr, "ERROR: Frame is null\n");
			getchar();
			break;
		}

		IplImage* outputimg = cvCreateImage(cvGetSize(frame), frame->depth,1);
		IplImage* test = cvCreateImage(cvGetSize(frame), frame->depth,frame->nChannels);

		double a[9]={-1,20,1,-1,20,1,-1,20,1};
		CvMat kernel= cvMat(3,3,CV_32FC1,a);
		cvFilter2D(frame,test,&kernel,cvPoint(-1,-1));

		cvCvtColor(frame, outputimg, CV_RGB2GRAY);
		QR_Data data;
		process_QR(outputimg, &data, frame);

		cvShowImage("Camera", frame);
		cvShowImage("Sharp", test);

		// Delay so screen can refresh
		if ((char) cvWaitKey(30) == 27) break;
	}

	// Release the resources
	cvReleaseCapture(&capture);
	cvDestroyWindow("Camera");

	return 0;
}
Code Example #12
File: featurepyramid.cpp Project: outernets/opencv
/*
// Getting feature map for the selected subimage
//
// API
// int getFeatureMaps(const IplImage * image, const int k, featureMap **map);
// INPUT
// image             - selected subimage
// k                 - size of cells
// OUTPUT
// map               - feature map
// RESULT
// Error status
*/
int getFeatureMaps(const IplImage* image, const int k, CvLSVMFeatureMap **map)
{
    int sizeX, sizeY;
    int p, px, stringSize;
    int height, width, numChannels;
    int i, j, kk, c, ii, jj, d;
    float  * datadx, * datady;

    int   ch;
    float magnitude, x, y, tx, ty;

    IplImage * dx, * dy;
    int *nearest;
    float *w, a_x, b_x;

    float kernel[3] = {-1.f, 0.f, 1.f};
    CvMat kernel_dx = cvMat(1, 3, CV_32F, kernel);
    CvMat kernel_dy = cvMat(3, 1, CV_32F, kernel);

    float * r;
    int   * alfa;

    float boundary_x[NUM_SECTOR + 1];
    float boundary_y[NUM_SECTOR + 1];
    float max, dotProd;
    int   maxi;

    height = image->height;
    width  = image->width ;

    numChannels = image->nChannels;

    dx    = cvCreateImage(cvSize(image->width, image->height),
                          IPL_DEPTH_32F, 3);
    dy    = cvCreateImage(cvSize(image->width, image->height),
                          IPL_DEPTH_32F, 3);

    sizeX = width  / k;
    sizeY = height / k;
    px    = 3 * NUM_SECTOR;
    p     = px;
    stringSize = sizeX * p;
    allocFeatureMapObject(map, sizeX, sizeY, p);

    cvFilter2D(image, dx, &kernel_dx, cvPoint(-1, 0));
    cvFilter2D(image, dy, &kernel_dy, cvPoint(0, -1));

    float arg_vector;
    for(i = 0; i <= NUM_SECTOR; i++)
    {
        arg_vector    = ( (float) i ) * ( (float)(PI) / (float)(NUM_SECTOR) );
        boundary_x[i] = cosf(arg_vector);
        boundary_y[i] = sinf(arg_vector);
    }/*for(i = 0; i <= NUM_SECTOR; i++) */

    r    = (float *)malloc( sizeof(float) * (width * height));
    alfa = (int   *)malloc( sizeof(int  ) * (width * height * 2));

    for(j = 1; j < height - 1; j++)
    {
        datadx = (float*)(dx->imageData + dx->widthStep * j);
        datady = (float*)(dy->imageData + dy->widthStep * j);
        for(i = 1; i < width - 1; i++)
        {
            c = 0;
            x = (datadx[i * numChannels + c]);
            y = (datady[i * numChannels + c]);

            r[j * width + i] =sqrtf(x * x + y * y);
            for(ch = 1; ch < numChannels; ch++)
            {
                tx = (datadx[i * numChannels + ch]);
                ty = (datady[i * numChannels + ch]);
                magnitude = sqrtf(tx * tx + ty * ty);
                if(magnitude > r[j * width + i])
                {
                    r[j * width + i] = magnitude;
                    c = ch;
                    x = tx;
                    y = ty;
                }
            }/*for(ch = 1; ch < numChannels; ch++)*/

            max  = boundary_x[0] * x + boundary_y[0] * y;
            maxi = 0;
            for (kk = 0; kk < NUM_SECTOR; kk++)
            {
                dotProd = boundary_x[kk] * x + boundary_y[kk] * y;
                if (dotProd > max)
                {
                    max  = dotProd;
                    maxi = kk;
                }
                else
                {
                    if (-dotProd > max)
                    {
                        max  = -dotProd;
                        maxi = kk + NUM_SECTOR;
                    }
                }
            }
            alfa[j * width * 2 + i * 2    ] = maxi % NUM_SECTOR;
            alfa[j * width * 2 + i * 2 + 1] = maxi;
        }/*for(i = 0; i < width; i++)*/
    }/*for(j = 0; j < height; j++)*/

    nearest = (int  *)malloc(sizeof(int  ) *  k);
    w       = (float*)malloc(sizeof(float) * (k * 2));

    for(i = 0; i < k / 2; i++)
    {
        nearest[i] = -1;
    }/*for(i = 0; i < k / 2; i++)*/
    for(i = k / 2; i < k; i++)
    {
        nearest[i] = 1;
    }/*for(i = k / 2; i < k; i++)*/

    for(j = 0; j < k / 2; j++)
    {
        b_x = k / 2 + j + 0.5f;
        a_x = k / 2 - j - 0.5f;
        w[j * 2    ] = 1.0f/a_x * ((a_x * b_x) / ( a_x + b_x));
        w[j * 2 + 1] = 1.0f/b_x * ((a_x * b_x) / ( a_x + b_x));
    }/*for(j = 0; j < k / 2; j++)*/
    for(j = k / 2; j < k; j++)
    {
        a_x = j - k / 2 + 0.5f;
        b_x =-j + k / 2 - 0.5f + k;
        w[j * 2    ] = 1.0f/a_x * ((a_x * b_x) / ( a_x + b_x));
        w[j * 2 + 1] = 1.0f/b_x * ((a_x * b_x) / ( a_x + b_x));
    }/*for(j = k / 2; j < k; j++)*/


    for(i = 0; i < sizeY; i++)
    {
        for(j = 0; j < sizeX; j++)
        {
            for(ii = 0; ii < k; ii++)
            {
                for(jj = 0; jj < k; jj++)
                {
                    if ((i * k + ii > 0) &&
                            (i * k + ii < height - 1) &&
                            (j * k + jj > 0) &&
                            (j * k + jj < width  - 1))
                    {
                        d = (k * i + ii) * width + (j * k + jj);
                        (*map)->map[ i * stringSize + j * (*map)->numFeatures + alfa[d * 2    ]] +=
                            r[d] * w[ii * 2] * w[jj * 2];
                        (*map)->map[ i * stringSize + j * (*map)->numFeatures + alfa[d * 2 + 1] + NUM_SECTOR] +=
                            r[d] * w[ii * 2] * w[jj * 2];
                        if ((i + nearest[ii] >= 0) &&
                                (i + nearest[ii] <= sizeY - 1))
                        {
                            (*map)->map[(i + nearest[ii]) * stringSize + j * (*map)->numFeatures + alfa[d * 2    ]             ] +=
                                r[d] * w[ii * 2 + 1] * w[jj * 2 ];
                            (*map)->map[(i + nearest[ii]) * stringSize + j * (*map)->numFeatures + alfa[d * 2 + 1] + NUM_SECTOR] +=
                                r[d] * w[ii * 2 + 1] * w[jj * 2 ];
                        }
                        if ((j + nearest[jj] >= 0) &&
                                (j + nearest[jj] <= sizeX - 1))
                        {
                            (*map)->map[i * stringSize + (j + nearest[jj]) * (*map)->numFeatures + alfa[d * 2    ]             ] +=
                                r[d] * w[ii * 2] * w[jj * 2 + 1];
                            (*map)->map[i * stringSize + (j + nearest[jj]) * (*map)->numFeatures + alfa[d * 2 + 1] + NUM_SECTOR] +=
                                r[d] * w[ii * 2] * w[jj * 2 + 1];
                        }
                        if ((i + nearest[ii] >= 0) &&
                                (i + nearest[ii] <= sizeY - 1) &&
                                (j + nearest[jj] >= 0) &&
                                (j + nearest[jj] <= sizeX - 1))
                        {
                            (*map)->map[(i + nearest[ii]) * stringSize + (j + nearest[jj]) * (*map)->numFeatures + alfa[d * 2    ]             ] +=
                                r[d] * w[ii * 2 + 1] * w[jj * 2 + 1];
                            (*map)->map[(i + nearest[ii]) * stringSize + (j + nearest[jj]) * (*map)->numFeatures + alfa[d * 2 + 1] + NUM_SECTOR] +=
                                r[d] * w[ii * 2 + 1] * w[jj * 2 + 1];
                        }
                    }
                }/*for(jj = 0; jj < k; jj++)*/
            }/*for(ii = 0; ii < k; ii++)*/
        }/*for(j = 1; j < sizeX - 1; j++)*/
    }/*for(i = 1; i < sizeY - 1; i++)*/

    cvReleaseImage(&dx);
    cvReleaseImage(&dy);


    free(w);
    free(nearest);

    free(r);
    free(alfa);

    return LATENT_SVM_OK;
}
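The map built here is the HOG-style feature used by the latent SVM detector: each k x k cell receives 3*NUM_SECTOR values, NUM_SECTOR contrast-insensitive orientation bins (alfa[d*2]) plus 2*NUM_SECTOR contrast-sensitive ones (alfa[d*2+1]), and the w[] weights bilinearly distribute each pixel's dominant-channel gradient magnitude r[d] over the four nearest cells.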
Code Example #13
#if defined(WINDOWS_SYS)
UINT WINAPI
//DWORD WINAPI
#elif defined(POSIX_SYS)
// using pthread
void *
#endif
	ChessRecognition::HoughLineThread(
#if defined(WINDOWS_SYS)
	LPVOID
#elif defined(POSIX_SYS)
	void *
#endif
	Param) {
	// The Windows-side thread function that actually runs in the background.
	// The class instance is passed in through the thread parameter.
	ChessRecognition *_TChessRecognition = (ChessRecognition *)Param;
	_TChessRecognition->_HoughLineBased = new HoughLineBased();

	CvSeq *_TLineX, *_TLineY;
	double _TH[] = { -1, -7, -15, 0, 15, 7, 1 };

	CvMat _TDoGX = cvMat(1, 7, CV_64FC1, _TH);
	CvMat* _TDoGY = cvCreateMat(7, 1, CV_64FC1);
	cvTranspose(&_TDoGX, _TDoGY); // transpose(&DoGx) -> DoGy

	double _TMinValX, _TMaxValX, _TMinValY, _TMaxValY, _TMinValT, _TMaxValT;
	int _TKernel = 1;

	// Initialize the images used by the Hough step.
	IplImage *iplTemp = cvCreateImage(cvSize(_TChessRecognition->_Width, _TChessRecognition->_Height), IPL_DEPTH_32F, 1);                   
	IplImage *iplDoGx = cvCreateImage(cvGetSize(iplTemp), IPL_DEPTH_32F, 1);  
	IplImage *iplDoGy = cvCreateImage(cvGetSize(iplTemp), IPL_DEPTH_32F, 1);  
	IplImage *iplDoGyClone = cvCloneImage(iplDoGy);
	IplImage *iplDoGxClone = cvCloneImage(iplDoGx);
	IplImage *iplEdgeX = cvCreateImage(cvGetSize(iplTemp), 8, 1);
	IplImage *iplEdgeY = cvCreateImage(cvGetSize(iplTemp), 8, 1);

	CvMemStorage* _TStorageX = cvCreateMemStorage(0), *_TStorageY = cvCreateMemStorage(0);

	while (_TChessRecognition->_EnableThread != false) {
		// Grab the image; a critical section keeps this in sync with the main loop.
		_TChessRecognition->_ChessBoardDetectionInternalImageProtectMutex.lock();
		//EnterCriticalSection(&(_TChessRecognition->cs));
		cvConvert(_TChessRecognition->_ChessBoardDetectionInternalImage, iplTemp);
		//LeaveCriticalSection(&_TChessRecognition->cs);
		_TChessRecognition->_ChessBoardDetectionInternalImageProtectMutex.unlock();

		// Apply the filters to extract the X-axis and Y-axis lines.
		cvFilter2D(iplTemp, iplDoGx, &_TDoGX); // extract the line responses
		cvFilter2D(iplTemp, iplDoGy, _TDoGY);
		cvAbs(iplDoGx, iplDoGx);
		cvAbs(iplDoGy, iplDoGy);

		// Normalize using the minimum and maximum values found in each image.
		cvMinMaxLoc(iplDoGx, &_TMinValX, &_TMaxValX);
		cvMinMaxLoc(iplDoGy, &_TMinValY, &_TMaxValY);
		cvMinMaxLoc(iplTemp, &_TMinValT, &_TMaxValT);
		cvScale(iplDoGx, iplDoGx, 2.0 / _TMaxValX); // normalize.
		cvScale(iplDoGy, iplDoGy, 2.0 / _TMaxValY);
		cvScale(iplTemp, iplTemp, 2.0 / _TMaxValT);

		cvCopy(iplDoGy, iplDoGyClone);
		cvCopy(iplDoGx, iplDoGxClone);

		// Further processing after non-maximum suppression
		_TChessRecognition->_HoughLineBased->NonMaximumSuppression(iplDoGx, iplDoGyClone, _TKernel);
		_TChessRecognition->_HoughLineBased->NonMaximumSuppression(iplDoGy, iplDoGxClone, _TKernel);

		cvConvert(iplDoGx, iplEdgeY); // convert back to IPL_DEPTH_8U.
		cvConvert(iplDoGy, iplEdgeX);

		double rho = 1.0; // distance resolution in pixel-related units.
		double theta = 1.0; // angle resolution in degrees (converted to radians below).
		int threshold = 20;

		if (threshold == 0)
			threshold = 1;

		// Detect Hough lines on the extracted edges.
		_TLineX = cvHoughLines2(iplEdgeX, _TStorageX, CV_HOUGH_STANDARD, 1.0 * rho, CV_PI / 180 * theta, threshold, 0, 0);
		_TLineY = cvHoughLines2(iplEdgeY, _TStorageY, CV_HOUGH_STANDARD, 1.0 * rho, CV_PI / 180 * theta, threshold, 0, 0);

		// Convert the CvSeq results into vectors.
		_TChessRecognition->_Vec_ProtectionMutex.lock();
		_TChessRecognition->_HoughLineBased->CastSequence(_TLineX, _TLineY);
		_TChessRecognition->_Vec_ProtectionMutex.unlock();

		Sleep(2);
	}

	// Release the mat.
	cvReleaseMat(&_TDoGY);

	// Release the images used for the internal computations.
	cvReleaseImage(&iplTemp);
	cvReleaseImage(&iplDoGx);
	cvReleaseImage(&iplDoGy);
	cvReleaseImage(&iplDoGyClone);
	cvReleaseImage(&iplDoGxClone);
	cvReleaseImage(&iplEdgeX);
	cvReleaseImage(&iplEdgeY);

	// Release the OpenCV memory storage used by cvHoughLines2.
	cvReleaseMemStorage(&_TStorageX);
	cvReleaseMemStorage(&_TStorageY);

	delete _TChessRecognition->_HoughLineBased;
#if defined(WINDOWS_SYS)
	_endthread();
#elif defined(POSIX_SYS)

#endif
	_TChessRecognition->_EndThread = true;
	return 0;
}
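The seven-tap kernel _TH acts as a smoothed derivative profile: filtering with it along one axis and with its transpose along the other yields the oriented edge maps that, after normalization and non-maximum suppression, feed cvHoughLines2 to recover the chessboard's straight lines.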
Code Example #14
File: savgol.cpp Project: xufango/contrib_bk
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
CvMat *tgso (CvMat &tmap, int ntex, double sigma, double theta, CvMat &tsim, int useChi2) {


	CvMat *roundTmap=cvCreateMat(tmap.rows,tmap.cols,CV_32FC1);
	CvMat *comp=cvCreateMat(tmap.rows,tmap.cols,CV_32FC1);

	for (int i=0;i<tmap.rows;i++)
		for (int j=0;j<tmap.cols;j++)
			cvSetReal2D(roundTmap,i,j,cvRound(cvGetReal2D(&tmap,i,j)));

	cvSub(&tmap,roundTmap,comp);
	if (cvCountNonZero(comp)) {
		printf("texton labels not integral");
		cvReleaseMat(&roundTmap);
		cvReleaseMat(&comp);
		exit(1);
	}

	double min,max;
	cvMinMaxLoc(&tmap,&min,&max);
	if (min<1 || max>ntex) {
		printf("texton labels out of range [1,%d]\n",ntex);
		cvReleaseMat(&roundTmap);
		cvReleaseMat(&comp);
		exit(1);
	}

	cvReleaseMat(&roundTmap);
	cvReleaseMat(&comp);


	double wr=floor(sigma); //sigma=radius (Leo) 

	CvMat *x=cvCreateMat(1,wr-(-wr)+1, CV_64FC1);
	CvMat *y=cvCreateMat(wr-(-wr)+1,1, CV_64FC1);

	CvMat *u=cvCreateMat(wr-(-wr)+1,wr-(-wr)+1, CV_64FC1);
	CvMat *v=cvCreateMat(wr-(-wr)+1,wr-(-wr)+1, CV_64FC1);
	CvMat *gamma=cvCreateMat(u->rows,v->rows, CV_64FC1);

	// Set x,y directions 
	for (int j=-wr;j<=wr;j++) {
		cvSetReal2D(x,0,(j+wr),j);
		cvSetReal2D(y,(j+wr),0,j);
	}

	// Set u,v, meshgrids
	for (int i=0;i<u->rows;i++) {
		cvRepeat(x,u);
		cvRepeat(y,v);
	}

	// Compute the gamma matrix from the grid
	for (int i=0;i<u->rows;i++) 
		for (int j=0;j<u->cols;j++)
			cvSetReal2D(gamma,i,j,atan2(cvGetReal2D(v,i,j),cvGetReal2D(u,i,j)));

	cvReleaseMat(&x);
	cvReleaseMat(&y);

	CvMat *sum=cvCreateMat(u->rows,u->cols, CV_64FC1);
	cvMul(u,u,u);
	cvMul(v,v,v);
	cvAdd(u,v,sum);
	CvMat *mask=cvCreateMat(u->rows,u->cols, CV_8UC1);
	cvCmpS(sum,sigma*sigma,mask,CV_CMP_LE);
	cvConvertScale(mask,mask,1.0/255);
	cvSetReal2D(mask,wr,wr,0);
	int count=cvCountNonZero(mask);

	cvReleaseMat(&u);
	cvReleaseMat(&v);
	cvReleaseMat(&sum);

	CvMat *sub=cvCreateMat(mask->rows,mask->cols, CV_64FC1);
	CvMat *side=cvCreateMat(mask->rows,mask->cols, CV_8UC1);

	cvSubS(gamma,cvScalar(theta),sub);
	cvReleaseMat(&gamma);

	for (int i=0;i<mask->rows;i++){
		for (int j=0;j<mask->cols;j++) {
			double n=cvmGet(sub,i,j);
			double n_mod = n-floor(n/(2*M_PI))*2*M_PI;
			cvSetReal2D(side,i,j, 1 + int(n_mod < M_PI));
		}
	}

	cvMul(side,mask,side);
	cvReleaseMat(&sub);
	cvReleaseMat(&mask);

	CvMat *lmask=cvCreateMat(side->rows,side->cols, CV_8UC1);
	CvMat *rmask=cvCreateMat(side->rows,side->cols, CV_8UC1);
	cvCmpS(side,1,lmask,CV_CMP_EQ);
	cvCmpS(side,2,rmask,CV_CMP_EQ);
	int count1=cvCountNonZero(lmask), count2=cvCountNonZero(rmask);
	if (count1 != count2) {
		printf("Bug: imbalance\n");
	}

	CvMat *rlmask=cvCreateMat(side->rows,side->cols, CV_32FC1);
	CvMat *rrmask=cvCreateMat(side->rows,side->cols, CV_32FC1);
	cvConvertScale(lmask,rlmask,1.0/(255*count)*2);
	cvConvertScale(rmask,rrmask,1.0/(255*count)*2);


	cvReleaseMat(&lmask);
	cvReleaseMat(&rmask);
	cvReleaseMat(&side);

	int h=tmap.rows;
	int w=tmap.cols;


	CvMat *d       = cvCreateMat(h*w,ntex,CV_32FC1);
	CvMat *coltemp = cvCreateMat(h*w,1,CV_32FC1);
	CvMat *tgL     = cvCreateMat(h,w, CV_32FC1);
	CvMat *tgR     = cvCreateMat(h,w, CV_32FC1);
	CvMat *temp    = cvCreateMat(h,w,CV_8UC1);
	CvMat *im      = cvCreateMat(h,w, CV_32FC1);
	CvMat *sub2    = cvCreateMat(h,w,CV_32FC1);
	CvMat *sub2t   = cvCreateMat(w,h,CV_32FC1);
	CvMat *prod    = cvCreateMat(h*w,ntex,CV_32FC1);
	CvMat reshapehdr,*reshape;

	CvMat* tgL_pad = cvCreateMat(h+rlmask->rows-1,w+rlmask->cols-1,CV_32FC1);
	CvMat* tgR_pad = cvCreateMat(h+rlmask->rows-1,w+rlmask->cols-1,CV_32FC1);
	CvMat* im_pad  = cvCreateMat(h+rlmask->rows-1,w+rlmask->cols-1,CV_32FC1);

	CvMat *tg=cvCreateMat(h,w,CV_32FC1);
	cvZero(tg);
	
	if (useChi2 == 1){
		CvMat* temp_add1 = cvCreateMat(h,w,CV_32FC1);
		for (int i=0;i<ntex;i++) {
			cvCmpS(&tmap,i+1,temp,CV_CMP_EQ); 
			cvConvertScale(temp,im,1.0/255);

			cvCopyMakeBorder(tgL,tgL_pad,cvPoint((rlmask->cols-1)/2,(rlmask->rows-1)/2),IPL_BORDER_CONSTANT);
			cvCopyMakeBorder(tgR,tgR_pad,cvPoint((rlmask->cols-1)/2,(rlmask->rows-1)/2),IPL_BORDER_CONSTANT);
			cvCopyMakeBorder(im,im_pad,cvPoint((rlmask->cols-1)/2,(rlmask->rows-1)/2),IPL_BORDER_CONSTANT);

			cvFilter2D(im_pad,tgL_pad,rlmask,cvPoint((rlmask->cols-1)/2,(rlmask->rows-1)/2));
			cvFilter2D(im_pad,tgR_pad,rrmask,cvPoint((rlmask->cols-1)/2,(rlmask->rows-1)/2));

			cvGetSubRect(tgL_pad,tgL,cvRect((rlmask->cols-1)/2,(rlmask->rows-1)/2,tgL->cols,tgL->rows));
			cvGetSubRect(tgR_pad,tgR,cvRect((rlmask->cols-1)/2,(rlmask->rows-1)/2,tgR->cols,tgR->rows));

			cvSub(tgL,tgR,sub2);
			cvPow(sub2,sub2,2.0);
			cvAdd(tgL,tgR,temp_add1);
			cvAddS(temp_add1,cvScalar(0.0000000001),temp_add1);
			cvDiv(sub2,temp_add1,sub2);
			cvAdd(tg,sub2,tg);
		}
		cvScale(tg,tg,0.5);

		cvReleaseMat(&temp_add1);

	}
	else{// if not chi^2
		for (int i=0;i<ntex;i++) {
			cvCmpS(&tmap,i+1,temp,CV_CMP_EQ); 
			cvConvertScale(temp,im,1.0/255);

			cvCopyMakeBorder(tgL,tgL_pad,cvPoint((rlmask->cols-1)/2,(rlmask->rows-1)/2),IPL_BORDER_CONSTANT);
			cvCopyMakeBorder(tgR,tgR_pad,cvPoint((rlmask->cols-1)/2,(rlmask->rows-1)/2),IPL_BORDER_CONSTANT);
			cvCopyMakeBorder(im,im_pad,cvPoint((rlmask->cols-1)/2,(rlmask->rows-1)/2),IPL_BORDER_CONSTANT);

			cvFilter2D(im_pad,tgL_pad,rlmask,cvPoint((rlmask->cols-1)/2,(rlmask->rows-1)/2));
			cvFilter2D(im_pad,tgR_pad,rrmask,cvPoint((rlmask->cols-1)/2,(rlmask->rows-1)/2));

			cvGetSubRect(tgL_pad,tgL,cvRect((rlmask->cols-1)/2,(rlmask->rows-1)/2,tgL->cols,tgL->rows));
			cvGetSubRect(tgR_pad,tgR,cvRect((rlmask->cols-1)/2,(rlmask->rows-1)/2,tgR->cols,tgR->rows));

			cvSub(tgL,tgR,sub2);
			cvAbs(sub2,sub2);
			cvTranspose(sub2,sub2t);
			reshape=cvReshape(sub2t,&reshapehdr,0,h*w);
			cvGetCol(d,coltemp,i);
			cvCopy(reshape,coltemp);
		}

		cvMatMul(d,&tsim,prod);
		cvMul(prod,d,prod);


		CvMat *sumcols=cvCreateMat(h*w,1,CV_32FC1);
		cvSetZero(sumcols);
		for (int i=0;i<prod->cols;i++) {
			cvGetCol(prod,coltemp,i);
			cvAdd(sumcols,coltemp,sumcols);
		}

		reshape=cvReshape(sumcols,&reshapehdr,0,w);
		cvTranspose(reshape,tg);

		cvReleaseMat(&sumcols);
	}


	//Smooth the gradient now!!
	tg=fitparab(*tg,sigma,sigma/4,theta);
	cvMaxS(tg,0,tg); 

	
	cvReleaseMat(&im_pad);
	cvReleaseMat(&tgL_pad);
	cvReleaseMat(&tgR_pad);
	cvReleaseMat(&rlmask);
	cvReleaseMat(&rrmask);
	cvReleaseMat(&im);
	cvReleaseMat(&tgL);
	cvReleaseMat(&tgR);
	cvReleaseMat(&temp);
	cvReleaseMat(&coltemp);
	cvReleaseMat(&sub2);
	cvReleaseMat(&sub2t);
	cvReleaseMat(&d);
	cvReleaseMat(&prod);

	return tg;

}
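In outline, tgso computes an oriented texture gradient: rlmask and rrmask select the two half-discs of radius sigma on either side of a diameter at angle theta, each texton channel of tmap is filtered with both masks to obtain per-side texton histograms (tgL, tgR), and the half-disc difference, chi-squared when useChi2 is set, otherwise an L1 distance weighted by the texton similarity matrix tsim, becomes the gradient, which fitparab then smooths.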
Code Example #15
File: savgol.cpp Project: xufango/contrib_bk
//-----------------------------------------------------------------------------
//-----------------------------------------------------------------------------
CvMat *savgolFilter(CvMat &z, double ra, double rb, double theta){

	int d=2;
	int k=1; // Always these, according to the matlab code (Leo)
	ra = max(1.5,ra);
	rb = max(1.5,rb);
	double ira2 = 1.0/(ra*ra);
	double irb2 = 1.0/(rb*rb);
	double wr = floor(max(ra,rb));
	double wd = 2*wr+1;
	double sint = sin(theta);
	double cost = cos(theta);

	CvMat*xx= cvCreateMat(2*d+1, 1, CV_64FC1);
	cvSetZero(xx);
	CvMat *temp=cvCreateMat(2*d+1, 1, CV_64FC1);

	for (int u=-wr;u<=wr;u++)
		for (int v=-wr;v<=wr;v++) {
			double ai=-u*sint+v*cost; //distance along major axis
			double bi= u*cost+v*sint; //distance along minor axis

			if (ai*ai*ira2+bi*bi*irb2 <= 1) {
				cvSet(temp, cvScalar(ai));
				cvSetReal2D(temp, 0, 0, 1.0);

				double dTemp=1;

				for (int i=0;i<2*d+1-1;i++) {
					dTemp=dTemp*cvGetReal2D(temp,i+1,0);
					cvSetReal2D(temp,i+1,0,dTemp);
				}

				cvAdd(xx,temp,xx);

			}
		}

		cvReleaseMat(&temp);

		CvMat *A=cvCreateMat(d+1,d+1,CV_64FC1);
		for (int i=0;i<d+1;i++)
			for (int j=i;j<=i+d;j++)
				cvSetReal2D(A,j-i,i,cvGetReal2D(xx,j,0));

		cvInvert(A,A,CV_LU);

		CvMat *zz=cvCreateMat(wd,wd, CV_64FC1);
		CvMat *yy=cvCreateMat(d+1,1, CV_64FC1);
		CvMat *result=cvCreateMat(d+1,1, CV_64FC1);
		CvMat *filt=cvCreateMat(wd,wd, CV_32FC1);
		cvSetZero(filt);


		for (int u=-wr;u<=wr;u++)
			for (int v=-wr;v<=wr;v++) {

				cvSetZero(zz);
				cvSetReal2D(zz,v+wr,u+wr,1);
				cvSetZero(yy);

				double ai=-u*sint+v*cost; //distance along major axis
				double bi= u*cost+v*sint; //distance along minor axis

				if (ai*ai*ira2+bi*bi*irb2 <= 1) {
					cvSet(yy, cvScalar(ai));
					cvSetReal2D(yy, 0, 0, 1.0);

					double dTemp=1;
					for (int i=0;i<d+1-1;i++) {
						dTemp=dTemp*cvGetReal2D(yy,i+1,0);
						cvSetReal2D(yy,i+1,0,dTemp);
					}

					cvMatMul(A,yy,result);
					cvSetReal2D(filt,v+wr,u+wr,cvGetReal2D(result,k-1,0));
				}
			}

			cvReleaseMat(&zz);
			cvReleaseMat(&yy);
			cvReleaseMat(&xx);
			cvReleaseMat(&A);

			CvMat *ztemp= cvCreateMat(z.rows+filt->rows-1,z.cols+filt->cols-1,CV_32FC1);
			cvCopyMakeBorder(&z,ztemp,cvPoint((filt->cols-1)/2,(filt->rows-1)/2),IPL_BORDER_CONSTANT);
			CvMat *filteredtemp= cvCreateMat(ztemp->rows,ztemp->cols,CV_32FC1);

			cvFilter2D(ztemp,filteredtemp,filt,cvPoint((filt->cols-1)/2,(filt->rows-1)/2));

			// cvGetSubRect only builds a header that points into filteredtemp,
			// so copy the region out before releasing the temporaries.
			CvMat *filtered = cvCreateMat(z.rows,z.cols,CV_32FC1);
			CvMat subhdr;
			cvGetSubRect(filteredtemp,&subhdr,cvRect((filt->cols-1)/2,(filt->rows-1)/2,z.cols,z.rows));
			cvCopy(&subhdr,filtered);

			cvReleaseMat(&ztemp);
			cvReleaseMat(&filteredtemp);
			cvReleaseMat(&filt);

			return filtered;
}
Code Example #16
File: cvgabor.cpp Project: ToMadoRe/v4r
/**
 * @brief CvGabor::conv_img_a(IplImage *src, IplImage *dst, int Type)
 */
void CvGabor::conv_img_a(IplImage *src, IplImage *dst, int Type)   // Gabor convolution of an image; function name: conv_img_a
{
    double ve, re,im;
  
    int width = src->width;
    int height = src->height;
    CvMat *mat = cvCreateMat(src->width, src->height, CV_32FC1);
    
    for (int i = 0; i < width; i++)  // transpose the whole image into matrix coordinates
    {
       for (int j = 0; j < height; j++)
       {
              ve = cvGetReal2D((IplImage*)src, j, i);
              cvSetReal2D( (CvMat*)mat, i, j, ve );
       }
    }

    CvMat *rmat = cvCreateMat(width, height, CV_32FC1);  // holds the real part
    CvMat *imat = cvCreateMat(width, height, CV_32FC1);  // holds the imaginary part

    CvMat *kernel = cvCreateMat( Width, Width, CV_32FC1 ); // create the kernel window

    switch (Type)
    {
      case CV_GABOR_REAL:   // convolution with the real part
        cvCopy( (CvMat*)Real, (CvMat*)kernel, NULL );
        cvFilter2D( (CvMat*)mat, (CvMat*)mat, (CvMat*)kernel, cvPoint( (Width-1)/2, (Width-1)/2));
        break;
      case CV_GABOR_IMAG:      // convolution with the imaginary part
        cvCopy( (CvMat*)Imag, (CvMat*)kernel, NULL );
        cvFilter2D( (CvMat*)mat, (CvMat*)mat, (CvMat*)kernel, cvPoint( (Width-1)/2, (Width-1)/2));
        break;
      case CV_GABOR_MAG:   // convolution with both real and imaginary parts
        /* Real Response */
        cvCopy( (CvMat*)Real, (CvMat*)kernel, NULL );
        cvFilter2D( (CvMat*)mat, (CvMat*)rmat, (CvMat*)kernel, cvPoint( (Width-1)/2, (Width-1)/2));
        /* Imag Response */
        cvCopy( (CvMat*)Imag, (CvMat*)kernel, NULL );
        cvFilter2D( (CvMat*)mat, (CvMat*)imat, (CvMat*)kernel, cvPoint( (Width-1)/2, (Width-1)/2));
        /* Magnitude response is the square root of the sum of the square of real response and imaginary response */
        for (int i = 0; i < width; i++)
        {
           for (int j = 0; j < height; j++)
           {
               re = cvGetReal2D((CvMat*)rmat, i, j);
               im = cvGetReal2D((CvMat*)imat, i, j);
               ve = sqrt(re*re + im*im);
               cvSetReal2D( (CvMat*)mat, i, j, ve );
           }
        }       
        break;
      case CV_GABOR_PHASE:
        break;
    }
    
    if (dst->depth == IPL_DEPTH_8U)  // normalize
    {
        cvNormalize((CvMat*)mat, (CvMat*)mat, 0, 255, CV_MINMAX, NULL);
      for (int i = 0; i < width; i++)
      {
            for (int j = 0; j < height; j++)
            {
                ve = cvGetReal2D((CvMat*)mat, i, j);
                ve = cvRound(ve);
                cvSetReal2D( (IplImage*)dst, j, i, ve );
            }
        }
     }

     if (dst->depth == IPL_DEPTH_32F)
     {
         for (int i = 0; i < width; i++)
       {
            for (int j = 0; j < height; j++)
            {
                ve = cvGetReal2D((CvMat*)mat, i, j);
                cvSetReal2D( (IplImage*)dst, j, i, ve );
            }
         }
     } 

    cvReleaseMat(&kernel);
    cvReleaseMat(&imat);
    cvReleaseMat(&rmat);
    cvReleaseMat(&mat);
}
Code Example #17
File: main.cpp Project: Exorcismus/IRIS-Recognition
int main(){



Gabor gabor;

run_camera();


cvCvtColor( src, grayscale, CV_RGB2GRAY );
cvCopy(src,dst);
Smooth(src,smooth);
//cvShowImage( "smooth",smooth);


process_image();

//normalize(float xp,float xi, float yp,float yi, float rp,float ri ,IplImage *src)
normalize(xp, yp, rp, xi, yi,ri ,src);

cvCvtColor(normalized,greynormalized,CV_RGB2GRAY);
//cvShowImage("greynormalized",greynormalized);

cvEqualizeHist(greynormalized,eqnormalized);
//cvShowImage("eqnormalized",eqnormalized);



//1-void create_gabor_kernel_v1( float sig, float thet, float lm, float gamma , float ps , float bw)
//gabor.create_gabor_kernel_v1(0,45,8,1,0,1);   //sigma = 1.6 , gamma = 1 for kernel of size 9



//2-void create_gabor_kernel_v2(int kernel_size, float sig, float thet, float lm, float gamma , float ps);
//gabor.create_gabor_kernel_v2(21,5,45,10,1,90); 



//3-void create_gabor_kernel_v3(int kernel_size, float sig, float thet, float freq, float gamma , float ps)
//gabor.create_gabor_kernel_v3(9,2,0,0.2,1,0); 



//4-void create_gabor_kernel_v4(int kernel_size, double sigma, double theta, double lambd, double gamma, double psi, int ktype);
//gabor.create_gabor_kernel_v4(9,2,0,35,1,0,CV_32F); 



//5-void create_gabor_kernel_v5 (int kernel_size, double sigma,double theta ,  double freq ,double gamma)
//gabor.create_gabor_kernel_v5(9,2,0,0.25,1);


/////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////


cvFilter2D(eqnormalized,real_gabor, kernel_real);
cvShowImage("real wavelet",real_gabor);


cvFilter2D(eqnormalized,imag_gabor, kernel_imag);
cvShowImage("imag wavelet",imag_gabor);

//cvFilter2D(eqnormalized,mag_gabor, kernel_mag);
//cvShowImage("mag wavelet",mag_gabor);





//quantize_gabor_image_real();
//quantize_gabor_image_imag();



generate_code();

//save_image_code("code_4.bmp",code);       //scales code before saving

//display_image_values(0,kernel_real);     // FOR DEBUGGING PURPOSES
//display_image_values(code,0);            // FOR DEBUGGING PURPOSES



float HD =compare_code(code,"code_4.bmp");
printf("Hamming Distance = %1.2f",HD);


cvScale(code,scaled_code,255);
cvShowImage("code",scaled_code);

cvNamedWindow( "Source");
cvShowImage( "Source",dst);

//printf ( "xp:%f ,xi:%f, yp:%f, yi:%f, rp:%f, ri:%f",xp,xi,yp,yi,rp,ri);



cvWaitKey();
cvReleaseImage(&dst);
cvReleaseImage(&smooth);
cvReleaseImage(&pupil);
cvReleaseImage(&iris);
cvReleaseImage(&pedge);
cvReleaseImage(&iedge);
cvReleaseImage(&src);

cvReleaseImage(&eqnormalized);
cvReleaseImage(&greynormalized);
cvReleaseImage(&normalized);
cvReleaseImage(&real_gabor);
cvReleaseImage(&imag_gabor);
cvReleaseImage(&mag_gabor);
//cvReleaseImage(&phase_gabor);
cvReleaseImage(&quantized_real);
cvReleaseImage(&quantized_imag);
cvReleaseImage(&code);




return 0;
}
Code Example #18
IplImage* 
ComputeSaliency(IplImage* image, int thresh, int scale) {
  //given a one channel image
  unsigned int size = floor(pow(2,scale)); //the size at which to compute the saliency

  IplImage* bw_im = cvCreateImage(cvSize(size,size), 
				  IPL_DEPTH_8U,1);
  cvResize(image, bw_im);
  IplImage* realInput = cvCreateImage( cvGetSize(bw_im), IPL_DEPTH_32F, 1);
  
  IplImage* imaginaryInput = cvCreateImage( cvGetSize(bw_im), IPL_DEPTH_32F, 1);
  IplImage* complexInput = cvCreateImage( cvGetSize(bw_im), IPL_DEPTH_32F, 2);

  cvScale(bw_im, realInput, 1.0/255.0);
  cvZero(imaginaryInput);
  cvMerge(realInput, imaginaryInput, NULL, NULL, complexInput);
  CvMat* dft_A = cvCreateMat( size, size, CV_32FC2 );

  // copy A to dft_A and pad dft_A with zeros
  CvMat tmp;
  cvGetSubRect( dft_A, &tmp, cvRect(0,0, size,size));
  cvCopy( complexInput, &tmp );
  //  cvZero(&tmp);

  cvDFT( dft_A, dft_A, CV_DXT_FORWARD, size );
  cvSplit( dft_A, realInput, imaginaryInput, NULL, NULL );
  // Compute the phase angle 
  IplImage* image_Mag = cvCreateImage(cvSize(size, size), IPL_DEPTH_32F, 1);
  IplImage* image_Phase = cvCreateImage(cvSize(size, size), IPL_DEPTH_32F, 1);
    

  //compute the phase of the spectrum
  cvCartToPolar(realInput, imaginaryInput, image_Mag, image_Phase, 0);
  
  IplImage* log_mag = cvCreateImage(cvSize(size, size), IPL_DEPTH_32F, 1);
  cvLog(image_Mag, log_mag);
  //Box filter the magnitude, then take the difference

  IplImage* log_mag_Filt = cvCreateImage(cvSize(size, size), 
					   IPL_DEPTH_32F, 1);
  CvMat* filt = cvCreateMat(3,3, CV_32FC1);
  cvSet(filt,cvScalarAll(1.0/9.0));
  cvFilter2D(log_mag, log_mag_Filt, filt);
  cvReleaseMat(&filt);

  cvSub(log_mag, log_mag_Filt, log_mag);
  
  cvExp(log_mag, image_Mag);
   
  cvPolarToCart(image_Mag, image_Phase, realInput, imaginaryInput,0);

  cvMerge(realInput, imaginaryInput, NULL, NULL, dft_A);
  cvDFT( dft_A, dft_A, CV_DXT_INV_SCALE, size);

  cvAbs(dft_A, dft_A);
  cvMul(dft_A,dft_A, dft_A);
  cvGetSubRect( dft_A, &tmp,  cvRect(0,0, size,size));
  cvCopy( &tmp, complexInput);
  cvSplit(complexInput, realInput, imaginaryInput, NULL,NULL);

  IplImage* result_image = cvCreateImage(cvGetSize(image),IPL_DEPTH_32F, 1);
  double minv, maxv;
  CvPoint minl, maxl;
  cvSmooth(realInput,realInput);
  cvSmooth(realInput,realInput);
  cvMinMaxLoc(realInput,&minv,&maxv,&minl,&maxl);
  printf("Max value %lf, min %lf\n", maxv,minv);
  cvScale(realInput, realInput, 1.0/(maxv-minv), 1.0*(-minv)/(maxv-minv));
  cvResize(realInput, result_image);
  double threshold = thresh/100.0*cvAvg(realInput).val[0];
  cvThreshold(result_image, result_image, threshold, 1.0, CV_THRESH_BINARY);
  IplImage* final_result = cvCreateImage(cvGetSize(image),IPL_DEPTH_8U, 1);
  cvScale(result_image, final_result, 255.0, 0.0);
  cvReleaseImage(&result_image);
  cvReleaseImage(&realInput);
  cvReleaseImage(&imaginaryInput);
  cvReleaseImage(&complexInput);
  cvReleaseMat(&dft_A);
  cvReleaseImage(&bw_im);

  cvReleaseImage(&image_Mag);
  cvReleaseImage(&image_Phase);

  cvReleaseImage(&log_mag);
  cvReleaseImage(&log_mag_Filt);
  return final_result;
  //return bw_im;
}
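This follows the spectral-residual saliency recipe: take the log magnitude of the image spectrum, subtract its 3x3 box-filtered average (the cvFilter2D call) to obtain the residual, recombine the exponentiated residual with the original phase, and the squared magnitude of the inverse DFT, smoothed and thresholded, becomes the saliency map.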
Code Example #19
/*!
//void cameraCorrection(IplImage* src,IplImage* dst,int type, double A, int size)
//	Corrects a single frame:
//	@param[in] src the image to correct
//	@param[out] dst where the result is stored	
//	@param[in] type (default=MEDIAN): can take the following values...
//		MEDIAN median filter to reduce noise
//		SHARPENING sharpening filter with mask (0,-1,0;-1,5,-1;0,-1,0)
//		HIGH_BOOST high-boost sharpening, depending on the value of A (default=1.1)
//			mask	-1   -1   -1 
//						-1  A*9-1 -1
//						-1   -1   -1
//		The following combinations are also allowed 
//						(changing the order of the addends does not change the result; the operations are always applied in the same order)
//		MEDIAN+SHARPENING
//		MEDIAN+SHARPENING+MEDIAN
//		MEDIAN+HIGH_BOOST
//		MEDIAN+HIGH_BOOST+MEDIAN
//	@param[in] A (default=1.1) parameter for HIGH_BOOST filtering
//	@param[in] size (default=5) size of the median filter mask
*/
void cameraCorrection(IplImage* src,IplImage* dst,int type, double A, int size){	
	double w;
	CvMat* kernel = cvCreateMat( 3, 3, CV_32FC1) ;
	LOG4CXX_TRACE(loggerCameraCorrection , "Camera correction started");
	try{

		switch(type){
		case MEDIAN:
			cvSmooth(src,dst,CV_MEDIAN,size,0,0,0);
			LOG4CXX_DEBUG(loggerCameraCorrection,"Median filter application");
			break;
		case HIGH_BOOST:
			w=9*A-1;
			cvSet2D(kernel, 0, 0, cvRealScalar( -1) );
			cvSet2D(kernel, 0, 1, cvRealScalar( -1) ); 
			cvSet2D(kernel, 0, 2, cvRealScalar( -1) );
			cvSet2D(kernel, 1, 0, cvRealScalar( -1) );
			cvSet2D(kernel, 1, 1, cvRealScalar(w) );
			cvSet2D(kernel, 1, 2, cvRealScalar( -1) );
			cvSet2D(kernel, 2, 0, cvRealScalar( -1) );
			cvSet2D(kernel, 2, 1, cvRealScalar( -1) );
			cvSet2D(kernel, 2, 2, cvRealScalar( -1) );

			cvFilter2D(src, dst,kernel,cvPoint(0,0));
			LOG4CXX_DEBUG(loggerCameraCorrection,"High boost application");
			break;
		case SHARPENING:
			cvSet2D(kernel, 0, 0, cvRealScalar( 0) );
			cvSet2D(kernel, 0, 1, cvRealScalar( -1) ); 
			cvSet2D(kernel, 0, 2, cvRealScalar( 0) );
			cvSet2D(kernel, 1, 0, cvRealScalar( -1) );
			cvSet2D(kernel, 1, 1, cvRealScalar(5) );
			cvSet2D(kernel, 1, 2, cvRealScalar( -1) );
			cvSet2D(kernel, 2, 0, cvRealScalar( 0) );
			cvSet2D(kernel, 2, 1, cvRealScalar( -1) );
			cvSet2D(kernel, 2, 2, cvRealScalar( 0) );

			cvFilter2D(src, dst,kernel,cvPoint(0,0));
			LOG4CXX_DEBUG(loggerCameraCorrection,"Sharpening application");
			break;
		case (MEDIAN+SHARPENING):
			cvSet2D(kernel, 0, 0, cvRealScalar( 0) );
			cvSet2D(kernel, 0, 1, cvRealScalar( -1) ); 
			cvSet2D(kernel, 0, 2, cvRealScalar( 0) );
			cvSet2D(kernel, 1, 0, cvRealScalar( -1) );
			cvSet2D(kernel, 1, 1, cvRealScalar(5) );
			cvSet2D(kernel, 1, 2, cvRealScalar( -1) );
			cvSet2D(kernel, 2, 0, cvRealScalar( 0) );
			cvSet2D(kernel, 2, 1, cvRealScalar( -1) );
			cvSet2D(kernel, 2, 2, cvRealScalar( 0) );
		
			cvSmooth(src,dst,CV_MEDIAN,size,0,0,0);
			cvFilter2D(dst, dst,kernel,cvPoint(0,0));
			LOG4CXX_DEBUG(loggerCameraCorrection,"Median filter and Sharpening application");
			break;
		case (MEDIAN+HIGH_BOOST):
			w=9*A-1;
			cvSet2D(kernel, 0, 0, cvRealScalar( -1) );
			cvSet2D(kernel, 0, 1, cvRealScalar( -1) ); 
			cvSet2D(kernel, 0, 2, cvRealScalar( -1) );
			cvSet2D(kernel, 1, 0, cvRealScalar( -1) );
			cvSet2D(kernel, 1, 1, cvRealScalar(w) );
			cvSet2D(kernel, 1, 2, cvRealScalar( -1) );
			cvSet2D(kernel, 2, 0, cvRealScalar( -1) );
			cvSet2D(kernel, 2, 1, cvRealScalar( -1) );
			cvSet2D(kernel, 2, 2, cvRealScalar( -1) );

			cvSmooth(src,dst,CV_MEDIAN,size,0,0,0);
			cvFilter2D(dst, dst,kernel,cvPoint(0,0));
			LOG4CXX_DEBUG(loggerCameraCorrection,"Median filter and high boost application");
			break;
		case (MEDIAN+HIGH_BOOST+MEDIAN):
			w=9*A-1;
			cvSet2D(kernel, 0, 0, cvRealScalar( -1) );
			cvSet2D(kernel, 0, 1, cvRealScalar( -1) ); 
			cvSet2D(kernel, 0, 2, cvRealScalar( -1) );
			cvSet2D(kernel, 1, 0, cvRealScalar( -1) );
			cvSet2D(kernel, 1, 1, cvRealScalar(w) );
			cvSet2D(kernel, 1, 2, cvRealScalar( -1) );
			cvSet2D(kernel, 2, 0, cvRealScalar( -1) );
			cvSet2D(kernel, 2, 1, cvRealScalar( -1) );
			cvSet2D(kernel, 2, 2, cvRealScalar( -1) );

			cvSmooth(src,dst,CV_MEDIAN,size,0,0,0);
			cvFilter2D(dst, dst,kernel,cvPoint(0,0));
			cvSmooth(dst,dst,CV_MEDIAN,size-2,0,0,0);
			LOG4CXX_DEBUG(loggerCameraCorrection,"Median filter, high boost and median filter application again ");
			break;
		case(MEDIAN+SHARPENING+MEDIAN):
			cvSet2D(kernel, 0, 0, cvRealScalar( 0) );
			cvSet2D(kernel, 0, 1, cvRealScalar( -1) ); 
			cvSet2D(kernel, 0, 2, cvRealScalar( 0) );
			cvSet2D(kernel, 1, 0, cvRealScalar( -1) );
			cvSet2D(kernel, 1, 1, cvRealScalar(5) );
			cvSet2D(kernel, 1, 2, cvRealScalar( -1) );
			cvSet2D(kernel, 2, 0, cvRealScalar( 0) );
			cvSet2D(kernel, 2, 1, cvRealScalar( -1) );
			cvSet2D(kernel, 2, 2, cvRealScalar( 0) );

			cvSmooth(src,dst,CV_MEDIAN,size,0,0,0);
			cvFilter2D(dst, dst,kernel,cvPoint(0,0));
			cvSmooth(dst,dst,CV_MEDIAN,size-2,0,0,0);
			LOG4CXX_DEBUG(loggerCameraCorrection,"Median filter, sharpening and median filter application again ");
			break;
		default:
			throw 1;
		}
	}catch(int e){
		LOG4CXX_ERROR(loggerCameraCorrection,"Exception: the inserted parameter 'type' in cameraCorrection() isn't allowed");
	}
	cvReleaseMat(&kernel);
}
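A minimal usage sketch (assuming the MEDIAN, SHARPENING and HIGH_BOOST constants are defined so the sums used in the switch stay distinct):

IplImage* raw = cvLoadImage("frame.png", CV_LOAD_IMAGE_COLOR);  // hypothetical input
IplImage* fixed = cvCloneImage(raw);
cameraCorrection(raw, fixed, MEDIAN+HIGH_BOOST, 1.1, 5);  // denoise, then high-boost sharpen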
Code Example #20
File: cam.cpp Project: Mirmik/cam
int main(int argc, char* argv[])
{

    float kernel[5*5];

    for (int i =0;i<25;i++) kernel[i]=1;

        // grab any connected camera
        CvCapture* capture = cvCreateCameraCapture(CV_CAP_ANY); //cvCaptureFromCAM( 0 );
        assert( capture );

        cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH, 640); 
        cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT, 480); 

        // query the frame width and height
        double width = cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH);
        double height = cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT);
        printf("[i] %.0f x %.0f\n", width, height );

        cvNamedWindow("capture", CV_WINDOW_AUTOSIZE);

        printf("[i] press Enter for capture image and Esc for quit!\n\n");

        int counter=0;
        char filename[512];

       // cvSetMouseCallback("capture", callback, NULL);

        frame = cvQueryFrame( capture );
        framef = cvQueryFrame( capture );

        // create the images

hsv = cvCreateImage( cvGetSize(frame), 8, 3 );
h_plane = cvCreateImage( cvGetSize(frame), 8, 1 );
s_plane = cvCreateImage( cvGetSize(frame), 8, 1 );
v_plane = cvCreateImage( cvGetSize(frame), 8, 1 );
bmas = cvCreateImage( cvGetSize(frame), 8, 1 );
        while(true){
                // grab a frame
                framef = cvQueryFrame( capture );

               //cvSmooth(framef, frame, CV_GAUSSIAN,5,5);
                // display
                char c = cvWaitKey(60);
                if (c == 27) { // ESC pressed
                        break;
                }

        /*for (int i = 0; i < frame->height; i++)
                for (int j = 0; j < frame->width; j++)
                {
                if (    //beetween(PIXEL(j,i,RED), 0, 100) &&
                        //beetween(PIXEL(j,i,GREEN), 50, 255) &&
                        //beetween(PIXEL(j,i,BLUE), 0, 100)
                                PIXEL(j,i,GREEN) 
                        - 
                                (PIXEL(j,i,RED) + PIXEL(j,i,BLUE)) / 2
                        >
                                10
                )
                SETCOLOR(j,i,0,0,0);
                }
        */

//  convert to HSV
cvCvtColor( frame, hsv, CV_BGR2HSV );
// split into channels


char* ref;
        for (int i = 0; i < frame->height * frame->width; i++)
                {

ref = &mas[i];

                if (beetween(HSVPIXEL(i,HUE), (55/2), (180/2))
                        &&
                beetween(HSVPIXEL(i,VAL), 10, 255)
                &&
                beetween(HSVPIXEL(i,SAT), 50, 250)
                        )
                *ref = (*ref + 1) % 10;
                else *ref = 0;

                //if (
                    //mas[i+1] == 0 &&
                    //mas[i-1] == 0 &&
                    //mas[i+frame->width] == 0 &&
                    //mas[i-frame->width] == 0 &&
                    //)
                    //*ref = 0;

                if (*ref > 1) 
                    bmas->imageData[i] = 255;
                else
                    bmas->imageData[i] = 0;

                };



                CvMat kernel_matrix = cvMat(5,5,CV_32FC1,kernel);
                cvFilter2D(bmas,bmas,&kernel_matrix, cvPoint(-1,-1));


        //cvCvtColor( hsv, frame, CV_HSV2RGB );    
        cvShowImage("capture", bmas);
        
        cvSmooth(bmas, bmas, CV_GAUSSIAN,5,5);

        }
        // release resources
        cvReleaseCapture( &capture );
        cvDestroyWindow("capture");
        return 0;
}
Code Example #21
File: VarFlow.cpp Project: horsewin/ARMicroMachines
/**
   Calculates the optical flow between two images.

   @param[in] imgA   First input image
   @param[in] imgB   Second input image, the flow is calculated from imgA to imgB
   @param[out] imgU   Horizontal flow field
   @param[out] imgV   Vertical flow field
   @param[in] saved_data   Flag indicating the previous imgB is now imgA (i.e. subsequent frames), saving some calculation time
   
   @return   Flag to indicate successful completion

*/
int VarFlow::CalcFlow(IplImage* imgA, IplImage* imgB, IplImage* imgU, IplImage* imgV, bool saved_data = false){
    
    if(!initialized)
      return 0;
      
    IplImage* swap_img;
      
    //Don't recalculate imgAfloat, just swap with imgBfloat
    if(saved_data){
        
       CV_SWAP(imgAfloat, imgBfloat, swap_img);
       
       cvResize(imgB, imgBsmall, CV_INTER_LINEAR);
       cvConvert(imgBsmall, imgBfloat);  // Calculate new imgBfloat
       cvSmooth(imgBfloat, imgBfloat, CV_GAUSSIAN, 0, 0, sigma, 0 );
       
    }
    
    //Calculate imgAfloat as well as imgBfloat
    else{
    
        cvResize(imgA, imgAsmall, CV_INTER_LINEAR);
        cvResize(imgB, imgBsmall, CV_INTER_LINEAR);
    
        cvConvert(imgAsmall, imgAfloat);
        cvConvert(imgBsmall, imgBfloat);
    
        cvSmooth(imgAfloat, imgAfloat, CV_GAUSSIAN, 0, 0, sigma, 0 );
        cvSmooth(imgBfloat, imgBfloat, CV_GAUSSIAN, 0, 0, sigma, 0 );
        
    }
    
    cvFilter2D(imgAfloat, imgAfx, &fx_mask, cvPoint(-1,-1));  // X spacial derivative
    cvFilter2D(imgAfloat, imgAfy, &fy_mask, cvPoint(-1,-1));  // Y spacial derivative
    
    cvSub(imgBfloat, imgAfloat, imgAft, NULL);  // Temporal derivative
    
    cvMul(imgAfx,imgAfx,imgAfxfx_array[0], 1);
    cvMul(imgAfx,imgAfy,imgAfxfy_array[0], 1);
    cvMul(imgAfx,imgAft,imgAfxft_array[0], 1);
    cvMul(imgAfy,imgAfy,imgAfyfy_array[0], 1);
    cvMul(imgAfy,imgAft,imgAfyft_array[0], 1);
    
    cvSmooth(imgAfxfx_array[0], imgAfxfx_array[0], CV_GAUSSIAN, 0, 0, rho, 0 );
    cvSmooth(imgAfxfy_array[0], imgAfxfy_array[0], CV_GAUSSIAN, 0, 0, rho, 0 );
    cvSmooth(imgAfxft_array[0], imgAfxft_array[0], CV_GAUSSIAN, 0, 0, rho, 0 );
    cvSmooth(imgAfyfy_array[0], imgAfyfy_array[0], CV_GAUSSIAN, 0, 0, rho, 0 );
    cvSmooth(imgAfyft_array[0], imgAfyft_array[0], CV_GAUSSIAN, 0, 0, rho, 0 );
    
    int i;
    
    //Fill all the levels of the multigrid algorithm with resized images
    for(i = 1; i < (max_level - start_level + 1); i++){
        
        cvResize(imgAfxfx_array[i-1], imgAfxfx_array[i], CV_INTER_LINEAR);
        cvResize(imgAfxfy_array[i-1], imgAfxfy_array[i], CV_INTER_LINEAR);
        cvResize(imgAfxft_array[i-1], imgAfxft_array[i], CV_INTER_LINEAR);
        cvResize(imgAfyfy_array[i-1], imgAfyfy_array[i], CV_INTER_LINEAR);
        cvResize(imgAfyft_array[i-1], imgAfyft_array[i], CV_INTER_LINEAR);
        
    }
    
    int k = (max_level - start_level);

    while(1)
		{    
        gauss_seidel_recursive(k, (max_level - start_level), k, pow((float)2.0,(float)(k)), imgAfxft_array, imgAfyft_array);
            
        if(k > 0){            
            // Transfer velocity from coarse to fine                           
            cvResize(imgU_array[k], imgU_array[k-1], CV_INTER_LINEAR);
            cvResize(imgV_array[k], imgV_array[k-1], CV_INTER_LINEAR);
            
            k--;            
        }else{
          break;
				}            
    }
    
    // Transfer to output image, resize if necessary
    cvResize(imgU_array[0], imgU, CV_INTER_LINEAR);
    cvResize(imgV_array[0], imgV, CV_INTER_LINEAR);
    
    // If started algorithm with smaller image than original, scale the flow field
    if(start_level > 0){
	
		cvScale(imgU, imgU, pow(2.0, start_level));
		cvScale(imgV, imgV, pow(2.0, start_level));
		
	}
    
    return 1;
}
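The five rho-smoothed derivative products computed above (fx*fx, fx*fy, fx*ft, fy*fy, fy*ft) are the entries of the structure tensor used by combined local-global (CLG) style variational flow; the coarse-to-fine loop then solves the resulting linear system with recursive Gauss-Seidel sweeps and upsamples the velocities level by level.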
Code Example #22
File: Exercise6.1.c Project: CarlChenCC/examples
int main (int argc, const char * argv[])
{
    if ( argc != 2 )
    {
        fprintf(stderr, "Usage: <image>\n");
        exit(1);
    }

    IplImage* image = cvLoadImage(argv[1], CV_LOAD_IMAGE_GRAYSCALE);

    if ( image == NULL )
    {
        fprintf(stderr, "Couldn't load image %s\n", argv[1]);
        exit(1);
    }

    IplImage* dst = cvCloneImage(image);
    //cvSetZero(dst);

    CvMat* rotation = cvCreateMat(2, 3, CV_32FC1);

#if 0
    // Optimized for finding 3 pixel wide lines.
    float zeroDegreeLineData[] = {
        -10, -10, -10, -10, -10,
        3, 3, 3, 3, 3,
        14, 14, 14, 14, 14,
        3, 3, 3, 3, 3,
        -10, -10, -10, -10, -10
    };
#if 0
    float zeroDegreeLineData[] = {
        10, 10, 10, 10, 10,
        -3, -3, -3, -3, -3,
        -14, -14, -14, -14, -14,
        -3, -3, -3, -3, -3,
        10, 10, 10, 10, 10
    };
#endif

    CvMat zeroDegreeLine = cvMat(5, 5, CV_32FC1, zeroDegreeLineData);
    PrintMat("Zero Degree Line", &zeroDegreeLine);

    cv2DRotationMatrix(cvPoint2D32f(2,2), 60.0, 1.0, rotation);

    CvMat* kernel = cvCreateMat(5, 5, CV_32FC1);

#else
    // Optimized for finding 1 pixel wide lines. The sum of all coefficients is 0, so this kernel has
    // the tendency to send pixels towards zero
#if 0
    float zeroDegreeLineData[] = {
        10, 10, 10,
        -20, -20, -20,
        10, 10, 10
    };
#elif 0
    float zeroDegreeLineData[] = {
        -10, -10, -10,
        20, 20, 20,
        -10, -10, -10
    };
#else
    // Line detector optimized to find a horizontal line 1 pixel wide that is darker (smaller value) than its
    // surrounding pixels. This works because darker 1-pixel-wide horizontal lines contribute a smaller-magnitude
    // negative component, so their convolved value ends up higher than that of the surrounding pixels. See
    // Convolution.numbers for a simple example of how this works.
    float zeroDegreeLineData[] = {
        1, 1, 1,
        -2, -2, -2,
        1, 1, 1
    };
#endif

    CvMat zeroDegreeLine = cvMat(3, 3, CV_32FC1, zeroDegreeLineData);
    PrintMat("Zero Degree Line", &zeroDegreeLine);

    // Going to rotate the horizontal line detecting kernel by 60 degrees so that it will detect 60 degree lines.
    cv2DRotationMatrix(cvPoint2D32f(1,1), 60.0, 1.0, rotation);

    CvMat* kernel = cvCreateMat(3, 3, CV_32FC1);

#endif

    PrintMat("Rotation", rotation);

    cvWarpAffine(&zeroDegreeLine, kernel, rotation, CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS, cvScalarAll(0));
    PrintMat("Kernel", kernel);

    cvFilter2D( image, dst, kernel, cvPoint(-1,-1));

    cvNamedWindow("main", CV_WINDOW_NORMAL);
    cvShowImage("main", image);
    cvWaitKey(0);

    cvShowImage("main", dst);
    cvWaitKey(0);

    return 0;
}
Code Example #23
/*
// Getting feature map for the selected subimage
//
// API
// int getFeatureMaps(const IplImage * image, const int k, featureMap **map);
// INPUT
// image             - selected subimage
// k                 - size of cells
// OUTPUT
// map               - feature map
// RESULT
// Error status
*/
int getFeatureMaps_dp(const IplImage* image,const int k, CvLSVMFeatureMap **map)
{
    int sizeX, sizeY;
    int p, px, strsz;
    int height, width, channels;
    int i, j, kk, c, ii, jj, d;
    float  * datadx, * datady;
    float tmp, x, y, tx, ty;
    IplImage * dx, * dy;
    int *nearest_x, *nearest_y;
    float *w, a_x, b_x;

    float kernel[3] = {-1.f, 0.f, 1.f}; 
    CvMat kernel_dx = cvMat(1, 3, CV_32F, kernel);
    CvMat kernel_dy = cvMat(3, 1, CV_32F, kernel);

    float * r;
    int    * alfa;
    
    float boundary_x[CNTPARTION+1];
    float boundary_y[CNTPARTION+1];
    float max, tmp_scal;
    int    maxi;

	height = image->height;
	width  = image->width ;

    channels  = image->nChannels;

	dx    = cvCreateImage(cvSize(image->width , image->height) , IPL_DEPTH_32F , 3);
    dy    = cvCreateImage(cvSize(image->width , image->height) , IPL_DEPTH_32F , 3);

    sizeX = width  / k;
    sizeY = height / k;
    px    = CNTPARTION  + 2 * CNTPARTION; // contrast-sensitive and contrast-insensitive features
    p     = px;
    strsz = sizeX * p;
    allocFeatureMapObject(map, sizeX, sizeY, p,  px);

	cvFilter2D(image, dx, &kernel_dx, cvPoint(-1, 0));
	cvFilter2D(image, dy, &kernel_dy, cvPoint(0, -1));
	
    for(i = 0; i <= CNTPARTION; i++)
    {
        boundary_x[i] = cosf((((float)i) * (((float)PI) / (float) (CNTPARTION))));
        boundary_y[i] = sinf((((float)i) * (((float)PI) / (float) (CNTPARTION))));
    }/*for(i = 0; i <= CNTPARTION; i++) */

    r    = (float *)malloc( sizeof(float) * (width * height));
    alfa = (int   *)malloc( sizeof(int  ) * (width * height * 2));

    for(j = 1; j < height-1; j++)
    {
        datadx = (float*)(dx->imageData + dx->widthStep *j);
        datady = (float*)(dy->imageData + dy->widthStep *j);
        for(i = 1; i < width-1; i++)
        {
			c = 0;
            x = (datadx[i*channels+c]);
            y = (datady[i*channels+c]);

            r[j * width + i] =sqrtf(x*x + y*y);
            for(kk = 1; kk < channels; kk++)
            {
                tx = (datadx[i*channels+kk]);
                ty = (datady[i*channels+kk]);
                tmp =sqrtf(tx*tx + ty*ty);
                if(tmp > r[j * width + i])
                {
                    r[j * width + i] = tmp;
                    c = kk;
                    x = tx;
                    y = ty;
                }
            }/*for(kk = 1; kk < channels; kk++)*/

            
            
            max  = boundary_x[0]*x + boundary_y[0]*y;
            maxi = 0;
            for (kk = 0; kk < CNTPARTION; kk++) {
                tmp_scal = boundary_x[kk]*x + boundary_y[kk]*y;
                if (tmp_scal> max) {
                    max = tmp_scal;
                    maxi = kk;
                }else if (-tmp_scal> max) {
                    max = -tmp_scal;
                    maxi = kk + CNTPARTION;
                }
            }
            alfa[j * width * 2 + i * 2    ] = maxi % CNTPARTION;
            alfa[j * width * 2 + i * 2 + 1] = maxi;  
        }/*for(i = 1; i < width-1; i++)*/
    }/*for(j = 1; j < height-1; j++)*/

    // compute the interpolation weights and cell offsets
    nearest_x = (int *)malloc(sizeof(int) * k);
    nearest_y = (int *)malloc(sizeof(int) * k);
    w         = (float*)malloc(sizeof(float) * (k * 2));
    
    for(i = 0; i < k / 2; i++)
    {
        nearest_x[i] = -1;
        nearest_y[i] = -1;
    }/*for(i = 0; i < k / 2; i++)*/
    for(i = k / 2; i < k; i++)
    {
        nearest_x[i] = 1;
        nearest_y[i] = 1;
    }/*for(i = k / 2; i < k; i++)*/

    for(j = 0; j < k / 2; j++)
    {
        b_x = k / 2 + j + 0.5f;
        a_x = k / 2 - j - 0.5f;
        w[j * 2    ] = 1.0f/a_x * ((a_x * b_x) / ( a_x + b_x)); 
        w[j * 2 + 1] = 1.0f/b_x * ((a_x * b_x) / ( a_x + b_x));  
    }/*for(j = 0; j < k / 2; j++)*/
    for(j = k / 2; j < k; j++)
    {
        a_x = j - k / 2 + 0.5f;
        b_x =-j + k / 2 - 0.5f + k;
        w[j * 2    ] = 1.0f/a_x * ((a_x * b_x) / ( a_x + b_x)); 
        w[j * 2 + 1] = 1.0f/b_x * ((a_x * b_x) / ( a_x + b_x));  
    }/*for(j = k / 2; j < k; j++)*/


    // interpolation
    for(i = 0; i < sizeY; i++)
    {
        for(j = 0; j < sizeX; j++)
        {
            for(ii = 0; ii < k; ii++)
            {
                for(jj = 0; jj < k; jj++)
                {
					if ((i * k + ii > 0) && (i * k + ii < height - 1) && (j * k + jj > 0) && (j * k + jj < width - 1))
					{
						d    =  (k*i + ii)* width + (j*k + jj);
						(*map)->Map[(i                ) * strsz + (j                ) * (*map)->p + alfa[d * 2    ]             ] += 
							r[d] * w[ii * 2    ] * w[jj * 2    ];
						(*map)->Map[(i                ) * strsz + (j                ) * (*map)->p + alfa[d * 2 + 1] + CNTPARTION] += 
							r[d] * w[ii * 2    ] * w[jj * 2    ];
						if ((i + nearest_y[ii] >= 0) && (i + nearest_y[ii] <= sizeY - 1))
						{
							(*map)->Map[(i + nearest_y[ii]) * strsz + (j                ) * (*map)->p + alfa[d * 2    ]             ] += 
								r[d] * w[ii * 2 + 1] * w[jj * 2    ];
							(*map)->Map[(i + nearest_y[ii]) * strsz + (j                ) * (*map)->p + alfa[d * 2 + 1] + CNTPARTION] += 
								r[d] * w[ii * 2 + 1] * w[jj * 2    ];
						}
						if ((j + nearest_x[jj] >= 0) && (j + nearest_x[jj] <= sizeX - 1))
						{
							(*map)->Map[(i                ) * strsz + (j + nearest_x[jj]) * (*map)->p + alfa[d * 2    ]             ] += 
								r[d] * w[ii * 2    ] * w[jj * 2 + 1];
							(*map)->Map[(i                ) * strsz + (j + nearest_x[jj]) * (*map)->p + alfa[d * 2 + 1] + CNTPARTION] += 
								r[d] * w[ii * 2    ] * w[jj * 2 + 1];
						}
						if ((i + nearest_y[ii] >= 0) && (i + nearest_y[ii] <= sizeY - 1) && (j + nearest_x[jj] >= 0) && (j + nearest_x[jj] <= sizeX - 1))
						{
							(*map)->Map[(i + nearest_y[ii]) * strsz + (j + nearest_x[jj]) * (*map)->p + alfa[d * 2    ]             ] += 
								r[d] * w[ii * 2 + 1] * w[jj * 2 + 1];
							(*map)->Map[(i + nearest_y[ii]) * strsz + (j + nearest_x[jj]) * (*map)->p + alfa[d * 2 + 1] + CNTPARTION] += 
								r[d] * w[ii * 2 + 1] * w[jj * 2 + 1];
						}
					}
                }/*for(jj = 0; jj < k; jj++)*/
            }/*for(ii = 0; ii < k; ii++)*/
        }/*for(j = 0; j < sizeX; j++)*/
    }/*for(i = 0; i < sizeY; i++)*/
    
    cvReleaseImage(&dx);
    cvReleaseImage(&dy);


    free(w);
    free(nearest_x);
    free(nearest_y);

    free(r);
    free(alfa);

    return LATENT_SVM_OK;
}
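
Two steps in getFeatureMaps_dp are worth isolating: the gradients come from the 1-D kernels {-1, 0, 1} applied along x and y, and each gradient is then binned by projecting it onto CNTPARTION boundary directions and keeping the bin with the largest absolute dot product. A minimal sketch of the binning step alone, with NBINS standing in for CNTPARTION and all names illustrative:

#include <math.h>

#define NBINS 9                       /* stand-in for CNTPARTION */
#define PI_F  3.14159265f

/* Returns the contrast-sensitive bin in [0, 2*NBINS); bin % NBINS is the
   contrast-insensitive bin, mirroring the alfa[] pair filled in above. */
static int orientationBin(float x, float y)
{
    float best  = -1e30f;
    int   besti = 0;
    int   k;
    for (k = 0; k < NBINS; k++) {
        float dot = cosf((float)k * PI_F / NBINS) * x
                  + sinf((float)k * PI_F / NBINS) * y;
        if (dot > best)       { best =  dot; besti = k; }
        else if (-dot > best) { best = -dot; besti = k + NBINS; }
    }
    return besti;
}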
Code example #24
File: Filters.cpp  Project: plsang/visualrhythm
void Filters::highPass(VRFrame* frame, int typeMask)
{
    IplImage* imgDst = 0;
    IplImage* imgAux = 0;
    IplImage* imgNew = 0;

    VRFrame* frameAux = 0;

    int cols = 0;
    int rows = 0;
    CvMat *filter = 0;

    // mask type
    double masks[][9] = {
        // Type 1
        { 
            -1, -1, -1,
            -1,  8, -1,
            -1, -1, -1,
        },
        // Type 2
        {
            0, -1,  0,
            -1, 4, -1,
            0, -1,  0,
        },
        // Type 3
        {
            1 , -2,  1,
            -2,  4, -2,
            1 , -2,  1,
        },
    };

    // allocate memory for temporary image
    imgAux = VRFrame::imgAlloc(frame);
    imgDst = VRFrame::imgAlloc(frame);
    imgNew = VRFrame::imgAlloc(frame);

    frameAux = new VRFrame(frame);

    // Type 0 is the default mask; fall back to it if the requested type is out of range
    if (typeMask > 2 || typeMask < 0)
        typeMask = 0;

    Log::writeLog("%s :: param: frame[%x] typeMask[%d]", __FUNCTION__, frame, typeMask);

    cols = 3;
    rows = 3;

    // point the temporary headers at the frame's packed pixel buffer;
    // note imgDst shares imgAux's data, so the convolution below runs in place
    imgAux->imageData = frameAux->data->imageData;
    imgAux->widthStep = frameAux->data->width;

    imgDst->imageData = imgAux->imageData;
    imgDst->widthStep = imgAux->width;

    Log::writeLog("%s :: cvCreateMatHeader: rows[%d] cols[%d] CV_64FC1", __FUNCTION__, rows, cols);

    filter = cvCreateMatHeader(rows, cols, CV_64FC1);

    Log::writeLog("%s :: cvSetData: filter[%x] masks[%x] cols[%d]", __FUNCTION__, filter, masks[typeMask], cols*8);

    cvSetData(filter, masks[typeMask], cols * sizeof(double));

    Log::writeLog("%s :: cvFilter2D: imgAux[%x] imgDst[%x] filter[%x] cvPoint[%x]", __FUNCTION__, imgAux, imgDst, filter, cvPoint(-1,-1));

    cvFilter2D(imgAux, imgDst, filter, cvPoint(-1,-1));

    // copy the filtered result into a fresh image
    VRFrame::imgCopy(imgDst, imgNew);

    frame->setImage(imgNew);

    // release memory
    VRFrame::imgDealloc(imgAux);
    VRFrame::imgDealloc(imgDst);
    delete frameAux;

}
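
Because the masks are plain double arrays, the cvCreateMatHeader/cvSetData pair can be collapsed: cvMat() builds a stack-allocated header directly over the array, so the manual step argument (cols * sizeof(double)) disappears. A minimal sketch of the same Type 1 convolution under that assumption (the function name is illustrative, not part of VRFrame):

/* Hypothetical standalone version of the Type 1 high-pass filter. */
static void highPassType1(IplImage* src, IplImage* dst)
{
    double mask[9] = { -1, -1, -1,
                       -1,  8, -1,
                       -1, -1, -1 };
    /* Header over the array; no cvCreateMatHeader/cvSetData needed. */
    CvMat kernel = cvMat(3, 3, CV_64FC1, mask);
    cvFilter2D(src, dst, &kernel, cvPoint(-1, -1));
}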
Code example #25
/* Called once for each frame captured from the camera: performs moving-target
   detection and draws the bounding box. Returns true once a group is complete,
   false while the group is still open. This function is called by the UI. */
PREPROCESS_API bool PreProcessFrame(Frame frame, Frame &lastFrame)
{
	Frame tempFrame; 
	tempFrame = prevFrame; 

	currImg = frame.image;  

	CvSize imgSize = cvSize(currImg->width,currImg->height);
	IplImage *grayImg = cvCreateImage(imgSize,IPL_DEPTH_8U,1); // grayscale image of the current frame
	IplImage *gxImg = cvCreateImage(imgSize,IPL_DEPTH_8U,1); // X-direction gradient of the current frame
	IplImage *gyImg = cvCreateImage(imgSize,IPL_DEPTH_8U,1); // Y-direction gradient of the current frame
	IplImage *diffImg = cvCreateImage(imgSize,IPL_DEPTH_8U,1); // difference image of the current frame
	IplImage *diffImg_2 = cvCreateImage(imgSize,IPL_DEPTH_8U,1); // difference image of the previous frame
	IplImage *pyr = NULL; // temporary image for erosion-based denoising, created on demand below

	int height,width; // image height and width

	char Kx[9] = {1,0,-1,2,0,-2,1,0,-1}; // X-direction mask, used to compute the X gradient
	char Ky[9] = {1,2,1,0,0,0,-1,-2,-1}; // Y-direction mask, used to compute the Y gradient
	CvMat KX,KY;
	KX = cvMat(3,3,CV_8S,Kx); // build the kernel header
	KY = cvMat(3,3,CV_8S,Ky); // build the kernel header

	cvCvtColor(currImg,grayImg,CV_BGR2GRAY);
	cvSmooth(grayImg,grayImg,CV_GAUSSIAN,7,7); // Gaussian smoothing
	cvFilter2D(grayImg,gxImg,&KX,cvPoint(-1,-1)); // X-direction gradient
	cvFilter2D(grayImg,gyImg,&KY,cvPoint(-1,-1)); // Y-direction gradient
	cvAdd(gxImg,gyImg,grayImg,NULL); // combined gradient image

	cvReleaseImage(&gxImg);
	cvReleaseImage(&gyImg);

	height = grayImg->height;
	width = grayImg->width; 

	bool alarm = false; // whether the alarm region contains a moving target

	CvRect rect; // bounding rectangle

	if(!firstFrmRec) // first frame
	{
		firstFrmRec = true; 
		lastGrayImg = cvCreateImage(imgSize,IPL_DEPTH_8U,1);
		lastDiffImg = cvCreateImage(imgSize,IPL_DEPTH_8U,1);
		cvCopy(grayImg,lastGrayImg,NULL); // use the first frame as the background
		cvReleaseImage(&diffImg); // unused on the first frame

		xRightAlarm = currImg->width - 100;
		yBottomAlarm = currImg->height - 100;
		yTopAlarm = currImg->height - 200;
	}
	else
	{
		cvAbsDiff(grayImg,lastGrayImg,diffImg); // difference image of the current frame
		cvCopy(grayImg,lastGrayImg,NULL); // the current gradient image becomes the background for the next frame
		cvThreshold(diffImg,diffImg,15,255,CV_THRESH_BINARY); // binarize the difference image
		if(secondFrmRec) // from the third frame on
		{
			cvAnd(diffImg,lastDiffImg,diffImg_2); // AND with the previous mask to get the "accurate" moving target of the previous frame
			pyr = cvCreateImage(cvSize((imgSize.width&-2)/2,(imgSize.height&-2)/2),8,1);
			cvPyrDown(diffImg_2,pyr,7); // downsample
			cvErode(pyr,pyr,0,1); // erode to remove small noise
			cvPyrUp(pyr,diffImg_2,7); 
			cvReleaseImage(&pyr);

			if(drawAlarmArea)	cvRectangle(tempFrame.image, cvPoint(xLeftAlarm, yTopAlarm), cvPoint(xRightAlarm, yBottomAlarm), CV_RGB(0, 0, 255), 3, CV_AA, 0);
			alarm = AlarmArea(xLeftAlarm, yTopAlarm, xRightAlarm, yBottomAlarm, diffImg_2);  
		}

		cvCopy(diffImg,lastDiffImg,NULL); // keep the binarized difference image for the next frame

		cvReleaseImage(&diffImg); 

		minLeftX = 3000;
		minLeftY = 3000; 
		maxRightX = 0;
		maxRightY = 0;

		if (alarm) // a moving target was detected somewhere in the image
		{
			FindRectX(diffImg_2, 0, height); 
			FindRectY(diffImg_2, minLeftX, maxRightX);
		}

		if (!secondFrmRec) // mark that the second frame has been received
		{
			secondFrmRec = true; 
		}
	}

	if(maxRightX*maxRightY) // a moving target was detected in this frame: draw the box and update the grouping
	{
		// keep the bottom-right corner inside the image
		maxRightX = maxRightX>1 ? maxRightX:2; 
		maxRightY = maxRightY>1 ? maxRightY:2;
		maxRightX = maxRightX<(width+1) ? maxRightX:width;
		maxRightY = maxRightY<(height+1) ? maxRightY:height;

		// keep the top-left corner inside the image
		minLeftX = minLeftX>0 ? minLeftX:1;
		minLeftY = minLeftY>0 ? minLeftY:1; 
		minLeftX = minLeftX<maxRightX ? minLeftX:(maxRightX-1);
		minLeftY = minLeftY<maxRightY ? minLeftY:(maxRightY-1);

		if (drawRect) cvRectangle(tempFrame.image, cvPoint(minLeftX, minLeftY), cvPoint(maxRightX, maxRightY), CV_RGB(255, 0, 0), 3, CV_AA, 0);
		
		//outobj<<minLeftX<<"	"<<minLeftY<<"	"<<maxRightX-minLeftX<<"	"<<maxRightY-minLeftY<<endl; 

		rect = cvRect(minLeftX, minLeftY, maxRightX-minLeftX, maxRightY-minLeftY); 
		tempFrame.searchRect = rect;

		signelCount++;  
		cvReleaseImage(&grayImg);
		cvReleaseImage(&diffImg_2);   
		prevFrame = frame;
		lastFrame = tempFrame; 

		if(signelCount == groupCount) // after groupCount consecutive single-person detections, the group is closed
		{
			signelCount = 0; 
			return true;
		}
		else
		{
			return false;
		}
		

	}
	else // no moving target detected in this frame
	{ 
		cvReleaseImage(&grayImg);
		cvReleaseImage(&diffImg_2);
		prevFrame = frame; 
		lastFrame = tempFrame;  

		if (signelCount > 0) // the previous frame had a single person, this frame has none
		{
			signelCount = 0; 
			return true;
		}
		else
		{
			return false;
		}
	}

}
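
Stripped of the grouping and alarm-region bookkeeping, the detection core of PreProcessFrame is a three-stage pipeline: Sobel-style gradients via cvFilter2D, absolute difference against the previous frame's gradient image, then binarization. A self-contained sketch of just that core, with a caller-owned prevGray carrying state between frames (all names illustrative):

/* Hypothetical sketch; prevGray must be an 8-bit single-channel image that
   persists between calls, mask receives the binary motion mask. */
static void motionMask(IplImage* bgr, IplImage* prevGray, IplImage* mask)
{
    CvSize sz = cvGetSize(bgr);
    IplImage* gray = cvCreateImage(sz, IPL_DEPTH_8U, 1);
    IplImage* gx   = cvCreateImage(sz, IPL_DEPTH_8U, 1);
    IplImage* gy   = cvCreateImage(sz, IPL_DEPTH_8U, 1);

    char kx[9] = { 1, 0, -1,  2, 0, -2,  1, 0, -1 };   /* horizontal Sobel */
    char ky[9] = { 1, 2,  1,  0, 0,  0, -1, -2, -1 };  /* vertical Sobel   */
    CvMat KX = cvMat(3, 3, CV_8S, kx);
    CvMat KY = cvMat(3, 3, CV_8S, ky);

    cvCvtColor(bgr, gray, CV_BGR2GRAY);
    cvSmooth(gray, gray, CV_GAUSSIAN, 7, 7);
    cvFilter2D(gray, gx, &KX, cvPoint(-1, -1));
    cvFilter2D(gray, gy, &KY, cvPoint(-1, -1));
    cvAdd(gx, gy, gray, NULL);                 /* combined gradient image   */

    cvAbsDiff(gray, prevGray, mask);           /* change vs. previous frame */
    cvCopy(gray, prevGray, NULL);              /* gradients become the new background */
    cvThreshold(mask, mask, 15, 255, CV_THRESH_BINARY);

    cvReleaseImage(&gray);
    cvReleaseImage(&gx);
    cvReleaseImage(&gy);
}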
Code example #26
File: convolutions.cpp  Project: hone/school
int main( int argc, char * argv[] )
{
	const char * WINDOW_NAME = "Original Image vs. Box Filter vs. Gaussian";
	const int QUIT_KEY_CODE = 113;

	int box_filter_width = 3;
	float sigma = 1.0;
	std::string filename = "cameraman.tif";
	ImageRAII original_image( filename );
	CvSize image_dimensions = { original_image.image->width, original_image.image->height };
	ImageRAII box_filter_image( cvCreateImage( image_dimensions, original_image.image->depth, 3 ) );
	ImageRAII gaussian_image( cvCreateImage( image_dimensions, original_image.image->depth, 3 ) );
	ImageRAII combined_image( cvCreateImage( cvSize( original_image.image->width * 3, original_image.image->height ), original_image.image->depth, 3 ) );
	MatrixRAII box_filter = makeBoxFilter( box_filter_width );
	MatrixRAII gaussian_filter_x = make1DGaussianFilter( sigma );
	MatrixRAII gaussian_filter_y = cvCreateMat( (int)( sigma * 5 ), 1, CV_64FC1 );
	cvTranspose( gaussian_filter_x.matrix, gaussian_filter_y.matrix );
	std::vector<ImageRAII> original_image_channels( 3 );
	std::vector<ImageRAII> box_filter_channels( 3 );
	std::vector<ImageRAII> gaussian_filter_channels( 3 );
	std::vector<ImageRAII> gaussian_filter_2_channels( 3 );

	// initialize image channel vectors
	for( int i = 0; i < original_image.image->nChannels; i++ )
	{
		original_image_channels[i].image = cvCreateImage( image_dimensions, original_image.image->depth, 1 );
		box_filter_channels[i].image = cvCreateImage( image_dimensions, original_image.image->depth, 1 );
		gaussian_filter_channels[i].image = cvCreateImage( image_dimensions, original_image.image->depth, 1 );
		gaussian_filter_2_channels[i].image = cvCreateImage( image_dimensions, original_image.image->depth, 1 );
	}

	// split image channels
	cvSplit( original_image.image, original_image_channels[0].image, original_image_channels[1].image, original_image_channels[2].image, NULL );

	// apply filters
	for( int i = 0; i < original_image.image->nChannels; i++ )
	{
		cvFilter2D( original_image_channels[i].image, box_filter_channels[i].image, box_filter.matrix );
		cvFilter2D( original_image_channels[i].image, gaussian_filter_channels[i].image, gaussian_filter_x.matrix );
		cvFilter2D( gaussian_filter_channels[i].image, gaussian_filter_2_channels[i].image, gaussian_filter_y.matrix );
	}

	// Merge channels back
	cvMerge( box_filter_channels[0].image, box_filter_channels[1].image, box_filter_channels[2].image, NULL, box_filter_image.image );
	cvMerge( gaussian_filter_2_channels[0].image, gaussian_filter_2_channels[1].image, gaussian_filter_2_channels[2].image, NULL, gaussian_image.image );

	// Combine images side by side
	int step = original_image.image->widthStep;
	int step_destination = combined_image.image->widthStep;
	int nChan = original_image.image->nChannels;
	char *buf = combined_image.image->imageData;
	char *original_buf = original_image.image->imageData;
	char *box_filter_buf = box_filter_image.image->imageData;
	char *gaussian_filter_buf = gaussian_image.image->imageData;

	for( int row = 0; row < original_image.image->height; row++ )
	{
		for( int col = 0; col < original_image.image->width; col++ )
		{
			int width_adjust = 0;

			//original image
			// blue
			*( buf + row * step_destination + nChan * col + width_adjust ) = *( original_buf + row * step + nChan * col );
			// green
			*( buf + row * step_destination + nChan * col + 1 + width_adjust ) = *( original_buf + row * step + nChan * col );
			// red
			*( buf + row * step_destination + nChan * col + 2 + width_adjust ) = *( original_buf + row * step + nChan * col );

			// box filter
			width_adjust = original_image.image->width * nChan;
			*( buf + row * step_destination + nChan * col + width_adjust ) = *( box_filter_buf + row * step + nChan * col );
			*( buf + row * step_destination + nChan * col + 1 + width_adjust ) = *( box_filter_buf + row * step + nChan * col );
			*( buf + row * step_destination + nChan * col + 2 + width_adjust ) = *( box_filter_buf + row * step + nChan * col );

			// gaussian filter
			width_adjust = original_image.image->width * 2 * nChan;
			*( buf + row * step_destination + nChan * col + width_adjust ) = *( gaussian_filter_buf + row * step + nChan * col );
			*( buf + row * step_destination + nChan * col + 1 + width_adjust ) = *( gaussian_filter_buf + row * step + nChan * col );
			*( buf + row * step_destination + nChan * col + 2 + width_adjust ) = *( gaussian_filter_buf + row * step + nChan * col );
		}
	}

	// create windows
	cvNamedWindow( WINDOW_NAME, CV_WINDOW_AUTOSIZE );
	cvShowImage( WINDOW_NAME, combined_image.image );

	// wait for keyboard input
	int key_code = 0;
	while( key_code != QUIT_KEY_CODE )
	{
		key_code = cvWaitKey( 0 );
	}

	return 0;
}
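
make1DGaussianFilter itself is not shown in this example, but the cvTranspose trick only works if it returns a 1 x N row kernel. A plausible sketch of such a helper appears below (hypothetical, not the project's actual code). Filtering with the row kernel and then its transpose is equivalent to a single 2-D Gaussian pass because the Gaussian is separable, costing roughly 2N multiplies per pixel instead of N*N.

#include <math.h>

/* Build a normalized 1 x (int)(5*sigma) Gaussian row kernel (CV_64FC1);
   the width choice matches the caller above. Caller releases the matrix. */
static CvMat* makeGaussianRow(float sigma)
{
    int n = (int)(sigma * 5);
    CvMat* g = cvCreateMat(1, n, CV_64FC1);
    double sum = 0.0, c = (n - 1) / 2.0;
    int i;
    for (i = 0; i < n; i++) {
        double v = exp(-((i - c) * (i - c)) / (2.0 * sigma * sigma));
        cvmSet(g, 0, i, v);
        sum += v;
    }
    for (i = 0; i < n; i++)        /* normalize to unit sum */
        cvmSet(g, 0, i, cvmGet(g, 0, i) / sum);
    return g;
}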