Example No. 1
Mat Slic::calculateGradientImage(){
  // Set up the kernels
  Mat kernelH, kernelV;
  kernelH = Mat::zeros( 3, 3, CV_32F );
  kernelV = Mat::zeros( 3, 3, CV_32F );
  kernelH.at<float>(Point(1,0)) = -1;
  kernelH.at<float>(Point(1,2)) = 1;
  kernelV.at<float>(Point(0,1)) = -1;
  kernelV.at<float>(Point(2,1)) = 1;
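  // Note: kernelH ends up holding the vertical difference [-1; 0; 1] and kernelV the horizontal one;
  // the swap is harmless here because only the gradient magnitude is used below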
  // (-1,-1) anchor is centre, -1 ddepth is same as source
  Point anchor = Point(-1, -1);
  int ddepth = -1;
  double delta = 0;
  // Perform the filters
  Mat dy = this->greyImage.clone();
  Mat dx = this->greyImage.clone();
  filter2D(this->greyImage, dy, ddepth , kernelV, anchor, delta, BORDER_DEFAULT );
  filter2D(this->greyImage, dx, ddepth , kernelH, anchor, delta, BORDER_DEFAULT );
  // Create the magnitude image
  Mat mag = this->greyImage.clone();

  dy.convertTo(dy, CV_32F);
  dx.convertTo(dx, CV_32F);
  mag.convertTo(mag, CV_32F);

  magnitude(dy, dx, mag);
  double min, max;
  minMaxLoc(mag, &min, &max);
  mag = 255*(mag/max);

  return mag;
}
void performTonalDistribution(Image I, Image M, Mat &B, Mat &D){
    
    // I.mat is expected to be in CV_32F
    B = I.bf(I);
    D = Mat::zeros(B.rows, B.cols, CV_32F);
    for(int i = 0; i < I.mat.rows; i++) for(int j = 0; j < I.mat.cols; j++){
        D.at<float>(i, j) = I.mat.at<float>(i, j) - B.at<float>(i, j);
    }
   
    //create both dx and dy images for D and I
    Mat Ddx, Ddy;
    
    Mat Idx, Idy;
    
    Mat dx(1, 2, CV_32F);
    dx.at<float>(0,0) = -1; dx.at<float>(0,1) = 1;
    
    Mat dy(2, 1, CV_32F);
    dy.at<float>(0,0) = -1; dy.at<float>(1,0) = 1;
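    // dx = [-1 1] and dy = [-1 1]^T are forward-difference kernels for the horizontal and vertical gradients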
    
    filter2D(D, Ddx, D.depth(), dx);
    filter2D(D, Ddy, D.depth(), dy);
    
    filter2D(I.mat, Idx, I.mat.depth(), dx);
    filter2D(I.mat, Idy, I.mat.depth(), dy);
    
    // constrain the detail-layer gradients: zero them where the sign disagrees with the input gradient,
    // and clamp their magnitude to the input's
    for(int i=0; i < D.rows; i++) for(int j = 0; j < D.cols; j++){
        //Doing Dx
        if(sign(Ddx.at<float>(i, j)) != sign(Idx.at<float>(i, j)))
            Ddx.at<float>(i, j) = 0;
        else if(abs(Ddx.at<float>(i, j)) > abs(Idx.at<float>(i, j)))
            Ddx.at<float>(i, j) = Idx.at<float>(i, j);
        else{}
        
        //Doing Dy
        if(sign(Ddy.at<float>(i, j)) != sign(Idy.at<float>(i, j)))
            Ddy.at<float>(i, j) = 0;
        else if(abs(Ddy.at<float>(i, j)) > abs(Idy.at<float>(i, j)))
            Ddy.at<float>(i, j) = Idy.at<float>(i, j);
        else{}
    }
    
    // Poisson reconstruction rebuilds D from the constrained gradient fields; B is recomputed from it below
    D = poissonReconstruction(D, Ddx,Ddy);
    // rescale D back to log values in the range 0 to 1
    double minVal, maxVal;
    minMaxLoc(D, &minVal, &maxVal);
    Mat draw;
    D.convertTo(draw, CV_8U, 255/(maxVal - minVal), -minVal * 255 / (maxVal - minVal));
    draw.convertTo(D, CV_32F);
    D /= 255;
    for(int i = 0; i < I.mat.rows; i++) for(int j = 0; j < I.mat.cols; j++){
        B.at<float>(i, j) = abs(I.mat.at<float>(i, j) - D.at<float>(i, j));
    }
   
    //Tonal Balance
    Mat Bm = M.bf(M);
    histogramMatching(B.clone(), M.mat, B);
}
Example No. 3
void find_orientations( vector<feat_val>& feat_vec, Mat& src_x_kern, Mat src_y_kern )
{
   Mat feat_Ix, feat_Iy;
   float Ix_sum = 0.0;
   float Iy_sum = 0.0;
   float mag_Ix_Iy;
   float angle;
   float tmp_x, tmp_y;
   float new_x, new_y;
   int equiv_row, equiv_col;
   int size_diff = FEATURE_SIZE * sqrt( 2.0 ) - FEATURE_SIZE;
   
   for(unsigned int i = 0; i < feat_vec.size(); i++ )
   {
      feat_vec.at( i ).feature.create( FEATURE_SIZE, FEATURE_SIZE, CV_32F );
      
      filter2D( feat_vec.at( i ).region_patch, feat_Ix, -1, src_x_kern );
      filter2D( feat_vec.at( i ).region_patch, feat_Iy, -1, src_y_kern );
   
      Ix_sum = sum( feat_Ix )[ 0 ];
      Iy_sum = sum( feat_Iy )[ 0 ];

      //Average derivative for each direction
      feat_vec.at( i ).major_orientation_x = Ix_sum / FEATURE_SIZE_SQD;
      feat_vec.at( i ).major_orientation_y = Iy_sum / FEATURE_SIZE_SQD;

      //Normalize
      mag_Ix_Iy = sqrt( feat_vec.at( i ).major_orientation_x * feat_vec.at( i ).major_orientation_x + 
         feat_vec.at( i ).major_orientation_y * feat_vec.at( i ).major_orientation_y);
      
      feat_vec.at( i ).major_orientation_x /= mag_Ix_Iy;
      feat_vec.at( i ).major_orientation_y /= mag_Ix_Iy;
      
      angle = atan2( feat_vec.at( i ).major_orientation_y , feat_vec.at( i ).major_orientation_x );
      
      feat_vec.at( i ).orientation_angle = angle;
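      // Resample the patch: rotate each destination coordinate by the dominant angle and read the source
      // patch at that location, so the stored feature is brought into a canonical orientation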
      
      for( int curr_row = 0; curr_row < FEATURE_SIZE; curr_row++ )
      {
         tmp_y = curr_row - FEATURE_SIZE_DIV_2;
         
         for( int curr_col = 0; curr_col < FEATURE_SIZE; curr_col++ )
         {
            tmp_x = curr_col -  FEATURE_SIZE_DIV_2;
            
            new_x = tmp_x * cos( angle ) - tmp_y * sin( angle );
            new_y = tmp_x * sin( angle ) + tmp_y * cos( angle );
            
            equiv_col = new_x + FEATURE_SIZE_DIV_2;// + size_diff;
            equiv_row = new_y + FEATURE_SIZE_DIV_2;// + size_diff;
            
            feat_vec.at(i).feature.at<float>( curr_row, curr_col ) = feat_vec.at(i).region_patch.at<float>( equiv_row, equiv_col);
         }
      }
      
      //Normalize 
      feat_vec.at( i ).feature /= sum( feat_vec.at( i ).feature)[ 0 ];
   }
}
Example No. 4
//  Function that filters an image with a Gaussian filter.
//     - velikost is the filter size in pixels
//	   - sigma is the spread used to compute the Gaussian function values
void separabilityGauss( const cv::Mat& src, int velikost, double sigma, cv::Mat& sepDst, cv::Mat& noSepDst, int &noSepCnt, int &sepCnt )
{
	// velikost must be an odd number, at least 3
	int stred = velikost/2;
	stred = MAX(1,stred);
	velikost = 2*stred+1;

	// prepare the 1D Gaussian filter
	cv::Mat gauss1D = cv::Mat::zeros( 1, velikost, CV_64FC1 );


	// compute the Gaussian filter coefficients here - by hand, by substituting into the Gaussian function
	/* *** BEGIN OWN IMPLEMENTATION 2 *** */
	double b = 1/(2*sigma*sigma);
	for( int i = 0; i < velikost; ++i ) {
		double hodnota = exp( -b * (i-stred)*(i-stred) );
		//hodnota = a*exp( -b* (i-center)*(i-center) );
		gauss1D.at<double>(i) = hodnota;
	}
	/* *** END OWN IMPLEMENTATION 2 *** */


	// normalise the values
	gauss1D = gauss1D / sum(gauss1D).val[0];

	// prepare the 2D Gaussian filter
	// we use convolution of the 1D Gaussian kernel in the x and y directions with a unit impulse
	// study the parameters of filter2D - http://docs.opencv.org/modules/imgproc/doc/filtering.html#filter2d
	cv::Mat gauss2D = cv::Mat::zeros( velikost, velikost, CV_64FC1 );
	gauss2D.at<double>(stred,stred) = 1.;
	filter2D( gauss2D, gauss2D, -1, gauss1D );
	filter2D( gauss2D, gauss2D, -1, gauss1D.t() );
	gauss2D = gauss2D / sum(gauss2D).val[0];

	// blur the image using the separability of the operator - use the 1D filter
	/* *** BEGIN OWN IMPLEMENTATION 3 *** */
	filter2D( src, sepDst, -1, gauss1D );
	filter2D( sepDst, sepDst, -1, gauss1D.t() );
	/* *** END OWN IMPLEMENTATION 3 *** */


	// blur the image without using separability - use the 2D filter
	/* *** BEGIN OWN IMPLEMENTATION 4 *** */
	filter2D( src, noSepDst, -1, gauss2D );
	/* *** END OWN IMPLEMENTATION 4 *** */

	// compute by hand and print the number of operations for the version with/without separability
	// a simplified count is enough - the number of multiplications - e.g. src.rows*src.cols, etc.

	/* *** BEGIN OWN IMPLEMENTATION 5 *** */
	sepCnt   = src.rows*src.cols*(velikost+velikost);	
	noSepCnt = src.rows*src.cols*velikost*velikost;	
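	// e.g. for a 512x512 image and a 7x7 kernel: separable ~3.7M multiplications vs ~12.8M with the full 2D kernel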
	/* *** END OWN IMPLEMENTATION 5 *** */
	
	return;
}
bool IPLGradientOperator::roberts(IPLImage* image)
{
    static float rxf[2][2] = {{1.0,0},{0,-1.0}};
    static cv::Mat rxKernel(2,2,CV_32FC1,rxf);
    static float ryf[2][2] = {{0,1.0},{-1.0,0}};
    static cv::Mat ryKernel(2,2,CV_32FC1,ryf);
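    // rxKernel and ryKernel are the 2x2 Roberts cross kernels (diagonal differences)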

    int width = image->width();
    int height = image->height();

    // fast gradient
    int progress = 0;
    int maxProgress = height*width;

    notifyProgressEventHandler(-1);

    cv::Mat input;
    cv::Mat gX;
    cv::Mat gY;
    cvtColor(image->toCvMat(),input,CV_BGR2GRAY);

    filter2D(input,gX,CV_32F,rxKernel);
    filter2D(input,gY,CV_32F,ryKernel);

    for(int x=1; x<width; x++)
    {
        // progress
        notifyProgressEventHandler(100*progress++/maxProgress);
        for(int y=1; y<height; y++)
        {
          ipl_basetype gx = gX.at<cv::Vec<float,1>>(y,x).val[0] * FACTOR_TO_FLOAT ;
          ipl_basetype gy = gY.at<cv::Vec<float,1>>(y,x).val[0] * FACTOR_TO_FLOAT ;

          double phase = (gx!=0.0 || gy!=0.0 )? atan2( -gy, gx ) : 0.0;

          while( phase > 2.0 * PI ) phase -= 2.0 * PI;
          while( phase < 0.0 ) phase += 2.0 * PI;

          // phase 0.0-1.0
          phase /= 2 * PI;

          _result->phase(x,y) = phase;
          _result->magnitude(x,y) = sqrt(gx*gx + gy*gy);
        }
    }

   return true;
}
Example No. 6
void matlabHelper::applylut_1(Mat &src,Mat &dst)
{
    static int lut_endpoints[] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,0,1,1,1,1,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,1,0,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,1,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,1,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,1,0,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,1,0,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,1,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,1,0,1,1,1,1,0,1,1,1,1,0};
    
    Mat k(3,3,CV_16UC1);
    
    k.at<unsigned short>(0,0)=256;
    k.at<unsigned short>(1,0)=128;
    k.at<unsigned short>(2,0)=64;
    k.at<unsigned short>(0,1)=32;
    k.at<unsigned short>(1,1)=16;
    k.at<unsigned short>(2,1)=8;
    k.at<unsigned short>(0,2)=4;
    k.at<unsigned short>(1,2)=2;
    k.at<unsigned short>(2,2)=1;
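    // With a binary (0/1) input, these power-of-two weights pack each 3x3 neighbourhood into a 9-bit
    // index that is then looked up in lut_endpoints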
    
    src.convertTo(dst, CV_16UC1); // filter2D does not support a CV_8U source with a CV_16U destination, so convert first
    
    filter2D(dst,dst,-1,k);
    for(int i=0;i<dst.rows;i++)
    {
        for (int j=1;j<dst.cols;j++)
        {
            dst.at<unsigned short>(i,j)=lut_endpoints[dst.at<unsigned short>(i,j)];
        }
    }
    
    dst.convertTo(dst,CV_8UC1);
}
Example No. 7
void matlabHelper::applylut_8(Mat &src,Mat &dst,Mat& lut)
{
    Mat k(3,3,CV_16UC1);
    
    k.at<unsigned short>(0,0)=256;
    k.at<unsigned short>(1,0)=128;
    k.at<unsigned short>(2,0)=64;
    k.at<unsigned short>(0,1)=32;
    k.at<unsigned short>(1,1)=16;
    k.at<unsigned short>(2,1)=8;
    k.at<unsigned short>(0,2)=4;
    k.at<unsigned short>(1,2)=2;
    k.at<unsigned short>(2,2)=1;
    
    src.convertTo(dst, CV_16UC1); // filter2D does not support a CV_8U source with a CV_16U destination, so convert first
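    // Eight passes: each iteration re-encodes every 3x3 neighbourhood as a 9-bit index and maps it
    // through row I of the lut, feeding the result into the next pass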
    
    for(int I=7;I>=0;I--)
    {
        filter2D(dst,dst,-1,k);
        for(int i=0;i<dst.rows;i++)
        {
            for (int j=1;j<dst.cols;j++)
            {
                dst.at<unsigned short>(i,j)=lut.at<unsigned short>(I,dst.at<unsigned short>(i,j));
            }
        }
    }
    dst.convertTo(dst,CV_8UC1);
    
}
void getAllUnitShifts(const Mat& rgbImg, vector<Mat> &shifts)
    {
    vector<int> indexes(9);
    iota(indexes.begin(), indexes.end(), 0); // (0..8)
    indexes.erase(indexes.begin() + 4); // (0..3, 5..8)

    vector<CvPoint> points;
    transform(indexes.begin(), indexes.end(), back_inserter(points), [](int i) { return cvPoint(i/3, i%3); });
    // (0, 0) -> [1,0,0; 0,0,0; 0,0,0]
    // (0, 1) -> [0,0,0; 1,0,0; 0,0,0]
    // (0, 2) -> [0,0,0; 0,0,0; 1,0,0]
    // (1, 0) -> [0,1,0; 0,0,0; 0,0,0]
    // (1, 1) -> [0,0,0; 0,1,0; 0,0,0] <-- erased (identical shift)
    // (1, 2) -> [0,0,0; 0,0,0; 0,1,0]
    // (2, 0) -> [0,0,1; 0,0,0; 0,0,0]
    // (2, 1) -> [0,0,0; 0,0,1; 0,0,0]
    // (2, 2) -> [0,0,0; 0,0,0; 0,0,1]

    transform(points.begin(), points.end(), back_inserter(shifts),
        [rgbImg] (const CvPoint &delta)
            {
            Mat kernel = Mat::zeros(3, 3, CV_32F); // the kernel must be CV_32F for the at<float>() write below
            kernel.at<float>(1, 1) = 1.0f;
            Mat shift = Mat(rgbImg.rows, rgbImg.cols, rgbImg.type(), Scalar().all(0));
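            // A unit impulse at the kernel centre with the anchor moved to delta shifts the image by one
            // pixel; BORDER_CONSTANT zero-fills the vacated border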
            filter2D(rgbImg, shift, -1 , kernel, delta, 0, BORDER_CONSTANT);
            return shift;
            });
    }
Example No. 9
void LabelOCR::preProcess(const Mat &InputImage, Mat &binImage)
{
	IplImage* iplInputImage = new IplImage(InputImage);
	IplImage* newInputImage = NULL;
	ImageProcess pro;
	int resize_ret = pro.ImageResize(iplInputImage, 2.5, newInputImage);
	if (resize_ret < 0) {
		return;
	}

	
    Mat midImage, midImage2, dst;
    Mat Morph = getStructuringElement(MORPH_CROSS,Size( 1, 1 ) );
    Mat HPKernel = (Mat_<float>(5,5) << -1.0,  -1.0, -1.0, -1.0,  -1.0,
                                        -1.0,  -1.0, -1.0, -1.0,  -1.0,
                                        -1.0,  -1.0, 25, -1.0,  -1.0,
                                        -1.0,  -1.0, -1.0, -1.0,  -1.0,
                                        -1.0,  -1.0, -1.0, -1.0,  -1.0);
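    // 5x5 high-pass sharpening kernel: 24 coefficients of -1 around a centre of 25, so the weights sum to 1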
    medianBlur( cv::Mat(newInputImage), dst, 1);
	filter2D(dst, midImage2, InputImage.depth(), HPKernel);
	cvtColor(midImage2, binImage, COLOR_RGB2GRAY);
	/*IplImage* temIplImg = new IplImage(midImage);
	CvSize sz;
	sz.width = temIplImg->width;
	sz.height = temIplImg->height;
	IplImage* temIplImg1 = cvCreateImage(sz, temIplImg->depth, temIplImg->nChannels);
    cvThreshold(temIplImg, temIplImg1, 60, 255, CV_THRESH_BINARY);
	binImage = temIplImg1;*/
    //threshold(binImage, binImage ,0, 255, CV_THRESH_BINARY | CV_THRESH_OTSU);
    //erode(binImage, binImage, 3, Point(-1, -1), 2, 1, 1);
    //morphologyEx( binImage,binImage,MORPH_CLOSE, Morph);
}
Example No. 10
void my_laplace(cv::Mat& srcImg, cv::Mat& dstImg)
{
    Mat kernel(3,3,CV_32F,Scalar(-1));
    kernel.at<float>(1,1) = 8.9;
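    // 3x3 Laplacian-style kernel: all -1 with a boosted centre of 8.9, so the weights sum to 0.9 and a
    // fraction of the original intensity is kept on top of the edge response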
    filter2D(srcImg,dstImg,srcImg.depth(),kernel);
    //cvtColor(dstImg, dstImg, CV_RGB2BGR);
}
Example No. 11
File: Dip4.cpp Project: kziel1/dip
/*
degraded :  degraded input image
filter   :  filter which caused degradation
snr      :  signal to noise ratio of the input image
return   :   restorated output image
*/
Mat Dip4::wienerFilter(Mat& degraded, Mat& filter, double snr){

  // be sure not to touch them
  degraded = degraded.clone();
  filter = filter.clone();
   
  // Q_k = conjugate_transpose(P)_k / ( |P_k|^2 + 1/SNR^2 )

  Mat filterFreq;
  filter.convertTo(filterFreq, CV_32F);

  Mat planesFilter[] = {filterFreq, Mat::zeros(filterFreq.size(), CV_32F)};

  merge(planesFilter, 2, filterFreq);
  
  dft(filterFreq, filterFreq, DFT_COMPLEX_OUTPUT); // filterFreq == P


  // create Q

  split(filterFreq, planesFilter);

  Mat Re = planesFilter[0];
  Mat Im = planesFilter[1];

  Mat QRe = Re.clone();
  Mat QIm = Im.clone();

  for (int x = 0; x < filterFreq.rows; x++) for (int y = 0; y < filterFreq.cols; y++) {

    // A*_ij = Ã_ji
    float reConjugateTranspose = Re.at<float>(y, x);
    float imConjugateTranspose = -Im.at<float>(y, x);

    float resq = Re.at<float>(x, y) * Re.at<float>(x, y);
    float imsq = Im.at<float>(x, y) * Im.at<float>(x, y);
    float absreim = sqrt(resq + imsq);

    QRe.at<float>(x, y) = reConjugateTranspose / (absreim * absreim + 1/(snr * snr));
    QIm.at<float>(x, y) = imConjugateTranspose / (absreim * absreim + 1/(snr * snr));

  }
  
  Mat Q = Mat::zeros(filterFreq.size(), CV_32F);

  Mat qplanes[] = {QRe, QIm};
  
  merge(qplanes, 2, Q);

  Mat original;

  dft(Q, Q, DFT_INVERSE + DFT_SCALE);
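  // Inverse DFT brings Q back to the spatial domain; its real plane is used as the restoration kernel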
  split(Q, qplanes);
  filter2D(degraded, original, -1, qplanes[0]);
  normalize(original, original, 0, 255, CV_MINMAX);
  original.convertTo(original, CV_8UC1);

  return original;

}
IntensityImage * DefaultPreProcessing::stepEdgeDetection(const IntensityImage &src) const {
	// Create a base timer. The timer is used to keep track of the time
	// the implementation takes.
	BaseTimer basetimer;
	// Start the timer.
	basetimer.start();
	cv::Mat OverHillOverDale;
	HereBeDragons::HerLoveForWhoseDearLoveIRiseAndFall(src, OverHillOverDale);
	//cv::medianBlur(*image, *image, 3);
	//cv::GaussianBlur(*image, *image, cv::Size(3, 3), 0, 0, cv::BORDER_DEFAULT);
	cv::Mat ThoroughBushThoroughBrier = (cv::Mat_<float>(9, 9) << 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, -4, -4, -4, 1, 1, 1, 1, 1, 1, -4, -4, -4, 1, 1, 1, 1, 1, 1, -4, -4, -4, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0);
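	// 9x9 block-scale Laplacian: a 3x3 centre of -4 values surrounded by a cross of 1s; the coefficients sum to zero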
	cv::Mat OverParkOverPale;
	filter2D(OverHillOverDale, OverParkOverPale, CV_8U, ThoroughBushThoroughBrier, cv::Point(-1, -1), 0, cv::BORDER_DEFAULT);
	IntensityImage * ThoroughFloodThoroughFire = ImageFactory::newIntensityImage();
	HereBeDragons::NoWantOfConscienceHoldItThatICall(OverParkOverPale, *ThoroughFloodThoroughFire);
	// Stop the timer
	basetimer.stop();
	// Write the time that was needed to an output file.
	std::ofstream myfile;
	myfile.open("tijdedge.txt", std::ofstream::ate);
	myfile << "EdgeDetectionDefault convert tijd in s: " << basetimer.elapsedSeconds() << " tijd ms:"
		<< basetimer.elapsedMilliSeconds() << " tijd us" << basetimer.elapsedMicroSeconds();
	myfile.close();
	// Return the resulting image.
	return ThoroughFloodThoroughFire;
}
Example No. 13
KDvoid Filter2D	( KDint nIdx )
{
	Mat		tSrc;
	Mat		tDst;

	// Load the source image
	tSrc = imread ( "/res/image/chicky_512.png", 1 ); 

	Mat			tKernel;
	Point		tAnchor; 
	KDint		nSize; 
	KDint       nIndex;
	KDint		nDepth;
	KDdouble	dDelta;
	
	nIndex  = 3;
	tAnchor = Point( -1, -1 );
	dDelta  = 0;
	nDepth  = -1;
	nSize   = 3 + 2 * ( nIndex % 5 );
       
    tKernel = Mat::ones ( nSize, nSize, CV_32F ) / (KDfloat) ( nSize * nSize );
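    // Normalised box kernel: an nSize x nSize matrix of ones divided by its area, i.e. a mean filter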

	filter2D ( tSrc, tDst, nDepth , tKernel, tAnchor, dDelta, BORDER_DEFAULT );

	g_pController->setFrame ( 1, tSrc );
	g_pController->setFrame ( 2, tDst );
}
Example No. 14
void gabor_filter(cv::Mat& img,vector<cv::Mat> &featureMaps)
{
	//cv::Mat img input character image
	const int kernel_size = 7; // should be odd
	// variables for gabor filter
	double Kmax = PI/2;
	double f = sqrt(2.0);
	double sigma = 2*PI;
	int U = 0;
	int V = 0;
	int GaborH = kernel_size;
	int GaborW = kernel_size;
	int UStart = 0, UEnd = 8;
	int VStart = 1, VEnd = 2;
	// variables for filter2D
	cv::Point archor(-1,-1);
	int ddepth = CV_32F;//CV_64F
	//double delta = 0;
	// eight orientations at a single frequency
	for(V = VStart; V < VEnd; V++){
		for(U = UStart; U < UEnd; U++){
			cv::Mat kernel_re, kernel_im;
			cv::Mat dst_re, dst_im, dst_mag;
			kernel_re = getMyGabor(GaborW, GaborH, U, V,
				Kmax, f, sigma, CV_32F, "real");
			kernel_im = getMyGabor(GaborW, GaborH, U, V,
				Kmax, f, sigma, CV_32F, "imag");
			// flip kernel
			// the Gabor kernel is symmetric, so no flip is needed
			filter2D(img, dst_re, ddepth, kernel_re);
			filter2D(img, dst_im, ddepth, kernel_im);
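			// combine the real and imaginary responses into the Gabor magnitude for this orientation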

			dst_mag.create(img.rows, img.cols, CV_32FC1);
			magnitude(cv::Mat(dst_re),cv::Mat(dst_im),
				dst_mag);
			// normalise the Gabor response map to the 0-255 range
			cv::normalize(dst_mag, dst_mag, 0, 255, CV_MINMAX);
			dst_mag.convertTo(dst_mag,CV_8U);		
			featureMaps.push_back(dst_mag);
			kernel_re.release();
			kernel_im.release();
			dst_re.release();
			dst_im.release();
			dst_mag.release();
		}
	}
}
Mat THDUtil::sharpen(Mat img)
{
	Mat kern = (Mat_<char>(3, 3) << 0, -1, 0,
		-1, 5, -1,
		0, -1, 0);
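	// standard 3x3 sharpening kernel (centre 5, cross of -1); the coefficients sum to 1, preserving brightness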
	filter2D(img, img, -1, kern);
	return img;
}
Example No. 16
Mat CustomSIFT::computeGradient(const Mat &img)
{
    Mat h = (Mat_<double>(1,3) << -1, 0, 1);
    Mat v = (Mat_<double>(3,1) << -1, 0, 1);
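    // central-difference kernels [-1 0 1] for the horizontal and vertical derivatives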
    
    Mat h_grad;
    filter2D(img,h_grad,CV_32F,h);
    
    Mat v_grad;
    filter2D(img,v_grad,CV_32F,v);
//    h_grad=cv::abs(h_grad);
//    v_grad=cv::abs(v_grad);
    
    Mat chan[2] = {h_grad, v_grad};
    Mat ret;
    merge(chan,2,ret);
    return ret;
}
Example No. 17
Mat GaborFR::getFilterImagPart(Mat& src,Mat& imag)
{
    //CV_Assert(imag.type()==src.type());
    Mat dst;
    Mat kernel;
    flip(imag,kernel,-1); // mirror about the centre
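    // flipping the kernel turns filter2D's correlation into a true convolution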
    //	filter2D(src,dst,CV_32F,kernel,Point(-1,-1),0,BORDER_CONSTANT);
    filter2D(src,dst,CV_32F,kernel,Point(-1,-1),0,BORDER_REPLICATE);
    return dst;
}
Example No. 18
/*********************************************************
**  Dynamic Features
*
*
**********************************************************/
Mat dynamicFeatures(Mat image, vector<Mat> &imageGM, vector<Mat> &imageGb){
    Mat     dynamic;
    uint          i;
    Mat filGM,filGb;
    Mat       GM,Gb;

    filter2D(image, filGM, CV_32FC3, krn7GM);
    filter2D(image, filGb, CV_32FC3, krn7Gb);
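    // Filter the current frame with the GM and Gb kernels and keep a sliding window of the last 7 responses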

    if(imageGM.size()>=7){
        imageGM.erase( imageGM.begin() );
        imageGb.erase( imageGb.begin() );
    }

    imageGM.push_back(filGM);
    imageGb.push_back(filGb);

    GM = RM[imageGM.size()-1]*imageGM[0];
    for(i=1;i<imageGM.size();++i)
        GM += RM[imageGM.size()-1-i]*imageGM[i];

    Gb = Rb[imageGb.size()-1]*imageGb[0];
    for(i=1;i<imageGb.size();++i)
        Gb += Rb[imageGb.size()-1-i]*imageGb[i];

    /*GM = RM[0]*imageGM[0] + RM[1]*imageGM[1] + RM[2]*imageGM[2];
    GM+= RM[3]*imageGM[3] + RM[4]*imageGM[4] + RM[5]*imageGM[5];

    Gb = Rb[0]*imageGb[0] + Rb[1]*imageGb[1] + Rb[2]*imageGb[2];
    Gb+= Rb[3]*imageGb[3] + Rb[4]*imageGb[4] + Rb[5]*imageGb[5];*/

    dynamic = GM;
    // dynamic = GM + Gb;

    double  minVal, maxVal;
    Mat mapD;
    minMaxLoc(dynamic,&minVal,&maxVal);
    mapD = (dynamic-minVal)*255/(maxVal-minVal);

    mapD.convertTo(mapD, CV_8UC3);

    return mapD;
}
Example No. 19
void Laplacian( const Mat& src, Mat& dst, int ddepth, int ksize,
                double scale, double delta, int borderType )
{
    if( ksize == 1 || ksize == 3 )
    {
        float K[2][9] =
        {{0, 1, 0, 1, -4, 1, 0, 1, 0},
         {2, 0, 2, 0, -8, 0, 2, 0, 2}};
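        // hard-coded 3x3 Laplacian stencils: the first row is the 4-neighbour kernel (ksize 1),
        // the second also weights the diagonals (ksize 3)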
        Mat kernel(3, 3, CV_32F, K[ksize == 3]);
        if( scale != 1 )
            kernel *= scale;
        filter2D( src, dst, ddepth, kernel, Point(-1,-1), delta, borderType );
    }
    else
    {
        const size_t STRIPE_SIZE = 1 << 14;

        int depth = src.depth();
        int ktype = std::max(CV_32F, std::max(ddepth, depth));
        int wdepth = depth == CV_8U && ksize <= 5 ? CV_16S : depth <= CV_32F ? CV_32F : CV_64F;
        int wtype = CV_MAKETYPE(wdepth, src.channels());
        Mat kd, ks;
        getSobelKernels( kd, ks, 2, 0, ksize, false, ktype );
        if( ddepth < 0 )
            ddepth = src.depth();
        int dtype = CV_MAKETYPE(ddepth, src.channels());
        dst.create( src.size(), dtype );

        int dy0 = std::min(std::max((int)(STRIPE_SIZE/(getElemSize(src.type())*src.cols)), 1), src.rows);
        Ptr<FilterEngine> fx = createSeparableLinearFilter(src.type(),
            wtype, kd, ks, Point(-1,-1), 0, borderType, borderType, Scalar() ); 
        Ptr<FilterEngine> fy = createSeparableLinearFilter(src.type(),
            wtype, ks, kd, Point(-1,-1), 0, borderType, borderType, Scalar() );

        int y = fx->start(src), dsty = 0, dy = 0;
        fy->start(src);
        const uchar* sptr = src.data + y*src.step;

        Mat d2x( dy0 + kd.rows - 1, src.cols, wtype );
        Mat d2y( dy0 + kd.rows - 1, src.cols, wtype );

        for( ; dsty < src.rows; sptr += dy0*src.step, dsty += dy )
        {
            fx->proceed( sptr, (int)src.step, dy0, d2x.data, (int)d2x.step );
            dy = fy->proceed( sptr, (int)src.step, dy0, d2y.data, (int)d2y.step );
            if( dy > 0 )
            {
                Mat dstripe = dst.rowRange(dsty, dsty + dy);
                d2x.rows = d2y.rows = dy; // modify the headers, which should work
                d2x += d2y;
                d2x.convertTo( dstripe, dtype, scale, delta );
            }
        }
    }
}
Example No. 20
int FluxTensorMethod::apply_averaging_filters(const Mat & input, Mat & result)
{
	static deque<Mat> axay_fifo;

	result = input.clone();

	Mat ax_result;
	filter2D(input, ax_result, -1, ax_filter);

	Mat ax_ay_result;
	filter2D(ax_result, ax_ay_result, -1, ay_filter);
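	// separable spatial averaging (x then y); the temporal filter is applied once nAt frames have accumulated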

	axay_fifo.push_back(ax_ay_result);
	if(axay_fifo.size() < (unsigned int)nAt)
		return 1;

	apply_temporal_filter(&axay_fifo, at_filter, nAt, result);

	axay_fifo.pop_front();
	return 0;
}
Example No. 21
void skizImage::skizFilter2D(int times)
{
	Mat kern = (Mat_<double>(3,3) << 0.062467, 0.125, 0.062467,
			0.125, 0.250131, 0.125,
			0.062467, 0.125, 0.062467);
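	// approximately Gaussian 3x3 smoothing kernel, applied `times` times in place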
    for(int i=0 ;i<times; ++i) 	
	{
		filter2D(image,dst,image.depth(),kern);
		image = dst;
	}

}
Example No. 22
int FluxTensorMethod::compute_Itt(const Mat & input, Mat & result)
{
	static deque<Mat> isxsy_fifo;

	result = input.clone();

	Mat sx_result;
	filter2D(input, sx_result, -1, sx_filter);

	Mat sx_sy_result;
	filter2D(sx_result, sx_sy_result, -1, sy_filter);

	isxsy_fifo.push_back(sx_sy_result);
	if(isxsy_fifo.size() < (unsigned int)nDt)
		return 1;

	apply_temporal_filter(&isxsy_fifo, dtt_filter, nDt, result);

	isxsy_fifo.pop_front();
	return 0;
}
Example No. 23
  // functions for street name detector
  void get_gradient_maps(cv::Mat& _grey_img,
  		cv::Mat& _gradient_magnitude, cv::Mat& _gradient_direction){

  	cv::Mat C = cv::Mat_<double>(_grey_img);

  	cv::Mat kernel = (cv::Mat_<double>(1,3) << -1,0,1);
  	cv::Mat grad_x;
  	filter2D(C, grad_x, -1, kernel, cv::Point(-1,-1), 0, cv::BORDER_DEFAULT);

  	cv::Mat kernel2 = (cv::Mat_<double>(3,1) << -1,0,1);
  	cv::Mat grad_y;
  	filter2D(C, grad_y, -1, kernel2, cv::Point(-1,-1), 0, cv::BORDER_DEFAULT);
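  	// assumes _gradient_magnitude and _gradient_direction are pre-allocated CV_64F matrices of the input size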

  	for(int i=0; i<grad_x.rows; i++){
  		for(int j=0; j<grad_x.cols; j++){
  			_gradient_magnitude.at<double>(i,j) =
  					sqrt(pow(grad_x.at<double>(i,j),2)+pow(grad_y.at<double>(i,j),2));
  			_gradient_direction.at<double>(i,j) =
  					atan2(grad_y.at<double>(i,j), grad_x.at<double>(i,j));

  		}
  	}
  }
Example No. 24
/*********************************************************
**  Pyramid
*
*   Returns a vector of Mat.
*   resul.at(0) = multiscale sum
*
**********************************************************/
Multiscale Pyramid(Mat img, Mat kernel){
    Mat         aux;
    Multiscale pyrm;
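    // Each level: low-pass filter with kernel, store the response, then halve the resolution for the next level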

    // Level 1
    filter2D(img, aux, CV_32F, kernel);
    pyrm.push_back(aux);
    resize(aux, aux, Size(), 0.5, 0.5);


    // Level 2
    filter2D(aux, aux, CV_32F, kernel);
    pyrm.push_back(aux);
    resize(aux, aux, Size(), 0.5, 0.5);

    // Level 3
    filter2D(aux, aux, CV_32F, kernel);
    pyrm.push_back(aux);
    resize(aux, aux, Size(), 0.5, 0.5);

    // Level 4
    filter2D(aux, aux, CV_32F, kernel);
    pyrm.push_back(aux);
    resize(aux, aux, Size(), 0.5, 0.5);

    // Level 5
    filter2D(aux, aux, CV_32F, kernel);
    pyrm.push_back(aux);
    resize(aux, aux, Size(), 0.5, 0.5);

    // Level 6
    filter2D(aux, aux, CV_32F, kernel);
    pyrm.push_back(aux);
    resize(aux, aux, Size(), 0.5, 0.5);

    // Level 7
    filter2D(aux, aux, CV_32F, kernel);
    pyrm.push_back(aux);
    resize(aux, aux, Size(), 0.5, 0.5);

    // Level 8
    filter2D(aux, aux, CV_32F, kernel);
    pyrm.push_back(aux);
    resize(aux, aux, Size(), 0.5, 0.5);

    return pyrm;
}
Example No. 25
Mat lap_dir(Mat img, int direction)
{
	int cols = img.cols;
	int rows = img.rows;
	
	Mat img_filtered;
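	// 1-D second-difference kernel [1 -2 1]: applied as a row kernel for odd directions, as a column kernel otherwise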
	
	if (direction%2)
	{	
		float vertical_fk[1][3] = {1,-2,1};
		Mat filter_kernel = Mat(1, 3, CV_32FC1, vertical_fk);
		filter2D(img, img_filtered, -1, filter_kernel);
	}
	else
	{
		float horizontal_fk[3][1] = {{1}, {-2}, {1}};
		Mat filter_kernel = Mat(3, 1, CV_32FC1, horizontal_fk);
		filter2D(img, img_filtered, -1, filter_kernel);
	}


	return img_filtered;

}
 Mat ZDT(Mat Zcap)
 {
   // find the edges and take the distance transform.
   cv::Mat dxZ, dyZ, edgesZ ,magZ, ZDT, magZnorm, magZthresh;
   //cv::Scharr(Zcap,dxZ,CV_32F,1,0);
   //cv::Scharr(Zcap,dyZ,CV_32F,0,1);
   filter2D(Zcap,dxZ,-1,params::dxFilter);
   filter2D(Zcap,dyZ,-1,params::dyFilter);
   magZ = sqrt((Mat_<float>)(dxZ.mul(dxZ) + dyZ.mul(dyZ)));
   // as we get further from the camera, the x-y distance
   // between adjacent pixels increases so we must normalize.
   magZnorm = magZ / Zcap;
   cv::threshold(magZnorm,magZthresh,params::DEPTH_EDGE_THRESH,1,cv::THRESH_BINARY);
   edgesZ = magZthresh | isnan(Zcap);
   //imagesc("EDGES!",edgesZ);
   
   // take the DT
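   // distanceTransform measures the distance to the nearest zero pixel, so invert the mask: edges become 0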
   edgesZ = 1 - edgesZ;
   edgesZ.convertTo(edgesZ,cv::DataType<uchar>::type);    
   cv::distanceTransform(edgesZ,ZDT,CV_DIST_L2,CV_DIST_MASK_PRECISE);
   
   //imagesc("ZDT!",ZDT);
   return ZDT;
 }
Example No. 27
File: Dip4.cpp Project: kziel1/dip
/*
img         :  input image
degradedImg :  degraded output image
filterDev   :  standard deviation of kernel for gaussian blur
snr         :  signal to noise ratio for additive gaussian noise
return      :  the used gaussian kernel
*/
Mat Dip4::degradeImage(Mat& img, Mat& degradedImg, double filterDev, double snr){

    int kSize = round(filterDev*3)*2 - 1;
   
    Mat gaussKernel = getGaussianKernel(kSize, filterDev, CV_32FC1);
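    // getGaussianKernel returns a kSize x 1 column vector; the outer product with its transpose gives a 2-D isotropic Gaussian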
    gaussKernel = gaussKernel * gaussKernel.t();
    filter2D(img, degradedImg, -1, gaussKernel);

    Mat mean, stddev;
    meanStdDev(img, mean, stddev);

    Mat noise = Mat::zeros(img.rows, img.cols, CV_32FC1);
    randn(noise, 0, stddev.at<double>(0)/snr);
    degradedImg = degradedImg + noise;
    threshold(degradedImg, degradedImg, 255, 255, CV_THRESH_TRUNC);
    threshold(degradedImg, degradedImg, 0, 0, CV_THRESH_TOZERO);

    return gaussKernel;
}
Example No. 28
//Matlab::conv2()
void matlabHelper::conv2(const Mat &img, const Mat &kernel, ConvolutionType type, Mat& dest) {
    Mat source = img;
    if(CONVOLUTION_FULL == type) {
        source = Mat();
        const int additionalRows = kernel.rows-1, additionalCols = kernel.cols-1;
        copyMakeBorder(img, source, (additionalRows+1)/2, additionalRows/2,
                       (additionalCols+1)/2, additionalCols/2, BORDER_CONSTANT, Scalar(0));
    }
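    // filter2D computes correlation; flipping the kernel and offsetting the anchor reproduces MATLAB's conv2 (true convolution)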
    
    Point anchor(kernel.cols - kernel.cols/2 - 1, kernel.rows - kernel.rows/2 - 1);
    int borderMode = BORDER_CONSTANT;
    Mat flipped;
    flip(kernel, flipped, -1);
    filter2D(source, dest, img.depth(), flipped, anchor, 0, borderMode);
    
    if(CONVOLUTION_VALID == type) {
        dest = dest.colRange((kernel.cols-1)/2, dest.cols - kernel.cols/2)
        .rowRange((kernel.rows-1)/2, dest.rows - kernel.rows/2);
    }
    
}
Example No. 29
void ImageViewer::on_actionLinear_Filter_triggered_t()
{
    int kernel_size = popup_linFilt->GetValueBox1();
    int anchor_xy = popup_linFilt->GetValueBox2();

    if(anchor_xy>kernel_size-1) {
        anchor_xy=kernel_size-1;
        popup_linFilt->SetValueBox2(anchor_xy);
     }

    cv::Mat kernel = cv::Mat::ones( kernel_size, kernel_size, CV_32F )/ (float)(kernel_size*kernel_size);
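    // box (mean) filter: all-ones kernel normalised by its area; the anchor was clamped above to stay inside the kernel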
    cv::Mat dst;
    cv::Point anchor;
    anchor = cv::Point( anchor_xy, anchor_xy );
    double delta = 0.0;
    int ddepth = -1;
      /// Apply filter
    filter2D(ASM::QPixmapToCvMat(QPixmap::fromImage(*image)), dst, ddepth , kernel, anchor, delta, cv::BORDER_DEFAULT );

    imageLabel->setPixmap(ASM::cvMatToQPixmap(dst).copy());
}
Example No. 30
cv::Mat blurPatch(const cv::Mat &_patch, cv::Point2f one, cv::Point2f two) {
    cv::Mat blurredWindow = _patch.clone();
    //std::cout << "Start Blurring" << std::endl;

    cv::Mat patch = _patch.clone();
    patch.convertTo(patch, cv::DataType<double>::type);
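    // filter in double precision, then convert the result back to 8-bit below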

	cv::Mat kernel = evaluateKernel(one,two);

	cv::Point2f anchor = cv::Point( -1, -1 );

	int delta = 0, ddepth = -1;
	filter2D(patch, patch, ddepth, kernel, anchor, delta, cv::BORDER_DEFAULT);

    patch.convertTo(patch, CV_8U);
    //std::cout << "Stop Blurring" << std::endl;

  //  hconcat(blurredWindow, patch, blurredWindow);
  //  imshow("Blurring", blurredWindow);
  //  cv::waitKey(1);

	return patch;
}