Example No. 1
int perform_convolution(FILE *in, FILE *out, float sigma, int kx, int ky, const char *comment, BOOL binary) {
  int ux = 0, uy = 0;
  float **u = ip_load_image(in, &ux, &uy, NULL);

  if (!u)
    return 4;

  float **kernel = gaussian_kernel(kx, ky, sigma);

  if (!kernel) {
    ip_deallocate_image(ux, uy, u);
    return 5;
  }

  // TODO remove this hack
  dummies(u, ux, uy);
  float **v = convolve(ux + 2, uy + 2, u, kx, ky, kernel);

  ip_deallocate_image(ux, uy, u);
  ip_deallocate_image(kx, ky, kernel);

  if (!v)
    return 6;

  ip_save_image(out, ux, uy, v, comment, binary);

  ip_deallocate_image(ux, uy, v);

  return 0;
}
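A minimal caller sketch for perform_convolution, not part of the original example: the file names, sigma, kernel size and comment string are illustrative, and ip_load_image and the other helpers are assumed to come from the surrounding project.

#include <stdio.h>

int main(void) {
  FILE *in = fopen("input.pgm", "rb");      /* illustrative input file */
  FILE *out = fopen("smoothed.pgm", "wb");  /* illustrative output file */
  if (!in || !out)
    return 1;

  /* sigma = 1.2, 5x5 Gaussian kernel, 1 = binary (raw) output, assuming BOOL is an integer-style flag */
  int rc = perform_convolution(in, out, 1.2f, 5, 5, "gaussian smoothing", 1);

  fclose(in);
  fclose(out);
  return rc;
}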
// Marr-Hildreth edge detector with Gaussian and Laplacian kernels
// inputs:
//            float *im              -    pointer to input image
//            int w, int h           -    width and height of input image
//            float sigma            -    gaussian standard deviation
//            int n                  -    kernel size
//            float tzc              -    threshold in zero-crossing
//            int padding_method     -    padding method for convolution
// output:
//            float *                -    pointer to output image
float *edges_mh(float *im, int w, int h,
                float sigma, int n, float tzc, int padding_method) {

    // generate Gaussian kernel
    float *kernel = gaussian_kernel(n,sigma);

    // smooth input image with the Gaussian kernel
    float *im_smoothed = conv2d(im, w, h, kernel, n, padding_method);

    // compute Laplacian of the smoothed image using a 3x3 operator
    float operator[9] = {1, 1, 1, 1, -8, 1, 1, 1, 1};
    float *laplacian = conv2d(im_smoothed, w+n-1, h+n-1,
                               operator, 3, padding_method);

    // compute maximum of absolute Laplacian
    float max = 0.0;
    for(int i=0; i<w; i++) {
        for(int j=0; j<h; j++) {
            //laplacian is (w+n+1)x(h+n+1) with an offset of (n+1)/2 in x and y
            float v = fabs( laplacian[(i+(n+1)/2) + (j+(n+1)/2)*(w+n+1)] );
            if( v > max ) max = v;
        }
    }

    // compute laplacian zero-crossings
    float *edges = xmalloc(w*h*sizeof(float));
    for(int i=0; i<w; i++) {
        for(int j=0; j<h; j++) {
            //laplacian is (w+n+1)x(h+n+1) with an offset of (n+1)/2 in x and y
            float UP_LE = laplacian[ (i-1+(n+1)/2) + (j+1+(n+1)/2)*(w+n+1) ];
            float UP    = laplacian[ (i  +(n+1)/2) + (j+1+(n+1)/2)*(w+n+1) ];
            float UP_RI = laplacian[ (i+1+(n+1)/2) + (j+1+(n+1)/2)*(w+n+1) ];
            float LE    = laplacian[ (i-1+(n+1)/2) + (j  +(n+1)/2)*(w+n+1) ];
            float RI    = laplacian[ (i+1+(n+1)/2) + (j  +(n+1)/2)*(w+n+1) ];
            float DO_LE = laplacian[ (i-1+(n+1)/2) + (j-1+(n+1)/2)*(w+n+1) ];
            float DO    = laplacian[ (i  +(n+1)/2) + (j-1+(n+1)/2)*(w+n+1) ];
            float DO_RI = laplacian[ (i+1+(n+1)/2) + (j-1+(n+1)/2)*(w+n+1) ];
            if( (LE*RI       < 0.0 && fabs(LE-RI)       > tzc*max) ||
                (UP_LE*DO_RI < 0.0 && fabs(UP_LE-DO_RI) > tzc*max) ||
                (DO_LE*UP_RI < 0.0 && fabs(DO_LE-UP_RI) > tzc*max) ||
                (UP*DO       < 0.0 && fabs(UP-DO)       > tzc*max) ) {
                edges[i+j*w] = 255.0;
            } else {
                edges[i+j*w] = 0.0;
            }
        }
    }

    // free memory
    free(kernel);
    free(im_smoothed);
    free(laplacian);

    return edges;
}
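A minimal caller sketch for edges_mh, not part of the original example. The parameter values are illustrative, and the padding_method constant is whatever the project's conv2d expects.

/* hypothetical caller: run the detector on an already-loaded grayscale image */
float *run_marr_hildreth(float *gray, int w, int h) {
    float sigma = 1.0f;   /* Gaussian standard deviation */
    int   n     = 9;      /* kernel size (n x n) */
    float tzc   = 0.02f;  /* zero-crossing threshold, relative to max |laplacian| */
    int   pad   = 0;      /* padding method understood by the project's conv2d */

    float *edges = edges_mh(gray, w, h, sigma, n, tzc, pad);
    /* the caller owns the returned w*h buffer and must free() it */
    return edges;
}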
Example No. 3
//static image_float gaussian_sampler( float* in, int width,int height, int d, float scale,
//float sigma_scale )
image_float gaussian_sampler( image_float in, float scale, float sigma_scale )
{
    image_float aux,out;
    ntuple_list kernel;
    unsigned int N,M,h,n,x,y,i;
    int xc,yc,j,float_x_size,float_y_size;
    float sigma,xx,yy,sum,prec;
      
    /* compute new image size and get memory for images */
    if( in->xsize * scale > (float) UINT_MAX ||
        in->ysize * scale > (float) UINT_MAX )
        error("gaussian_sampler: the output image size exceeds the handled size.");
    N = (unsigned int) ceil( in->xsize * scale );
    M = (unsigned int) ceil( in->ysize * scale );

    aux = new_image_float(N,in->ysize);
    out = new_image_float(N,M);
    
    /* sigma, kernel size and memory for the kernel */
    sigma = scale < 1.0 ? sigma_scale / scale : sigma_scale;
    /* Since scale < 1 whenever this code is reached (it is checked in LineSegmentDetection),
     sigma = sigma_scale / scale always applies */
    /*
     The size of the kernel is selected to guarantee that
     the first discarded term is at least 10^prec times smaller
     than the central value. For that, h should be larger than x, with
     e^(-x^2/2sigma^2) = 1/10^prec.
     Then,
     x = sigma * sqrt( 2 * prec * ln(10) ).
     */
    prec = 3.0;
    h = (unsigned int) ceil( sigma * sqrt( 2.0 * prec * log(10.0) ) );
    /* log() here is the natural (base-e) logarithm */
    n = 1+2*h; /* kernel size */
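    /* Worked example (illustrative numbers, not from the original code): with
     sigma_scale = 0.6 and scale = 0.8, sigma = 0.6 / 0.8 = 0.75, so
     h = ceil( 0.75 * sqrt( 2.0 * 3.0 * log(10.0) ) ) = ceil( 2.79 ) = 3
     and the kernel has n = 1 + 2*3 = 7 samples. */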
    kernel = new_ntuple_list(n);
    
    /* auxiliary float image size variables */
    float_x_size = (int) (2 * in->xsize);
    float_y_size = (int) (2 * in->ysize);
    
    gaussian_kernel( kernel, sigma, (float) h );
    float scale_inv=1/scale;
    
    /* First subsampling: x axis */
    for(x=3;x<aux->xsize-2;x++)
    {
        /*
         x   is the coordinate in the new image.
         xx  is the corresponding x-value in the original size image.
         xc  is the integer value, the pixel coordinate of xx.
         */
        xx = (float) x * scale_inv; /* this walks the whole original image, since aux->xsize = width*scale */
        /* coordinate (0.0,0.0) is in the center of pixel (0,0),
         so the pixel with xc=0 gets the values of xx from -0.5 to 0.5 */
        xc = (int) floor( xx + 0.5 ); /* round to the nearest integer; same as round(xx) */
        
        //        gaussian_kernel( kernel, sigma, (float) h + xx - (float) xc );
        
        /* the kernel must be computed for each x because the fine
         offset xx-xc is different in each case */
        
        for(y=0;y<aux->ysize;y++)
        {
            sum = 0.0;
            for(i=0;i<kernel->dim;i++)
            {
                j = xc - h + i;
                sum += in->data[ j + y * in->xsize ] * kernel->values[i];
            }
            aux->data[ x + y * aux->xsize ] = sum;
        }
    }
    
    /* Second subsampling: y axis */
    for(y=3;y<out->ysize-2;y++)
    {
        /*
         y   is the coordinate in the new image.
         yy  is the corresponding y-value in the original size image.
         yc  is the integer value, the pixel coordinate of yy.
         */
        yy = (float) y * scale_inv;
        /* coordinate (0.0,0.0) is in the center of pixel (0,0),
         so the pixel with yc=0 gets the values of yy from -0.5 to 0.5 */
        yc = (int) floor( yy + 0.5 );
        //gaussian_kernel( kernel, sigma, (float) h + yy - (float) yc );
        /* the kernel must be computed for each y because the fine
         offset yy-yc is different in each case */
        
        for(x=0;x<out->xsize;x++)
        {
            sum = 0.0;
            for(i=0;i<kernel->dim;i++)
            {
                j = yc - h + i;
                sum += aux->data[ x + j * aux->xsize ] * kernel->values[i];
            }
            out->data[ x + y * out->xsize ] = sum;
        }
    }
    
    /* free memory */
    free_ntuple_list(kernel);
    free_image_float(aux);
    
    return out;
}
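A minimal caller sketch for gaussian_sampler, not part of the original example. The image_float type and free_image_float come from the surrounding project, and the 0.8 / 0.6 parameter pair is an illustrative assumption mirroring LSD-style defaults.

/* illustrative: downscale an image_float by LSD-style factors */
image_float downscale_for_lsd( image_float img )
{
    float scale       = 0.8;  /* subsampling factor; scale < 1 is expected here */
    float sigma_scale = 0.6;  /* so that sigma = sigma_scale / scale */

    image_float small = gaussian_sampler( img, scale, sigma_scale );
    /* img is untouched; the result must be released with free_image_float() */
    return small;
}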
Example No. 4
template <typename PointInT, typename PointOutT> void
pcl_1_8::Edge<PointInT, PointOutT>::canny (
    const pcl::PointCloud<PointInT> &input_x, 
    const pcl::PointCloud<PointInT> &input_y,
    pcl::PointCloud<PointOutT> &output)
{
  float tHigh = hysteresis_threshold_high_;
  float tLow = hysteresis_threshold_low_;
  const int height = input_x.height;
  const int width = input_x.width;

  output.resize (height * width);
  output.height = height;
  output.width = width;

  // Noise reduction using gaussian blurring
  pcl::PointCloud<pcl::PointXYZI>::Ptr gaussian_kernel (new pcl::PointCloud<pcl::PointXYZI>);
  kernel_.setKernelSize (3);
  kernel_.setKernelSigma (1.0);
  kernel_.setKernelType (kernel<pcl::PointXYZI>::GAUSSIAN);
  kernel_.fetchKernel (*gaussian_kernel);
  convolution_.setKernel (*gaussian_kernel);

  PointCloudIn smoothed_cloud_x;
  convolution_.setInputCloud (input_x.makeShared());
  convolution_.filter (smoothed_cloud_x);

  PointCloudIn smoothed_cloud_y;
  convolution_.setInputCloud (input_y.makeShared());
  convolution_.filter (smoothed_cloud_y);


  // Edge detection using Sobel
  pcl::PointCloud<PointXYZIEdge>::Ptr edges (new pcl::PointCloud<PointXYZIEdge>);
  sobelMagnitudeDirection (smoothed_cloud_x, smoothed_cloud_y, *edges.get ());

  // Edge discretization
  discretizeAngles (*edges);

  pcl::PointCloud<pcl::PointXYZI>::Ptr maxima (new pcl::PointCloud<pcl::PointXYZI>);
  suppressNonMaxima (*edges, *maxima, tLow);

  // Edge tracing
  for (int i = 0; i < height; i++)
  {
    for (int j = 0; j < width; j++)
    {
      if ((*maxima)(j, i).intensity < tHigh || (*maxima)(j, i).intensity == std::numeric_limits<float>::max ())
        continue;

      (*maxima)(j, i).intensity = std::numeric_limits<float>::max ();
      cannyTraceEdge ( 1, 0, i, j, *maxima);
      cannyTraceEdge (-1, 0, i, j, *maxima);
      cannyTraceEdge ( 1, 1, i, j, *maxima);
      cannyTraceEdge (-1, -1, i, j, *maxima);
      cannyTraceEdge ( 0, -1, i, j, *maxima);
      cannyTraceEdge ( 0, 1, i, j, *maxima);
      cannyTraceEdge (-1, 1, i, j, *maxima);
      cannyTraceEdge ( 1, -1, i, j, *maxima);
    }
  }

  // Final thresholding
  for (int i = 0; i < height; i++)
  {
    for (int j = 0; j < width; j++)
    {
      if ((*maxima)(j, i).intensity == std::numeric_limits<float>::max ())
        output (j, i).magnitude = 255;
      else
        output (j, i).magnitude = 0;
    }
  }
}
Example No. 5
template<typename PointInT, typename PointOutT> void
pcl_1_8::Edge<PointInT, PointOutT>::detectEdgeCanny (pcl::PointCloud<PointOutT> &output)
{
  float tHigh = hysteresis_threshold_high_;
  float tLow = hysteresis_threshold_low_;
  const int height = input_->height;
  const int width = input_->width;

  output.resize (height * width);
  output.height = height;
  output.width = width;

  //pcl::console::TicToc tt;
  //tt.tic ();
  
  // Noise reduction using gaussian blurring
  pcl::PointCloud<pcl::PointXYZI>::Ptr gaussian_kernel (new pcl::PointCloud<pcl::PointXYZI>);
  PointCloudInPtr smoothed_cloud (new PointCloudIn);
  kernel_.setKernelSize (3);
  kernel_.setKernelSigma (1.0);
  kernel_.setKernelType (kernel<pcl::PointXYZI>::GAUSSIAN);
  kernel_.fetchKernel (*gaussian_kernel);
  convolution_.setKernel (*gaussian_kernel);
  convolution_.setInputCloud (input_);
  convolution_.filter (*smoothed_cloud);
  //PCL_ERROR ("Gaussian blur: %g\n", tt.toc ()); tt.tic ();
  
  // Edge detection using Sobel
  pcl::PointCloud<PointXYZIEdge>::Ptr edges (new pcl::PointCloud<PointXYZIEdge>);
  setInputCloud (smoothed_cloud);
  detectEdgeSobel (*edges);
  //PCL_ERROR ("Sobel: %g\n", tt.toc ()); tt.tic ();
  
  // Edge discretization
  discretizeAngles (*edges);
  //PCL_ERROR ("Discretize: %g\n", tt.toc ()); tt.tic ();

  // tHigh and non-maximal suppression
  pcl::PointCloud<pcl::PointXYZI>::Ptr maxima (new pcl::PointCloud<pcl::PointXYZI>);
  suppressNonMaxima (*edges, *maxima, tLow);
  //PCL_ERROR ("NM suppress: %g\n", tt.toc ()); tt.tic ();

  // Edge tracing
  for (int i = 0; i < height; i++)
  {
    for (int j = 0; j < width; j++)
    {
      if ((*maxima)(j, i).intensity < tHigh || (*maxima)(j, i).intensity == std::numeric_limits<float>::max ())
        continue;

      (*maxima)(j, i).intensity = std::numeric_limits<float>::max ();
      cannyTraceEdge ( 1, 0, i, j, *maxima);
      cannyTraceEdge (-1, 0, i, j, *maxima);
      cannyTraceEdge ( 1, 1, i, j, *maxima);
      cannyTraceEdge (-1, -1, i, j, *maxima);
      cannyTraceEdge ( 0, -1, i, j, *maxima);
      cannyTraceEdge ( 0, 1, i, j, *maxima);
      cannyTraceEdge (-1, 1, i, j, *maxima);
      cannyTraceEdge ( 1, -1, i, j, *maxima);
    }
  }
  //PCL_ERROR ("Edge tracing: %g\n", tt.toc ());

  // Final thresholding
  for (size_t i = 0; i < input_->size (); ++i)
  {
    if ((*maxima)[i].intensity == std::numeric_limits<float>::max ())
      output[i].magnitude = 255;
    else
      output[i].magnitude = 0;
  }
}
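A minimal usage sketch for the detector above, not part of the original example. It assumes the pcl_1_8 copy keeps the interface of PCL's stock pcl::Edge class (setInputCloud, setHysteresisThresholdLow/High, detectEdgeCanny) and that the point types match upstream PCL; the threshold values are illustrative.

#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
// plus the project's pcl_1_8 edge header

// hypothetical driver: run Canny on an organized intensity cloud
void run_canny (const pcl::PointCloud<pcl::PointXYZI>::Ptr &organized_cloud,
                pcl::PointCloud<pcl::PointXYZIEdge> &edges)
{
  pcl_1_8::Edge<pcl::PointXYZI, pcl::PointXYZIEdge> edge;
  edge.setInputCloud (organized_cloud);
  edge.setHysteresisThresholdLow (20.0f);   // tLow, illustrative
  edge.setHysteresisThresholdHigh (80.0f);  // tHigh, illustrative
  edge.detectEdgeCanny (edges);             // magnitude is 255 on traced edges, 0 elsewhere
}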
Example No. 6
int main(int argc, char *argv[]) {
	if (argc != 6) {
		printf("Usage: %s input_image_1 sigma n tzc output\n", argv[0]);
	} else {

		// Execution time:
		double start = (double)clock();
	
		// Parameters
		float sigma = atof(argv[2]);
			// <sigma> is the standard deviation of the gaussian function used to
			// create the kernel.
		int n = atoi(argv[3]);
			// <n> is the size of the kernel (n*n).
		float tzc = atof(argv[4]);
			// <tzc> is the threshold of the zero-crossing method.
	
		// Load input image (using iio)
		int w, h, pixeldim;
		float *im_orig = iio_read_image_float_vec(argv[1], &w, &h, &pixeldim);
		fprintf(stderr, "Input image loaded:\t %dx%d image with %d channel(s).\n", w, h, pixeldim);

		// Grayscale conversion (if necessary)
		double *im = malloc(w*h*sizeof(double));
		if (im == NULL){
			fprintf(stderr, "Out of memory...\n");
			exit(EXIT_FAILURE);
		}
			// allocate memory for the grayscale image <im>, output of the grayscale conversion
			// and correct allocation check.
		int z;
			// <z> is just an integer used as array index.
		int zmax = w*h;		// number of elements of <im>
		if (pixeldim==3){	// if the image is color (RGB, three channels)...
			for(z=0;z<zmax;z++){		// for each pixel in the image <im>, calculate the gray 
										// value according to the expression: 
										// I = ( 6968*R + 23434*G + 2366*B ) / 32768.
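										// (these fixed-point weights approximate the Rec. 709 luma
										// coefficients 0.2126, 0.7152, 0.0722 scaled by 32768.)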
				im[z] =  (double)(6968*im_orig[3*z] + 23434*im_orig[3*z + 1] + 2366*im_orig[3*z + 2])/32768;
			}
			fprintf(stderr, "images converted to grayscale\n");
		} else {		// the image was originally grayscale...
			for(z=0;z<zmax;z++){
				im[z] = (double)im_orig[z];		// only assign the value of im_orig to im, casting to double.
			}
			fprintf(stderr, "images are already in grayscale\n");
		}

		// Generate gaussian kernel
		// see gaussian_kernel.c
		double *kernel = gaussian_kernel(n,sigma);

		// Debug: save kernel to image (this is not part of the algorithm itself)
		if (SAVE_KERNEL){
			float *kernel_float = malloc(n*n*sizeof(float));
			if (kernel_float == NULL){
				fprintf(stderr, "Out of memory...\n");
				exit(EXIT_FAILURE);
			}			
			int i;
			int imax = n*n;
			for (i=0;i<imax;i++){
				kernel_float[i] = 5000*(float)kernel[i];
			}
			iio_save_image_float_vec("kernel.png", kernel_float, n, n, 1);
			free(kernel_float);
			fprintf(stderr, "kernel saved to kernel.png\n");
		}
		// end of save kernel image
		
		// Smooth input image with gaussian kernel
		// see 2dconvolution.c
		// <im_smoothed> is calculated convolving the grayscale image <im> with
		// the gaussian kernel previously generated.
		double *im_smoothed = conv2d(im, w, h, kernel, n);

		// Debug: save smoothed image (this is not part of the algorithm itself)
		if (SAVE_SMOOTHED_IMAGE){
			float *smoothed = malloc((w+n-1)*(h+n-1)*sizeof(float));
			if (smoothed == NULL){
				fprintf(stderr, "Out of memory...\n");
				exit(EXIT_FAILURE);
			}
			int i,j, fila, col;
			int imax = w*h;
			int dif_fila_col = (n-1)/2;
			for (i=0;i<imax;i++){
				fila = (int)(i/w);
				col = i - w*fila + dif_fila_col;
				fila += dif_fila_col;
				j = col + (w+n-1)*fila;
				smoothed[i] = (float)im_smoothed[j];
			}
			iio_save_image_float_vec("smoothed.png", smoothed, w, h, 1);
			free(smoothed);
			fprintf(stderr, "smoothed image saved to smoothed.png\n");
		}
		// end of save smoothed image

		// Laplacian of the smoothed image
		double operator[9] = {1, 1, 1, 1, -8, 1, 1, 1, 1};
			// an approximation of the laplacian operator:
			//		/ 1  1  1 \
			//	   |  1 -8  1  |
			//		\ 1  1  1 /
			// now we convolve the smoothed image with this operator,
			// generating the <laplacian> image.
		double *laplacian = conv2d(im_smoothed, w+n-1, h+n-1, operator, 3);
		
		// calculate max absolute value of laplacian:
		// required for thresholding in zero-crossing (next)
		double max_l = 0;
		int p;
		int pmax = (w+n+1)*(h+n+1);
		for (p=0;p<pmax;p++){
			if (fabs(laplacian[p])>max_l){
				max_l = fabs(laplacian[p]);
			}
		}

		// Debug: save laplacian image (this is not part of the algorithm itself)
		if (SAVE_LAPLACIAN_IMAGE){
			float *lapl = malloc((w+n+1)*(h+n+1)*sizeof(float));
			if (lapl == NULL){
				fprintf(stderr, "Out of memory...\n");
				exit(EXIT_FAILURE);
			}
			int i,j, fila, col;
			int imax = w*h;
			int dif_fila_col = (n+1)/2;
			for (i=0;i<imax;i++){
				fila = (int)(i/w);
				col = i - w*fila + dif_fila_col;
				fila += dif_fila_col;
				j = col + (w+n+1)*fila;
				lapl[i] = (float)laplacian[j];
			}
			iio_save_image_float_vec("laplacian.png", lapl, w, h, 1);
			free(lapl);
			fprintf(stderr, "laplacian image saved to laplacian.png\n");
		}
		// end of save laplacian image

		// Zero-crossing
		float *zero_cross = calloc(w*h,sizeof(float));		// this image will only contain values 0 and 255
															// but we use float for saving with iio.
		if (zero_cross == NULL){
			fprintf(stderr, "Out of memory...\n");
			exit(EXIT_FAILURE);
		}
		int ind_en_lapl, fila, col;
		int *offsets = get_neighbors_offset(w+n+1, 3);
		pmax = w*h;
		int dif_fila_col = (n+1)/2;
		for (p=0;p<pmax;p++){
			fila = ((int)(p/w));
			col = p-(w*fila) + dif_fila_col;
			fila += dif_fila_col;
			ind_en_lapl = col + (w+n+1)*fila;
			double *n3 = get_neighborhood(laplacian, ind_en_lapl, 3, offsets);
			if ((n3[3]*n3[5]<0)&&(fabs(n3[3]-n3[5])>(tzc*max_l))) {
				// horizontal sign change
				zero_cross[p] = 255;
			} else if ((n3[1]*n3[7]<0)&&(fabs(n3[1]-n3[7])>(tzc*max_l))) {
				// vertical sign change
				zero_cross[p] = 255;
			} else if ((n3[2]*n3[6]<0)&&(fabs(n3[2]-n3[6])>(tzc*max_l))) {
				// +45deg sign change
				zero_cross[p] = 255;
			} else if ((n3[0]*n3[8]<0)&&(fabs(n3[0]-n3[8])>(tzc*max_l))) {
				// -45deg sign change
				zero_cross[p] = 255;
			}
			free_neighborhood(n3);
		}
		free_neighbors_offsets(offsets);

		// Save output image
		iio_save_image_float_vec(argv[5], zero_cross, w, h, 1);
		fprintf(stderr, "Output Image saved in %s:\t %dx%d image with %d channel(s).\n", argv[5], w, h, pixeldim);
	
		// Free memory
		free(zero_cross);
		free(im_orig);
		free(im);
		free(im_smoothed);
		free(laplacian);
		free_gaussian_kernel(kernel);

		fprintf(stderr, "marr-hildreth edge detector computation done.\n");

		// Execution time:
		double finish = (double)clock();
		double exectime = (finish - start)/CLOCKS_PER_SEC;
		fprintf(stderr, "execution time: %1.3f s.\n", exectime);		

		return 0;
	
	} // else (argc)
}
Example No. 7
int
main (int argc, char ** argv)
{
    int viewport_source, viewport_convolved = 0;
    int direction = -1;
    int nb_threads = 0;
    char border_policy = 'Z';
    double threshold = 0.001;
    pcl::filters::Convolution<pcl::PointXYZRGB, pcl::PointXYZRGB> convolution;
    Eigen::ArrayXf gaussian_kernel(5);
    gaussian_kernel << 1.f/16, 1.f/4, 3.f/8, 1.f/4, 1.f/16;
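    // the coefficients above form the 5-tap binomial kernel [1 4 6 4 1] / 16, a common discrete approximation of a small Gaussian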
    pcl::console::print_info ("convolution kernel:");
    for (int i = 0; i < gaussian_kernel.size (); ++i)
        pcl::console::print_info (" %f", gaussian_kernel[i]);
    pcl::console::print_info ("\n");

    if (argc < 3)
    {
        usage (argv);
        return 1;
    }

    // check if user is requesting help
    std::string arg (argv[1]);

    if (arg == "--help" || arg == "-h")
    {
        usage (argv);
        return 1;
    }

    // user doesn't need help; find the convolving direction
    // convolve row
    if (pcl::console::find_switch (argc, argv, "-r"))
        direction = 0;
    else
    {
        // convolve column
        if (pcl::console::find_switch (argc, argv, "-c"))
            direction = 1;
        else
            // convolve both
            if (pcl::console::find_switch (argc, argv, "-s"))
                direction = 2;
            else
            {
                // wrong direction given print usage
                usage (argv);
                return 1;
            }
    }

    // number of threads if any
    if (pcl::console::parse_argument (argc, argv, "-t", nb_threads) != -1 )
    {
        if (nb_threads <= 0)
            nb_threads = 1;
    }
    convolution.setNumberOfThreads (nb_threads);

    // borders policy if any
    if (pcl::console::parse_argument (argc, argv, "-p", border_policy) != -1 )
    {
        switch (border_policy)
        {
        case 'Z' :
            convolution.setBordersPolicy (pcl::filters::Convolution<pcl::PointXYZRGB, pcl::PointXYZRGB>::BORDERS_POLICY_IGNORE);
            break;
        case 'M' :
            convolution.setBordersPolicy (pcl::filters::Convolution<pcl::PointXYZRGB, pcl::PointXYZRGB>::BORDERS_POLICY_MIRROR);
            break;
        case 'D' :
            convolution.setBordersPolicy (pcl::filters::Convolution<pcl::PointXYZRGB, pcl::PointXYZRGB>::BORDERS_POLICY_DUPLICATE);
            break;
        default :
        {
            usage (argv);
            return (1);
        }
        }
    }
    else
        convolution.setBordersPolicy (pcl::filters::Convolution<pcl::PointXYZRGB, pcl::PointXYZRGB>::BORDERS_POLICY_IGNORE);

    // distance threshold if any
    if (pcl::console::parse_argument (argc, argv, "-d", threshold) == -1 )
    {
        threshold = 0.01;
    }
    convolution.setDistanceThreshold (static_cast<float> (threshold));

    // all set
    // we have file name and convolving direction
    pcl::PointCloud<pcl::PointXYZRGB>::Ptr cloud (new pcl::PointCloud<pcl::PointXYZRGB> ());
    if (pcl::io::loadPCDFile (argv[1], *cloud) == -1)
    {
        pcl::console::print_error ("Couldn't read file %s \n", argv[1]);
        return (-1);
    }
    cloud->is_dense = false;
    convolution.setInputCloud (cloud);
    convolution.setKernel (gaussian_kernel);
    pcl::PointCloud<pcl::PointXYZRGB>::Ptr convolved (new pcl::PointCloud<pcl::PointXYZRGB> ());
    double t0;
    pcl::console::print_info ("convolving %s along \n", argv[1]);
    std::ostringstream convolved_label;
    convolved_label << "convolved along ";
    switch (direction)
    {
    case 0:
    {
        convolved_label << "rows... ";
        t0 = pcl::getTime ();
        convolution.convolveRows (*convolved);
        break;
    }
    case 1:
    {
        convolved_label << "columns... ";
        t0 = pcl::getTime ();
        convolution.convolveCols (*convolved);
        break;
    }
    case 2:
    {
        convolved_label << "rows and columns... ";
        t0 = pcl::getTime ();
        convolution.convolve (*convolved);
        break;
    }
    }
    convolved_label << pcl::getTime () - t0 << "s";
    // Display
    boost::shared_ptr<pcl::visualization::PCLVisualizer> viewer (new pcl::visualization::PCLVisualizer ("Convolution"));
    // viewport stuff
    viewer->createViewPort (0, 0, 0.5, 1, viewport_source);
    viewer->createViewPort (0.5, 0, 1, 1, viewport_convolved);
    viewer->setBackgroundColor (0, 0, 0);

    // Source
    pcl::visualization::PointCloudColorHandlerRGBField<pcl::PointXYZRGB> color_handler_source (cloud);
    viewer->addPointCloud<pcl::PointXYZRGB> (cloud, color_handler_source, "source", viewport_source);
    viewer->addText ("source", 10, 10, "source_label", viewport_source);

    // Convolved
    pcl::visualization::PointCloudColorHandlerRGBField<pcl::PointXYZRGB> color_handler_convolved (convolved);
    viewer->addPointCloud<pcl::PointXYZRGB> (convolved, color_handler_convolved, "convolved", viewport_convolved);
    viewer->addText (convolved_label.str (), 10, 10, "convolved_label", viewport_convolved);
    viewer->spin ();
    pcl::PCDWriter writer;
    writer.write<pcl::PointXYZRGB> ("convolved.pcd", *convolved, false);
}