Example #1
File: filters.c Project: yhsesq/yhs
/* Procedure associated with the OK button. */
void ok_sigmabutton_callback(Widget w, XtPointer client_data,
                             XmSelectionBoxCallbackStruct *call_data){
    float sigma;
    char *value;

    /* Get the sigma value from the user's selection. */
    XmStringGetLtoR(call_data->value, XmSTRING_DEFAULT_CHARSET, &value);

    sigma = atof(value);

    /* Reject values outside the supported range, otherwise apply the selected filter. */
    if ( sigma < 0.5 || sigma > 20 )
      XBell(XtDisplay(w),100);
    else
      switch (filter) {
        case f1: gaussian_filter(sigma); break;
        case f2: dgaussian_filter(sigma); break;
        case f3: d2gaussian_filter(sigma); break;
        case f4: dgaux_gauy_filter(sigma); break;
        case f5: dgauy_gaux_filter(sigma); break;
        case f6: total_gradient_filter(sigma); break;
        case f7: gradient_x_y_filter(sigma); break;
        default: break;
      }

    XtFree(value);   /* the string returned by XmStringGetLtoR is caller-owned */

    action=SELECT;
    refresh_action();
}
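For context, a callback like this is normally attached to a Motif selection/prompt dialog. Below is a minimal sketch of that wiring; it is not taken from the yhsesq/yhs project, and the dialog name and parent widget are assumptions.

#include <Xm/SelectioB.h>

/* Sketch only: attach ok_sigmabutton_callback to a prompt dialog so the OK
   button delivers the sigma string. "sigma_prompt" and `toplevel` are
   hypothetical names, not from the original project. */
void create_sigma_dialog(Widget toplevel)
{
    Widget dialog = XmCreatePromptDialog(toplevel, "sigma_prompt", NULL, 0);
    XtAddCallback(dialog, XmNokCallback,
                  (XtCallbackProc) ok_sigmabutton_callback, NULL);
    XtManageChild(dialog);
}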
Example #2
    double MSSIM(float *I, float *J, int m, int n) {
        double sum = 0.0;
        int num_pixel = 0;

        double *w = gaussian_filter(SSIM_win_width, gf_sigma);
        // iterate over every pixel whose window fits entirely inside the image
        for (int i = SSIM_win_hwidth; i < m - SSIM_win_hwidth; i ++) {
            for (int j = SSIM_win_hwidth; j < n - SSIM_win_hwidth; j ++) {
                double ssim = SSIM(i, j, I, J, m, n, w);
                sum += ssim;
                num_pixel ++; // count pixels directly rather than deriving the total from m and n
            }
        }
        free(w); // release the Gaussian window (assuming gaussian_filter heap-allocates it)
        return sum / num_pixel;
    }
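The per-window SSIM() helper is not part of this listing. As a hedged sketch of what such a helper typically computes (Gaussian-weighted means, variances, and covariance combined into the standard SSIM index), assuming row-major m x n images and a normalized weight window w:

    /* Sketch only; the project's actual SSIM() may differ. Assumes I and J are
       row-major m x n images and w holds (2*SSIM_win_hwidth+1)^2 normalized
       Gaussian weights. */
    double SSIM_sketch(int i, int j, const float *I, const float *J,
                       int m, int n, const double *w) {
        const double C1 = 6.5025, C2 = 58.5225; /* (0.01*255)^2, (0.03*255)^2 */
        double mu_x = 0, mu_y = 0, sxx = 0, syy = 0, sxy = 0;
        int k = 0;
        for (int di = -SSIM_win_hwidth; di <= SSIM_win_hwidth; di++)
            for (int dj = -SSIM_win_hwidth; dj <= SSIM_win_hwidth; dj++, k++) {
                double x = I[(i + di) * n + (j + dj)];
                double y = J[(i + di) * n + (j + dj)];
                mu_x += w[k] * x;      mu_y += w[k] * y;
                sxx  += w[k] * x * x;  syy  += w[k] * y * y;  sxy += w[k] * x * y;
            }
        sxx -= mu_x * mu_x;  syy -= mu_y * mu_y;  sxy -= mu_x * mu_y;
        return ((2 * mu_x * mu_y + C1) * (2 * sxy + C2)) /
               ((mu_x * mu_x + mu_y * mu_y + C1) * (sxx + syy + C2));
    }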
Example #3
/* Compute a refinement of the optical flow (wx and wy are modified) between im1 and im2 */
void variational(image_t *wx, image_t *wy, const color_image_t *im1, const color_image_t *im2, variational_params_t *params){
  
    // Use default parameters if none were provided
    int params_allocated = 0;
    if(!params){
        params = (variational_params_t*) malloc(sizeof(variational_params_t));
        if(!params){
          fprintf(stderr,"error: not enough memory\n");
          exit(1);
        }
        variational_params_default(params);
        params_allocated = 1;
    }

    // initialize global variables
    half_alpha = 0.5f*params->alpha;
    half_gamma_over3 = params->gamma*0.5f/3.0f;
    half_delta_over3 = params->delta*0.5f/3.0f;
    
    float deriv_filter[3] = {0.0f, -8.0f/12.0f, 1.0f/12.0f};
    deriv = convolution_new(2, deriv_filter, 0);
    float deriv_filter_flow[2] = {0.0f, -0.5f};
    deriv_flow = convolution_new(1, deriv_filter_flow, 0);


    // presmooth images
    int width = im1->width, height = im1->height, filter_size;
    color_image_t *smooth_im1 = color_image_new(width, height), *smooth_im2 = color_image_new(width, height);
    float *presmooth_filter = gaussian_filter(params->sigma, &filter_size);
    convolution_t *presmoothing = convolution_new(filter_size, presmooth_filter, 1);
    color_image_convolve_hv(smooth_im1, im1, presmoothing, presmoothing);
    color_image_convolve_hv(smooth_im2, im2, presmoothing, presmoothing); 
    convolution_delete(presmoothing);
    free(presmooth_filter);
    
    compute_one_level(wx, wy, smooth_im1, smooth_im2, params);
  
    // free memory
    color_image_delete(smooth_im1);
    color_image_delete(smooth_im2);
    convolution_delete(deriv);
    convolution_delete(deriv_flow);
    if(params_allocated) free(params); // only free the defaults allocated above
}
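A short usage sketch. variational_params_default() appears in the code above; the rest of the setup is assumed from how such flow codebases are typically structured.

/* Sketch: refine a flow estimate between two color images of equal size.
   wx and wy are assumed to already hold an initial flow (e.g. from a
   coarser level or a matching step); variational() refines them in place. */
void refine_flow_example(const color_image_t *im1, const color_image_t *im2,
                         image_t *wx, image_t *wy)
{
    variational_params_t params;
    variational_params_default(&params);
    variational(wx, wy, im1, im2, &params);
}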
Example #4
void build_gaussian_fft ()
{
  int    i;
  int    length    = cvts.xmax * cvts.ymax;     /* number of samples per scale */
  double fft_scale = 1. / sqrt ((double) length);
  filter_fft      = (zomplex**) calloc (range, sizeof (zomplex*));

  for (scale = 0; scale < range; scale++)
  {
    fprintf (stderr, "Computing FFT of Gaussian at scale %i (size %i x %i)\r",
	     scale, cvts.xmax, cvts.ymax);
    filter_fft[scale] = (zomplex*) calloc (length, sizeof (zomplex));
    gaussian_filter (filter_fft[scale], S_I(scale), k);
    compute_fft     (filter_fft[scale], cvts.xmax, cvts.ymax);
    for (i = 0; i < length; i++)
    {
      filter_fft[scale][i].re *= fft_scale;
      filter_fft[scale][i].im *= fft_scale;
    }
  }
  fprintf (stderr, "\n");
}
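With the filter FFTs cached, convolving an image with the Gaussian at a given scale reduces to a pointwise complex product in the frequency domain followed by an inverse transform. A rough sketch follows; image_fft and compute_inverse_fft are assumptions, and only zomplex, filter_fft, and cvts come from the code above.

/* Sketch only: frequency-domain convolution with the cached Gaussian. */
void convolve_with_gaussian_sketch (zomplex *image_fft, zomplex *result, int sc)
{
  int i;
  int length = cvts.xmax * cvts.ymax;

  for (i = 0; i < length; i++)
  {
    /* complex product: (a+bi)(c+di) = (ac - bd) + (ad + bc)i */
    result[i].re = image_fft[i].re * filter_fft[sc][i].re
                 - image_fft[i].im * filter_fft[sc][i].im;
    result[i].im = image_fft[i].re * filter_fft[sc][i].im
                 + image_fft[i].im * filter_fft[sc][i].re;
  }
  compute_inverse_fft (result, cvts.xmax, cvts.ymax);  /* hypothetical inverse of compute_fft */
}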
Example #5
/* Create a pyramid of color images using a given scale factor, stopping when one dimension reaches min_size, and applying a Gaussian smoothing of standard deviation spyr (no smoothing if 0). */
color_image_pyramid_t *color_image_pyramid_create(const color_image_t *src, const float scale_factor, const int min_size, const float spyr){
    const int nb_max_scale = 1000;
    // allocate structure
    color_image_pyramid_t *pyramid = color_image_pyramid_new();
    pyramid->min_size = min_size;
    pyramid->scale_factor = scale_factor;
    convolution_t *conv = NULL;
    if(spyr>0.0f){
        int fsize;
        float *filter_coef = gaussian_filter(spyr, &fsize);
        conv = convolution_new(fsize, filter_coef, 1);
        free(filter_coef);
    }
    color_image_pyramid_set_size(pyramid, nb_max_scale);
    pyramid->images[0] = color_image_cpy(src);
    int i;
    for( i=1 ; i<nb_max_scale ; i++){
        const int oldwidth = pyramid->images[i-1]->width, oldheight = pyramid->images[i-1]->height;
        const int newwidth = (int) (1.5f + (oldwidth-1) / scale_factor);
        const int newheight = (int) (1.5f + (oldheight-1) / scale_factor);
        if( newwidth <= min_size || newheight <= min_size){
            color_image_pyramid_set_size(pyramid, i);
            break;
        }
        if(spyr>0.0f){
            color_image_t* tmp = color_image_new(oldwidth, oldheight);
            color_image_convolve_hv(tmp, pyramid->images[i-1], conv, conv);
            pyramid->images[i] = color_image_resize_bilinear(tmp, scale_factor);
            color_image_delete(tmp);
        }else{
            pyramid->images[i] = color_image_resize_bilinear(pyramid->images[i-1], scale_factor);
        }
    }
    if(spyr>0.0f){
        convolution_delete(conv);
    }
    return pyramid;
}
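A brief usage sketch. color_image_pyramid_delete() is assumed as the counterpart of color_image_pyramid_new(), and the field holding the number of levels is also an assumption.

/* Sketch: build a pyramid that roughly halves the resolution per level,
   with light Gaussian pre-smoothing, then release it. */
void pyramid_example(const color_image_t *src)
{
    const float scale_factor = 2.0f;  /* downsample by ~2 at each level */
    const int   min_size     = 32;    /* stop before a dimension drops to 32 or below */
    const float spyr         = 0.8f;  /* pre-smoothing sigma (0 disables smoothing) */

    color_image_pyramid_t *pyr = color_image_pyramid_create(src, scale_factor, min_size, spyr);
    /* ... use pyr->images[0], pyr->images[1], ... (the level count is stored via
       color_image_pyramid_set_size; the exact field name is not shown above) ... */
    color_image_pyramid_delete(pyr);  /* assumed cleanup helper */
}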
Example #6
/*
 * Links:
 * http://en.wikipedia.org/wiki/Canny_edge_detector
 * http://www.tomgibara.com/computer-vision/CannyEdgeDetector.java
 * http://fourier.eng.hmc.edu/e161/lectures/canny/node1.html
 * http://www.songho.ca/dsp/cannyedge/cannyedge.html
 *
 * Note: tmin and tmax (T1 and T2) are the lower and upper hysteresis thresholds.
 */
pixel_t *canny_edge_detection(const pixel_t *in,
                              const bitmap_info_header_t *bmp_ih,
                              const int tmin, const int tmax,
                              const float sigma)
{
    const int nx = bmp_ih->width;
    const int ny = bmp_ih->height;
 
    pixel_t *G = calloc(nx * ny, sizeof(pixel_t));
    pixel_t *after_Gx = calloc(nx * ny, sizeof(pixel_t));
    pixel_t *after_Gy = calloc(nx * ny, sizeof(pixel_t));
    pixel_t *nms = calloc(nx * ny, sizeof(pixel_t));
    pixel_t *out = malloc(bmp_ih->bmp_bytesz * sizeof(pixel_t));
 
    if (G == NULL || after_Gx == NULL || after_Gy == NULL ||
        nms == NULL || out == NULL) {
        fprintf(stderr, "canny_edge_detection:"
                " Failed memory allocation(s).\n");
        exit(1);
    }
 
    gaussian_filter(in, out, nx, ny, sigma);
 
    const float Gx[] = {-1, 0, 1,
                        -2, 0, 2,
                        -1, 0, 1};
 
    convolution(out, after_Gx, Gx, nx, ny, 3, false);
 
    const float Gy[] = { 1, 2, 1,
                         0, 0, 0,
                        -1,-2,-1};
 
    convolution(out, after_Gy, Gy, nx, ny, 3, false);
 
    for (int i = 1; i < nx - 1; i++)
        for (int j = 1; j < ny - 1; j++) {
            const int c = i + nx * j;
            // G[c] = abs(after_Gx[c]) + abs(after_Gy[c]);
            G[c] = (pixel_t)hypot(after_Gx[c], after_Gy[c]);
        }
 
    // Non-maximum suppression, straightforward implementation.
    for (int i = 1; i < nx - 1; i++)
        for (int j = 1; j < ny - 1; j++) {
            const int c = i + nx * j;
            const int nn = c - nx;
            const int ss = c + nx;
            const int ww = c + 1;
            const int ee = c - 1;
            const int nw = nn + 1;
            const int ne = nn - 1;
            const int sw = ss + 1;
            const int se = ss - 1;
 
            const float dir = (float)(fmod(atan2(after_Gy[c],
                                                 after_Gx[c]) + M_PI,
                                           M_PI) / M_PI) * 8;
 
            if (((dir <= 1 || dir > 7) && G[c] > G[ee] &&
                 G[c] > G[ww]) || // 0 deg
                ((dir > 1 && dir <= 3) && G[c] > G[nw] &&
                 G[c] > G[se]) || // 45 deg
                ((dir > 3 && dir <= 5) && G[c] > G[nn] &&
                 G[c] > G[ss]) || // 90 deg
                ((dir > 5 && dir <= 7) && G[c] > G[ne] &&
                 G[c] > G[sw]))   // 135 deg
                nms[c] = G[c];
            else
                nms[c] = 0;
        }
 
    // Reuse the after_Gy array as a stack; nx*ny/2 elements should be enough.
    int *edges = (int*) after_Gy;
    memset(out, 0, sizeof(pixel_t) * nx * ny);
    memset(edges, 0, sizeof(pixel_t) * nx * ny);
 
    // Tracing edges with hysteresis. Non-recursive implementation.
    for (int j = 1; j < ny - 1; j++)
        for (int i = 1; i < nx - 1; i++) {
            const int c = i + nx * j; // index of the current pixel
            if (nms[c] >= tmax && out[c] == 0) { // trace edges
                out[c] = MAX_BRIGHTNESS;
                int nedges = 1;
                edges[0] = c;
 
                do {
                    nedges--;
                    const int t = edges[nedges];
 
                    int nbs[8]; // neighbours
                    nbs[0] = t - nx;     // nn
                    nbs[1] = t + nx;     // ss
                    nbs[2] = t + 1;      // ww
                    nbs[3] = t - 1;      // ee
                    nbs[4] = nbs[0] + 1; // nw
                    nbs[5] = nbs[0] - 1; // ne
                    nbs[6] = nbs[1] + 1; // sw
                    nbs[7] = nbs[1] - 1; // se
 
                    for (int k = 0; k < 8; k++)
                        if (nms[nbs[k]] >= tmin && out[nbs[k]] == 0) {
                            out[nbs[k]] = MAX_BRIGHTNESS;
                            edges[nedges] = nbs[k];
                            nedges++;
                        }
                } while (nedges > 0);
            }
        }
 
    free(after_Gx);
    free(after_Gy);
    free(G);
    free(nms);
 
    return out;
}
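A hedged usage sketch for the detector above; the input buffer and header are assumed to come from the caller's BMP loader, and the threshold/sigma values are typical starting points rather than project defaults.

/* Sketch only: run Canny on an already-loaded 8-bit grayscale bitmap. */
void canny_example(const pixel_t *in_pixels, const bitmap_info_header_t *ih)
{
    const int   tmin  = 45;    /* lower hysteresis threshold */
    const int   tmax  = 50;    /* upper hysteresis threshold */
    const float sigma = 1.0f;  /* Gaussian pre-smoothing */

    pixel_t *edges = canny_edge_detection(in_pixels, ih, tmin, tmax, sigma);
    /* ... write or display `edges`; MAX_BRIGHTNESS marks edge pixels ... */
    free(edges);
}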
Example #7
    typename EMSubpixelCorrelatorView<ImagePixelT>::prerasterize_type
    EMSubpixelCorrelatorView<ImagePixelT>::prerasterize(BBox2i const& bbox) const {
      vw_out(InfoMessage, "stereo") << "EMSubpixelCorrelatorView: rasterizing image block " << bbox << ".\n";

      // Find the range of disparity values for this patch.
      // int num_good; // not used
      BBox2i search_range;
      try {
        search_range = get_disparity_range(crop(m_course_disparity, bbox));
      }
      catch (const std::exception& e) {
        search_range = BBox2i();
      }


#ifdef USE_GRAPHICS
      ImageWindow window;
      if(debug_level >= 0) {
        window = vw_create_window("disparity");
      }
#endif

      // The area in the right image that we'll be searching is
      // determined by the bbox of the left image plus the search
      // range.
      BBox2i left_crop_bbox(bbox);
      BBox2i right_crop_bbox(bbox.min() + search_range.min(),
                             bbox.max() + search_range.max());

      // The correlator requires the images to be the same size. The
      // search bbox will always be larger than the given left image
      // bbox, so we just make the left bbox the same size as the
      // right bbox.
      left_crop_bbox.max() = left_crop_bbox.min() + Vector2i(right_crop_bbox.width(), right_crop_bbox.height());

      // Finally, we must adjust both bounding boxes to account for
      // the size of the kernel itself.
      right_crop_bbox.min() -= Vector2i(m_kernel_size[0], m_kernel_size[1]);
      right_crop_bbox.max() += Vector2i(m_kernel_size[0], m_kernel_size[1]);
      left_crop_bbox.min() -= Vector2i(m_kernel_size[0], m_kernel_size[1]);
      left_crop_bbox.max() += Vector2i(m_kernel_size[0], m_kernel_size[1]);

      // We crop the images to the expanded bounding box and edge
      // extend in case the new bbox extends past the image bounds.
      ImageView<ImagePixelT> left_image_patch, right_image_patch;
      ImageView<disparity_pixel> disparity_map_patch_in;
      ImageView<result_type> disparity_map_patch_out;


      left_image_patch = crop(edge_extend(m_left_image, ZeroEdgeExtension()),
                              left_crop_bbox);
      right_image_patch = crop(edge_extend(m_right_image, ZeroEdgeExtension()),
                               right_crop_bbox);
      disparity_map_patch_in = crop(edge_extend(m_course_disparity, ZeroEdgeExtension()),
                                    left_crop_bbox);
      disparity_map_patch_out.set_size(disparity_map_patch_in.cols(), disparity_map_patch_in.rows());


      // Adjust the disparities to be relative to the cropped
      // image pixel locations
      for (int v = 0; v < disparity_map_patch_in.rows(); ++v) {
        for (int u = 0; u < disparity_map_patch_in.cols(); ++u) {
          if (disparity_map_patch_in(u,v).valid())  {
            disparity_map_patch_in(u,v).child().x() -= search_range.min().x();
            disparity_map_patch_in(u,v).child().y() -= search_range.min().y();
          }
        }
      }


      double blur_sigma_progressive = .5; // 3*sigma = 1.5 pixels

      // create the pyramid first
      std::vector<ImageView<ImagePixelT> > left_pyramid(pyramid_levels), right_pyramid(pyramid_levels);
      std::vector<BBox2i> regions_of_interest(pyramid_levels);
      std::vector<ImageView<Matrix2x2> > warps(pyramid_levels);
      std::vector<ImageView<disparity_pixel> > disparity_map_pyramid(pyramid_levels);


      // initialize the pyramid at level 0
      left_pyramid[0] = channels_to_planes(left_image_patch);
      right_pyramid[0] = channels_to_planes(right_image_patch);
      disparity_map_pyramid[0] = disparity_map_patch_in;
      regions_of_interest[0] = BBox2i(m_kernel_size[0], m_kernel_size[1],
                                      bbox.width(),bbox.height());


      // downsample the disparity map and the image pair to initialize the intermediate levels
      for(int i = 1; i < pyramid_levels; i++) {
        left_pyramid[i] = subsample(gaussian_filter(left_pyramid[i-1], blur_sigma_progressive), 2);
        right_pyramid[i] = subsample(gaussian_filter(right_pyramid[i-1], blur_sigma_progressive), 2);

        disparity_map_pyramid[i] = detail::subsample_disp_map_by_two(disparity_map_pyramid[i-1]);
        regions_of_interest[i] = BBox2i(regions_of_interest[i-1].min()/2, regions_of_interest[i-1].max()/2);
      }

      // initialize warps at the lowest resolution level
      warps[pyramid_levels-1].set_size(left_pyramid[pyramid_levels-1].cols(),
                                       left_pyramid[pyramid_levels-1].rows());
      for(int y = 0; y < warps[pyramid_levels-1].rows(); y++) {
        for(int x = 0; x < warps[pyramid_levels-1].cols(); x++) {
          warps[pyramid_levels-1](x, y).set_identity();
        }
      }

#ifdef USE_GRAPHICS
      vw_initialize_graphics(0, NULL);
      if(debug_level >= 0) {
        for(int i = 0; i < pyramid_levels; i++) {
          vw_show_image(window, left_pyramid[i]);
          usleep((int)(.2*1000*1000));
        }
      }
#endif

      // go up the pyramid; first run refinement, then upsample result for the next level
      for(int i = pyramid_levels-1; i >=0; i--) {
        vw_out() << "processing pyramid level "
                  << i << " of " << pyramid_levels-1 << std::endl;

        if(debug_level >= 0) {
          std::stringstream stream;
          stream << "pyramid_level_" << i << ".tif";
          write_image(stream.str(), disparity_map_pyramid[i]);
        }

        ImageView<ImagePixelT> process_left_image = left_pyramid[i];
        ImageView<ImagePixelT> process_right_image = right_pyramid[i];

        if(i > 0) { // at intermediate levels, refine the disparity map in place,
          // then upsample the refined map and warps for the next finer level
          m_subpixel_refine(edge_extend(process_left_image, ZeroEdgeExtension()), edge_extend(process_right_image, ZeroEdgeExtension()),
                            disparity_map_pyramid[i], disparity_map_pyramid[i], warps[i],
                            regions_of_interest[i], false, debug_level == i);

          // upsample the warps and the refined map for the next level of processing
          int up_width = left_pyramid[i-1].cols();
          int up_height = left_pyramid[i-1].rows();
          warps[i-1] = copy(resize(warps[i], up_width , up_height, ConstantEdgeExtension(), NearestPixelInterpolation())); //upsample affine transforms
          disparity_map_pyramid[i-1] = copy(detail::upsample_disp_map_by_two(disparity_map_pyramid[i], up_width, up_height));
        }
        else { // here there is no next level so we refine directly to the output patch
          m_subpixel_refine(edge_extend(process_left_image, ZeroEdgeExtension()), edge_extend(process_right_image, ZeroEdgeExtension()),
                            disparity_map_pyramid[i], disparity_map_patch_out, warps[i],
                            regions_of_interest[i], true, debug_level == i);
        }
      }

#ifdef USE_GRAPHICS
      if(debug_level >= 0) {
        vw_show_image(window, .5 + select_plane(channels_to_planes(disparity_map_patch_out)/6., 0));
        usleep(10*1000*1000);
      }
#endif

      // Undo the above adjustment
      for (int v = 0; v < disparity_map_patch_out.rows(); ++v) {
        for (int u = 0; u < disparity_map_patch_out.cols(); ++u) {
          if (disparity_map_patch_out(u,v).valid())  {
            disparity_map_patch_out(u,v).child().x() += search_range.min().x();
            disparity_map_patch_out(u,v).child().y() += search_range.min().y();
          }
        }
      }

#ifdef USE_GRAPHICS
      if(debug_level >= 0 ) {
        vw_destroy_window(window);
      }
#endif

      return crop(disparity_map_patch_out, BBox2i(m_kernel_size[0]-bbox.min().x(),
                                                  m_kernel_size[1]-bbox.min().y(),
                                                  m_left_image.cols(),
                                                  m_left_image.rows()));
    }
Example #8
/* compute the saliency of a given image */
image_t* saliency(const color_image_t *im, float sigma_image, float sigma_matrix ){
    int width = im->width, height = im->height, filter_size;
    
    // smooth image
    color_image_t *sim = color_image_new(width, height);
    float *presmooth_filter = gaussian_filter(sigma_image, &filter_size);
    convolution_t *presmoothing = convolution_new(filter_size, presmooth_filter, 1);
    color_image_convolve_hv(sim, im, presmoothing, presmoothing);
    convolution_delete(presmoothing);
    free(presmooth_filter);
  
    // compute derivatives
    float deriv_filter[2] = {0.0f, -0.5f};
    convolution_t *deriv = convolution_new(1, deriv_filter, 0);
    color_image_t *imx = color_image_new(width, height), *imy = color_image_new(width, height);
    color_image_convolve_hv(imx, sim, deriv, NULL);
    color_image_convolve_hv(imy, sim, NULL, deriv);
    convolution_delete(deriv);
    
    // compute autocorrelation matrix
    image_t *imxx = image_new(width, height), *imxy = image_new(width, height), *imyy = image_new(width, height);
    v4sf *imx1p = (v4sf*) imx->c1, *imx2p = (v4sf*) imx->c2, *imx3p = (v4sf*) imx->c3, *imy1p = (v4sf*) imy->c1, *imy2p = (v4sf*) imy->c2, *imy3p = (v4sf*) imy->c3, 
        *imxxp = (v4sf*) imxx->data, *imxyp = (v4sf*) imxy->data, *imyyp = (v4sf*) imyy->data;
    int i;
    for(i = 0 ; i<height*im->stride/4 ; i++){
        *imxxp = (*imx1p)*(*imx1p) + (*imx2p)*(*imx2p) + (*imx3p)*(*imx3p);
        *imxyp = (*imx1p)*(*imy1p) + (*imx2p)*(*imy2p) + (*imx3p)*(*imy3p);
        *imyyp = (*imy1p)*(*imy1p) + (*imy2p)*(*imy2p) + (*imy3p)*(*imy3p);
        imxxp+=1; imxyp+=1; imyyp+=1;
        imx1p+=1; imx2p+=1; imx3p+=1;
        imy1p+=1; imy2p+=1; imy3p+=1;
    }
    
    // integrate autocorrelation matrix
    float *smooth_filter = gaussian_filter(sigma_matrix, &filter_size);
    convolution_t *smoothing = convolution_new(filter_size, smooth_filter, 1);
    image_t *tmp = image_new(width, height);
    convolve_horiz(tmp, imxx, smoothing);
    convolve_vert(imxx, tmp, smoothing);
    convolve_horiz(tmp, imxy, smoothing);
    convolve_vert(imxy, tmp, smoothing);    
    convolve_horiz(tmp, imyy, smoothing);
    convolve_vert(imyy, tmp, smoothing);    
    convolution_delete(smoothing);
    free(smooth_filter);
    
    // compute the square root of the smallest eigenvalue of the autocorrelation matrix
    v4sf vzeros = {0.0f,0.0f,0.0f,0.0f};
    v4sf vhalf = {0.5f,0.5f,0.5f,0.5f};
    v4sf *tmpp = (v4sf*) tmp->data;
    imxxp = (v4sf*) imxx->data; imxyp = (v4sf*) imxy->data; imyyp = (v4sf*) imyy->data;
    for(i = 0 ; i<height*im->stride/4 ; i++){
        (*tmpp) = vhalf*( (*imxxp)+(*imyyp) ) ;
        (*tmpp) = __builtin_ia32_sqrtps(__builtin_ia32_maxps(vzeros, (*tmpp) - __builtin_ia32_sqrtps(__builtin_ia32_maxps(vzeros, (*tmpp)*(*tmpp) + (*imxyp)*(*imxyp) - (*imxxp)*(*imyyp) ) )));
        tmpp+=1; imxyp+=1; imxxp+=1; imyyp+=1;
    }
    
    image_delete(imxx); image_delete(imxy); image_delete(imyy);
    color_image_delete(imx); color_image_delete(imy);
    color_image_delete(sim);
    
    return tmp;
}
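For reference, the vectorized loop above evaluates, per pixel, the square root of the smallest eigenvalue of the 2x2 structure tensor; a scalar sketch of the same arithmetic (requires <math.h>):

/* Scalar sketch of the SSE loop above: smallest eigenvalue of
   [[ixx, ixy], [ixy, iyy]], clamped at zero, then its square root. */
float saliency_pixel_sketch(float ixx, float ixy, float iyy)
{
    float trace_half = 0.5f * (ixx + iyy);
    /* trace_half*trace_half + ixy*ixy - ixx*iyy == ((ixx - iyy)/2)^2 + ixy^2 */
    float discr      = trace_half * trace_half + ixy * ixy - ixx * iyy;
    float lambda_min = trace_half - sqrtf(fmaxf(0.0f, discr));
    return sqrtf(fmaxf(0.0f, lambda_min));
}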