Example 1
/* Convolve image with the 1-D kernel vector along image rows.  This
   is designed to be as efficient as possible.  Pixels outside the
   image are set to the value of the closest image pixel.
*/
static void ConvHorizontal(LWImage<flnum>& image, flnum *kernel, int ksize)
{
    flnum buffer[8000];
    
    const int rows = image.h;
    const int cols = image.w;
    const int halfsize = ksize / 2;
    assert(cols + ksize < 8000); /* TANG: this bounds the maximum supported image width */

    for(int comp = 0; comp < image.comps; comp++) {
        const int deltaComp = comp*image.stepComp();
        for (int r = 0; r < rows; r++) {
            /* Copy the row into the buffer with the pixels at the ends
            replicated for half the mask size.  This avoids the need to
            check for ends within the inner loop. */
            for (int i = 0; i < halfsize; i++)
                buffer[i] = image.pixel(0,r)[deltaComp];
            for (int i = 0; i < cols; i++)
                buffer[halfsize + i] = image.pixel(i,r)[deltaComp]; 
            for (int i = 0; i < halfsize; i++)
                buffer[halfsize + cols + i] = image.pixel(cols-1,r)[deltaComp];

            ConvBufferFast(buffer, kernel, cols, ksize);
            for (int c = 0; c < cols; c++)
                image.pixel(c,r)[deltaComp] = buffer[c];
        }
    }
}
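ConvBufferFast is called above but not listed among these examples. As a rough sketch (an assumption, not the library's actual routine, which may be unrolled or otherwise optimized), the buffer convolution could look like this, writing each output sample back over the start of the padded buffer:

static void ConvBufferSketch(flnum *buffer, const flnum *kernel, int rsize, int ksize)
{
    /* Hypothetical stand-in for ConvBufferFast: buffer holds rsize pixels
       plus ksize/2 replicated samples on each side. */
    for (int i = 0; i < rsize; i++) {
        flnum sum = 0;
        for (int k = 0; k < ksize; k++)       /* dot product with the kernel */
            sum += buffer[i + k] * kernel[k];
        buffer[i] = sum;  /* in-place write is safe: index i is not read again */
    }
}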
Example 2
/* Same as ConvHorizontal, but applied to the vertical columns of the image.
*/
static void ConvVertical(LWImage<flnum>& image, flnum *kernel, int ksize)
{
    flnum buffer[8000];

    const int rows = image.h;
    const int cols = image.w;
    const int halfsize = ksize / 2;
    assert(rows + ksize < 8000);  /* TANG: this bounds the maximum supported image height */

    for(int comp = 0; comp < image.comps; comp++) {
        const int deltaComp = comp*image.stepComp();
        for (int c = 0; c < cols; c++) {
            for (int i = 0; i < halfsize; i++)
                buffer[i] = image.pixel(c,0)[deltaComp];
            for (int i = 0; i < rows; i++)
                buffer[halfsize + i] = image.pixel(c,i)[deltaComp];
            for (int i = 0; i < halfsize; i++)
                buffer[halfsize + rows + i] = image.pixel(c,rows-1)[deltaComp];

            ConvBufferFast(buffer, kernel, rows, ksize);
            for (int r = 0; r < rows; r++)
                image.pixel(c,r)[deltaComp] = buffer[r];
        }
    }
}
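Taken together, the two passes implement a separable 2-D convolution. A minimal usage sketch, assuming a normalized 1-D kernel has already been built elsewhere (the wrapper name is illustrative, not part of the library):

static void SeparableConvolve(LWImage<flnum>& image, flnum *kernel, int ksize)
{
    ConvHorizontal(image, kernel, ksize); /* filter along the rows */
    ConvVertical(image, kernel, ksize);   /* then along the columns */
}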
Example 3
/// blur[par.Scales+1] is not used when looking for extrema,
/// although these could be computed using the available blur and dogs.
void FindMaxMin(const flimage* dogs, const flimage& blur, int s,
                float octSize, keypointslist& keys, siftPar& par)
{

    int width = dogs[0].w, height = dogs[0].h;

    /* Create an image map in which locations that have a keypoint are
       marked, to prevent two keypoints from being located at the same
       position.  This may seem an inefficient data structure, but it does
       not add significant overhead. */
    LWImage<bool> map = alloc_image<bool>(width, height);
    flimage grad = alloc_image<float>(width, height, 2);
    grad.planar = false; // Contiguous norm and dir
    for(int i = map.sizeBuffer()-1; i >= 0; i--)
        map.data[i] = false;
    for(int i = grad.sizeBuffer()-1; i >= 0; i--)
        grad.data[i] = 0.0f;

    /* Compute the gradient and orientation images to be used for keypoint
       description. */
    compute_gradient_orientation(blur.data, grad.data, blur.w, blur.h);

    /* Only find peaks at least par.BorderDist samples from the image border,
       as peaks centered close to the border will lack stability. */
    assert(par.BorderDist >= 2);
    float val;
    int partialcounter = 0;
    for (int r = par.BorderDist; r < height - par.BorderDist; r++) 
        for (int c = par.BorderDist; c < width - par.BorderDist; c++) {
            /* Pixel value at (c,r) position. */
            val = *dogs[1].pixel(c,r);	

            /* DOG magnitude must be above 0.8 * par.PeakThresh threshold
            (precise threshold check will be done once peak
            interpolation is performed).  Then check whether this
            point is a peak in 3x3 region at each level, and is not
            on an elongated edge.
            */
            if (fabs(val) > 0.8 * par.PeakThresh) {
                if(LocalMaxMin(val, dogs[0], r, c) &&
                   LocalMaxMin(val, dogs[1], r, c) &&
                   LocalMaxMin(val, dogs[2], r, c) &&
                   NotOnEdge(dogs[1], r, c, octSize,par)) {
                    partialcounter++;
                    if (DEBUG) printf("%d:  (%d,%d,%d)  val: %f\n",partialcounter, s,r,c,val);
                    InterpKeyPoint(dogs, s, r, c, grad,
                                   map, octSize, keys, 5,par);	
                }
            }
        }
    free(map.data);
    free(grad.data);
}
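LocalMaxMin is only referenced here. A sketch of what such a 3x3 extremum test on one DOG level could look like, assuming flimage::pixel(c,r) returns a pointer as used above (the library's version may use different comparison conventions):

static bool LocalMaxMinSketch(float val, const flimage& dog, int r, int c)
{
    /* Hypothetical test: val must dominate (or be dominated by) every
       neighbor at this level for the candidate to remain an extremum. */
    for (int dr = -1; dr <= 1; dr++)
        for (int dc = -1; dc <= 1; dc++) {
            float v = *dog.pixel(c + dc, r + dr);
            if (val > 0.0f && v > val) return false;  /* not a maximum */
            if (val <= 0.0f && v < val) return false; /* not a minimum */
        }
    return true;
}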
Example 4
/* Create a keypoint at a peak near scale space location (s,r,c), where
   s is the scale (index of the DOG image) and (r,c) is the (row, col)
   location.  Any newly created keypoints are appended to keys.
*/
void InterpKeyPoint(
    const flimage* dogs, int s, int r, int c,
    const flimage& grad, LWImage<bool>& map,
    float octSize, keypointslist& keys, int movesRemain, siftPar& par)
{
    /* Fit quadratic to determine offset and peak value. */
    std::vector<float> offset(3);
    float peakval = FitQuadratic(offset, dogs, r, c);
    if (DEBUG) printf("peakval: %f, of[0]: %f  of[1]: %f  of[2]: %f\n",
                      peakval, offset[0], offset[1], offset[2]);

	/* Move to an adjacent (row,col) location if quadratic interpolation
	   is larger than 0.6 units in some direction (we use 0.6 instead of
	   0.5 to avoid jumping back and forth near boundary).  We do not
	   perform move to adjacent scales, as it is seldom useful and we
	   do not have easy access to adjacent scale structures.  The
	   movesRemain counter allows only a fixed number of moves to
	   prevent possibility of infinite loops.
	*/
	int newr = r, newc = c;
	if (offset[1] > 0.6 && r < dogs[0].h - 3)
		newr++;
	else if (offset[1] < -0.6 && r > 3)
		newr--;

	if (offset[2] > 0.6 && c < dogs[0].w - 3)
		newc++;
	else if (offset[2] < -0.6 && c > 3)
		newc--;

	if (movesRemain > 0  &&  (newr != r || newc != c)) {
		InterpKeyPoint(dogs, s, newr, newc, grad, map,
                       octSize, keys, movesRemain - 1, par);
		return;
	}

	/* Do not create a keypoint if interpolation still remains far
	   outside expected limits, or if magnitude of peak value is below
	   threshold (i.e., contrast is too low). */
    if (fabs(offset[0]) > 1.5 || fabs(offset[1]) > 1.5 ||
        fabs(offset[2]) > 1.5 || fabs(peakval) < par.PeakThresh) {
        if (DEBUG) printf("Point not well localized by FitQuadratic\n");
        par.noncorrectlylocalized++;
        return;
    }
	
	/* Check that no keypoint has been created at this location (to avoid
	   duplicates).  Otherwise, mark this map location.
	*/
	if (*map.pixel(c,r)) return;
	*map.pixel(c,r) = true;

	/* The scale relative to this octave is given by octScale.  The scale
	   units are in terms of sigma for the smallest of the Gaussians in the
	   DOG used to identify that scale.
	*/
	float octScale = par.InitSigma * pow(2.0f, (s + offset[0]) / (float) par.Scales);

	/// always use histogram of orientations
	//if (UseHistogramOri)
    AssignOriHist(grad, octSize, octScale,
                  r + offset[1], c + offset[2], keys, par);
	//else
	//	AssignOriAvg(
	//		grad, ori, octSize, octScale,
	//		r + offset[1], c + offset[2], keys);
}
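FitQuadratic is not listed among these examples. In one dimension the idea reduces to fitting a parabola through three neighboring samples and returning the sub-sample offset of its extremum; the sketch below illustrates only that principle (the actual routine solves the joint 3-D system over scale, row and column):

static float PeakOffset1D(float fm1, float f0, float fp1)
{
    float g = 0.5f * (fp1 - fm1);       /* central-difference gradient */
    float h = fp1 - 2.0f * f0 + fm1;    /* second-derivative estimate */
    return (h != 0.0f) ? -g / h : 0.0f; /* extremum of f0 + g*x + 0.5*h*x^2 */
}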
Example 5
/// Apply geometric transform to image.
///
/// The transformation \a map is applied to the image \a in and the result is
/// stored in \a im. If \a adjustSize is \c true, \a im is resized so that it
/// contains the whole transformed rectangle; otherwise it keeps its original
/// size.
///
/// The returned pair of integers is the offset of the output image \a im with
/// respect to the original image \a in. If \a adjustSize is \c false, this is
/// (0,0); otherwise it is the location of the upper-left corner of \a im in
/// pixel coordinates of \a in.
///
/// Interpolation is done by spline. An anti-aliasing filter is optional.
///
/// \a vOut is the background value assigned to pixels outside the image.
std::pair<int,int> map_image(LWImage<float> in,
                             libNumerics::Homography map,
                             LWImage<float>& im,
                             int order, bool adjustSize,
                             bool antiAlias, float vOut)
{
    int w = in.w, h = in.h;
    float zoomOut = antiAlias ?
        static_cast<float>(minZoomOut(map.mat(), w, h)) : 1.0f;
    const libNumerics::Homography oriMap(map);
    const int oriW=w, oriH=h;
    std::pair<int,int> offset(0,0);
    if(adjustSize) {
        offset = boundingBox(map, w, h);
        free(im.data);
        im = alloc_image<float>(w, h, in.comps);
    }
    if(zoomOut < 1.0f) {
        float zoomIn = 1.0f / zoomOut;
        // GF: added some extra space
        int wZoom=(int)std::ceil(w*zoomIn*1.5), hZoom=(int)std::ceil(h*zoomIn*1.5);
        LWImage<float> imZoom = alloc_image<float>(wZoom,hZoom,in.comps);
        libNumerics::matrix<double> mapZ(3,3);
        mapZ = 0.0;
        mapZ(0,0) = zoomIn;
        mapZ(1,1) = zoomIn;
        mapZ(2,2) = 1.0;
        map.mat() = mapZ*map.mat();
        map_image(in, map, imZoom, order, false, false, vOut);
        float sigma = 0.8f*sqrt(zoomIn*zoomIn-1.0f);
        gauss_convol(imZoom, sigma);
        map.mat() = 0.0;
        map.mat()(0,0)=zoomOut;
        map.mat()(1,1)=zoomOut;
        map.mat()(2,2)=1.0;
        in = imZoom;
    }
    LWImage<float> tmp = alloc_image(in);
    if( prepare_spline(tmp,order) ) {
        libNumerics::Homography inv = map.inverse();
        const int stepComp = im.stepComp();
        float* out = new float[im.comps];
        float* pixOut = im.data;
        for(int i = 0; i < im.h; i++)
            for(int j = 0; j < im.w; j++) {
                double x=j+offset.first, y=i+offset.second;
                inv(x,y);
                for(int k=0; k < im.comps; k++)
                    out[k] = vOut;
                interpolate_spline(tmp, order,
                                   static_cast<float>(x+.5),
                                   static_cast<float>(y+.5), out);
                for(int k=0; k < im.comps; k++)
                    pixOut[k*stepComp] = out[k];
                pixOut += im.step();
            }
        delete [] out;
    }
    free(tmp.data);
    if(zoomOut < 1.0f) {
        free(in.data); // Was allocated above
        if(! is_number(vOut)) { // Put back mask
            libNumerics::Homography inv = oriMap.inverse();
            const int stepComp = im.stepComp();
            float* pixOut = im.data;
            for(int i = 0; i < im.h; i++)
                for(int j = 0; j < im.w; j++) {
                    double x=j+offset.first, y=i+offset.second;
                    inv(x,y);
                    if(x<0 || x>=oriW || y<0 || y>=oriH)
                        for(int k=0; k < im.comps; k++)
                            pixOut[k*stepComp] = NaN;
                    pixOut += im.step();
                }
        }
    }
    return offset;
}
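A hypothetical call site, only to show how the parameters fit together; the input image and homography are assumed to exist already, and the spline order 3 and NaN background are illustrative choices:

/* Hypothetical usage sketch: `in` (LWImage<float>) and H (libNumerics::Homography)
   are assumed to have been set up elsewhere; parameter values are illustrative only. */
LWImage<float> warped = alloc_image<float>(1, 1, in.comps); /* reallocated inside when adjustSize is true */
std::pair<int,int> offset =
    map_image(in, H, warped,
              3,     /* spline interpolation order */
              true,  /* adjustSize: grow output to the transformed bounding box */
              true,  /* antiAlias: pre-filter when the map shrinks the image */
              NaN);  /* vOut: background value for pixels outside the source */
free(warped.data);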