Example #1
/* Convolve image with the 1-D kernel vector along image rows.  This
   is designed to be as efficient as possible.  Pixels outside the
   image are set to the value of the closest image pixel.
*/
static void ConvHorizontal(LWImage<flnum>& image, flnum *kernel, int ksize)
{
    flnum buffer[8000];
    
    const int rows = image.h;
    const int cols = image.w;
    const int halfsize = ksize / 2;
    assert(cols + ksize < 8000); /* TANG: this limits the maximum image width */

    for(int comp = 0; comp < image.comps; comp++) {
        const int deltaComp = comp*image.stepComp();
        for (int r = 0; r < rows; r++) {
            /* Copy the row into buffer with the pixels at the ends replicated
               for half the mask size.  This avoids the need to check for the
               ends within the inner loop. */
            for (int i = 0; i < halfsize; i++)
                buffer[i] = image.pixel(0,r)[deltaComp];
            for (int i = 0; i < cols; i++)
                buffer[halfsize + i] = image.pixel(i,r)[deltaComp]; 
            for (int i = 0; i < halfsize; i++)
                buffer[halfsize + cols + i] = image.pixel(cols-1,r)[deltaComp];

            ConvBufferFast(buffer, kernel, cols, ksize);
            for (int c = 0; c < cols; c++)
                image.pixel(c,r)[deltaComp] = buffer[c];
        }
    }
}
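
ConvBufferFast itself is not shown in these examples. The sketch below is a plausible reference version inferred from the call sites above (buffer layout, in-place write-back); the real routine may well be unrolled or otherwise optimized, and the kernel is applied without flipping, which for the symmetric kernels this code is typically used with is equivalent to true convolution.

/* Hypothetical reference version of ConvBufferFast: convolve the padded
   buffer (npixels + 2*halfsize values) with the kernel and write the npixels
   results back into the front of the buffer.  Writing in place is safe
   because output index i only reads buffer[i..i+ksize-1], none of which has
   been overwritten yet. */
static void ConvBufferFast(flnum* buffer, flnum* kernel, int npixels, int ksize)
{
    for (int i = 0; i < npixels; i++) {
        flnum sum = 0;
        for (int k = 0; k < ksize; k++)
            sum += buffer[i + k] * kernel[k];
        buffer[i] = sum;
    }
}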
Example #2
/* Same as ConvHorizontal, but applied to the vertical columns of the image.
*/
static void ConvVertical(LWImage<flnum>& image, flnum *kernel, int ksize)
{
    flnum buffer[8000];

    const int rows = image.h;
    const int cols = image.w;
    const int halfsize = ksize / 2;
    assert(rows + ksize < 8000);  /* TANG: this limits the maximum image height */

    for(int comp = 0; comp < image.comps; comp++) {
        const int deltaComp = comp*image.stepComp();
        for (int c = 0; c < cols; c++) {
            for (int i = 0; i < halfsize; i++)
                buffer[i] = image.pixel(c,0)[deltaComp];
            for (int i = 0; i < rows; i++)
                buffer[halfsize + i] = image.pixel(c,i)[deltaComp];
            for (int i = 0; i < halfsize; i++)
                buffer[halfsize + rows + i] = image.pixel(c,rows-1)[deltaComp];

            ConvBufferFast(buffer, kernel, rows, ksize);
            for (int r = 0; r < rows; r++)
                image.pixel(c,r)[deltaComp] = buffer[r];
        }
    }
}
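
Together, ConvHorizontal and ConvVertical implement a separable 2-D filter: one pass along the rows, one along the columns. A minimal usage sketch follows, assuming the LWImage API used above; the helper gaussKernel and the 3*sigma truncation rule are illustrative additions, not part of the original code.

#include <cmath>
#include <vector>

/* Illustrative helper: build a normalized 1-D Gaussian kernel of odd size. */
static std::vector<flnum> gaussKernel(flnum sigma)
{
    const int halfsize = static_cast<int>(std::ceil(3 * sigma));
    std::vector<flnum> k(2 * halfsize + 1);
    flnum sum = 0;
    for (int i = -halfsize; i <= halfsize; i++) {
        k[i + halfsize] = std::exp(-(i * i) / (2 * sigma * sigma));
        sum += k[i + halfsize];
    }
    for (int i = 0; i < (int)k.size(); i++)
        k[i] /= sum; /* normalize so the blur preserves overall brightness */
    return k;
}

/* Separable Gaussian blur: one horizontal pass, then one vertical pass. */
static void gaussBlur(LWImage<flnum>& image, flnum sigma)
{
    std::vector<flnum> k = gaussKernel(sigma);
    ConvHorizontal(image, &k[0], static_cast<int>(k.size()));
    ConvVertical(image, &k[0], static_cast<int>(k.size()));
}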
Example #3
/// Apply geometric transform to image.
///
/// The transformation \a map is applied to the image \a in and the result is
/// stored in \a im. If \a adjustSize is \c true, \a im is resized so that it
/// contains the whole transformed rectangle; otherwise it keeps its original
/// size.
///
/// The returned pair of integers is the offset of the output image \a im
/// with respect to the original image \a in. If \a adjustSize is \c false,
/// this is (0,0); otherwise it is the location of the upper-left corner of
/// \a im in pixel coordinates of \a in.
///
/// Interpolation is done by spline. The anti-aliasing filter is optional.
///
/// \a vOut is the background value assigned to pixels falling outside the image.
std::pair<int,int> map_image(LWImage<float> in,
                             libNumerics::Homography map,
                             LWImage<float>& im,
                             int order, bool adjustSize,
                             bool antiAlias, float vOut)
{
    int w = in.w, h = in.h;
    float zoomOut = antiAlias?
        static_cast<float>( minZoomOut(map.mat(), w, h) ): 1.0f;
    const libNumerics::Homography oriMap(map);
    const int oriW=w, oriH=h;
    std::pair<int,int> offset(0,0);
    if(adjustSize) {
        offset = boundingBox(map, w, h);
        free(im.data);
        im = alloc_image<float>(w, h, in.comps);
    }
    if(zoomOut < 1.0f) {
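        // Anti-aliasing strategy: warp into an enlarged intermediate image
        // (zoom factor zoomIn), low-pass filter it, then let the final pass
        // below shrink it back by zoomOut.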
        float zoomIn = 1.0f / zoomOut;
        // GF: added some extra space
        int wZoom=(int)std::ceil(w*zoomIn*1.5), hZoom=(int)std::ceil(h*zoomIn*1.5);
        LWImage<float> imZoom = alloc_image<float>(wZoom,hZoom,in.comps);
        libNumerics::matrix<double> mapZ(3,3);
        mapZ = 0.0;
        mapZ(0,0) = zoomIn;
        mapZ(1,1) = zoomIn;
        mapZ(2,2) = 1.0;
        map.mat() = mapZ*map.mat();
        map_image(in, map, imZoom, order, false, false, vOut);
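        // Smoothing strength before shrinking back: assuming the input already
        // carries a blur of about 0.8 pixels, reaching a blur of 0.8*zoomIn
        // requires an extra Gaussian of
        // sigma = sqrt((0.8*zoomIn)^2 - 0.8^2) = 0.8*sqrt(zoomIn^2 - 1).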
        float sigma = 0.8f*sqrt(zoomIn*zoomIn-1.0f);
        gauss_convol(imZoom, sigma);
        map.mat() = 0.0;
        map.mat()(0,0)=zoomOut;
        map.mat()(1,1)=zoomOut;
        map.mat()(2,2)=1.0;
        in = imZoom;
    }
    LWImage<float> tmp = alloc_image(in);
    if( prepare_spline(tmp,order) ) {
        libNumerics::Homography inv = map.inverse();
        const int stepComp = im.stepComp();
        float* out = new float[im.comps];
        float* pixOut = im.data;
        for(int i = 0; i < im.h; i++)
            for(int j = 0; j < im.w; j++) {
                double x=j+offset.first, y=i+offset.second;
                inv(x,y);
                for(int k=0; k < im.comps; k++)
                    out[k] = vOut;
                interpolate_spline(tmp, order,
                                   static_cast<float>(x+.5),
                                   static_cast<float>(y+.5), out);
                for(int k=0; k < im.comps; k++)
                    pixOut[k*stepComp] = out[k];
                pixOut += im.step();
            }
        delete [] out;
    }
    free(tmp.data);
    if(zoomOut < 1.0f) {
        free(in.data); // Was allocated above
        if(! is_number(vOut)) { // Put back mask
            libNumerics::Homography inv = oriMap.inverse();
            const int stepComp = im.stepComp();
            float* pixOut = im.data;
            for(int i = 0; i < im.h; i++)
                for(int j = 0; j < im.w; j++) {
                    double x=j+offset.first, y=i+offset.second;
                    inv(x,y);
                    if(x<0 || x>=oriW || y<0 || y>=oriH)
                        for(int k=0; k < im.comps; k++)
                            pixOut[k*stepComp] = NaN;
                    pixOut += im.step();
                }
        }
    }
    return offset;
}
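
A usage sketch for map_image, under the assumptions visible in the code above: the output image's data must come from alloc_image (it is released with free when adjustSize is true), map.mat() entries are assignable, and a default-constructed Homography is taken to be the identity. The translation values and the choice order = 3 are illustrative.

#include <utility>

// Warp an image by a pure translation of (+10, +5) pixels and let map_image
// grow the output so the whole transformed rectangle is kept.
std::pair<int,int> translate_example(LWImage<float> in, LWImage<float>& out)
{
    libNumerics::Homography map;      // assumed to default to the identity
    map.mat()(0,2) = 10.0;            // x translation
    map.mat()(1,2) =  5.0;            // y translation

    // Preallocate so the free(im.data) inside map_image is valid; the image
    // is reallocated there to the adjusted size.
    out = alloc_image<float>(in.w, in.h, in.comps);
    std::pair<int,int> offset =
        map_image(in, map, out, 3 /*spline order, illustrative*/,
                  true /*adjustSize*/, true /*antiAlias*/, 0.0f /*vOut*/);
    return offset; // upper-left corner of out in pixel coordinates of in
}

The caller keeps ownership of out.data and eventually releases it with free, matching the allocation pattern map_image uses for its own temporaries. Passing a NaN as vOut instead of 0.0f marks out-of-image pixels as missing, which is the case the is_number(vOut) branch above restores after the anti-aliasing pass.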