Example 1: sqrt64f (vectorized double-precision square root)
void sqrt64f(const double* src, double* dst, int len)
{
    CV_INSTRUMENT_REGION();

    int i = 0;

#if CV_SIMD_64F
    const int VECSZ = v_float64::nlanes;
    for( ; i < len; i += VECSZ*2 )
    {
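        // Tail handling: when fewer than 2*VECSZ elements remain, step back and
        // recompute one overlapping block. This is only valid out-of-place
        // (src != dst), since with aliasing the overlapped lanes would be
        // re-read after having been overwritten, and only after at least one
        // full pass (i > 0); otherwise fall through to the scalar loop below.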
        if( i + VECSZ*2 > len )
        {
            if( i == 0 || src == dst )
                break;
            i = len - VECSZ*2;
        }
        v_float64 t0 = vx_load(src + i), t1 = vx_load(src + i + VECSZ);
        t0 = v_sqrt(t0);
        t1 = v_sqrt(t1);
        v_store(dst + i, t0); v_store(dst + i + VECSZ, t1);
    }
    vx_cleanup();
#endif

    for( ; i < len; i++ )
        dst[i] = std::sqrt(src[i]);
}
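
A minimal driver for the function above; this is a hypothetical sketch, assuming sqrt64f is visible in the same translation unit along with the OpenCV headers it depends on:

#include <cstdio>
#include <vector>

int main()
{
    std::vector<double> src(100), dst(100);
    for (size_t k = 0; k < src.size(); k++)
        src[k] = (double)(k * k);                    // perfect squares
    sqrt64f(src.data(), dst.data(), (int)src.size());
    printf("sqrt(%g) = %g\n", src[99], dst[99]);     // expect 99
    return 0;
}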
Example 2: invSqrt32f (vectorized single-precision reciprocal square root)
void invSqrt32f(const float* src, float* dst, int len)
{
    CV_INSTRUMENT_REGION();

    int i = 0;

#if CV_SIMD
    const int VECSZ = v_float32::nlanes;
    for( ; i < len; i += VECSZ*2 )
    {
        if( i + VECSZ*2 > len )
        {
            if( i == 0 || src == dst )
                break;
            i = len - VECSZ*2;
        }
        v_float32 t0 = vx_load(src + i), t1 = vx_load(src + i + VECSZ);
        t0 = v_invsqrt(t0);
        t1 = v_invsqrt(t1);
        v_store(dst + i, t0); v_store(dst + i + VECSZ, t1);
    }
    vx_cleanup();
#endif

    for( ; i < len; i++ )
        dst[i] = 1/std::sqrt(src[i]);
}
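
A hypothetical accuracy check for invSqrt32f (again assuming the function above is in scope), comparing the SIMD result against a double-precision scalar reference:

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

int main()
{
    std::vector<float> src(1000), dst(1000);
    for (size_t k = 0; k < src.size(); k++)
        src[k] = 0.25f * (float)(k + 1);
    invSqrt32f(src.data(), dst.data(), (int)src.size());
    double maxErr = 0;
    for (size_t k = 0; k < src.size(); k++)
        maxErr = std::max(maxErr, std::abs((double)dst[k] - 1.0 / std::sqrt((double)src[k])));
    printf("max abs error vs double reference: %g\n", maxErr);
    return 0;
}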
Example 3: magnitude64f (vectorized Euclidean magnitude of (x, y) pairs)
void magnitude64f(const double* x, const double* y, double* mag, int len)
{
    CV_INSTRUMENT_REGION();

    int i = 0;

#if CV_SIMD_64F
    const int VECSZ = v_float64::nlanes;
    for( ; i < len; i += VECSZ*2 )
    {
        if( i + VECSZ*2 > len )
        {
            if( i == 0 || mag == x || mag == y )
                break;
            i = len - VECSZ*2;
        }
        v_float64 x0 = vx_load(x + i), x1 = vx_load(x + i + VECSZ);
        v_float64 y0 = vx_load(y + i), y1 = vx_load(y + i + VECSZ);
        x0 = v_sqrt(v_muladd(x0, x0, y0*y0));
        x1 = v_sqrt(v_muladd(x1, x1, y1*y1));
        v_store(mag + i, x0);
        v_store(mag + i + VECSZ, x1);
    }
    vx_cleanup();
#endif

    for( ; i < len; i++ )
    {
        double x0 = x[i], y0 = y[i];
        mag[i] = std::sqrt(x0*x0 + y0*y0);
    }
}
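
A hypothetical usage example built on Pythagorean triples, so the exact answers are known in advance:

#include <cstdio>

int main()
{
    double x[] = { 3, 5, 8 };
    double y[] = { 4, 12, 15 };
    double mag[3];
    magnitude64f(x, y, mag, 3);
    printf("%g %g %g\n", mag[0], mag[1], mag[2]);    // expect 5 13 17
    return 0;
}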
Example 4: log64f (table-driven vectorized natural logarithm, double precision)
void log64f( const double *x, double *y, int n )
{
    CV_INSTRUMENT_REGION();

    const double* const logTab = cv::details::getLogTab64f();

    const int64 LOGTAB_MASK2_64F = ((int64)1 << (52 - LOGTAB_SCALE)) - 1;
    const double
    A7 = 1.0,
    A6 = -0.5,
    A5 = 0.333333333333333314829616256247390992939472198486328125,
    A4 = -0.25,
    A3 = 0.2,
    A2 = -0.1666666666666666574148081281236954964697360992431640625,
    A1 = 0.1428571428571428769682682968777953647077083587646484375,
    A0 = -0.125;

    int i = 0;

#if CV_SIMD_64F
    const int VECSZ = v_float64::nlanes;
    const v_float64 vln2 = vx_setall_f64(ln_2);

    const v_float64
        vA0 = vx_setall_f64(A0), vA1 = vx_setall_f64(A1),
        vA2 = vx_setall_f64(A2), vA3 = vx_setall_f64(A3),
        vA4 = vx_setall_f64(A4), vA5 = vx_setall_f64(A5),
        vA6 = vx_setall_f64(A6), vA7 = vx_setall_f64(A7);

    for( ; i < n; i += VECSZ )
    {
        if( i + VECSZ > n )
        {
            if( i == 0 || x == y )
                break;
            i = n - VECSZ;
        }

        v_int64 h0 = vx_load((const int64*)x + i);
        v_int32 yi0 = v_pack(v_shr<52>(h0), vx_setzero_s64());
        yi0 = (yi0 & vx_setall_s32(0x7ff)) - vx_setall_s32(1023);

        v_int64 xi0 = (h0 & vx_setall_s64(LOGTAB_MASK2_64F)) | vx_setall_s64((int64)1023 << 52);
        h0 = v_shr<52 - LOGTAB_SCALE - 1>(h0);
        v_int32 idx = v_pack(h0, h0) & vx_setall_s32(LOGTAB_MASK*2);

        v_float64 xf0, yf0;
        v_lut_deinterleave(logTab, idx, yf0, xf0);

        yf0 = v_fma(v_cvt_f64(yi0), vln2, yf0);
        v_float64 delta = v_cvt_f64(idx == vx_setall_s32(510))*vx_setall_f64(1./512);
        xf0 = v_fma(v_reinterpret_as_f64(xi0) - vx_setall_f64(1.), xf0, delta);

        v_float64 xq = xf0*xf0;
        v_float64 zf0 = v_fma(xq, vA0, vA2);
        v_float64 zf1 = v_fma(xq, vA1, vA3);
        zf0 = v_fma(zf0, xq, vA4);
        zf1 = v_fma(zf1, xq, vA5);
        zf0 = v_fma(zf0, xq, vA6);
        zf1 = v_fma(zf1, xq, vA7);
        zf1 = v_fma(zf1, xf0, yf0);
        zf0 = v_fma(zf0, xq, zf1);

        v_store(y + i, zf0);
    }
    vx_cleanup();
#endif

    for( ; i < n; i++ )
    {
        Cv64suf buf;
        int64 i0 = ((const int64*)x)[i];

        buf.i = (i0 & LOGTAB_MASK2_64F) | ((int64)1023 << 52);
        int idx = (int)(i0 >> (52 - LOGTAB_SCALE - 1)) & (LOGTAB_MASK*2);

        double y0 = (((int)(i0 >> 52) & 0x7ff) - 1023) * ln_2 + logTab[idx];
        double x0 = (buf.f - 1.)*logTab[idx + 1] + (idx == 510 ? -1./512 : 0.);

        double xq = x0*x0;
        y[i] = (((A0*xq + A2)*xq + A4)*xq + A6)*xq + (((A1*xq + A3)*xq + A5)*xq + A7)*x0 + y0;
    }
}
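
The integer masking above implements the classic range reduction log(x) = e*ln(2) + log(m), where e is the unbiased exponent and the mantissa m lies in [1, 2). A standalone scalar sketch of the same bit-level split (illustrative only; the table and polynomial are replaced by std::log):

#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstring>

int main()
{
    double x = 10.0;
    uint64_t bits;
    std::memcpy(&bits, &x, sizeof(bits));
    int e = (int)((bits >> 52) & 0x7ff) - 1023;                     // unbiased exponent
    uint64_t mbits = (bits & ((1ULL << 52) - 1)) | (1023ULL << 52); // force exponent to 0
    double m;                                                       // mantissa in [1, 2)
    std::memcpy(&m, &mbits, sizeof(m));
    // log64f computes log(m) as a table lookup plus a degree-7 polynomial in (m - 1).
    printf("%.17g vs %.17g\n", e * 0.69314718055994530942 + std::log(m), std::log(x));
    return 0;
}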
Example 5: log32f (table-driven vectorized natural logarithm, single precision)
void log32f( const float *_x, float *y, int n )
{
    CV_INSTRUMENT_REGION();

    const float* const logTab_f = cv::details::getLogTab32f();

    const int LOGTAB_MASK2_32F = (1 << (23 - LOGTAB_SCALE)) - 1;
    const float
    A0 = 0.3333333333333333333333333f,
    A1 = -0.5f,
    A2 = 1.f;

    int i = 0;
    const int* x = (const int*)_x;

#if CV_SIMD
    const int VECSZ = v_float32::nlanes;
    const v_float32 vln2 = vx_setall_f32((float)ln_2);
    const v_float32 v1 = vx_setall_f32(1.f);
    const v_float32 vshift = vx_setall_f32(-1.f/512);

    const v_float32 vA0 = vx_setall_f32(A0);
    const v_float32 vA1 = vx_setall_f32(A1);
    const v_float32 vA2 = vx_setall_f32(A2);

    for( ; i < n; i += VECSZ )
    {
        if( i + VECSZ > n )
        {
            if( i == 0 || _x == y )
                break;
            i = n - VECSZ;
        }

        v_int32 h0 = vx_load(x + i);
        v_int32 yi0 = (v_shr<23>(h0) & vx_setall_s32(255)) - vx_setall_s32(127);
        v_int32 xi0 = (h0 & vx_setall_s32(LOGTAB_MASK2_32F)) | vx_setall_s32(127 << 23);

        h0 = v_shr<23 - LOGTAB_SCALE - 1>(h0) & vx_setall_s32(LOGTAB_MASK*2);
        v_float32 yf0, xf0;

        v_lut_deinterleave(logTab_f, h0, yf0, xf0);

        yf0 = v_fma(v_cvt_f32(yi0), vln2, yf0);

        v_float32 delta = v_reinterpret_as_f32(h0 == vx_setall_s32(510)) & vshift;
        xf0 = v_fma((v_reinterpret_as_f32(xi0) - v1), xf0, delta);

        v_float32 zf0 = v_fma(xf0, vA0, vA1);
        zf0 = v_fma(zf0, xf0, vA2);
        zf0 = v_fma(zf0, xf0, yf0);

        v_store(y + i, zf0);
    }
    vx_cleanup();
#endif

    for( ; i < n; i++ )
    {
        Cv32suf buf;
        int i0 = x[i];

        buf.i = (i0 & LOGTAB_MASK2_32F) | (127 << 23);
        int idx = (i0 >> (23 - LOGTAB_SCALE - 1)) & (LOGTAB_MASK*2);

        float y0 = (((i0 >> 23) & 0xff) - 127) * (float)ln_2 + logTab_f[idx];
        float x0 = (buf.f - 1.f)*logTab_f[idx + 1] + (idx == 510 ? -1.f/512 : 0.f);
        y[i] = ((A0*x0 + A1)*x0 + A2)*x0 + y0;
    }
}
Example 6: exp64f (table-driven vectorized exponential, double precision)
void exp64f( const double *_x, double *y, int n )
{
    CV_INSTRUMENT_REGION();

    const double* const expTab = cv::details::getExpTab64f();

    const double
    A5 = .99999999999999999998285227504999 / EXPPOLY_32F_A0,
    A4 = .69314718055994546743029643825322 / EXPPOLY_32F_A0,
    A3 = .24022650695886477918181338054308 / EXPPOLY_32F_A0,
    A2 = .55504108793649567998466049042729e-1 / EXPPOLY_32F_A0,
    A1 = .96180973140732918010002372686186e-2 / EXPPOLY_32F_A0,
    A0 = .13369713757180123244806654839424e-2 / EXPPOLY_32F_A0;

    int i = 0;
    const Cv64suf* x = (const Cv64suf*)_x;
    double minval = (-exp_max_val/exp_prescale);
    double maxval = (exp_max_val/exp_prescale);

#if CV_SIMD_64F
    const int VECSZ = v_float64::nlanes;
    const v_float64 vprescale = vx_setall_f64(exp_prescale);
    const v_float64 vpostscale = vx_setall_f64(exp_postscale);
    const v_float64 vminval = vx_setall_f64(minval);
    const v_float64 vmaxval = vx_setall_f64(maxval);

    const v_float64 vA1 = vx_setall_f64(A1);
    const v_float64 vA2 = vx_setall_f64(A2);
    const v_float64 vA3 = vx_setall_f64(A3);
    const v_float64 vA4 = vx_setall_f64(A4);
    const v_float64 vA5 = vx_setall_f64(A5);

    const v_int32 vidxmask = vx_setall_s32(EXPTAB_MASK);
    bool y_aligned = (size_t)(void*)y % 32 == 0;

    for( ; i < n; i += VECSZ*2 )
    {
        if( i + VECSZ*2 > n )
        {
            if( i == 0 || _x == y )
                break;
            i = n - VECSZ*2;
            y_aligned = false;
        }

        v_float64 xf0 = vx_load(&x[i].f), xf1 = vx_load(&x[i + VECSZ].f);

        xf0 = v_min(v_max(xf0, vminval), vmaxval);
        xf1 = v_min(v_max(xf1, vminval), vmaxval);

        xf0 *= vprescale;
        xf1 *= vprescale;

        v_int32 xi0 = v_round(xf0);
        v_int32 xi1 = v_round(xf1);
        xf0 = (xf0 - v_cvt_f64(xi0))*vpostscale;
        xf1 = (xf1 - v_cvt_f64(xi1))*vpostscale;

        v_float64 yf0 = v_lut(expTab, xi0 & vidxmask);
        v_float64 yf1 = v_lut(expTab, xi1 & vidxmask);

        v_int32 v0 = vx_setzero_s32(), v1023 = vx_setall_s32(1023), v2047 = vx_setall_s32(2047);
        xi0 = v_min(v_max(v_shr<EXPTAB_SCALE>(xi0) + v1023, v0), v2047);
        xi1 = v_min(v_max(v_shr<EXPTAB_SCALE>(xi1) + v1023, v0), v2047);

        v_int64 xq0, xq1, dummy;
        v_expand(xi0, xq0, dummy);
        v_expand(xi1, xq1, dummy);

        yf0 *= v_reinterpret_as_f64(v_shl<52>(xq0));
        yf1 *= v_reinterpret_as_f64(v_shl<52>(xq1));

        v_float64 zf0 = xf0 + vA1;
        v_float64 zf1 = xf1 + vA1;

        zf0 = v_fma(zf0, xf0, vA2);
        zf1 = v_fma(zf1, xf1, vA2);

        zf0 = v_fma(zf0, xf0, vA3);
        zf1 = v_fma(zf1, xf1, vA3);

        zf0 = v_fma(zf0, xf0, vA4);
        zf1 = v_fma(zf1, xf1, vA4);

        zf0 = v_fma(zf0, xf0, vA5);
        zf1 = v_fma(zf1, xf1, vA5);

        zf0 *= yf0;
        zf1 *= yf1;

        if( y_aligned )
        {
            v_store_aligned(y + i, zf0);
            v_store_aligned(y + i + VECSZ, zf1);
        }
        else
        {
            v_store(y + i, zf0);
            v_store(y + i + VECSZ, zf1);
        }
    }
    vx_cleanup();
#endif

    for( ; i < n; i++ )
    {
        double x0 = x[i].f;
        x0 = std::min(std::max(x0, minval), maxval);
        x0 *= exp_prescale;
        Cv64suf buf;

        int xi = saturate_cast<int>(x0);
        x0 = (x0 - xi)*exp_postscale;

        int t = (xi >> EXPTAB_SCALE) + 1023;
        t = !(t & ~2047) ? t : t < 0 ? 0 : 2047;
        buf.i = (int64)t << 52;

        y[i] = buf.f * expTab[xi & EXPTAB_MASK] * (((((A0*x0 + A1)*x0 + A2)*x0 + A3)*x0 + A4)*x0 + A5);
    }
}
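
exp64f relies on the reduction exp(x) = 2^q * 2^(r/2^EXPTAB_SCALE) * exp(t): prescaling expresses x in units of ln(2)/2^EXPTAB_SCALE, rounding extracts the integer part, the table supplies 2^(r/2^EXPTAB_SCALE), and the polynomial handles the small remainder. A scalar sketch of the same reduction, assuming EXPTAB_SCALE is 6 (so the table has 64 entries and EXPTAB_MASK is 63); std::exp2 and std::exp stand in for the table and the polynomial:

#include <cmath>
#include <cstdio>

int main()
{
    const double ln2 = 0.69314718055994530942;
    double x  = 1.5;
    double xs = x * (64.0 / ln2);         // prescale: x in units of ln(2)/64
    int    n  = (int)std::lrint(xs);      // nearest integer (cf. v_round)
    double t  = (xs - n) * (ln2 / 64.0);  // remainder in natural-log units, |t| <= ln(2)/128
    int    q  = n >> 6;                   // power-of-two part (xi >> EXPTAB_SCALE)
    int    r  = n & 63;                   // table index (xi & EXPTAB_MASK)
    double y  = std::ldexp(std::exp2(r / 64.0) * std::exp(t), q);
    printf("%.17g vs %.17g\n", y, std::exp(x));
    return 0;
}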
Example 7: exp32f (table-driven vectorized exponential, single precision)
void exp32f( const float *_x, float *y, int n )
{
    CV_INSTRUMENT_REGION();

    const float* const expTab_f = cv::details::getExpTab32f();

    const float
    A4 = (float)(1.000000000000002438532970795181890933776 / EXPPOLY_32F_A0),
    A3 = (float)(.6931471805521448196800669615864773144641 / EXPPOLY_32F_A0),
    A2 = (float)(.2402265109513301490103372422686535526573 / EXPPOLY_32F_A0),
    A1 = (float)(.5550339366753125211915322047004666939128e-1 / EXPPOLY_32F_A0);

    int i = 0;
    const Cv32suf* x = (const Cv32suf*)_x;
    float minval = (float)(-exp_max_val/exp_prescale);
    float maxval = (float)(exp_max_val/exp_prescale);
    float postscale = (float)exp_postscale;

#if CV_SIMD
    const int VECSZ = v_float32::nlanes;
    const v_float32 vprescale = vx_setall_f32((float)exp_prescale);
    const v_float32 vpostscale = vx_setall_f32((float)exp_postscale);
    const v_float32 vminval = vx_setall_f32(minval);
    const v_float32 vmaxval = vx_setall_f32(maxval);

    const v_float32 vA1 = vx_setall_f32((float)A1);
    const v_float32 vA2 = vx_setall_f32((float)A2);
    const v_float32 vA3 = vx_setall_f32((float)A3);
    const v_float32 vA4 = vx_setall_f32((float)A4);

    const v_int32 vidxmask = vx_setall_s32(EXPTAB_MASK);
    bool y_aligned = (size_t)(void*)y % 32 == 0;

    for( ; i < n; i += VECSZ*2 )
    {
        if( i + VECSZ*2 > n )
        {
            if( i == 0 || _x == y )
                break;
            i = n - VECSZ*2;
            y_aligned = false;
        }

        v_float32 xf0 = vx_load(&x[i].f), xf1 = vx_load(&x[i + VECSZ].f);

        xf0 = v_min(v_max(xf0, vminval), vmaxval);
        xf1 = v_min(v_max(xf1, vminval), vmaxval);

        xf0 *= vprescale;
        xf1 *= vprescale;

        v_int32 xi0 = v_round(xf0);
        v_int32 xi1 = v_round(xf1);
        xf0 = (xf0 - v_cvt_f32(xi0))*vpostscale;
        xf1 = (xf1 - v_cvt_f32(xi1))*vpostscale;

        v_float32 yf0 = v_lut(expTab_f, xi0 & vidxmask);
        v_float32 yf1 = v_lut(expTab_f, xi1 & vidxmask);

        v_int32 v0 = vx_setzero_s32(), v127 = vx_setall_s32(127), v255 = vx_setall_s32(255);
        xi0 = v_min(v_max(v_shr<EXPTAB_SCALE>(xi0) + v127, v0), v255);
        xi1 = v_min(v_max(v_shr<EXPTAB_SCALE>(xi1) + v127, v0), v255);

        yf0 *= v_reinterpret_as_f32(v_shl<23>(xi0));
        yf1 *= v_reinterpret_as_f32(v_shl<23>(xi1));

        v_float32 zf0 = xf0 + vA1;
        v_float32 zf1 = xf1 + vA1;

        zf0 = v_fma(zf0, xf0, vA2);
        zf1 = v_fma(zf1, xf1, vA2);

        zf0 = v_fma(zf0, xf0, vA3);
        zf1 = v_fma(zf1, xf1, vA3);

        zf0 = v_fma(zf0, xf0, vA4);
        zf1 = v_fma(zf1, xf1, vA4);

        zf0 *= yf0;
        zf1 *= yf1;

        if( y_aligned )
        {
            v_store_aligned(y + i, zf0);
            v_store_aligned(y + i + VECSZ, zf1);
        }
        else
        {
            v_store(y + i, zf0);
            v_store(y + i + VECSZ, zf1);
        }
    }
    vx_cleanup();
#endif

    for( ; i < n; i++ )
    {
        float x0 = x[i].f;
        x0 = std::min(std::max(x0, minval), maxval);
        x0 *= (float)exp_prescale;
        Cv32suf buf;

        int xi = saturate_cast<int>(x0);
        x0 = (x0 - xi)*postscale;

        int t = (xi >> EXPTAB_SCALE) + 127;
        t = !(t & ~255) ? t : t < 0 ? 0 : 255;
        buf.i = t << 23;

        y[i] = buf.f * expTab_f[xi & EXPTAB_MASK] * ((((x0 + A1)*x0 + A2)*x0 + A3)*x0 + A4);
    }
}
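
A hypothetical round-trip check, assuming both exp32f above and log32f from Example 5 are in scope; applying log32f and then exp32f should reproduce the input to within a few float ULPs:

#include <cstdio>
#include <vector>

int main()
{
    std::vector<float> x(256), lx(256), rx(256);
    for (int k = 0; k < 256; k++)
        x[k] = 0.5f + 0.01f * (float)k;
    log32f(x.data(), lx.data(), 256);
    exp32f(lx.data(), rx.data(), 256);
    float maxRel = 0.f;
    for (int k = 0; k < 256; k++)
    {
        float rel = (rx[k] - x[k]) / x[k];
        if (rel < 0) rel = -rel;
        if (rel > maxRel) maxRel = rel;
    }
    printf("max relative round-trip error: %g\n", maxRel);
    return 0;
}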
Example 8: Data<R>::operator= (storing a SIMD register into a lane buffer)
    Data<R> & operator=(const R & r)
    {
        v_store(d, r);
        return *this;
    }
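
A hypothetical use of this operator inside a test, assuming the Data<R> lane-buffer wrapper from OpenCV's intrinsics test utilities (d is its lane array):

Data<v_int32x4> data;
v_int32x4 r = v_setall_s32(7);
data = r;   // invokes the operator above, i.e. v_store(data.d, r)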
Example 9: test_loadstore (load/store and reinterpret tests for the universal intrinsics)
    TheTest & test_loadstore()
    {
        AlignedData<R> data;
        AlignedData<R> out;

        // check if addresses are aligned and unaligned respectively
        EXPECT_EQ((size_t)0, (size_t)&data.a.d % 16);
        EXPECT_NE((size_t)0, (size_t)&data.u.d % 16);
        EXPECT_EQ((size_t)0, (size_t)&out.a.d % 16);
        EXPECT_NE((size_t)0, (size_t)&out.u.d % 16);

        // check some initialization methods
        R r1 = data.a;
        R r2 = v_load(data.u.d);
        R r3 = v_load_aligned(data.a.d);
        R r4(r2);
        EXPECT_EQ(data.a[0], r1.get0());
        EXPECT_EQ(data.u[0], r2.get0());
        EXPECT_EQ(data.a[0], r3.get0());
        EXPECT_EQ(data.u[0], r4.get0());

        R r_low = v_load_low((LaneType*)data.u.d);
        EXPECT_EQ(data.u[0], r_low.get0());
        v_store(out.u.d, r_low);
        for (int i = 0; i < R::nlanes/2; ++i)
        {
            EXPECT_EQ((LaneType)data.u[i], (LaneType)out.u[i]);
        }

        R r_low_align8byte = v_load_low((LaneType*)((char*)data.u.d + 8));
        EXPECT_EQ(data.u[R::nlanes/2], r_low_align8byte.get0());
        v_store(out.u.d, r_low_align8byte);
        for (int i = 0; i < R::nlanes/2; ++i)
        {
            EXPECT_EQ((LaneType)data.u[i + R::nlanes/2], (LaneType)out.u[i]);
        }

        // check some store methods
        out.u.clear();
        out.a.clear();
        v_store(out.u.d, r1);
        v_store_aligned(out.a.d, r2);
        EXPECT_EQ(data.a, out.a);
        EXPECT_EQ(data.u, out.u);

        // check more store methods
        Data<R> d, res(0);
        R r5 = d;
        v_store_high(res.mid(), r5);
        v_store_low(res.d, r5);
        EXPECT_EQ(d, res);

        // check halves load correctness
        res.clear();
        R r6 = v_load_halves(d.d, d.mid());
        v_store(res.d, r6);
        EXPECT_EQ(d, res);

        // zero, all
        Data<R> resZ = V_RegTrait128<LaneType>::zero();
        Data<R> resV = V_RegTrait128<LaneType>::all(8);
        for (int i = 0; i < R::nlanes; ++i)
        {
            EXPECT_EQ((LaneType)0, resZ[i]);
            EXPECT_EQ((LaneType)8, resV[i]);
        }

        // reinterpret_as
        v_uint8x16 vu8 = v_reinterpret_as_u8(r1); out.a.clear(); v_store((uchar*)out.a.d, vu8); EXPECT_EQ(data.a, out.a);
        v_int8x16 vs8 = v_reinterpret_as_s8(r1); out.a.clear(); v_store((schar*)out.a.d, vs8); EXPECT_EQ(data.a, out.a);
        v_uint16x8 vu16 = v_reinterpret_as_u16(r1); out.a.clear(); v_store((ushort*)out.a.d, vu16); EXPECT_EQ(data.a, out.a);
        v_int16x8 vs16 = v_reinterpret_as_s16(r1); out.a.clear(); v_store((short*)out.a.d, vs16); EXPECT_EQ(data.a, out.a);
        v_uint32x4 vu32 = v_reinterpret_as_u32(r1); out.a.clear(); v_store((unsigned*)out.a.d, vu32); EXPECT_EQ(data.a, out.a);
        v_int32x4 vs32 = v_reinterpret_as_s32(r1); out.a.clear(); v_store((int*)out.a.d, vs32); EXPECT_EQ(data.a, out.a);
        v_uint64x2 vu64 = v_reinterpret_as_u64(r1); out.a.clear(); v_store((uint64*)out.a.d, vu64); EXPECT_EQ(data.a, out.a);
        v_int64x2 vs64 = v_reinterpret_as_s64(r1); out.a.clear(); v_store((int64*)out.a.d, vs64); EXPECT_EQ(data.a, out.a);
        v_float32x4 vf32 = v_reinterpret_as_f32(r1); out.a.clear(); v_store((float*)out.a.d, vf32); EXPECT_EQ(data.a, out.a);
#if CV_SIMD128_64F
        v_float64x2 vf64 = v_reinterpret_as_f64(r1); out.a.clear(); v_store((double*)out.a.d, vf64); EXPECT_EQ(data.a, out.a);
#endif

        return *this;
    }
Example 10: spatialGradient (3x3 Sobel image gradients with a SIMD fast path)
void spatialGradient( InputArray _src, OutputArray _dx, OutputArray _dy,
                      int ksize, int borderType )
{
    CV_INSTRUMENT_REGION();

    // Prepare InputArray src
    Mat src = _src.getMat();
    CV_Assert( !src.empty() );
    CV_Assert( src.type() == CV_8UC1 );
    CV_Assert( borderType == BORDER_DEFAULT || borderType == BORDER_REPLICATE );

    // Prepare OutputArrays dx, dy
    _dx.create( src.size(), CV_16SC1 );
    _dy.create( src.size(), CV_16SC1 );
    Mat dx = _dx.getMat(),
        dy = _dy.getMat();

    // TODO: Allow for other kernel sizes
    CV_Assert(ksize == 3);

    // Get dimensions
    const int H = src.rows,
              W = src.cols;

    // Row, column indices
    int i = 0,
        j = 0;

    // Handle border types
    int i_top    = 0,     // Case for H == 1 && W == 1 && BORDER_REPLICATE
        i_bottom = H - 1,
        j_offl   = 0,     // j offset from 0th   pixel to reach -1st pixel
        j_offr   = 0;     // j offset from W-1th pixel to reach Wth  pixel

    if ( borderType == BORDER_DEFAULT ) // Equiv. to BORDER_REFLECT_101
    {
        if ( H > 1 )
        {
            i_top    = 1;
            i_bottom = H - 2;
        }
        if ( W > 1 )
        {
            j_offl = 1;
            j_offr = -1;
        }
    }

    // Pointer to row vectors
    uchar *p_src, *c_src, *n_src; // previous, current, next row
    short *c_dx,  *c_dy;

    int i_start = 0;
    int j_start = 0;
#if CV_SIMD128 && CV_SSE2
    if(hasSIMD128())
    {
        uchar *m_src;
        short *n_dx, *n_dy;

        // Characters in variable names have the following meanings:
        // u: unsigned char
        // s: signed int
        //
        // [row][column]
        // m: offset -1
        // n: offset  0
        // p: offset  1
        // Example: umn is offset -1 in row and offset 0 in column
        for ( i = 0; i < H - 1; i += 2 )
        {
            if   ( i == 0 ) p_src = src.ptr<uchar>(i_top);
            else            p_src = src.ptr<uchar>(i-1);

            c_src = src.ptr<uchar>(i);
            n_src = src.ptr<uchar>(i+1);

            if ( i == H - 2 ) m_src = src.ptr<uchar>(i_bottom);
            else              m_src = src.ptr<uchar>(i+2);

            c_dx = dx.ptr<short>(i);
            c_dy = dy.ptr<short>(i);
            n_dx = dx.ptr<short>(i+1);
            n_dy = dy.ptr<short>(i+1);

            v_uint8x16 v_select_m = v_uint8x16(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                                               0, 0, 0, 0xFF);

            // Process rest of columns 16-column chunks at a time
            for ( j = 1; j < W - 16; j += 16 )
            {
                // Load top row for 3x3 Sobel filter
                v_uint8x16 v_um = v_load(&p_src[j-1]);
                v_uint8x16 v_up = v_load(&p_src[j+1]);
                // TODO: Replace _mm_slli_si128 with hal method
                v_uint8x16 v_un = v_select(v_select_m, v_uint8x16(_mm_slli_si128(v_up.val, 1)),
                                                       v_uint8x16(_mm_srli_si128(v_um.val, 1)));
                v_uint16x8 v_um1, v_um2, v_un1, v_un2, v_up1, v_up2;
                v_expand(v_um, v_um1, v_um2);
                v_expand(v_un, v_un1, v_un2);
                v_expand(v_up, v_up1, v_up2);
                v_int16x8 v_s1m1 = v_reinterpret_as_s16(v_um1);
                v_int16x8 v_s1m2 = v_reinterpret_as_s16(v_um2);
                v_int16x8 v_s1n1 = v_reinterpret_as_s16(v_un1);
                v_int16x8 v_s1n2 = v_reinterpret_as_s16(v_un2);
                v_int16x8 v_s1p1 = v_reinterpret_as_s16(v_up1);
                v_int16x8 v_s1p2 = v_reinterpret_as_s16(v_up2);

                // Load second row for 3x3 Sobel filter
                v_um = v_load(&c_src[j-1]);
                v_up = v_load(&c_src[j+1]);
                // TODO: Replace _mm_slli_si128 with hal method
                v_un = v_select(v_select_m, v_uint8x16(_mm_slli_si128(v_up.val, 1)),
                                            v_uint8x16(_mm_srli_si128(v_um.val, 1)));
                v_expand(v_um, v_um1, v_um2);
                v_expand(v_un, v_un1, v_un2);
                v_expand(v_up, v_up1, v_up2);
                v_int16x8 v_s2m1 = v_reinterpret_as_s16(v_um1);
                v_int16x8 v_s2m2 = v_reinterpret_as_s16(v_um2);
                v_int16x8 v_s2n1 = v_reinterpret_as_s16(v_un1);
                v_int16x8 v_s2n2 = v_reinterpret_as_s16(v_un2);
                v_int16x8 v_s2p1 = v_reinterpret_as_s16(v_up1);
                v_int16x8 v_s2p2 = v_reinterpret_as_s16(v_up2);

                // Load third row for 3x3 Sobel filter
                v_um = v_load(&n_src[j-1]);
                v_up = v_load(&n_src[j+1]);
                // TODO: Replace _mm_slli_si128 with hal method
                v_un = v_select(v_select_m, v_uint8x16(_mm_slli_si128(v_up.val, 1)),
                                            v_uint8x16(_mm_srli_si128(v_um.val, 1)));
                v_expand(v_um, v_um1, v_um2);
                v_expand(v_un, v_un1, v_un2);
                v_expand(v_up, v_up1, v_up2);
                v_int16x8 v_s3m1 = v_reinterpret_as_s16(v_um1);
                v_int16x8 v_s3m2 = v_reinterpret_as_s16(v_um2);
                v_int16x8 v_s3n1 = v_reinterpret_as_s16(v_un1);
                v_int16x8 v_s3n2 = v_reinterpret_as_s16(v_un2);
                v_int16x8 v_s3p1 = v_reinterpret_as_s16(v_up1);
                v_int16x8 v_s3p2 = v_reinterpret_as_s16(v_up2);

                // dx & dy for rows 1, 2, 3
                v_int16x8 v_sdx1, v_sdy1;
                spatialGradientKernel<v_int16x8>( v_sdx1, v_sdy1,
                                                  v_s1m1, v_s1n1, v_s1p1,
                                                  v_s2m1,         v_s2p1,
                                                  v_s3m1, v_s3n1, v_s3p1 );

                v_int16x8 v_sdx2, v_sdy2;
                spatialGradientKernel<v_int16x8>( v_sdx2, v_sdy2,
                                                  v_s1m2, v_s1n2, v_s1p2,
                                                  v_s2m2,         v_s2p2,
                                                  v_s3m2, v_s3n2, v_s3p2 );

                // Store
                v_store(&c_dx[j],   v_sdx1);
                v_store(&c_dx[j+8], v_sdx2);
                v_store(&c_dy[j],   v_sdy1);
                v_store(&c_dy[j+8], v_sdy2);

                // Load fourth row for 3x3 Sobel filter
                v_um = v_load(&m_src[j-1]);
                v_up = v_load(&m_src[j+1]);
                // TODO: Replace _mm_slli_si128 with hal method
                v_un = v_select(v_select_m, v_uint8x16(_mm_slli_si128(v_up.val, 1)),
                                            v_uint8x16(_mm_srli_si128(v_um.val, 1)));
                v_expand(v_um, v_um1, v_um2);
                v_expand(v_un, v_un1, v_un2);
                v_expand(v_up, v_up1, v_up2);
                v_int16x8 v_s4m1 = v_reinterpret_as_s16(v_um1);
                v_int16x8 v_s4m2 = v_reinterpret_as_s16(v_um2);
                v_int16x8 v_s4n1 = v_reinterpret_as_s16(v_un1);
                v_int16x8 v_s4n2 = v_reinterpret_as_s16(v_un2);
                v_int16x8 v_s4p1 = v_reinterpret_as_s16(v_up1);
                v_int16x8 v_s4p2 = v_reinterpret_as_s16(v_up2);

                // dx & dy for rows 2, 3, 4
                spatialGradientKernel<v_int16x8>( v_sdx1, v_sdy1,
                                                  v_s2m1, v_s2n1, v_s2p1,
                                                  v_s3m1,         v_s3p1,
                                                  v_s4m1, v_s4n1, v_s4p1 );

                spatialGradientKernel<v_int16x8>( v_sdx2, v_sdy2,
                                                  v_s2m2, v_s2n2, v_s2p2,
                                                  v_s3m2,         v_s3p2,
                                                  v_s4m2, v_s4n2, v_s4p2 );

                // Store
                v_store(&n_dx[j],   v_sdx1);
                v_store(&n_dx[j+8], v_sdx2);
                v_store(&n_dy[j],   v_sdy1);
                v_store(&n_dy[j+8], v_sdy2);
            }
        }
    }
    i_start = i;
    j_start = j;
#endif
    int j_p, j_n;
    uchar v00, v01, v02, v10, v11, v12, v20, v21, v22;
    for ( i = 0; i < H; i++ )
    {
        if   ( i == 0 ) p_src = src.ptr<uchar>(i_top);
        else            p_src = src.ptr<uchar>(i-1);

        c_src = src.ptr<uchar>(i);

        if ( i == H - 1 ) n_src = src.ptr<uchar>(i_bottom);
        else              n_src = src.ptr<uchar>(i+1);

        c_dx = dx.ptr<short>(i);
        c_dy = dy.ptr<short>(i);

        // Process left-most column
        j = 0;
        j_p = j + j_offl;
        j_n = 1;
        if ( j_n >= W ) j_n = j + j_offr;
        v00 = p_src[j_p]; v01 = p_src[j]; v02 = p_src[j_n];
        v10 = c_src[j_p]; v11 = c_src[j]; v12 = c_src[j_n];
        v20 = n_src[j_p]; v21 = n_src[j]; v22 = n_src[j_n];
        spatialGradientKernel<short>( c_dx[0], c_dy[0], v00, v01, v02, v10,
                                      v12, v20, v21, v22 );
        v00 = v01; v10 = v11; v20 = v21;
        v01 = v02; v11 = v12; v21 = v22;

        // Process middle columns
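        // Rows below i_start were already filled by the SIMD pass up to
        // column j_start, so resume there; all other rows start at column 1.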
        j = i >= i_start ? 1 : j_start;
        j_p = j - 1;
        v00 = p_src[j_p]; v01 = p_src[j];
        v10 = c_src[j_p]; v11 = c_src[j];
        v20 = n_src[j_p]; v21 = n_src[j];

        for ( ; j < W - 1; j++ )
        {
            // Get values for next column
            j_n = j + 1; v02 = p_src[j_n]; v12 = c_src[j_n]; v22 = n_src[j_n];
            spatialGradientKernel<short>( c_dx[j], c_dy[j], v00, v01, v02, v10,
                                          v12, v20, v21, v22 );

            // Move values back one column for next iteration
            v00 = v01; v10 = v11; v20 = v21;
            v01 = v02; v11 = v12; v21 = v22;
        }

        // Process right-most column
        if ( j < W )
        {
            j_n = j + j_offr; v02 = p_src[j_n]; v12 = c_src[j_n]; v22 = n_src[j_n];
            spatialGradientKernel<short>( c_dx[j], c_dy[j], v00, v01, v02, v10,
                                          v12, v20, v21, v22 );
        }
    }

}
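
spatialGradient is exposed as a public OpenCV API declared in opencv2/imgproc.hpp; a minimal hypothetical call looks like:

#include <opencv2/imgproc.hpp>

void demo(const cv::Mat& gray)              // input must be CV_8UC1
{
    cv::Mat dx, dy;                         // created as CV_16SC1
    cv::spatialGradient(gray, dx, dy);      // ksize = 3, borderType = BORDER_DEFAULT
}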