#include <arm_neon.h>

static inline void char_to_float_vectors(const unsigned char *sourcep,
                                         float32x4_t *mp0, float32x4_t *mp1)
{
    uint8x8_t rawpixels;    /* source pixels as {[YUYV]0 [YUYV]1} */
    int16x8_t widerpixels;  /* rawpixels zero-extended to one short per component */
    int16x4_t high16, low16;
    int32x4_t high32, low32;
    const int16x8_t uvbias = {0, 128, 0, 128, 0, 128, 0, 128};

    rawpixels = vld1_u8(sourcep);
    widerpixels = vreinterpretq_s16_u16(vmovl_u8(rawpixels));

    /* subtract uvbias from widerpixels, centering the U and V components */
    widerpixels = vsubq_s16(widerpixels, uvbias);

    /* split widerpixels into (low16, high16), then widen to (low32, high32) */
    low16 = vget_low_s16(widerpixels);
    high16 = vget_high_s16(widerpixels);
    high32 = vmovl_s16(high16);
    low32 = vmovl_s16(low16);

    /* convert low32 and high32 to floats and store them in *mp0 and *mp1 */
    *mp0 = vcvtq_f32_s32(low32);
    *mp1 = vcvtq_f32_s32(high32);
}
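A minimal usage sketch (not part of the original source): feed one 8-byte YUYV pair through the helper and read the two float vectors back into plain arrays. The layout follows the comments above, with 128 subtracted from the chroma (U/V) positions.

#include <arm_neon.h>

void demo_char_to_float(const unsigned char *yuyv /* at least 8 bytes */)
{
    float32x4_t f0, f1;
    char_to_float_vectors(yuyv, &f0, &f1);
    /* f0 holds components 0..3 as floats, f1 holds components 4..7 */
    float out[8];
    vst1q_f32(out, f0);
    vst1q_f32(out + 4, f1);
}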
Example #2
void test_vcvtQf32_s32 (void)
{
  float32x4_t out_float32x4_t;
  int32x4_t arg0_int32x4_t = vdupq_n_s32 (0);  /* initialized to avoid reading an indeterminate value */

  out_float32x4_t = vcvtq_f32_s32 (arg0_int32x4_t);
  (void) out_float32x4_t;  /* silence unused-variable warnings */
}
void dotProd_i16_neon(const float *dataf, const float *weightsf, float *vals, const int n, const int len, const float *istd) {
    const int16_t *data = (const int16_t *)dataf;
    const int16_t *weights = (const int16_t *)weightsf;
    weightsf += n * len / 2; // skip the n*len int16_t coefficients: they occupy n*len/2 floats (sizeof(float)/sizeof(int16_t) == 2)

    for (int i = 0; i < n; i += 4) {
        int32x4_t accum0 = { 0, 0, 0, 0 };
        int32x4_t accum1 = accum0;
        int32x4_t accum2 = accum0;
        int32x4_t accum3 = accum0;

        for (int j = 0; j < len; j += 8) {
            int16x4x2_t d0 = vld2_s16(data + j);

            int16x4x2_t w0 = vld2_s16(weights);
            int16x4x2_t w1 = vld2_s16(weights + 8);
            int16x4x2_t w2 = vld2_s16(weights + 16);
            int16x4x2_t w3 = vld2_s16(weights + 24);

            accum0 = vmlal_s16(accum0, d0.val[0], w0.val[0]);
            accum0 = vmlal_s16(accum0, d0.val[1], w0.val[1]);

            accum1 = vmlal_s16(accum1, d0.val[0], w1.val[0]);
            accum1 = vmlal_s16(accum1, d0.val[1], w1.val[1]);

            accum2 = vmlal_s16(accum2, d0.val[0], w2.val[0]);
            accum2 = vmlal_s16(accum2, d0.val[1], w2.val[1]);

            accum3 = vmlal_s16(accum3, d0.val[0], w3.val[0]);
            accum3 = vmlal_s16(accum3, d0.val[1], w3.val[1]);

            weights += 32;
        }

        int32x2_t sum0 = vpadd_s32(vget_low_s32(accum0), vget_high_s32(accum0));
        int32x2_t sum1 = vpadd_s32(vget_low_s32(accum1), vget_high_s32(accum1));
        int32x2_t sum2 = vpadd_s32(vget_low_s32(accum2), vget_high_s32(accum2));
        int32x2_t sum3 = vpadd_s32(vget_low_s32(accum3), vget_high_s32(accum3));
        sum0 = vpadd_s32(sum0, sum1);
        sum1 = vpadd_s32(sum2, sum3);
        int32x4_t sum = vcombine_s32(sum0, sum1);

        float32x4_t val = vcvtq_f32_s32(sum);
        val = vmulq_f32(val, vld1q_f32(weightsf + i*2));
        val = vmulq_n_f32(val, istd[0]);
        val = vaddq_f32(val, vld1q_f32(weightsf + i*2 + 4));
        vst1q_f32(vals + i, val);
    }
}
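For reference, a plain-C reading of what the NEON routine above computes (dotProd_i16_ref is an illustrative name; the blocked weight layout is inferred from the loads: four rows of eight int16 coefficients per block, followed by groups of four float scales and four float biases):

#include <stdint.h>

void dotProd_i16_ref(const float *dataf, const float *weightsf, float *vals,
                     const int n, const int len, const float *istd)
{
    const int16_t *data = (const int16_t *)dataf;
    const int16_t *weights = (const int16_t *)weightsf;
    const float *wf = weightsf + n * len / 2;  /* floats after the int16 block */

    for (int i = 0; i < n; i += 4) {
        int32_t sum[4] = { 0, 0, 0, 0 };
        for (int j = 0; j < len; j += 8)
            for (int r = 0; r < 4; r++)      /* four weight rows per group */
                for (int k = 0; k < 8; k++)  /* eight coefficients per block */
                    sum[r] += (int32_t)data[j + k]
                            * weights[i * len + j * 4 + r * 8 + k];
        for (int r = 0; r < 4; r++)          /* scale by istd, add the bias */
            vals[i + r] = (float)sum[r] * wf[i * 2 + r] * istd[0]
                        + wf[i * 2 + 4 + r];
    }
}

Note that the vld2_s16 de-interleaving cancels out: even data lanes are multiplied with even weight lanes and odd with odd, so each accumulator still holds an ordinary dot product over eight consecutive elements.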
void computeNetwork0_i16_neon(const float *inputf, const float *weightsf, uint8_t *d) {
    const int16_t *input = (const int16_t *)inputf;
    const int16_t *weights = (const int16_t *)weightsf;
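    /* sign_bits_f, sign_bits_f_zero_l, ones_f, zeroes_f and reciprocal() are
       defined elsewhere in the original file: presumably |x| masks (with the
       low lanes zeroed in the _zero_l variant), 1.0f / 0.0f vectors, and a
       Newton-Raphson refinement of vrecpeq_f32. */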

    int32x4_t accum0 = { 0, 0, 0, 0 };
    int32x4_t accum1 = accum0;
    int32x4_t accum2 = accum0;
    int32x4_t accum3 = accum0;

    for (int i = 0; i < 96/2; i += 8) {
        int16x4x2_t d0 = vld2_s16(input + i);

        int16x4x2_t w0 = vld2_s16(weights + i * 4);
        int16x4x2_t w1 = vld2_s16(weights + i * 4 + 8);
        int16x4x2_t w2 = vld2_s16(weights + i * 4 + 16);
        int16x4x2_t w3 = vld2_s16(weights + i * 4 + 24);

        accum0 = vmlal_s16(accum0, d0.val[0], w0.val[0]);
        accum0 = vmlal_s16(accum0, d0.val[1], w0.val[1]);

        accum1 = vmlal_s16(accum1, d0.val[0], w1.val[0]);
        accum1 = vmlal_s16(accum1, d0.val[1], w1.val[1]);

        accum2 = vmlal_s16(accum2, d0.val[0], w2.val[0]);
        accum2 = vmlal_s16(accum2, d0.val[1], w2.val[1]);

        accum3 = vmlal_s16(accum3, d0.val[0], w3.val[0]);
        accum3 = vmlal_s16(accum3, d0.val[1], w3.val[1]);
    }

    int32x2_t sum0 = vpadd_s32(vget_low_s32(accum0), vget_high_s32(accum0));
    int32x2_t sum1 = vpadd_s32(vget_low_s32(accum1), vget_high_s32(accum1));
    int32x2_t sum2 = vpadd_s32(vget_low_s32(accum2), vget_high_s32(accum2));
    int32x2_t sum3 = vpadd_s32(vget_low_s32(accum3), vget_high_s32(accum3));
    sum0 = vpadd_s32(sum0, sum1);
    sum1 = vpadd_s32(sum2, sum3);
    int32x4_t sum = vcombine_s32(sum0, sum1);

    float32x4_t m0 = vcvtq_f32_s32(sum);

    m0 = vmulq_f32(m0, vld1q_f32(weightsf + 384/4));  /* float offsets are written as byte offset / sizeof(float) */
    m0 = vaddq_f32(m0, vld1q_f32(weightsf + 400/4));

    float32x4_t m1, m2, m3, m4, m5, m6, m7;

    m1 = m0;

    m0 = vreinterpretq_f32_u32(vandq_u32(vreinterpretq_u32_f32(m0), sign_bits_f_zero_l));
    m0 = vaddq_f32(m0, ones_f);
    m0 = vmulq_f32(reciprocal(m0), m1);

    m1 = vdupq_lane_f32(vget_low_f32(m0), 0);
    m2 = vdupq_lane_f32(vget_low_f32(m0), 1);
    m3 = vdupq_lane_f32(vget_high_f32(m0), 0);
    m4 = vdupq_lane_f32(vget_high_f32(m0), 1);

    m1 = vmulq_f32(m1, vld1q_f32(weightsf + 416/4));
    m2 = vmulq_f32(m2, vld1q_f32(weightsf + (416+16)/4));
    m3 = vmulq_f32(m3, vld1q_f32(weightsf + (416+32)/4));
    m4 = vmulq_f32(m4, vld1q_f32(weightsf + (416+48)/4));

    m1 = vaddq_f32(m1, m2);
    m3 = vaddq_f32(m3, m4);
    m1 = vaddq_f32(m1, m3);
    m1 = vaddq_f32(m1, vld1q_f32(weightsf + (416+64)/4));

    m7 = m1;
    m1 = vreinterpretq_f32_u32(vandq_u32(vreinterpretq_u32_f32(m1), sign_bits_f));
    m1 = vaddq_f32(m1, ones_f);
    m7 = vmulq_f32(reciprocal(m1), m7);

    m3 = m0;

    m0 = vdupq_lane_f32(vget_low_f32(m0), 0);
    m1 = vdupq_lane_f32(vget_low_f32(m3), 1);
    m2 = vdupq_lane_f32(vget_high_f32(m3), 0);
    m3 = vdupq_lane_f32(vget_high_f32(m3), 1);

    m0 = vmulq_f32(m0, vld1q_f32(weightsf + 496/4));
    m1 = vmulq_f32(m1, vld1q_f32(weightsf + (496+16)/4));
    m2 = vmulq_f32(m2, vld1q_f32(weightsf + (496+32)/4));
    m3 = vmulq_f32(m3, vld1q_f32(weightsf + (496+48)/4));

    m4 = vdupq_lane_f32(vget_low_f32(m7), 0);
    m5 = vdupq_lane_f32(vget_low_f32(m7), 1);
    m6 = vdupq_lane_f32(vget_high_f32(m7), 0);
    m7 = vdupq_lane_f32(vget_high_f32(m7), 1);

    m4 = vmulq_f32(m4, vld1q_f32(weightsf + (496+64)/4));
    m5 = vmulq_f32(m5, vld1q_f32(weightsf + (496+80)/4));
    m6 = vmulq_f32(m6, vld1q_f32(weightsf + (496+96)/4));
    m7 = vmulq_f32(m7, vld1q_f32(weightsf + (496+112)/4));

    m0 = vaddq_f32(m0, m1);
    m2 = vaddq_f32(m2, m3);
    m4 = vaddq_f32(m4, m5);
    m6 = vaddq_f32(m6, m7);

    m0 = vaddq_f32(m0, m2);
    m4 = vaddq_f32(m4, m6);
    m0 = vaddq_f32(m0, m4);

    m0 = vaddq_f32(m0, vld1q_f32(weightsf + (496+128)/4));

    float32x2_t maximum = vmax_f32(vget_low_f32(m0), vget_high_f32(m0));
    d[0] = (vget_lane_f32(maximum, 1) <= vget_lane_f32(maximum, 0));
}
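The mask, add-one, reciprocal-multiply sequence used twice above is the usual bit-trick for nnedi3's "elliott" activation: ANDing with an all-but-sign-bit mask (0x7FFFFFFF) yields |x|, so each lane becomes x / (1 + |x|), while lanes whose mask word is zero (as in sign_bits_f_zero_l) pass through unchanged, since |x| is replaced by 0. A scalar sketch under those assumptions about the constants:

#include <math.h>

static inline float elliott(float x)  /* illustrative name */
{
    return x / (1.0f + fabsf(x));
}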
void computeNetwork0new_neon(const float *dataf, const float *weightsf, uint8_t *d) {
    const int16_t *data = (const int16_t *)dataf;
    const int16_t *weights = (const int16_t *)weightsf;

    int32x4_t accum0 = { 0, 0, 0, 0 };
    int32x4_t accum1 = accum0;
    int32x4_t accum2 = accum0;
    int32x4_t accum3 = accum0;

    for (int i = 0; i < 128/2; i += 8) {
        int16x4x2_t d0 = vld2_s16(data + i);

        int16x4x2_t w0 = vld2_s16(weights + i * 4);
        int16x4x2_t w1 = vld2_s16(weights + i * 4 + 8);
        int16x4x2_t w2 = vld2_s16(weights + i * 4 + 16);
        int16x4x2_t w3 = vld2_s16(weights + i * 4 + 24);

        accum0 = vmlal_s16(accum0, d0.val[0], w0.val[0]);
        accum0 = vmlal_s16(accum0, d0.val[1], w0.val[1]);

        accum1 = vmlal_s16(accum1, d0.val[0], w1.val[0]);
        accum1 = vmlal_s16(accum1, d0.val[1], w1.val[1]);

        accum2 = vmlal_s16(accum2, d0.val[0], w2.val[0]);
        accum2 = vmlal_s16(accum2, d0.val[1], w2.val[1]);

        accum3 = vmlal_s16(accum3, d0.val[0], w3.val[0]);
        accum3 = vmlal_s16(accum3, d0.val[1], w3.val[1]);
    }

    int32x2_t sum0 = vpadd_s32(vget_low_s32(accum0), vget_high_s32(accum0));
    int32x2_t sum1 = vpadd_s32(vget_low_s32(accum1), vget_high_s32(accum1));
    int32x2_t sum2 = vpadd_s32(vget_low_s32(accum2), vget_high_s32(accum2));
    int32x2_t sum3 = vpadd_s32(vget_low_s32(accum3), vget_high_s32(accum3));
    sum0 = vpadd_s32(sum0, sum1);
    sum1 = vpadd_s32(sum2, sum3);
    int32x4_t sum = vcombine_s32(sum0, sum1);

    float32x4_t m0 = vcvtq_f32_s32(sum);

    m0 = vmulq_f32(m0, vld1q_f32(weightsf + 512/4));
    m0 = vaddq_f32(m0, vld1q_f32(weightsf + 528/4));

    float32x4_t m1, m2, m3, m4;

    m1 = m0;

    m0 = vreinterpretq_f32_u32(vandq_u32(vreinterpretq_u32_f32(m0), sign_bits_f));
    m0 = vaddq_f32(m0, ones_f);
    m0 = vmulq_f32(reciprocal(m0), m1);

    m1 = vdupq_lane_f32(vget_low_f32(m0), 0);
    m2 = vdupq_lane_f32(vget_low_f32(m0), 1);
    m3 = vdupq_lane_f32(vget_high_f32(m0), 0);
    m4 = vdupq_lane_f32(vget_high_f32(m0), 1);

    m1 = vmulq_f32(m1, vld1q_f32(weightsf + 544/4));
    m2 = vmulq_f32(m2, vld1q_f32(weightsf + 560/4));
    m3 = vmulq_f32(m3, vld1q_f32(weightsf + 576/4));
    m4 = vmulq_f32(m4, vld1q_f32(weightsf + 592/4));

    m1 = vaddq_f32(m1, m2);
    m3 = vaddq_f32(m3, m4);
    m1 = vaddq_f32(m1, m3);
    m1 = vaddq_f32(m1, vld1q_f32(weightsf + 608/4));

    uint32x4_t gte = vcgeq_f32(m1, zeroes_f);  /* all-ones per lane where m1 >= 0 */
    uint16x4_t gte_u16 = vmovn_u32(gte);
    /* the high half of the combine is junk padding; only four bytes are stored below */
    uint8x8_t gte_u8 = vmovn_u16(vcombine_u16(gte_u16, vget_low_u16(vreinterpretq_u16_u32(sign_bits_f))));
    gte_u8 = vshr_n_u8(gte_u8, 7);  /* 0xFF -> 1, 0x00 -> 0 */
    vst1_lane_u32((uint32_t *)d, vreinterpret_u32_u8(gte_u8), 0);
}
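A scalar reading of that final compare-narrow-shift sequence (illustrative helper, not from the original source):

#include <stdint.h>

/* each of the four result lanes becomes one 0/1 byte in d */
static void pack_ge_zero_ref(const float m1s[4], uint8_t *d)
{
    for (int k = 0; k < 4; k++)
        d[k] = (m1s[k] >= 0.0f) ? 1 : 0;
}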
Example #6
void phase(const Size2D &size,
           const s16 * src0Base, ptrdiff_t src0Stride,
           const s16 * src1Base, ptrdiff_t src1Stride,
           u8 * dstBase, ptrdiff_t dstStride)
{
    internal::assertSupportedConfiguration();
#ifdef CAROTENE_NEON
    FASTATAN2CONST(256.0f / 360.0f)
    size_t roiw16 = size.width >= 15 ? size.width - 15 : 0;
    size_t roiw8 = size.width >= 7 ? size.width - 7 : 0;

    float32x4_t v_05 = vdupq_n_f32(0.5f);

    for (size_t i = 0; i < size.height; ++i)
    {
        const s16 * src0 = internal::getRowPtr(src0Base, src0Stride, i);
        const s16 * src1 = internal::getRowPtr(src1Base, src1Stride, i);
        u8 * dst = internal::getRowPtr(dstBase, dstStride, i);
        size_t j = 0;

        for (; j < roiw16; j += 16)
        {
            internal::prefetch(src0 + j);
            internal::prefetch(src1 + j);

            int16x8_t v_src00 = vld1q_s16(src0 + j), v_src01 = vld1q_s16(src0 + j + 8);
            int16x8_t v_src10 = vld1q_s16(src1 + j), v_src11 = vld1q_s16(src1 + j + 8);

            // 0
            float32x4_t v_src0_p = vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src00)));
            float32x4_t v_src1_p = vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src10)));
            float32x4_t v_dst32f0;
            FASTATAN2VECTOR(v_src1_p, v_src0_p, v_dst32f0)

            v_src0_p = vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src00)));
            v_src1_p = vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src10)));
            float32x4_t v_dst32f1;
            FASTATAN2VECTOR(v_src1_p, v_src0_p, v_dst32f1)

            uint16x8_t v_dst16s0 = vcombine_u16(vmovn_u32(vcvtq_u32_f32(vaddq_f32(v_dst32f0, v_05))),
                                                vmovn_u32(vcvtq_u32_f32(vaddq_f32(v_dst32f1, v_05))));

            // 1
            v_src0_p = vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src01)));
            v_src1_p = vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src11)));
            FASTATAN2VECTOR(v_src1_p, v_src0_p, v_dst32f0)

            v_src0_p = vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src01)));
            v_src1_p = vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src11)));
            FASTATAN2VECTOR(v_src1_p, v_src0_p, v_dst32f1)

            uint16x8_t v_dst16s1 = vcombine_u16(vmovn_u32(vcvtq_u32_f32(vaddq_f32(v_dst32f0, v_05))),
                                                vmovn_u32(vcvtq_u32_f32(vaddq_f32(v_dst32f1, v_05))));

            vst1q_u8(dst + j, vcombine_u8(vmovn_u16(v_dst16s0),
                                          vmovn_u16(v_dst16s1)));
        }
        for (; j < roiw8; j += 8)
        {
            int16x8_t v_src0 = vld1q_s16(src0 + j);
            int16x8_t v_src1 = vld1q_s16(src1 + j);

            float32x4_t v_src0_p = vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src0)));
            float32x4_t v_src1_p = vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src1)));
            float32x4_t v_dst32f0;
            FASTATAN2VECTOR(v_src1_p, v_src0_p, v_dst32f0)

            v_src0_p = vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src0)));
            v_src1_p = vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src1)));
            float32x4_t v_dst32f1;
            FASTATAN2VECTOR(v_src1_p, v_src0_p, v_dst32f1)

            uint16x8_t v_dst = vcombine_u16(vmovn_u32(vcvtq_u32_f32(vaddq_f32(v_dst32f0, v_05))),
                                            vmovn_u32(vcvtq_u32_f32(vaddq_f32(v_dst32f1, v_05))));

            vst1_u8(dst + j, vmovn_u16(v_dst));
        }

        for (; j < size.width; j++)
        {
            f32 x = src0[j], y = src1[j];
            f32 a;
            FASTATAN2SCALAR(y, x, a)
            dst[j] = (u8)(s32)floor(a + 0.5f);
        }
    }
#else
    (void)size;
    (void)src0Base;
    (void)src0Stride;
    (void)src1Base;
    (void)src1Stride;
    (void)dstBase;
    (void)dstStride;
#endif
}
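One detail worth calling out: vcvtq_u32_f32 truncates toward zero, so adding v_05 beforehand gives round-to-nearest for the non-negative angle values, matching the scalar tail's floor(a + 0.5f). A one-line scalar equivalent of the vector round-and-narrow (the scaled angles lie in [0, 256)):

#include <stdint.h>

static inline uint8_t round_angle_to_u8(float a)
{
    return (uint8_t)(uint32_t)(a + 0.5f);  /* truncation after +0.5 rounds to nearest */
}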
static float32x4_t vpowq_f32(float32x4_t a, float32x4_t b) {
  // a^b = exp2(b * log2(a))
  //   exp2(x) and log2(x) are calculated using polynomial approximations.
  float32x4_t log2_a, b_log2_a, a_exp_b;

  // Calculate log2(x), x = a.
  {
    // To calculate log2(x), we decompose x like this:
    //   x = y * 2^n
    //     n is an integer
    //     y is in the [1.0, 2.0) range
    //
    //   log2(x) = log2(y) + n
    //     n       can be evaluated by playing with float representation.
    //     log2(y) in a small range can be approximated, this code uses an order
    //             five polynomial approximation. The coefficients have been
    //             estimated with the Remez algorithm and the resulting
    //             polynomial has a maximum relative error of 0.00086%.

    // Compute n.
    //    This is done by masking the exponent, shifting it into the top bits
    //    of the mantissa, putting eight into the biased exponent (to
    //    compensate for the exponent having been shifted into the top,
    //    fractional part), and finally getting rid of the implicit leading
    //    one from the mantissa by subtracting it out.
    const uint32x4_t vec_float_exponent_mask = vdupq_n_u32(0x7F800000);
    const uint32x4_t vec_eight_biased_exponent = vdupq_n_u32(0x43800000);
    const uint32x4_t vec_implicit_leading_one = vdupq_n_u32(0x43BF8000);
    const uint32x4_t two_n = vandq_u32(vreinterpretq_u32_f32(a),
                                       vec_float_exponent_mask);
    const uint32x4_t n_1 = vshrq_n_u32(two_n, kShiftExponentIntoTopMantissa);
    const uint32x4_t n_0 = vorrq_u32(n_1, vec_eight_biased_exponent);
    const float32x4_t n =
        vsubq_f32(vreinterpretq_f32_u32(n_0),
                  vreinterpretq_f32_u32(vec_implicit_leading_one));
    // Compute y.
    const uint32x4_t vec_mantissa_mask = vdupq_n_u32(0x007FFFFF);
    const uint32x4_t vec_zero_biased_exponent_is_one = vdupq_n_u32(0x3F800000);
    const uint32x4_t mantissa = vandq_u32(vreinterpretq_u32_f32(a),
                                          vec_mantissa_mask);
    const float32x4_t y =
        vreinterpretq_f32_u32(vorrq_u32(mantissa,
                                        vec_zero_biased_exponent_is_one));
    // Approximate log2(y) ~= (y - 1) * pol5(y).
    //    pol5(y) = C5 * y^5 + C4 * y^4 + C3 * y^3 + C2 * y^2 + C1 * y + C0
    const float32x4_t C5 = vdupq_n_f32(-3.4436006e-2f);
    const float32x4_t C4 = vdupq_n_f32(3.1821337e-1f);
    const float32x4_t C3 = vdupq_n_f32(-1.2315303f);
    const float32x4_t C2 = vdupq_n_f32(2.5988452f);
    const float32x4_t C1 = vdupq_n_f32(-3.3241990f);
    const float32x4_t C0 = vdupq_n_f32(3.1157899f);
    float32x4_t pol5_y = C5;
    pol5_y = vmlaq_f32(C4, y, pol5_y);
    pol5_y = vmlaq_f32(C3, y, pol5_y);
    pol5_y = vmlaq_f32(C2, y, pol5_y);
    pol5_y = vmlaq_f32(C1, y, pol5_y);
    pol5_y = vmlaq_f32(C0, y, pol5_y);
    const float32x4_t y_minus_one =
        vsubq_f32(y, vreinterpretq_f32_u32(vec_zero_biased_exponent_is_one));
    const float32x4_t log2_y = vmulq_f32(y_minus_one, pol5_y);

    // Combine parts.
    log2_a = vaddq_f32(n, log2_y);
  }

  // b * log2(a)
  b_log2_a = vmulq_f32(b, log2_a);

  // Calculate exp2(x), x = b * log2(a).
  {
    // To calculate 2^x, we decompose x like this:
    //   x = n + y
    //     n is an integer, the value of x - 0.5 rounded down, therefore
    //     y is in the [0.5, 1.5) range
    //
    //   2^x = 2^n * 2^y
    //     2^n can be evaluated by playing with float representation.
    //     2^y in a small range can be approximated, this code uses an order two
    //         polynomial approximation. The coefficients have been estimated
    //         with the Remez algorithm and the resulting polynomial has a
    //         maximum relative error of 0.17%.
    // To avoid over/underflow, we reduce the range of the input to (-127, 129].
    const float32x4_t max_input = vdupq_n_f32(129.f);
    const float32x4_t min_input = vdupq_n_f32(-126.99999f);
    const float32x4_t x_min = vminq_f32(b_log2_a, max_input);
    const float32x4_t x_max = vmaxq_f32(x_min, min_input);
    // Compute n.
    const float32x4_t half = vdupq_n_f32(0.5f);
    const float32x4_t x_minus_half = vsubq_f32(x_max, half);
    const int32x4_t x_minus_half_floor = vcvtq_s32_f32(x_minus_half);

    // Compute 2^n.
    const int32x4_t float_exponent_bias = vdupq_n_s32(127);
    const int32x4_t two_n_exponent =
        vaddq_s32(x_minus_half_floor, float_exponent_bias);
    const float32x4_t two_n =
        vreinterpretq_f32_s32(vshlq_n_s32(two_n_exponent, kFloatExponentShift));
    // Compute y.
    const float32x4_t y = vsubq_f32(x_max, vcvtq_f32_s32(x_minus_half_floor));

    // Approximate 2^y ~= C2 * y^2 + C1 * y + C0.
    const float32x4_t C2 = vdupq_n_f32(3.3718944e-1f);
    const float32x4_t C1 = vdupq_n_f32(6.5763628e-1f);
    const float32x4_t C0 = vdupq_n_f32(1.0017247f);
    float32x4_t exp2_y = C2;
    exp2_y = vmlaq_f32(C1, y, exp2_y);
    exp2_y = vmlaq_f32(C0, y, exp2_y);

    // Combine parts.
    a_exp_b = vmulq_f32(exp2_y, two_n);
  }

  return a_exp_b;
}
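A minimal test harness (not from the original source) comparing the approximation against libm. It assumes a > 0, as the log2-based method requires, and that the file's kShiftExponentIntoTopMantissa and kFloatExponentShift constants are 8 and 23 respectively (the IEEE-754 binary32 values):

#include <arm_neon.h>
#include <math.h>
#include <stdio.h>

int main(void)
{
    const float a_s[4] = { 0.5f, 2.0f, 10.0f, 3.14159f };
    const float b_s[4] = { 2.0f, -1.5f, 0.3f, 2.71828f };
    float out[4];
    vst1q_f32(out, vpowq_f32(vld1q_f32(a_s), vld1q_f32(b_s)));
    for (int k = 0; k < 4; k++)
        printf("%g^%g: approx %g, libm %g\n",
               a_s[k], b_s[k], out[k], powf(a_s[k], b_s[k]));
    return 0;
}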
Example #8
inline v_float32x4 v_cvt_f32(const v_int32x4& a)
{
    return v_float32x4(vcvtq_f32_s32(a.val));
}
Example #9
inline v_int32x4 v_ceil(const v_float32x4& a)
{
    int32x4_t a1 = vcvtq_s32_f32(a.val);
    uint32x4_t mask = vcgtq_f32(a.val, vcvtq_f32_s32(a1));
    return v_int32x4(vsubq_s32(a1, vreinterpretq_s32_u32(mask)));
}
Example #10
inline v_int32x4 v_floor(const v_float32x4& a)
{
    int32x4_t a1 = vcvtq_s32_f32(a.val);
    uint32x4_t mask = vcgtq_f32(vcvtq_f32_s32(a1), a.val);
    return v_int32x4(vaddq_s32(a1, vreinterpretq_s32_u32(mask)));
}
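Both helpers lean on the fact that vcvtq_s32_f32 truncates toward zero: the comparison mask is all-ones (-1 as a signed integer) exactly in the lanes where truncation went the wrong way, and the subtract/add nudges those lanes by one. Scalar equivalents (illustrative names):

static inline int floor_ref(float x)
{
    int t = (int)x;              /* truncation toward zero */
    return t - ((float)t > x);   /* step down if we truncated upward */
}

static inline int ceil_ref(float x)
{
    int t = (int)x;
    return t + ((float)t < x);   /* step up if we truncated downward */
}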