void byte2float48_neon(const uint8_t *t, const int pitch, float *p) {
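    // Convert a 4x12 block of bytes (rows spaced 2*pitch apart) into 48
    // consecutive floats: widen u8 -> u16 -> u32, then convert to f32.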
    uint16x8_t m0, m1, m2, m3, m4, m5;
    // Zero-initialize: the lane loads below only fill one 32-bit lane at a time.
    uint32x2_t temp1 = vdup_n_u32(0), temp4 = vdup_n_u32(0);

    m0 = vmovl_u8(vld1_u8(t));
    temp1 = vld1_lane_u32((const uint32_t *)(t + 8), temp1, 0);
    temp1 = vld1_lane_u32((const uint32_t *)(t + pitch * 2), temp1, 1);
    m1 = vmovl_u8(vreinterpret_u8_u32(temp1));
    m2 = vmovl_u8(vld1_u8(t + pitch * 2 + 4));

    t += pitch * 4;

    m3 = vmovl_u8(vld1_u8(t));
    temp4 = vld1_lane_u32((const uint32_t *)(t + 8), temp4, 0);
    temp4 = vld1_lane_u32((const uint32_t *)(t + pitch * 2), temp4, 1);
    m4 = vmovl_u8(vreinterpret_u8_u32(temp4));
    m5 = vmovl_u8(vld1_u8(t + pitch * 2 + 4));

    vst1q_f32(p, vcvtq_f32_u32(vmovl_u16(vget_low_u16(m0))));
    vst1q_f32(p + 4, vcvtq_f32_u32(vmovl_u16(vget_high_u16(m0))));
    vst1q_f32(p + 8, vcvtq_f32_u32(vmovl_u16(vget_low_u16(m1))));
    vst1q_f32(p + 12, vcvtq_f32_u32(vmovl_u16(vget_high_u16(m1))));
    vst1q_f32(p + 16, vcvtq_f32_u32(vmovl_u16(vget_low_u16(m2))));
    vst1q_f32(p + 20, vcvtq_f32_u32(vmovl_u16(vget_high_u16(m2))));
    vst1q_f32(p + 24, vcvtq_f32_u32(vmovl_u16(vget_low_u16(m3))));
    vst1q_f32(p + 28, vcvtq_f32_u32(vmovl_u16(vget_high_u16(m3))));
    vst1q_f32(p + 32, vcvtq_f32_u32(vmovl_u16(vget_low_u16(m4))));
    vst1q_f32(p + 36, vcvtq_f32_u32(vmovl_u16(vget_high_u16(m4))));
    vst1q_f32(p + 40, vcvtq_f32_u32(vmovl_u16(vget_low_u16(m5))));
    vst1q_f32(p + 44, vcvtq_f32_u32(vmovl_u16(vget_high_u16(m5))));
}
Example 2
void vpx_idct4x4_1_add_neon(
        int16_t *input,
        uint8_t *dest,
        int dest_stride) {
    uint8x8_t d6u8;
    uint32x2_t d2u32 = vdup_n_u32(0);
    uint16x8_t q8u16;
    int16x8_t q0s16;
    uint8_t *d1, *d2;
    int16_t i, a1, cospi_16_64 = 11585;
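    // DC-only inverse transform: input[0] is scaled by cospi_16_64 once per
    // 1-D pass, rounded, and reduced to a single value a1 that is added to
    // every pixel of the 4x4 block below.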
    int16_t out = dct_const_round_shift(input[0] * cospi_16_64);
    out = dct_const_round_shift(out * cospi_16_64);
    a1 = ROUND_POWER_OF_TWO(out, 4);

    q0s16 = vdupq_n_s16(a1);

    // dc_only_idct_add
    d1 = d2 = dest;
    for (i = 0; i < 2; i++) {
        d2u32 = vld1_lane_u32((const uint32_t *)d1, d2u32, 0);
        d1 += dest_stride;
        d2u32 = vld1_lane_u32((const uint32_t *)d1, d2u32, 1);
        d1 += dest_stride;

        q8u16 = vaddw_u8(vreinterpretq_u16_s16(q0s16),
                         vreinterpret_u8_u32(d2u32));
        d6u8 = vqmovun_s16(vreinterpretq_s16_u16(q8u16));

        vst1_lane_u32((uint32_t *)d2, vreinterpret_u32_u8(d6u8), 0);
        d2 += dest_stride;
        vst1_lane_u32((uint32_t *)d2, vreinterpret_u32_u8(d6u8), 1);
        d2 += dest_stride;
    }
    return;
}
Example 3
File: avg_neon.c Project: negge/aom
unsigned int aom_avg_4x4_neon(const uint8_t *s, int p) {
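  // Sum all 16 pixels of the 4x4 block (two 4-byte rows per 64-bit vector)
  // and return the rounded average ((sum + 8) >> 4).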
  uint16x8_t v_sum;
  uint32x2_t v_s0 = vdup_n_u32(0);
  uint32x2_t v_s1 = vdup_n_u32(0);
  v_s0 = vld1_lane_u32((const uint32_t *)s, v_s0, 0);
  v_s0 = vld1_lane_u32((const uint32_t *)(s + p), v_s0, 1);
  v_s1 = vld1_lane_u32((const uint32_t *)(s + 2 * p), v_s1, 0);
  v_s1 = vld1_lane_u32((const uint32_t *)(s + 3 * p), v_s1, 1);
  v_sum = vaddl_u8(vreinterpret_u8_u32(v_s0), vreinterpret_u8_u32(v_s1));
  return (horizontal_add_u16x8(v_sum) + 8) >> 4;
}
Example 4
static INLINE void idct4x4_1_add_kernel(uint8_t **dest, const int stride,
                                        const int16x8_t res,
                                        uint32x2_t *const d) {
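  // Add the replicated DC value in `res` to two 4-pixel rows of *dest with
  // unsigned saturation, store them back, and advance *dest by two rows.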
  uint16x8_t a;
  uint8x8_t b;
  *d = vld1_lane_u32((const uint32_t *)*dest, *d, 0);
  *d = vld1_lane_u32((const uint32_t *)(*dest + stride), *d, 1);
  a = vaddw_u8(vreinterpretq_u16_s16(res), vreinterpret_u8_u32(*d));
  b = vqmovun_s16(vreinterpretq_s16_u16(a));
  vst1_lane_u32((uint32_t *)*dest, vreinterpret_u32_u8(b), 0);
  *dest += stride;
  vst1_lane_u32((uint32_t *)*dest, vreinterpret_u32_u8(b), 1);
  *dest += stride;
}
Example 5
void test_vld1_laneu32 (void)
{
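  // Intrinsics smoke test: the null pointer and uninitialized vector appear
  // to be placeholder arguments; the snippet only exercises compilation.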
  uint32x2_t out_uint32x2_t;
  uint32x2_t arg1_uint32x2_t;

  out_uint32x2_t = vld1_lane_u32 (0, arg1_uint32x2_t, 1);
}
Example 6
void vpx_d135_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride,
                                 const uint8_t *above, const uint8_t *left) {
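  // D135 (down-right diagonal) predictor: pack the reversed left column and
  // the row above into one vector L K J I X A B C, smooth adjacent pixels
  // with a 3-tap filter, and emit each row as a shifted 4-byte window.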
  const uint8x8_t XABCD_u8 = vld1_u8(above - 1);
  const uint64x1_t XABCD = vreinterpret_u64_u8(XABCD_u8);
  const uint64x1_t ____XABC = vshl_n_u64(XABCD, 32);
  const uint32x2_t zero = vdup_n_u32(0);
  const uint32x2_t IJKL = vld1_lane_u32((const uint32_t *)left, zero, 0);
  const uint8x8_t IJKL_u8 = vreinterpret_u8_u32(IJKL);
  const uint64x1_t LKJI____ = vreinterpret_u64_u8(vrev32_u8(IJKL_u8));
  const uint64x1_t LKJIXABC = vorr_u64(LKJI____, ____XABC);
  const uint8x8_t KJIXABC_ = vreinterpret_u8_u64(vshr_n_u64(LKJIXABC, 8));
  const uint8x8_t JIXABC__ = vreinterpret_u8_u64(vshr_n_u64(LKJIXABC, 16));
  const uint8_t D = vget_lane_u8(XABCD_u8, 4);
  const uint8x8_t JIXABCD_ = vset_lane_u8(D, JIXABC__, 6);
  const uint8x8_t LKJIXABC_u8 = vreinterpret_u8_u64(LKJIXABC);
  const uint8x8_t avg1 = vhadd_u8(JIXABCD_, LKJIXABC_u8);
  const uint8x8_t avg2 = vrhadd_u8(avg1, KJIXABC_);
  const uint64x1_t avg2_u64 = vreinterpret_u64_u8(avg2);
  const uint32x2_t r3 = vreinterpret_u32_u8(avg2);
  const uint32x2_t r2 = vreinterpret_u32_u64(vshr_n_u64(avg2_u64, 8));
  const uint32x2_t r1 = vreinterpret_u32_u64(vshr_n_u64(avg2_u64, 16));
  const uint32x2_t r0 = vreinterpret_u32_u64(vshr_n_u64(avg2_u64, 24));
  vst1_lane_u32((uint32_t *)(dst + 0 * stride), r0, 0);
  vst1_lane_u32((uint32_t *)(dst + 1 * stride), r1, 0);
  vst1_lane_u32((uint32_t *)(dst + 2 * stride), r2, 0);
  vst1_lane_u32((uint32_t *)(dst + 3 * stride), r3, 0);
}
Example 7
void vp9_v_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride,
                              const uint8_t *above, const uint8_t *left) {
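  // Vertical predictor: the 4 pixels above the block are replicated into
  // each of the 4 output rows.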
  int i;
  uint32x2_t d0u32 = vdup_n_u32(0);
  (void)left;

  d0u32 = vld1_lane_u32((const uint32_t *)above, d0u32, 0);
  for (i = 0; i < 4; i++, dst += stride)
    vst1_lane_u32((uint32_t *)dst, d0u32, 0);
}
Example 8
void byte2word48_neon(const uint8_t *t, const int pitch, float *pf) {
    uint16_t *p = (uint16_t *)pf;
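    // Same 4x12 byte block layout as byte2float48_neon above, but the bytes
    // are widened only to 16 bits and stored into the buffer as uint16_t.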

    uint8x8_t m0, m2, m3, m5;
    uint8x8_t m1 = vdup_n_u8(0), m4 = vdup_n_u8(0);  // only two lanes of each are filled by the lane loads below

    m0 = vld1_u8(t);
    m1 = vreinterpret_u8_u32(vld1_lane_u32((const uint32_t *)(t + 8), vreinterpret_u32_u8(m1), 0));
    m1 = vreinterpret_u8_u32(vld1_lane_u32((const uint32_t *)(t + pitch * 2), vreinterpret_u32_u8(m1), 1));
    m2 = vld1_u8(t + pitch * 2 + 4);

    t += pitch * 4;

    m3 = vld1_u8(t);
    m4 = vreinterpret_u8_u32(vld1_lane_u32((const uint32_t *)(t + 8), vreinterpret_u32_u8(m4), 0));
    m4 = vreinterpret_u8_u32(vld1_lane_u32((const uint32_t *)(t + pitch * 2), vreinterpret_u32_u8(m4), 1));
    m5 = vld1_u8(t + pitch * 2 + 4);

    vst1q_u16(p, vmovl_u8(m0));
    vst1q_u16(p + 8, vmovl_u8(m1));
    vst1q_u16(p + 16, vmovl_u8(m2));
    vst1q_u16(p + 24, vmovl_u8(m3));
    vst1q_u16(p + 32, vmovl_u8(m4));
    vst1q_u16(p + 40, vmovl_u8(m5));
}
Example 9
void vp9_tm_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride,
                               const uint8_t *above, const uint8_t *left) {
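  // TM (true-motion) predictor: each output pixel is the saturating value of
  // left[row] + (above[col] - above[-1]); q3u16 holds the per-column term.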
  int i;
  uint16x8_t q1u16, q3u16;
  int16x8_t q1s16;
  uint8x8_t d0u8 = vdup_n_u8(0);
  uint32x2_t d2u32 = vdup_n_u32(0);

  d0u8 = vld1_dup_u8(above - 1);
  d2u32 = vld1_lane_u32((const uint32_t *)above, d2u32, 0);
  q3u16 = vsubl_u8(vreinterpret_u8_u32(d2u32), d0u8);
  for (i = 0; i < 4; i++, dst += stride) {
    q1u16 = vdupq_n_u16((uint16_t)left[i]);
    q1s16 = vaddq_s16(vreinterpretq_s16_u16(q1u16),
                      vreinterpretq_s16_u16(q3u16));
    d0u8 = vqmovun_s16(q1s16);
    vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(d0u8), 0);
  }
}
Example 10
void vp9_h_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride,
                              const uint8_t *above, const uint8_t *left) {
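  // Horizontal predictor: row i of the 4x4 block is filled with left[i],
  // broadcast from the corresponding lane of d1u32.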
  uint8x8_t d0u8 = vdup_n_u8(0);
  uint32x2_t d1u32 = vdup_n_u32(0);
  (void)above;

  d1u32 = vld1_lane_u32((const uint32_t *)left, d1u32, 0);

  d0u8 = vdup_lane_u8(vreinterpret_u8_u32(d1u32), 0);
  vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(d0u8), 0);
  dst += stride;
  d0u8 = vdup_lane_u8(vreinterpret_u8_u32(d1u32), 1);
  vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(d0u8), 0);
  dst += stride;
  d0u8 = vdup_lane_u8(vreinterpret_u8_u32(d1u32), 2);
  vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(d0u8), 0);
  dst += stride;
  d0u8 = vdup_lane_u8(vreinterpret_u8_u32(d1u32), 3);
  vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(d0u8), 0);
}
Example 11
void vp8_short_idct4x4llm_neon(int16_t *input, unsigned char *pred_ptr,
                               int pred_stride, unsigned char *dst_ptr,
                               int dst_stride) {
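  // 4x4 inverse DCT for VP8: two 1-D passes (rows, then columns) with a
  // transpose in between; the result is added to the prediction and written
  // to dst two rows per loop iteration.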
  int i;
  uint32x2_t d6u32 = vdup_n_u32(0);
  uint8x8_t d1u8;
  int16x4_t d2, d3, d4, d5, d10, d11, d12, d13;
  uint16x8_t q1u16;
  int16x8_t q1s16, q2s16, q3s16, q4s16;
  int32x2x2_t v2tmp0, v2tmp1;
  int16x4x2_t v2tmp2, v2tmp3;

  d2 = vld1_s16(input);
  d3 = vld1_s16(input + 4);
  d4 = vld1_s16(input + 8);
  d5 = vld1_s16(input + 12);

  // 1st for loop
  q1s16 = vcombine_s16(d2, d4);  // Swap d3 d4 here
  q2s16 = vcombine_s16(d3, d5);

  q3s16 = vqdmulhq_n_s16(q2s16, sinpi8sqrt2);
  q4s16 = vqdmulhq_n_s16(q2s16, cospi8sqrt2minus1);

  d12 = vqadd_s16(vget_low_s16(q1s16), vget_high_s16(q1s16));  // a1
  d13 = vqsub_s16(vget_low_s16(q1s16), vget_high_s16(q1s16));  // b1

  q3s16 = vshrq_n_s16(q3s16, 1);
  q4s16 = vshrq_n_s16(q4s16, 1);

  q3s16 = vqaddq_s16(q3s16, q2s16);
  q4s16 = vqaddq_s16(q4s16, q2s16);

  d10 = vqsub_s16(vget_low_s16(q3s16), vget_high_s16(q4s16));  // c1
  d11 = vqadd_s16(vget_high_s16(q3s16), vget_low_s16(q4s16));  // d1

  d2 = vqadd_s16(d12, d11);
  d3 = vqadd_s16(d13, d10);
  d4 = vqsub_s16(d13, d10);
  d5 = vqsub_s16(d12, d11);

  v2tmp0 = vtrn_s32(vreinterpret_s32_s16(d2), vreinterpret_s32_s16(d4));
  v2tmp1 = vtrn_s32(vreinterpret_s32_s16(d3), vreinterpret_s32_s16(d5));
  v2tmp2 = vtrn_s16(vreinterpret_s16_s32(v2tmp0.val[0]),
                    vreinterpret_s16_s32(v2tmp1.val[0]));
  v2tmp3 = vtrn_s16(vreinterpret_s16_s32(v2tmp0.val[1]),
                    vreinterpret_s16_s32(v2tmp1.val[1]));

  // 2nd for loop
  q1s16 = vcombine_s16(v2tmp2.val[0], v2tmp3.val[0]);
  q2s16 = vcombine_s16(v2tmp2.val[1], v2tmp3.val[1]);

  q3s16 = vqdmulhq_n_s16(q2s16, sinpi8sqrt2);
  q4s16 = vqdmulhq_n_s16(q2s16, cospi8sqrt2minus1);

  d12 = vqadd_s16(vget_low_s16(q1s16), vget_high_s16(q1s16));  // a1
  d13 = vqsub_s16(vget_low_s16(q1s16), vget_high_s16(q1s16));  // b1

  q3s16 = vshrq_n_s16(q3s16, 1);
  q4s16 = vshrq_n_s16(q4s16, 1);

  q3s16 = vqaddq_s16(q3s16, q2s16);
  q4s16 = vqaddq_s16(q4s16, q2s16);

  d10 = vqsub_s16(vget_low_s16(q3s16), vget_high_s16(q4s16));  // c1
  d11 = vqadd_s16(vget_high_s16(q3s16), vget_low_s16(q4s16));  // d1

  d2 = vqadd_s16(d12, d11);
  d3 = vqadd_s16(d13, d10);
  d4 = vqsub_s16(d13, d10);
  d5 = vqsub_s16(d12, d11);

  d2 = vrshr_n_s16(d2, 3);
  d3 = vrshr_n_s16(d3, 3);
  d4 = vrshr_n_s16(d4, 3);
  d5 = vrshr_n_s16(d5, 3);

  v2tmp0 = vtrn_s32(vreinterpret_s32_s16(d2), vreinterpret_s32_s16(d4));
  v2tmp1 = vtrn_s32(vreinterpret_s32_s16(d3), vreinterpret_s32_s16(d5));
  v2tmp2 = vtrn_s16(vreinterpret_s16_s32(v2tmp0.val[0]),
                    vreinterpret_s16_s32(v2tmp1.val[0]));
  v2tmp3 = vtrn_s16(vreinterpret_s16_s32(v2tmp0.val[1]),
                    vreinterpret_s16_s32(v2tmp1.val[1]));

  q1s16 = vcombine_s16(v2tmp2.val[0], v2tmp2.val[1]);
  q2s16 = vcombine_s16(v2tmp3.val[0], v2tmp3.val[1]);

  // add the reconstructed 4x4 block to the prediction and store to dst
  for (i = 0; i < 2; i++, q1s16 = q2s16) {
    d6u32 = vld1_lane_u32((const uint32_t *)pred_ptr, d6u32, 0);
    pred_ptr += pred_stride;
    d6u32 = vld1_lane_u32((const uint32_t *)pred_ptr, d6u32, 1);
    pred_ptr += pred_stride;

    q1u16 = vaddw_u8(vreinterpretq_u16_s16(q1s16), vreinterpret_u8_u32(d6u32));
    d1u8 = vqmovun_s16(vreinterpretq_s16_u16(q1u16));

    vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d1u8), 0);
    dst_ptr += dst_stride;
    vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d1u8), 1);
    dst_ptr += dst_stride;
  }
  return;
}
Example 12
void aom_idct4x4_16_add_neon(int16_t *input, uint8_t *dest, int dest_stride) {
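  // Full 4x4 inverse DCT: 1-D transform on rows, transpose, 1-D transform on
  // columns, round by 4 bits, then add to the destination pixels. The last
  // two destination rows are loaded and stored with their lanes swapped.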
  uint8x8_t d26u8, d27u8;
  uint32x2_t d26u32, d27u32;
  uint16x8_t q8u16, q9u16;
  int16x4_t d16s16, d17s16, d18s16, d19s16, d20s16, d21s16;
  int16x4_t d22s16, d23s16, d24s16, d26s16, d27s16, d28s16, d29s16;
  int16x8_t q8s16, q9s16, q13s16, q14s16;
  int32x4_t q1s32, q13s32, q14s32, q15s32;
  int16x4x2_t d0x2s16, d1x2s16;
  int32x4x2_t q0x2s32;
  uint8_t *d;

  d26u32 = d27u32 = vdup_n_u32(0);

  q8s16 = vld1q_s16(input);
  q9s16 = vld1q_s16(input + 8);

  d16s16 = vget_low_s16(q8s16);
  d17s16 = vget_high_s16(q8s16);
  d18s16 = vget_low_s16(q9s16);
  d19s16 = vget_high_s16(q9s16);

  d0x2s16 = vtrn_s16(d16s16, d17s16);
  d1x2s16 = vtrn_s16(d18s16, d19s16);
  q8s16 = vcombine_s16(d0x2s16.val[0], d0x2s16.val[1]);
  q9s16 = vcombine_s16(d1x2s16.val[0], d1x2s16.val[1]);

  d20s16 = vdup_n_s16((int16_t)cospi_8_64);
  d21s16 = vdup_n_s16((int16_t)cospi_16_64);

  q0x2s32 =
      vtrnq_s32(vreinterpretq_s32_s16(q8s16), vreinterpretq_s32_s16(q9s16));
  d16s16 = vget_low_s16(vreinterpretq_s16_s32(q0x2s32.val[0]));
  d17s16 = vget_high_s16(vreinterpretq_s16_s32(q0x2s32.val[0]));
  d18s16 = vget_low_s16(vreinterpretq_s16_s32(q0x2s32.val[1]));
  d19s16 = vget_high_s16(vreinterpretq_s16_s32(q0x2s32.val[1]));

  d22s16 = vdup_n_s16((int16_t)cospi_24_64);

  // stage 1
  d23s16 = vadd_s16(d16s16, d18s16);
  d24s16 = vsub_s16(d16s16, d18s16);

  q15s32 = vmull_s16(d17s16, d22s16);
  q1s32 = vmull_s16(d17s16, d20s16);
  q13s32 = vmull_s16(d23s16, d21s16);
  q14s32 = vmull_s16(d24s16, d21s16);

  q15s32 = vmlsl_s16(q15s32, d19s16, d20s16);
  q1s32 = vmlal_s16(q1s32, d19s16, d22s16);

  d26s16 = vqrshrn_n_s32(q13s32, 14);
  d27s16 = vqrshrn_n_s32(q14s32, 14);
  d29s16 = vqrshrn_n_s32(q15s32, 14);
  d28s16 = vqrshrn_n_s32(q1s32, 14);
  q13s16 = vcombine_s16(d26s16, d27s16);
  q14s16 = vcombine_s16(d28s16, d29s16);

  // stage 2
  q8s16 = vaddq_s16(q13s16, q14s16);
  q9s16 = vsubq_s16(q13s16, q14s16);

  d16s16 = vget_low_s16(q8s16);
  d17s16 = vget_high_s16(q8s16);
  d18s16 = vget_high_s16(q9s16);  // vswp d18 d19
  d19s16 = vget_low_s16(q9s16);

  d0x2s16 = vtrn_s16(d16s16, d17s16);
  d1x2s16 = vtrn_s16(d18s16, d19s16);
  q8s16 = vcombine_s16(d0x2s16.val[0], d0x2s16.val[1]);
  q9s16 = vcombine_s16(d1x2s16.val[0], d1x2s16.val[1]);

  q0x2s32 =
      vtrnq_s32(vreinterpretq_s32_s16(q8s16), vreinterpretq_s32_s16(q9s16));
  d16s16 = vget_low_s16(vreinterpretq_s16_s32(q0x2s32.val[0]));
  d17s16 = vget_high_s16(vreinterpretq_s16_s32(q0x2s32.val[0]));
  d18s16 = vget_low_s16(vreinterpretq_s16_s32(q0x2s32.val[1]));
  d19s16 = vget_high_s16(vreinterpretq_s16_s32(q0x2s32.val[1]));

  // do the transform on columns
  // stage 1
  d23s16 = vadd_s16(d16s16, d18s16);
  d24s16 = vsub_s16(d16s16, d18s16);

  q15s32 = vmull_s16(d17s16, d22s16);
  q1s32 = vmull_s16(d17s16, d20s16);
  q13s32 = vmull_s16(d23s16, d21s16);
  q14s32 = vmull_s16(d24s16, d21s16);

  q15s32 = vmlsl_s16(q15s32, d19s16, d20s16);
  q1s32 = vmlal_s16(q1s32, d19s16, d22s16);

  d26s16 = vqrshrn_n_s32(q13s32, 14);
  d27s16 = vqrshrn_n_s32(q14s32, 14);
  d29s16 = vqrshrn_n_s32(q15s32, 14);
  d28s16 = vqrshrn_n_s32(q1s32, 14);
  q13s16 = vcombine_s16(d26s16, d27s16);
  q14s16 = vcombine_s16(d28s16, d29s16);

  // stage 2
  q8s16 = vaddq_s16(q13s16, q14s16);
  q9s16 = vsubq_s16(q13s16, q14s16);

  q8s16 = vrshrq_n_s16(q8s16, 4);
  q9s16 = vrshrq_n_s16(q9s16, 4);

  d = dest;
  d26u32 = vld1_lane_u32((const uint32_t *)d, d26u32, 0);
  d += dest_stride;
  d26u32 = vld1_lane_u32((const uint32_t *)d, d26u32, 1);
  d += dest_stride;
  d27u32 = vld1_lane_u32((const uint32_t *)d, d27u32, 1);
  d += dest_stride;
  d27u32 = vld1_lane_u32((const uint32_t *)d, d27u32, 0);

  q8u16 = vaddw_u8(vreinterpretq_u16_s16(q8s16), vreinterpret_u8_u32(d26u32));
  q9u16 = vaddw_u8(vreinterpretq_u16_s16(q9s16), vreinterpret_u8_u32(d27u32));

  d26u8 = vqmovun_s16(vreinterpretq_s16_u16(q8u16));
  d27u8 = vqmovun_s16(vreinterpretq_s16_u16(q9u16));

  d = dest;
  vst1_lane_u32((uint32_t *)d, vreinterpret_u32_u8(d26u8), 0);
  d += dest_stride;
  vst1_lane_u32((uint32_t *)d, vreinterpret_u32_u8(d26u8), 1);
  d += dest_stride;
  vst1_lane_u32((uint32_t *)d, vreinterpret_u32_u8(d27u8), 1);
  d += dest_stride;
  vst1_lane_u32((uint32_t *)d, vreinterpret_u32_u8(d27u8), 0);
  return;
}
Example 13
void vp10_iht4x4_16_add_neon(const tran_low_t *input, uint8_t *dest,
                             int dest_stride, int tx_type) {
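  // Hybrid 4x4 inverse transform: tx_type selects IDCT or IADST for the row
  // and column passes; the result is rounded, added to dest, and stored
  // starting from the last row (dest already points there after the loads).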
  uint8x8_t d26u8, d27u8;
  int16x4_t d0s16, d1s16, d2s16, d3s16, d4s16, d5s16;
  uint32x2_t d26u32, d27u32;
  int16x8_t q3s16, q8s16, q9s16;
  uint16x8_t q8u16, q9u16;

  d26u32 = d27u32 = vdup_n_u32(0);

  q8s16 = vld1q_s16(input);
  q9s16 = vld1q_s16(input + 8);

  TRANSPOSE4X4(&q8s16, &q9s16);

  switch (tx_type) {
    case 0:  // idct_idct is not supported. Fall back to C
      vp10_iht4x4_16_add_c(input, dest, dest_stride, tx_type);
      return;
      break;
    case 1:  // iadst_idct
      // generate constants
      GENERATE_COSINE_CONSTANTS(&d0s16, &d1s16, &d2s16);
      GENERATE_SINE_CONSTANTS(&d3s16, &d4s16, &d5s16, &q3s16);

      // first transform rows
      IDCT4x4_1D(&d0s16, &d1s16, &d2s16, &q8s16, &q9s16);

      // transpose the matrix
      TRANSPOSE4X4(&q8s16, &q9s16);

      // then transform columns
      IADST4x4_1D(&d3s16, &d4s16, &d5s16, &q3s16, &q8s16, &q9s16);
      break;
    case 2:  // idct_iadst
      // generate constants
      GENERATE_COSINE_CONSTANTS(&d0s16, &d1s16, &d2s16);
      GENERATE_SINE_CONSTANTS(&d3s16, &d4s16, &d5s16, &q3s16);

      // first transform rows
      IADST4x4_1D(&d3s16, &d4s16, &d5s16, &q3s16, &q8s16, &q9s16);

      // transpose the matrix
      TRANSPOSE4X4(&q8s16, &q9s16);

      // then transform columns
      IDCT4x4_1D(&d0s16, &d1s16, &d2s16, &q8s16, &q9s16);
      break;
    case 3:  // iadst_iadst
      // generate constants
      GENERATE_SINE_CONSTANTS(&d3s16, &d4s16, &d5s16, &q3s16);

      // first transform rows
      IADST4x4_1D(&d3s16, &d4s16, &d5s16, &q3s16, &q8s16, &q9s16);

      // transpose the matrix
      TRANSPOSE4X4(&q8s16, &q9s16);

      // then transform columns
      IADST4x4_1D(&d3s16, &d4s16, &d5s16, &q3s16, &q8s16, &q9s16);
      break;
    default:  // iadst_idct
      assert(0);
      break;
  }

  q8s16 = vrshrq_n_s16(q8s16, 4);
  q9s16 = vrshrq_n_s16(q9s16, 4);

  d26u32 = vld1_lane_u32((const uint32_t *)dest, d26u32, 0);
  dest += dest_stride;
  d26u32 = vld1_lane_u32((const uint32_t *)dest, d26u32, 1);
  dest += dest_stride;
  d27u32 = vld1_lane_u32((const uint32_t *)dest, d27u32, 0);
  dest += dest_stride;
  d27u32 = vld1_lane_u32((const uint32_t *)dest, d27u32, 1);

  q8u16 = vaddw_u8(vreinterpretq_u16_s16(q8s16), vreinterpret_u8_u32(d26u32));
  q9u16 = vaddw_u8(vreinterpretq_u16_s16(q9s16), vreinterpret_u8_u32(d27u32));

  d26u8 = vqmovun_s16(vreinterpretq_s16_u16(q8u16));
  d27u8 = vqmovun_s16(vreinterpretq_s16_u16(q9u16));

  vst1_lane_u32((uint32_t *)dest, vreinterpret_u32_u8(d27u8), 1);
  dest -= dest_stride;
  vst1_lane_u32((uint32_t *)dest, vreinterpret_u32_u8(d27u8), 0);
  dest -= dest_stride;
  vst1_lane_u32((uint32_t *)dest, vreinterpret_u32_u8(d26u8), 1);
  dest -= dest_stride;
  vst1_lane_u32((uint32_t *)dest, vreinterpret_u32_u8(d26u8), 0);
  return;
}
Example 14
void aom_convolve8_vert_neon(const uint8_t *src, ptrdiff_t src_stride,
                             uint8_t *dst, ptrdiff_t dst_stride,
                             const int16_t *filter_x,  // unused
                             int x_step_q4,            // unused
                             const int16_t *filter_y, int y_step_q4, int w,
                             int h) {
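  // 8-tap vertical filter processed in 4-pixel-wide column strips: seven
  // rows are preloaded per strip, then each inner iteration loads four more
  // rows, applies the filter with MULTIPLY_BY_Q0, rounds and narrows to
  // bytes, and writes four output rows.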
  int height;
  const uint8_t *s;
  uint8_t *d;
  uint32x2_t d2u32, d3u32;
  // Zero-initialize the vectors that are filled one lane at a time below.
  uint32x2_t d16u32 = vdup_n_u32(0), d18u32 = vdup_n_u32(0), d20u32 = vdup_n_u32(0);
  uint32x2_t d22u32 = vdup_n_u32(0), d24u32 = vdup_n_u32(0), d26u32 = vdup_n_u32(0);
  int16x4_t d16s16, d17s16, d18s16, d19s16, d20s16, d21s16, d22s16;
  int16x4_t d24s16, d25s16, d26s16, d27s16;
  uint16x4_t d2u16, d3u16, d4u16, d5u16;
  int16x8_t q0s16;
  uint16x8_t q1u16, q2u16, q8u16, q9u16, q10u16, q11u16, q12u16, q13u16;
  int32x4_t q1s32, q2s32, q14s32, q15s32;

  assert(y_step_q4 == 16);

  (void)x_step_q4;
  (void)y_step_q4;
  (void)filter_x;

  src -= src_stride * 3;
  q0s16 = vld1q_s16(filter_y);
  for (; w > 0; w -= 4, src += 4, dst += 4) {  // loop_vert_h
    s = src;
    d16u32 = vld1_lane_u32((const uint32_t *)s, d16u32, 0);
    s += src_stride;
    d16u32 = vld1_lane_u32((const uint32_t *)s, d16u32, 1);
    s += src_stride;
    d18u32 = vld1_lane_u32((const uint32_t *)s, d18u32, 0);
    s += src_stride;
    d18u32 = vld1_lane_u32((const uint32_t *)s, d18u32, 1);
    s += src_stride;
    d20u32 = vld1_lane_u32((const uint32_t *)s, d20u32, 0);
    s += src_stride;
    d20u32 = vld1_lane_u32((const uint32_t *)s, d20u32, 1);
    s += src_stride;
    d22u32 = vld1_lane_u32((const uint32_t *)s, d22u32, 0);
    s += src_stride;

    q8u16 = vmovl_u8(vreinterpret_u8_u32(d16u32));
    q9u16 = vmovl_u8(vreinterpret_u8_u32(d18u32));
    q10u16 = vmovl_u8(vreinterpret_u8_u32(d20u32));
    q11u16 = vmovl_u8(vreinterpret_u8_u32(d22u32));

    d18s16 = vreinterpret_s16_u16(vget_low_u16(q9u16));
    d19s16 = vreinterpret_s16_u16(vget_high_u16(q9u16));
    d22s16 = vreinterpret_s16_u16(vget_low_u16(q11u16));
    d = dst;
    for (height = h; height > 0; height -= 4) {  // loop_vert
      d24u32 = vld1_lane_u32((const uint32_t *)s, d24u32, 0);
      s += src_stride;
      d26u32 = vld1_lane_u32((const uint32_t *)s, d26u32, 0);
      s += src_stride;
      d26u32 = vld1_lane_u32((const uint32_t *)s, d26u32, 1);
      s += src_stride;
      d24u32 = vld1_lane_u32((const uint32_t *)s, d24u32, 1);
      s += src_stride;

      q12u16 = vmovl_u8(vreinterpret_u8_u32(d24u32));
      q13u16 = vmovl_u8(vreinterpret_u8_u32(d26u32));

      d16s16 = vreinterpret_s16_u16(vget_low_u16(q8u16));
      d17s16 = vreinterpret_s16_u16(vget_high_u16(q8u16));
      d20s16 = vreinterpret_s16_u16(vget_low_u16(q10u16));
      d21s16 = vreinterpret_s16_u16(vget_high_u16(q10u16));
      d24s16 = vreinterpret_s16_u16(vget_low_u16(q12u16));
      d25s16 = vreinterpret_s16_u16(vget_high_u16(q12u16));
      d26s16 = vreinterpret_s16_u16(vget_low_u16(q13u16));
      d27s16 = vreinterpret_s16_u16(vget_high_u16(q13u16));

      __builtin_prefetch(d);
      __builtin_prefetch(d + dst_stride);
      q1s32 = MULTIPLY_BY_Q0(d16s16, d17s16, d18s16, d19s16, d20s16, d21s16,
                             d22s16, d24s16, q0s16);
      __builtin_prefetch(d + dst_stride * 2);
      __builtin_prefetch(d + dst_stride * 3);
      q2s32 = MULTIPLY_BY_Q0(d17s16, d18s16, d19s16, d20s16, d21s16, d22s16,
                             d24s16, d26s16, q0s16);
      __builtin_prefetch(s);
      __builtin_prefetch(s + src_stride);
      q14s32 = MULTIPLY_BY_Q0(d18s16, d19s16, d20s16, d21s16, d22s16, d24s16,
                              d26s16, d27s16, q0s16);
      __builtin_prefetch(s + src_stride * 2);
      __builtin_prefetch(s + src_stride * 3);
      q15s32 = MULTIPLY_BY_Q0(d19s16, d20s16, d21s16, d22s16, d24s16, d26s16,
                              d27s16, d25s16, q0s16);

      d2u16 = vqrshrun_n_s32(q1s32, 7);
      d3u16 = vqrshrun_n_s32(q2s32, 7);
      d4u16 = vqrshrun_n_s32(q14s32, 7);
      d5u16 = vqrshrun_n_s32(q15s32, 7);

      q1u16 = vcombine_u16(d2u16, d3u16);
      q2u16 = vcombine_u16(d4u16, d5u16);

      d2u32 = vreinterpret_u32_u8(vqmovn_u16(q1u16));
      d3u32 = vreinterpret_u32_u8(vqmovn_u16(q2u16));

      vst1_lane_u32((uint32_t *)d, d2u32, 0);
      d += dst_stride;
      vst1_lane_u32((uint32_t *)d, d2u32, 1);
      d += dst_stride;
      vst1_lane_u32((uint32_t *)d, d3u32, 0);
      d += dst_stride;
      vst1_lane_u32((uint32_t *)d, d3u32, 1);
      d += dst_stride;

      q8u16 = q10u16;
      d18s16 = d22s16;
      d19s16 = d24s16;
      q10u16 = q13u16;
      d22s16 = d25s16;
    }
  }
  return;
}