void vpx_idct4x4_1_add_neon(int16_t *input, uint8_t *dest, int dest_stride) {
  uint8x8_t d6u8;
  uint32x2_t d2u32 = vdup_n_u32(0);
  uint16x8_t q8u16;
  int16x8_t q0s16;
  uint8_t *d1, *d2;
  int16_t i, a1, cospi_16_64 = 11585;
  int16_t out = dct_const_round_shift(input[0] * cospi_16_64);
  out = dct_const_round_shift(out * cospi_16_64);
  a1 = ROUND_POWER_OF_TWO(out, 4);

  q0s16 = vdupq_n_s16(a1);

  // dc_only_idct_add
  d1 = d2 = dest;
  for (i = 0; i < 2; i++) {
    d2u32 = vld1_lane_u32((const uint32_t *)d1, d2u32, 0);
    d1 += dest_stride;
    d2u32 = vld1_lane_u32((const uint32_t *)d1, d2u32, 1);
    d1 += dest_stride;

    q8u16 = vaddw_u8(vreinterpretq_u16_s16(q0s16), vreinterpret_u8_u32(d2u32));
    d6u8 = vqmovun_s16(vreinterpretq_s16_u16(q8u16));

    vst1_lane_u32((uint32_t *)d2, vreinterpret_u32_u8(d6u8), 0);
    d2 += dest_stride;
    vst1_lane_u32((uint32_t *)d2, vreinterpret_u32_u8(d6u8), 1);
    d2 += dest_stride;
  }
  return;
}
static INLINE void idct4x4_1_add_kernel(uint8_t **dest, const int stride, const int16x8_t res, uint32x2_t *const d) { uint16x8_t a; uint8x8_t b; *d = vld1_lane_u32((const uint32_t *)*dest, *d, 0); *d = vld1_lane_u32((const uint32_t *)(*dest + stride), *d, 1); a = vaddw_u8(vreinterpretq_u16_s16(res), vreinterpret_u8_u32(*d)); b = vqmovun_s16(vreinterpretq_s16_u16(a)); vst1_lane_u32((uint32_t *)*dest, vreinterpret_u32_u8(b), 0); *dest += stride; vst1_lane_u32((uint32_t *)*dest, vreinterpret_u32_u8(b), 1); *dest += stride; }
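For reference, a minimal scalar sketch (not taken from the original sources) of what the two DC-only paths above compute: a single rounded DC value is added to every pixel of a 4x4 block with unsigned saturation. The helper names clip_u8 and idct4x4_1_add_ref are hypothetical.

#include <stdint.h>

static uint8_t clip_u8(int v) {  /* hypothetical helper: clamp to [0, 255] */
  return (uint8_t)(v < 0 ? 0 : (v > 255 ? 255 : v));
}

static void idct4x4_1_add_ref(int16_t dc, uint8_t *dest, int stride) {
  int r, c;
  for (r = 0; r < 4; ++r) {
    for (c = 0; c < 4; ++c) {
      dest[c] = clip_u8(dest[c] + dc);  /* same saturation as vqmovun_s16 */
    }
    dest += stride;
  }
}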
static INLINE uint8x8x4_t read_4x8(unsigned char *src, int pitch) {
  uint8x8x4_t x;
  const uint8x8_t a = vld1_u8(src);
  const uint8x8_t b = vld1_u8(src + pitch * 1);
  const uint8x8_t c = vld1_u8(src + pitch * 2);
  const uint8x8_t d = vld1_u8(src + pitch * 3);
  const uint8x8_t e = vld1_u8(src + pitch * 4);
  const uint8x8_t f = vld1_u8(src + pitch * 5);
  const uint8x8_t g = vld1_u8(src + pitch * 6);
  const uint8x8_t h = vld1_u8(src + pitch * 7);
  const uint32x2x2_t r04_u32 =
      vtrn_u32(vreinterpret_u32_u8(a), vreinterpret_u32_u8(e));
  const uint32x2x2_t r15_u32 =
      vtrn_u32(vreinterpret_u32_u8(b), vreinterpret_u32_u8(f));
  const uint32x2x2_t r26_u32 =
      vtrn_u32(vreinterpret_u32_u8(c), vreinterpret_u32_u8(g));
  const uint32x2x2_t r37_u32 =
      vtrn_u32(vreinterpret_u32_u8(d), vreinterpret_u32_u8(h));
  const uint16x4x2_t r02_u16 = vtrn_u16(vreinterpret_u16_u32(r04_u32.val[0]),
                                        vreinterpret_u16_u32(r26_u32.val[0]));
  const uint16x4x2_t r13_u16 = vtrn_u16(vreinterpret_u16_u32(r15_u32.val[0]),
                                        vreinterpret_u16_u32(r37_u32.val[0]));
  const uint8x8x2_t r01_u8 = vtrn_u8(vreinterpret_u8_u16(r02_u16.val[0]),
                                     vreinterpret_u8_u16(r13_u16.val[0]));
  const uint8x8x2_t r23_u8 = vtrn_u8(vreinterpret_u8_u16(r02_u16.val[1]),
                                     vreinterpret_u8_u16(r13_u16.val[1]));
  /*
   * after vtrn_u32
   * 00 01 02 03 | 40 41 42 43
   * 10 11 12 13 | 50 51 52 53
   * 20 21 22 23 | 60 61 62 63
   * 30 31 32 33 | 70 71 72 73
   * ---
   * after vtrn_u16
   * 00 01 20 21 | 40 41 60 61
   * 02 03 22 23 | 42 43 62 63
   * 10 11 30 31 | 50 51 70 71
   * 12 13 32 33 | 52 53 72 73
   *
   * 00 01 20 21 | 40 41 60 61
   * 10 11 30 31 | 50 51 70 71
   * 02 03 22 23 | 42 43 62 63
   * 12 13 32 33 | 52 53 72 73
   * ---
   * after vtrn_u8
   * 00 10 20 30 | 40 50 60 70
   * 01 11 21 31 | 41 51 61 71
   * 02 12 22 32 | 42 52 62 72
   * 03 13 23 33 | 43 53 63 73
   */
  x.val[0] = r01_u8.val[0];
  x.val[1] = r01_u8.val[1];
  x.val[2] = r23_u8.val[0];
  x.val[3] = r23_u8.val[1];
  return x;
}
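A scalar sketch (an assumption, not part of the original) of the same gather: read_4x8 returns four vectors in which byte j of x.val[i] is src[j * pitch + i], i.e. each vector is one column of an 8-row by 4-column tile. The names read_4x8_ref and out are hypothetical.

#include <stdint.h>

/* Scalar equivalent of read_4x8: out[i][j] == src[j * pitch + i]. */
static void read_4x8_ref(const unsigned char *src, int pitch,
                         unsigned char out[4][8]) {
  int i, j;
  for (i = 0; i < 4; ++i) {
    for (j = 0; j < 8; ++j) {
      out[i][j] = src[j * pitch + i];
    }
  }
}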
void test_vreinterpretu32_u8 (void) { uint32x2_t out_uint32x2_t; uint8x8_t arg0_uint8x8_t; out_uint32x2_t = vreinterpret_u32_u8 (arg0_uint8x8_t); }
void vpx_d135_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left) { const uint8x8_t XABCD_u8 = vld1_u8(above - 1); const uint64x1_t XABCD = vreinterpret_u64_u8(XABCD_u8); const uint64x1_t ____XABC = vshl_n_u64(XABCD, 32); const uint32x2_t zero = vdup_n_u32(0); const uint32x2_t IJKL = vld1_lane_u32((const uint32_t *)left, zero, 0); const uint8x8_t IJKL_u8 = vreinterpret_u8_u32(IJKL); const uint64x1_t LKJI____ = vreinterpret_u64_u8(vrev32_u8(IJKL_u8)); const uint64x1_t LKJIXABC = vorr_u64(LKJI____, ____XABC); const uint8x8_t KJIXABC_ = vreinterpret_u8_u64(vshr_n_u64(LKJIXABC, 8)); const uint8x8_t JIXABC__ = vreinterpret_u8_u64(vshr_n_u64(LKJIXABC, 16)); const uint8_t D = vget_lane_u8(XABCD_u8, 4); const uint8x8_t JIXABCD_ = vset_lane_u8(D, JIXABC__, 6); const uint8x8_t LKJIXABC_u8 = vreinterpret_u8_u64(LKJIXABC); const uint8x8_t avg1 = vhadd_u8(JIXABCD_, LKJIXABC_u8); const uint8x8_t avg2 = vrhadd_u8(avg1, KJIXABC_); const uint64x1_t avg2_u64 = vreinterpret_u64_u8(avg2); const uint32x2_t r3 = vreinterpret_u32_u8(avg2); const uint32x2_t r2 = vreinterpret_u32_u64(vshr_n_u64(avg2_u64, 8)); const uint32x2_t r1 = vreinterpret_u32_u64(vshr_n_u64(avg2_u64, 16)); const uint32x2_t r0 = vreinterpret_u32_u64(vshr_n_u64(avg2_u64, 24)); vst1_lane_u32((uint32_t *)(dst + 0 * stride), r0, 0); vst1_lane_u32((uint32_t *)(dst + 1 * stride), r1, 0); vst1_lane_u32((uint32_t *)(dst + 2 * stride), r2, 0); vst1_lane_u32((uint32_t *)(dst + 3 * stride), r3, 0); }
static WEBP_INLINE uint32_t Average2(const uint32_t* const a, const uint32_t* const b) { const uint8x8_t a0 = vreinterpret_u8_u64(vcreate_u64(*a)); const uint8x8_t b0 = vreinterpret_u8_u64(vcreate_u64(*b)); const uint8x8_t avg = vhadd_u8(a0, b0); return vget_lane_u32(vreinterpret_u32_u8(avg), 0); }
inline unsigned int GetByteMask(uint8x16_t a) { uint8x16_t am = vandq_u8(a, compaction_mask); uint8x8_t a_sum = vpadd_u8(vget_high_u8(am), vget_low_u8(am)); a_sum = vpadd_u8(a_sum, a_sum); a_sum = vpadd_u8(a_sum, a_sum); return vget_lane_u32(vreinterpret_u32_u8(a_sum), 0); }
static WEBP_INLINE uint32_t Average2(const uint32_t* const a, const uint32_t* const b) { const uint64x1_t a0 = { *a }, b0 = { *b }; const uint8x8_t a1 = vreinterpret_u8_u64(a0); const uint8x8_t b1 = vreinterpret_u8_u64(b0); const uint8x8_t avg = vhadd_u8(a1, b1); uint32_t ret; vst1_lane_u32(&ret, vreinterpret_u32_u8(avg), 0); return ret; }
static WEBP_INLINE uint32_t Average3(const uint32_t* const a, const uint32_t* const b, const uint32_t* const c) { const uint8x8_t a0 = vreinterpret_u8_u64(vcreate_u64(*a)); const uint8x8_t b0 = vreinterpret_u8_u64(vcreate_u64(*b)); const uint8x8_t c0 = vreinterpret_u8_u64(vcreate_u64(*c)); const uint8x8_t avg1 = vhadd_u8(a0, c0); const uint8x8_t avg2 = vhadd_u8(avg1, b0); return vget_lane_u32(vreinterpret_u32_u8(avg2), 0); }
void vp9_h_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left) { uint8x8_t d0u8 = vdup_n_u8(0); uint32x2_t d1u32 = vdup_n_u32(0); (void)above; d1u32 = vld1_lane_u32((const uint32_t *)left, d1u32, 0); d0u8 = vdup_lane_u8(vreinterpret_u8_u32(d1u32), 0); vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(d0u8), 0); dst += stride; d0u8 = vdup_lane_u8(vreinterpret_u8_u32(d1u32), 1); vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(d0u8), 0); dst += stride; d0u8 = vdup_lane_u8(vreinterpret_u8_u32(d1u32), 2); vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(d0u8), 0); dst += stride; d0u8 = vdup_lane_u8(vreinterpret_u8_u32(d1u32), 3); vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(d0u8), 0); }
static WEBP_INLINE uint32_t ClampedAddSubtractFull(const uint32_t* const c0,
                                                   const uint32_t* const c1,
                                                   const uint32_t* const c2) {
  const uint8x8_t p0 = vreinterpret_u8_u64(vcreate_u64(*c0));
  const uint8x8_t p1 = vreinterpret_u8_u64(vcreate_u64(*c1));
  const uint8x8_t p2 = vreinterpret_u8_u64(vcreate_u64(*c2));
  const uint16x8_t sum0 = vaddl_u8(p0, p1);                // add and widen
  const uint16x8_t sum1 = vqsubq_u16(sum0, vmovl_u8(p2));  // widen and subtract
  const uint8x8_t out = vqmovn_u16(sum1);                  // narrow and clamp
  return vget_lane_u32(vreinterpret_u32_u8(out), 0);
}
static WEBP_INLINE uint32_t ClampedAddSubtractHalf(const uint32_t* const c0,
                                                   const uint32_t* const c1,
                                                   const uint32_t* const c2) {
  const uint8x8_t p0 = vreinterpret_u8_u64(vcreate_u64(*c0));
  const uint8x8_t p1 = vreinterpret_u8_u64(vcreate_u64(*c1));
  const uint8x8_t p2 = vreinterpret_u8_u64(vcreate_u64(*c2));
  const uint8x8_t avg = vhadd_u8(p0, p1);                // Average(c0,c1)
  const uint8x8_t ab = vshr_n_u8(vqsub_u8(avg, p2), 1);  // (a-b)>>1 saturated
  const uint8x8_t ba = vshr_n_u8(vqsub_u8(p2, avg), 1);  // (b-a)>>1 saturated
  const uint8x8_t out = vqsub_u8(vqadd_u8(avg, ab), ba);
  return vget_lane_u32(vreinterpret_u32_u8(out), 0);
}
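A scalar per-channel sketch (an assumption, written to mirror what the NEON code above computes for each byte of the packed pixel): "Full" adds c0 + c1 - c2, "Half" moves the average of c0 and c1 half-way away from c2, both clamped to [0, 255]. The names clip255, add_sub_full_ref and add_sub_half_ref are hypothetical.

#include <stdint.h>

static int clip255(int v) { return v < 0 ? 0 : (v > 255 ? 255 : v); }

static uint8_t add_sub_full_ref(uint8_t c0, uint8_t c1, uint8_t c2) {
  return (uint8_t)clip255(c0 + c1 - c2);  /* saturating add/sub, then narrow */
}

static uint8_t add_sub_half_ref(uint8_t c0, uint8_t c1, uint8_t c2) {
  const int avg = (c0 + c1) >> 1;                  /* vhadd_u8 */
  return (uint8_t)clip255(avg + (avg - c2) / 2);   /* signed half-step, clamped */
}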
static WEBP_INLINE uint32_t Average3(const uint32_t* const a, const uint32_t* const b, const uint32_t* const c) { const uint64x1_t a0 = { *a }, b0 = { *b }, c0 = { *c }; const uint8x8_t a1 = vreinterpret_u8_u64(a0); const uint8x8_t b1 = vreinterpret_u8_u64(b0); const uint8x8_t c1 = vreinterpret_u8_u64(c0); const uint8x8_t avg1 = vhadd_u8(a1, c1); const uint8x8_t avg2 = vhadd_u8(avg1, b1); uint32_t ret; vst1_lane_u32(&ret, vreinterpret_u32_u8(avg2), 0); return ret; }
static WEBP_INLINE uint32_t Average4(const uint32_t* const a, const uint32_t* const b, const uint32_t* const c, const uint32_t* const d) { const uint8x8_t a0 = vreinterpret_u8_u64(vcreate_u64(*a)); const uint8x8_t b0 = vreinterpret_u8_u64(vcreate_u64(*b)); const uint8x8_t c0 = vreinterpret_u8_u64(vcreate_u64(*c)); const uint8x8_t d0 = vreinterpret_u8_u64(vcreate_u64(*d)); const uint8x8_t avg1 = vhadd_u8(a0, b0); const uint8x8_t avg2 = vhadd_u8(c0, d0); const uint8x8_t avg3 = vhadd_u8(avg1, avg2); return vget_lane_u32(vreinterpret_u32_u8(avg3), 0); }
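The Average2/Average3/Average4 helpers above all build on vhadd_u8, which is a truncating per-byte average. A scalar sketch of the per-channel result (names avg2_ref/avg3_ref/avg4_ref are hypothetical):

#include <stdint.h>

/* vhadd_u8 computes (a + b) >> 1 per byte, truncating (no rounding). */
static uint8_t avg2_ref(uint8_t a, uint8_t b) {
  return (uint8_t)((a + b) >> 1);
}

static uint8_t avg3_ref(uint8_t a, uint8_t b, uint8_t c) {
  return avg2_ref(avg2_ref(a, c), b);               /* matches Average3 */
}

static uint8_t avg4_ref(uint8_t a, uint8_t b, uint8_t c, uint8_t d) {
  return avg2_ref(avg2_ref(a, b), avg2_ref(c, d));  /* matches Average4 */
}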
static WEBP_INLINE uint32_t ClampedAddSubtractFull(const uint32_t* const c0,
                                                   const uint32_t* const c1,
                                                   const uint32_t* const c2) {
  const uint64x1_t C0 = { *c0, 0 }, C1 = { *c1, 0 }, C2 = { *c2, 0 };
  const uint8x8_t p0 = vreinterpret_u8_u64(C0);
  const uint8x8_t p1 = vreinterpret_u8_u64(C1);
  const uint8x8_t p2 = vreinterpret_u8_u64(C2);
  const uint16x8_t sum0 = vaddl_u8(p0, p1);                // add and widen
  const uint16x8_t sum1 = vqsubq_u16(sum0, vmovl_u8(p2));  // widen and subtract
  const uint8x8_t out = vqmovn_u16(sum1);                  // narrow and clamp
  uint32_t ret;
  vst1_lane_u32(&ret, vreinterpret_u32_u8(out), 0);
  return ret;
}
void byte2word48_neon(const uint8_t *t, const int pitch, float *pf) {
  uint16_t *p = (uint16_t *)pf;
  uint8x8_t m0, m2, m3, m5;
  // Zero-initialise m1/m4 so the first vld1_lane_u32 does not read an
  // uninitialised vector; both lanes are overwritten by the loads below.
  uint8x8_t m1 = vdup_n_u8(0);
  uint8x8_t m4 = vdup_n_u8(0);

  m0 = vld1_u8(t);
  m1 = vreinterpret_u8_u32(
      vld1_lane_u32((const uint32_t *)(t + 8), vreinterpret_u32_u8(m1), 0));
  m1 = vreinterpret_u8_u32(vld1_lane_u32((const uint32_t *)(t + pitch * 2),
                                         vreinterpret_u32_u8(m1), 1));
  m2 = vld1_u8(t + pitch * 2 + 4);

  t += pitch * 4;

  m3 = vld1_u8(t);
  m4 = vreinterpret_u8_u32(
      vld1_lane_u32((const uint32_t *)(t + 8), vreinterpret_u32_u8(m4), 0));
  m4 = vreinterpret_u8_u32(vld1_lane_u32((const uint32_t *)(t + pitch * 2),
                                         vreinterpret_u32_u8(m4), 1));
  m5 = vld1_u8(t + pitch * 2 + 4);

  vst1q_u16(p, vmovl_u8(m0));
  vst1q_u16(p + 8, vmovl_u8(m1));
  vst1q_u16(p + 16, vmovl_u8(m2));
  vst1q_u16(p + 24, vmovl_u8(m3));
  vst1q_u16(p + 32, vmovl_u8(m4));
  vst1q_u16(p + 40, vmovl_u8(m5));
}
static WEBP_INLINE uint32_t ClampedAddSubtractHalf(const uint32_t* const c0,
                                                   const uint32_t* const c1,
                                                   const uint32_t* const c2) {
  const uint64x1_t C0 = { *c0, 0 }, C1 = { *c1, 0 }, C2 = { *c2, 0 };
  const uint8x8_t p0 = vreinterpret_u8_u64(C0);
  const uint8x8_t p1 = vreinterpret_u8_u64(C1);
  const uint8x8_t p2 = vreinterpret_u8_u64(C2);
  const uint8x8_t avg = vhadd_u8(p0, p1);                // Average(c0,c1)
  const uint8x8_t ab = vshr_n_u8(vqsub_u8(avg, p2), 1);  // (a-b)>>1 saturated
  const uint8x8_t ba = vshr_n_u8(vqsub_u8(p2, avg), 1);  // (b-a)>>1 saturated
  const uint8x8_t out = vqsub_u8(vqadd_u8(avg, ab), ba);
  uint32_t ret;
  vst1_lane_u32(&ret, vreinterpret_u32_u8(out), 0);
  return ret;
}
void vp9_tm_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left) { int i; uint16x8_t q1u16, q3u16; int16x8_t q1s16; uint8x8_t d0u8 = vdup_n_u8(0); uint32x2_t d2u32 = vdup_n_u32(0); d0u8 = vld1_dup_u8(above - 1); d2u32 = vld1_lane_u32((const uint32_t *)above, d2u32, 0); q3u16 = vsubl_u8(vreinterpret_u8_u32(d2u32), d0u8); for (i = 0; i < 4; i++, dst += stride) { q1u16 = vdupq_n_u16((uint16_t)left[i]); q1s16 = vaddq_s16(vreinterpretq_s16_u16(q1u16), vreinterpretq_s16_u16(q3u16)); d0u8 = vqmovun_s16(q1s16); vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(d0u8), 0); } }
static INLINE void scaledconvolve_vert_w4( const uint8_t *src, const ptrdiff_t src_stride, uint8_t *dst, const ptrdiff_t dst_stride, const InterpKernel *const y_filters, const int y0_q4, const int y_step_q4, const int w, const int h) { int y; int y_q4 = y0_q4; src -= src_stride * (SUBPEL_TAPS / 2 - 1); y = h; do { const unsigned char *src_y = &src[(y_q4 >> SUBPEL_BITS) * src_stride]; if (y_q4 & SUBPEL_MASK) { const int16x8_t filters = vld1q_s16(y_filters[y_q4 & SUBPEL_MASK]); const int16x4_t filter3 = vdup_lane_s16(vget_low_s16(filters), 3); const int16x4_t filter4 = vdup_lane_s16(vget_high_s16(filters), 0); uint8x8_t s[8], d; int16x4_t t[8], tt; load_u8_8x8(src_y, src_stride, &s[0], &s[1], &s[2], &s[3], &s[4], &s[5], &s[6], &s[7]); t[0] = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(s[0]))); t[1] = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(s[1]))); t[2] = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(s[2]))); t[3] = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(s[3]))); t[4] = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(s[4]))); t[5] = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(s[5]))); t[6] = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(s[6]))); t[7] = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(s[7]))); tt = convolve8_4(t[0], t[1], t[2], t[3], t[4], t[5], t[6], t[7], filters, filter3, filter4); d = vqrshrun_n_s16(vcombine_s16(tt, tt), 7); vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(d), 0); } else { memcpy(dst, &src_y[3 * src_stride], w); } dst += dst_stride; y_q4 += y_step_q4; } while (--y); }
// 'do_above' and 'do_left' facilitate branch removal when inlined.
static INLINE void dc_8x8(uint8_t *dst, ptrdiff_t stride, const uint8_t *above,
                          const uint8_t *left, int do_above, int do_left) {
  uint16x8_t sum_top;
  uint16x8_t sum_left;
  uint8x8_t dc0;

  if (do_above) {
    const uint8x8_t A = vld1_u8(above);  // top row
    const uint16x4_t p0 = vpaddl_u8(A);  // cascading summation of the top
    const uint16x4_t p1 = vpadd_u16(p0, p0);
    const uint16x4_t p2 = vpadd_u16(p1, p1);
    sum_top = vcombine_u16(p2, p2);
  }

  if (do_left) {
    const uint8x8_t L = vld1_u8(left);   // left border
    const uint16x4_t p0 = vpaddl_u8(L);  // cascading summation of the left
    const uint16x4_t p1 = vpadd_u16(p0, p0);
    const uint16x4_t p2 = vpadd_u16(p1, p1);
    sum_left = vcombine_u16(p2, p2);
  }

  if (do_above && do_left) {
    const uint16x8_t sum = vaddq_u16(sum_left, sum_top);
    dc0 = vrshrn_n_u16(sum, 4);
  } else if (do_above) {
    dc0 = vrshrn_n_u16(sum_top, 3);
  } else if (do_left) {
    dc0 = vrshrn_n_u16(sum_left, 3);
  } else {
    dc0 = vdup_n_u8(0x80);
  }

  {
    const uint8x8_t dc = vdup_lane_u8(dc0, 0);
    int i;
    for (i = 0; i < 8; ++i) {
      vst1_u32((uint32_t *)(dst + i * stride), vreinterpret_u32_u8(dc));
    }
  }
}
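A scalar sketch (not from the source; dc_8x8_value_ref is a hypothetical name) of the DC value dc_8x8 derives before broadcasting it across the 8x8 block; the rounding matches vrshrn_n_u16 with 16, 8 or 0 contributing pixels.

#include <stdint.h>

static uint8_t dc_8x8_value_ref(const uint8_t *above, const uint8_t *left,
                                int do_above, int do_left) {
  int i, sum = 0;
  if (do_above && do_left) {
    for (i = 0; i < 8; ++i) sum += above[i] + left[i];
    return (uint8_t)((sum + 8) >> 4);  /* vrshrn_n_u16(sum, 4) */
  } else if (do_above) {
    for (i = 0; i < 8; ++i) sum += above[i];
    return (uint8_t)((sum + 4) >> 3);  /* vrshrn_n_u16(sum, 3) */
  } else if (do_left) {
    for (i = 0; i < 8; ++i) sum += left[i];
    return (uint8_t)((sum + 4) >> 3);
  }
  return 0x80;                         /* neither edge available */
}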
void vpx_d45_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride,
                                const uint8_t *above, const uint8_t *left) {
  const uint64x1_t A0 = vreinterpret_u64_u8(vld1_u8(above));  // top row
  const uint64x1_t A1 = vshr_n_u64(A0, 8);
  const uint64x1_t A2 = vshr_n_u64(A0, 16);
  const uint8x8_t ABCDEFGH = vreinterpret_u8_u64(A0);
  const uint8x8_t BCDEFGH0 = vreinterpret_u8_u64(A1);
  const uint8x8_t CDEFGH00 = vreinterpret_u8_u64(A2);
  const uint8x8_t avg1 = vhadd_u8(ABCDEFGH, CDEFGH00);
  const uint8x8_t avg2 = vrhadd_u8(avg1, BCDEFGH0);
  const uint64x1_t avg2_u64 = vreinterpret_u64_u8(avg2);
  const uint32x2_t r0 = vreinterpret_u32_u8(avg2);
  const uint32x2_t r1 = vreinterpret_u32_u64(vshr_n_u64(avg2_u64, 8));
  const uint32x2_t r2 = vreinterpret_u32_u64(vshr_n_u64(avg2_u64, 16));
  const uint32x2_t r3 = vreinterpret_u32_u64(vshr_n_u64(avg2_u64, 24));
  (void)left;
  vst1_lane_u32((uint32_t *)(dst + 0 * stride), r0, 0);
  vst1_lane_u32((uint32_t *)(dst + 1 * stride), r1, 0);
  vst1_lane_u32((uint32_t *)(dst + 2 * stride), r2, 0);
  vst1_lane_u32((uint32_t *)(dst + 3 * stride), r3, 0);
  dst[3 * stride + 3] = above[7];
}
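A scalar sketch (an assumption, using hypothetical names avg3_round_ref and d45_4x4_ref) of the smoothing the d45 predictor above applies along each down-right diagonal. The two-step vhadd/vrhadd sequence matches the rounded three-tap filter because ((a + c) >> 1 + b + 1) >> 1 == (a + 2*b + c + 2) >> 2 for 8-bit inputs.

#include <stdint.h>

/* Three-tap rounded average: (a + 2*b + c + 2) >> 2. */
static uint8_t avg3_round_ref(uint8_t a, uint8_t b, uint8_t c) {
  return (uint8_t)((a + 2 * b + c + 2) >> 2);
}

/* Scalar sketch of the 4x4 d45 prediction: each diagonal takes the filtered
 * top row; the bottom-right corner is pinned to above[7]. */
static void d45_4x4_ref(uint8_t *dst, int stride, const uint8_t *above) {
  int r, c;
  for (r = 0; r < 4; ++r) {
    for (c = 0; c < 4; ++c) {
      dst[r * stride + c] =
          (r + c + 2 < 8)
              ? avg3_round_ref(above[r + c], above[r + c + 1], above[r + c + 2])
              : above[7];
    }
  }
}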
void aom_convolve8_horiz_neon(const uint8_t *src, ptrdiff_t src_stride,
                              uint8_t *dst, ptrdiff_t dst_stride,
                              const int16_t *filter_x, int x_step_q4,
                              const int16_t *filter_y,  // unused
                              int y_step_q4,            // unused
                              int w, int h) {
  int width;
  const uint8_t *s, *psrc;
  uint8_t *d, *pdst;
  uint8x8_t d2u8, d3u8, d24u8, d25u8, d26u8, d27u8, d28u8, d29u8;
  uint32x2_t d2u32, d3u32, d28u32, d29u32, d30u32, d31u32;
  uint8x16_t q12u8, q13u8, q14u8, q15u8;
  int16x4_t d16s16, d17s16, d18s16, d19s16, d20s16, d22s16, d23s16;
  int16x4_t d24s16, d25s16, d26s16, d27s16;
  uint16x4_t d2u16, d3u16, d4u16, d5u16, d16u16, d17u16, d18u16, d19u16;
  int16x8_t q0s16;
  uint16x8_t q1u16, q2u16, q8u16, q9u16, q10u16, q11u16, q12u16, q13u16;
  int32x4_t q1s32, q2s32, q14s32, q15s32;
  uint16x8x2_t q0x2u16;
  uint8x8x2_t d0x2u8, d1x2u8;
  uint32x2x2_t d0x2u32;
  uint16x4x2_t d0x2u16, d1x2u16;
  uint32x4x2_t q0x2u32;

  assert(x_step_q4 == 16);

  (void)x_step_q4;
  (void)y_step_q4;
  (void)filter_y;

  q0s16 = vld1q_s16(filter_x);

  src -= 3;  // adjust for taps
  for (; h > 0; h -= 4, src += src_stride * 4, dst += dst_stride * 4) {
    // loop_horiz_v
    s = src;
    d24u8 = vld1_u8(s);
    s += src_stride;
    d25u8 = vld1_u8(s);
    s += src_stride;
    d26u8 = vld1_u8(s);
    s += src_stride;
    d27u8 = vld1_u8(s);

    q12u8 = vcombine_u8(d24u8, d25u8);
    q13u8 = vcombine_u8(d26u8, d27u8);

    q0x2u16 =
        vtrnq_u16(vreinterpretq_u16_u8(q12u8), vreinterpretq_u16_u8(q13u8));
    d24u8 = vreinterpret_u8_u16(vget_low_u16(q0x2u16.val[0]));
    d25u8 = vreinterpret_u8_u16(vget_high_u16(q0x2u16.val[0]));
    d26u8 = vreinterpret_u8_u16(vget_low_u16(q0x2u16.val[1]));
    d27u8 = vreinterpret_u8_u16(vget_high_u16(q0x2u16.val[1]));
    d0x2u8 = vtrn_u8(d24u8, d25u8);
    d1x2u8 = vtrn_u8(d26u8, d27u8);

    __builtin_prefetch(src + src_stride * 4);
    __builtin_prefetch(src + src_stride * 5);
    __builtin_prefetch(src + src_stride * 6);

    q8u16 = vmovl_u8(d0x2u8.val[0]);
    q9u16 = vmovl_u8(d0x2u8.val[1]);
    q10u16 = vmovl_u8(d1x2u8.val[0]);
    q11u16 = vmovl_u8(d1x2u8.val[1]);

    d16u16 = vget_low_u16(q8u16);
    d17u16 = vget_high_u16(q8u16);
    d18u16 = vget_low_u16(q9u16);
    d19u16 = vget_high_u16(q9u16);
    q8u16 = vcombine_u16(d16u16, d18u16);  // vswp 17 18
    q9u16 = vcombine_u16(d17u16, d19u16);

    d20s16 = vreinterpret_s16_u16(vget_low_u16(q10u16));
    d23s16 = vreinterpret_s16_u16(vget_high_u16(q10u16));  // vmov 23 21

    for (width = w, psrc = src + 7, pdst = dst; width > 0;
         width -= 4, psrc += 4, pdst += 4) {  // loop_horiz
      s = psrc;
      d28u32 = vld1_dup_u32((const uint32_t *)s);
      s += src_stride;
      d29u32 = vld1_dup_u32((const uint32_t *)s);
      s += src_stride;
      d31u32 = vld1_dup_u32((const uint32_t *)s);
      s += src_stride;
      d30u32 = vld1_dup_u32((const uint32_t *)s);

      __builtin_prefetch(psrc + 64);

      d0x2u16 =
          vtrn_u16(vreinterpret_u16_u32(d28u32), vreinterpret_u16_u32(d31u32));
      d1x2u16 =
          vtrn_u16(vreinterpret_u16_u32(d29u32), vreinterpret_u16_u32(d30u32));
      d0x2u8 = vtrn_u8(vreinterpret_u8_u16(d0x2u16.val[0]),   // d28
                       vreinterpret_u8_u16(d1x2u16.val[0]));  // d29
      d1x2u8 = vtrn_u8(vreinterpret_u8_u16(d0x2u16.val[1]),   // d31
                       vreinterpret_u8_u16(d1x2u16.val[1]));  // d30

      __builtin_prefetch(psrc + 64 + src_stride);

      q14u8 = vcombine_u8(d0x2u8.val[0], d0x2u8.val[1]);
      q15u8 = vcombine_u8(d1x2u8.val[1], d1x2u8.val[0]);
      q0x2u32 =
          vtrnq_u32(vreinterpretq_u32_u8(q14u8), vreinterpretq_u32_u8(q15u8));

      d28u8 = vreinterpret_u8_u32(vget_low_u32(q0x2u32.val[0]));
      d29u8 = vreinterpret_u8_u32(vget_high_u32(q0x2u32.val[0]));
      q12u16 = vmovl_u8(d28u8);
      q13u16 = vmovl_u8(d29u8);

      __builtin_prefetch(psrc + 64 + src_stride * 2);

      d16s16 = vreinterpret_s16_u16(vget_low_u16(q8u16));
      d17s16 = vreinterpret_s16_u16(vget_high_u16(q8u16));
      d18s16 = vreinterpret_s16_u16(vget_low_u16(q9u16));
      d19s16 = vreinterpret_s16_u16(vget_high_u16(q9u16));
      d22s16 = vreinterpret_s16_u16(vget_low_u16(q11u16));
      d24s16 = vreinterpret_s16_u16(vget_low_u16(q12u16));
      d25s16 = vreinterpret_s16_u16(vget_high_u16(q12u16));
      d26s16 = vreinterpret_s16_u16(vget_low_u16(q13u16));
      d27s16 = vreinterpret_s16_u16(vget_high_u16(q13u16));

      q1s32 = MULTIPLY_BY_Q0(d16s16, d17s16, d20s16, d22s16, d18s16, d19s16,
                             d23s16, d24s16, q0s16);
      q2s32 = MULTIPLY_BY_Q0(d17s16, d20s16, d22s16, d18s16, d19s16, d23s16,
                             d24s16, d26s16, q0s16);
      q14s32 = MULTIPLY_BY_Q0(d20s16, d22s16, d18s16, d19s16, d23s16, d24s16,
                              d26s16, d27s16, q0s16);
      q15s32 = MULTIPLY_BY_Q0(d22s16, d18s16, d19s16, d23s16, d24s16, d26s16,
                              d27s16, d25s16, q0s16);

      __builtin_prefetch(psrc + 60 + src_stride * 3);

      d2u16 = vqrshrun_n_s32(q1s32, 7);
      d3u16 = vqrshrun_n_s32(q2s32, 7);
      d4u16 = vqrshrun_n_s32(q14s32, 7);
      d5u16 = vqrshrun_n_s32(q15s32, 7);

      q1u16 = vcombine_u16(d2u16, d3u16);
      q2u16 = vcombine_u16(d4u16, d5u16);

      d2u8 = vqmovn_u16(q1u16);
      d3u8 = vqmovn_u16(q2u16);

      d0x2u16 = vtrn_u16(vreinterpret_u16_u8(d2u8), vreinterpret_u16_u8(d3u8));
      d0x2u32 = vtrn_u32(vreinterpret_u32_u16(d0x2u16.val[0]),
                         vreinterpret_u32_u16(d0x2u16.val[1]));
      d0x2u8 = vtrn_u8(vreinterpret_u8_u32(d0x2u32.val[0]),
                       vreinterpret_u8_u32(d0x2u32.val[1]));

      d2u32 = vreinterpret_u32_u8(d0x2u8.val[0]);
      d3u32 = vreinterpret_u32_u8(d0x2u8.val[1]);

      d = pdst;
      vst1_lane_u32((uint32_t *)d, d2u32, 0);
      d += dst_stride;
      vst1_lane_u32((uint32_t *)d, d3u32, 0);
      d += dst_stride;
      vst1_lane_u32((uint32_t *)d, d2u32, 1);
      d += dst_stride;
      vst1_lane_u32((uint32_t *)d, d3u32, 1);

      q8u16 = q9u16;
      d20s16 = d23s16;
      q11u16 = q12u16;
      q9u16 = q13u16;
      d23s16 = vreinterpret_s16_u16(vget_high_u16(q11u16));
    }
  }
  return;
}
void computeNetwork0new_neon(const float *dataf, const float *weightsf, uint8_t *d) { const int16_t *data = (const int16_t *)dataf; const int16_t *weights = (const int16_t *)weightsf; int32x4_t accum0 = { 0, 0, 0, 0 }; int32x4_t accum1 = accum0; int32x4_t accum2 = accum0; int32x4_t accum3 = accum0; for (int i = 0; i < 128/2; i += 8) { int16x4x2_t d0 = vld2_s16(data + i); int16x4x2_t w0 = vld2_s16(weights + i * 4); int16x4x2_t w1 = vld2_s16(weights + i * 4 + 8); int16x4x2_t w2 = vld2_s16(weights + i * 4 + 16); int16x4x2_t w3 = vld2_s16(weights + i * 4 + 24); accum0 = vmlal_s16(accum0, d0.val[0], w0.val[0]); accum0 = vmlal_s16(accum0, d0.val[1], w0.val[1]); accum1 = vmlal_s16(accum1, d0.val[0], w1.val[0]); accum1 = vmlal_s16(accum1, d0.val[1], w1.val[1]); accum2 = vmlal_s16(accum2, d0.val[0], w2.val[0]); accum2 = vmlal_s16(accum2, d0.val[1], w2.val[1]); accum3 = vmlal_s16(accum3, d0.val[0], w3.val[0]); accum3 = vmlal_s16(accum3, d0.val[1], w3.val[1]); } int32x2_t sum0 = vpadd_s32(vget_low_s32(accum0), vget_high_s32(accum0)); int32x2_t sum1 = vpadd_s32(vget_low_s32(accum1), vget_high_s32(accum1)); int32x2_t sum2 = vpadd_s32(vget_low_s32(accum2), vget_high_s32(accum2)); int32x2_t sum3 = vpadd_s32(vget_low_s32(accum3), vget_high_s32(accum3)); sum0 = vpadd_s32(sum0, sum1); sum1 = vpadd_s32(sum2, sum3); int32x4_t sum = vcombine_s32(sum0, sum1); float32x4_t m0 = vcvtq_f32_s32(sum); m0 = vmulq_f32(m0, vld1q_f32(weightsf + 512/4)); m0 = vaddq_f32(m0, vld1q_f32(weightsf + 528/4)); float32x4_t m1, m2, m3, m4; m1 = m0; m0 = vreinterpretq_f32_u32(vandq_u32(vreinterpretq_u32_f32(m0), sign_bits_f)); m0 = vaddq_f32(m0, ones_f); m0 = vmulq_f32(reciprocal(m0), m1); m1 = vdupq_lane_f32(vget_low_f32(m0), 0); m2 = vdupq_lane_f32(vget_low_f32(m0), 1); m3 = vdupq_lane_f32(vget_high_f32(m0), 0); m4 = vdupq_lane_f32(vget_high_f32(m0), 1); m1 = vmulq_f32(m1, vld1q_f32(weightsf + 544/4)); m2 = vmulq_f32(m2, vld1q_f32(weightsf + 560/4)); m3 = vmulq_f32(m3, vld1q_f32(weightsf + 576/4)); m4 = vmulq_f32(m4, vld1q_f32(weightsf + 592/4)); m1 = vaddq_f32(m1, m2); m3 = vaddq_f32(m3, m4); m1 = vaddq_f32(m1, m3); m1 = vaddq_f32(m1, vld1q_f32(weightsf + 608/4)); uint32x4_t gte = vcgeq_f32(m1, zeroes_f); uint16x4_t gte_u16 = vmovn_u32(gte); uint8x8_t gte_u8 = vmovn_u16(vcombine_u16(gte_u16, vget_low_u16(vreinterpretq_u16_u32(sign_bits_f)))); gte_u8 = vshr_n_u8(gte_u8, 7); vst1_lane_u32((uint32_t *)d, vreinterpret_u32_u8(gte_u8), 0); }
void vp10_iht4x4_16_add_neon(const tran_low_t *input, uint8_t *dest,
                             int dest_stride, int tx_type) {
  uint8x8_t d26u8, d27u8;
  int16x4_t d0s16, d1s16, d2s16, d3s16, d4s16, d5s16;
  uint32x2_t d26u32, d27u32;
  int16x8_t q3s16, q8s16, q9s16;
  uint16x8_t q8u16, q9u16;

  d26u32 = d27u32 = vdup_n_u32(0);

  q8s16 = vld1q_s16(input);
  q9s16 = vld1q_s16(input + 8);

  TRANSPOSE4X4(&q8s16, &q9s16);

  switch (tx_type) {
    case 0:  // idct_idct is not supported. Fall back to C
      vp10_iht4x4_16_add_c(input, dest, dest_stride, tx_type);
      return;
      break;
    case 1:  // iadst_idct
      // generate constants
      GENERATE_COSINE_CONSTANTS(&d0s16, &d1s16, &d2s16);
      GENERATE_SINE_CONSTANTS(&d3s16, &d4s16, &d5s16, &q3s16);

      // first transform rows
      IDCT4x4_1D(&d0s16, &d1s16, &d2s16, &q8s16, &q9s16);

      // transpose the matrix
      TRANSPOSE4X4(&q8s16, &q9s16);

      // then transform columns
      IADST4x4_1D(&d3s16, &d4s16, &d5s16, &q3s16, &q8s16, &q9s16);
      break;
    case 2:  // idct_iadst
      // generate constants
      GENERATE_COSINE_CONSTANTS(&d0s16, &d1s16, &d2s16);
      GENERATE_SINE_CONSTANTS(&d3s16, &d4s16, &d5s16, &q3s16);

      // first transform rows
      IADST4x4_1D(&d3s16, &d4s16, &d5s16, &q3s16, &q8s16, &q9s16);

      // transpose the matrix
      TRANSPOSE4X4(&q8s16, &q9s16);

      // then transform columns
      IDCT4x4_1D(&d0s16, &d1s16, &d2s16, &q8s16, &q9s16);
      break;
    case 3:  // iadst_iadst
      // generate constants
      GENERATE_SINE_CONSTANTS(&d3s16, &d4s16, &d5s16, &q3s16);

      // first transform rows
      IADST4x4_1D(&d3s16, &d4s16, &d5s16, &q3s16, &q8s16, &q9s16);

      // transpose the matrix
      TRANSPOSE4X4(&q8s16, &q9s16);

      // then transform columns
      IADST4x4_1D(&d3s16, &d4s16, &d5s16, &q3s16, &q8s16, &q9s16);
      break;
    default:  // iadst_idct
      assert(0);
      break;
  }

  q8s16 = vrshrq_n_s16(q8s16, 4);
  q9s16 = vrshrq_n_s16(q9s16, 4);

  d26u32 = vld1_lane_u32((const uint32_t *)dest, d26u32, 0);
  dest += dest_stride;
  d26u32 = vld1_lane_u32((const uint32_t *)dest, d26u32, 1);
  dest += dest_stride;
  d27u32 = vld1_lane_u32((const uint32_t *)dest, d27u32, 0);
  dest += dest_stride;
  d27u32 = vld1_lane_u32((const uint32_t *)dest, d27u32, 1);

  q8u16 = vaddw_u8(vreinterpretq_u16_s16(q8s16), vreinterpret_u8_u32(d26u32));
  q9u16 = vaddw_u8(vreinterpretq_u16_s16(q9s16), vreinterpret_u8_u32(d27u32));

  d26u8 = vqmovun_s16(vreinterpretq_s16_u16(q8u16));
  d27u8 = vqmovun_s16(vreinterpretq_s16_u16(q9u16));

  vst1_lane_u32((uint32_t *)dest, vreinterpret_u32_u8(d27u8), 1);
  dest -= dest_stride;
  vst1_lane_u32((uint32_t *)dest, vreinterpret_u32_u8(d27u8), 0);
  dest -= dest_stride;
  vst1_lane_u32((uint32_t *)dest, vreinterpret_u32_u8(d26u8), 1);
  dest -= dest_stride;
  vst1_lane_u32((uint32_t *)dest, vreinterpret_u32_u8(d26u8), 0);
  return;
}
void vp9_lpf_vertical_4_neon(unsigned char *src, int pitch,
                             unsigned char *blimit, unsigned char *limit,
                             unsigned char *thresh, int count) {
  int i, pitch8;
  uint8_t *s;
  uint8x8_t dblimit, dlimit, dthresh;
  uint8x8_t d3u8, d4u8, d5u8, d6u8, d7u8, d16u8, d17u8, d18u8;
  uint32x2x2_t d2tmp0, d2tmp1, d2tmp2, d2tmp3;
  uint16x4x2_t d2tmp4, d2tmp5, d2tmp6, d2tmp7;
  uint8x8x2_t d2tmp8, d2tmp9, d2tmp10, d2tmp11;
  uint8x8x4_t d4Result;

  if (count == 0)  // end_vp9_lf_h_edge
    return;

  dblimit = vld1_u8(blimit);
  dlimit = vld1_u8(limit);
  dthresh = vld1_u8(thresh);

  pitch8 = pitch * 8;
  for (i = 0; i < count; i++, src += pitch8) {
    s = src - (i + 1) * 4;

    d3u8 = vld1_u8(s);
    s += pitch;
    d4u8 = vld1_u8(s);
    s += pitch;
    d5u8 = vld1_u8(s);
    s += pitch;
    d6u8 = vld1_u8(s);
    s += pitch;
    d7u8 = vld1_u8(s);
    s += pitch;
    d16u8 = vld1_u8(s);
    s += pitch;
    d17u8 = vld1_u8(s);
    s += pitch;
    d18u8 = vld1_u8(s);

    d2tmp0 = vtrn_u32(vreinterpret_u32_u8(d3u8), vreinterpret_u32_u8(d7u8));
    d2tmp1 = vtrn_u32(vreinterpret_u32_u8(d4u8), vreinterpret_u32_u8(d16u8));
    d2tmp2 = vtrn_u32(vreinterpret_u32_u8(d5u8), vreinterpret_u32_u8(d17u8));
    d2tmp3 = vtrn_u32(vreinterpret_u32_u8(d6u8), vreinterpret_u32_u8(d18u8));

    d2tmp4 = vtrn_u16(vreinterpret_u16_u32(d2tmp0.val[0]),
                      vreinterpret_u16_u32(d2tmp2.val[0]));
    d2tmp5 = vtrn_u16(vreinterpret_u16_u32(d2tmp1.val[0]),
                      vreinterpret_u16_u32(d2tmp3.val[0]));
    d2tmp6 = vtrn_u16(vreinterpret_u16_u32(d2tmp0.val[1]),
                      vreinterpret_u16_u32(d2tmp2.val[1]));
    d2tmp7 = vtrn_u16(vreinterpret_u16_u32(d2tmp1.val[1]),
                      vreinterpret_u16_u32(d2tmp3.val[1]));

    d2tmp8 = vtrn_u8(vreinterpret_u8_u16(d2tmp4.val[0]),
                     vreinterpret_u8_u16(d2tmp5.val[0]));
    d2tmp9 = vtrn_u8(vreinterpret_u8_u16(d2tmp4.val[1]),
                     vreinterpret_u8_u16(d2tmp5.val[1]));
    d2tmp10 = vtrn_u8(vreinterpret_u8_u16(d2tmp6.val[0]),
                      vreinterpret_u8_u16(d2tmp7.val[0]));
    d2tmp11 = vtrn_u8(vreinterpret_u8_u16(d2tmp6.val[1]),
                      vreinterpret_u8_u16(d2tmp7.val[1]));

    d3u8 = d2tmp8.val[0];
    d4u8 = d2tmp8.val[1];
    d5u8 = d2tmp9.val[0];
    d6u8 = d2tmp9.val[1];
    d7u8 = d2tmp10.val[0];
    d16u8 = d2tmp10.val[1];
    d17u8 = d2tmp11.val[0];
    d18u8 = d2tmp11.val[1];

    vp9_loop_filter_neon(dblimit, dlimit, dthresh, d3u8, d4u8, d5u8, d6u8,
                         d7u8, d16u8, d17u8, d18u8, &d4u8, &d5u8, &d6u8,
                         &d7u8);

    d4Result.val[0] = d4u8;
    d4Result.val[1] = d5u8;
    d4Result.val[2] = d6u8;
    d4Result.val[3] = d7u8;

    src -= 2;
    vst4_lane_u8(src, d4Result, 0);
    src += pitch;
    vst4_lane_u8(src, d4Result, 1);
    src += pitch;
    vst4_lane_u8(src, d4Result, 2);
    src += pitch;
    vst4_lane_u8(src, d4Result, 3);
    src += pitch;
    vst4_lane_u8(src, d4Result, 4);
    src += pitch;
    vst4_lane_u8(src, d4Result, 5);
    src += pitch;
    vst4_lane_u8(src, d4Result, 6);
    src += pitch;
    vst4_lane_u8(src, d4Result, 7);
  }
  return;
}
static INLINE void scaledconvolve_horiz_w4(
    const uint8_t *src, const ptrdiff_t src_stride, uint8_t *dst,
    const ptrdiff_t dst_stride, const InterpKernel *const x_filters,
    const int x0_q4, const int x_step_q4, const int w, const int h) {
  DECLARE_ALIGNED(16, uint8_t, temp[4 * 4]);
  int x, y, z;

  src -= SUBPEL_TAPS / 2 - 1;

  y = h;
  do {
    int x_q4 = x0_q4;
    x = 0;
    do {
      // process 4 src_x steps
      for (z = 0; z < 4; ++z) {
        const uint8_t *const src_x = &src[x_q4 >> SUBPEL_BITS];
        if (x_q4 & SUBPEL_MASK) {
          const int16x8_t filters = vld1q_s16(x_filters[x_q4 & SUBPEL_MASK]);
          const int16x4_t filter3 = vdup_lane_s16(vget_low_s16(filters), 3);
          const int16x4_t filter4 = vdup_lane_s16(vget_high_s16(filters), 0);
          uint8x8_t s[8], d;
          int16x8_t ss[4];
          int16x4_t t[8], tt;

          load_u8_8x4(src_x, src_stride, &s[0], &s[1], &s[2], &s[3]);
          transpose_u8_8x4(&s[0], &s[1], &s[2], &s[3]);

          ss[0] = vreinterpretq_s16_u16(vmovl_u8(s[0]));
          ss[1] = vreinterpretq_s16_u16(vmovl_u8(s[1]));
          ss[2] = vreinterpretq_s16_u16(vmovl_u8(s[2]));
          ss[3] = vreinterpretq_s16_u16(vmovl_u8(s[3]));
          t[0] = vget_low_s16(ss[0]);
          t[1] = vget_low_s16(ss[1]);
          t[2] = vget_low_s16(ss[2]);
          t[3] = vget_low_s16(ss[3]);
          t[4] = vget_high_s16(ss[0]);
          t[5] = vget_high_s16(ss[1]);
          t[6] = vget_high_s16(ss[2]);
          t[7] = vget_high_s16(ss[3]);

          tt = convolve8_4(t[0], t[1], t[2], t[3], t[4], t[5], t[6], t[7],
                           filters, filter3, filter4);
          d = vqrshrun_n_s16(vcombine_s16(tt, tt), 7);
          vst1_lane_u32((uint32_t *)&temp[4 * z], vreinterpret_u32_u8(d), 0);
        } else {
          int i;
          for (i = 0; i < 4; ++i) {
            temp[z * 4 + i] = src_x[i * src_stride + 3];
          }
        }
        x_q4 += x_step_q4;
      }

      // transpose the 4x4 filters values back to dst
      {
        const uint8x8x4_t d4 = vld4_u8(temp);
        vst1_lane_u32((uint32_t *)&dst[x + 0 * dst_stride],
                      vreinterpret_u32_u8(d4.val[0]), 0);
        vst1_lane_u32((uint32_t *)&dst[x + 1 * dst_stride],
                      vreinterpret_u32_u8(d4.val[1]), 0);
        vst1_lane_u32((uint32_t *)&dst[x + 2 * dst_stride],
                      vreinterpret_u32_u8(d4.val[2]), 0);
        vst1_lane_u32((uint32_t *)&dst[x + 3 * dst_stride],
                      vreinterpret_u32_u8(d4.val[3]), 0);
      }
      x += 4;
    } while (x < w);
    src += src_stride * 4;
    dst += dst_stride * 4;
    y -= 4;
  } while (y > 0);
}
void vp8_short_idct4x4llm_neon(int16_t *input, unsigned char *pred_ptr,
                               int pred_stride, unsigned char *dst_ptr,
                               int dst_stride) {
  int i;
  uint32x2_t d6u32 = vdup_n_u32(0);
  uint8x8_t d1u8;
  int16x4_t d2, d3, d4, d5, d10, d11, d12, d13;
  uint16x8_t q1u16;
  int16x8_t q1s16, q2s16, q3s16, q4s16;
  int32x2x2_t v2tmp0, v2tmp1;
  int16x4x2_t v2tmp2, v2tmp3;

  d2 = vld1_s16(input);
  d3 = vld1_s16(input + 4);
  d4 = vld1_s16(input + 8);
  d5 = vld1_s16(input + 12);

  // 1st for loop
  q1s16 = vcombine_s16(d2, d4);  // Swap d3 d4 here
  q2s16 = vcombine_s16(d3, d5);

  q3s16 = vqdmulhq_n_s16(q2s16, sinpi8sqrt2);
  q4s16 = vqdmulhq_n_s16(q2s16, cospi8sqrt2minus1);

  d12 = vqadd_s16(vget_low_s16(q1s16), vget_high_s16(q1s16));  // a1
  d13 = vqsub_s16(vget_low_s16(q1s16), vget_high_s16(q1s16));  // b1

  q3s16 = vshrq_n_s16(q3s16, 1);
  q4s16 = vshrq_n_s16(q4s16, 1);

  q3s16 = vqaddq_s16(q3s16, q2s16);
  q4s16 = vqaddq_s16(q4s16, q2s16);

  d10 = vqsub_s16(vget_low_s16(q3s16), vget_high_s16(q4s16));  // c1
  d11 = vqadd_s16(vget_high_s16(q3s16), vget_low_s16(q4s16));  // d1

  d2 = vqadd_s16(d12, d11);
  d3 = vqadd_s16(d13, d10);
  d4 = vqsub_s16(d13, d10);
  d5 = vqsub_s16(d12, d11);

  v2tmp0 = vtrn_s32(vreinterpret_s32_s16(d2), vreinterpret_s32_s16(d4));
  v2tmp1 = vtrn_s32(vreinterpret_s32_s16(d3), vreinterpret_s32_s16(d5));
  v2tmp2 = vtrn_s16(vreinterpret_s16_s32(v2tmp0.val[0]),
                    vreinterpret_s16_s32(v2tmp1.val[0]));
  v2tmp3 = vtrn_s16(vreinterpret_s16_s32(v2tmp0.val[1]),
                    vreinterpret_s16_s32(v2tmp1.val[1]));

  // 2nd for loop
  q1s16 = vcombine_s16(v2tmp2.val[0], v2tmp3.val[0]);
  q2s16 = vcombine_s16(v2tmp2.val[1], v2tmp3.val[1]);

  q3s16 = vqdmulhq_n_s16(q2s16, sinpi8sqrt2);
  q4s16 = vqdmulhq_n_s16(q2s16, cospi8sqrt2minus1);

  d12 = vqadd_s16(vget_low_s16(q1s16), vget_high_s16(q1s16));  // a1
  d13 = vqsub_s16(vget_low_s16(q1s16), vget_high_s16(q1s16));  // b1

  q3s16 = vshrq_n_s16(q3s16, 1);
  q4s16 = vshrq_n_s16(q4s16, 1);

  q3s16 = vqaddq_s16(q3s16, q2s16);
  q4s16 = vqaddq_s16(q4s16, q2s16);

  d10 = vqsub_s16(vget_low_s16(q3s16), vget_high_s16(q4s16));  // c1
  d11 = vqadd_s16(vget_high_s16(q3s16), vget_low_s16(q4s16));  // d1

  d2 = vqadd_s16(d12, d11);
  d3 = vqadd_s16(d13, d10);
  d4 = vqsub_s16(d13, d10);
  d5 = vqsub_s16(d12, d11);

  d2 = vrshr_n_s16(d2, 3);
  d3 = vrshr_n_s16(d3, 3);
  d4 = vrshr_n_s16(d4, 3);
  d5 = vrshr_n_s16(d5, 3);

  v2tmp0 = vtrn_s32(vreinterpret_s32_s16(d2), vreinterpret_s32_s16(d4));
  v2tmp1 = vtrn_s32(vreinterpret_s32_s16(d3), vreinterpret_s32_s16(d5));
  v2tmp2 = vtrn_s16(vreinterpret_s16_s32(v2tmp0.val[0]),
                    vreinterpret_s16_s32(v2tmp1.val[0]));
  v2tmp3 = vtrn_s16(vreinterpret_s16_s32(v2tmp0.val[1]),
                    vreinterpret_s16_s32(v2tmp1.val[1]));

  q1s16 = vcombine_s16(v2tmp2.val[0], v2tmp2.val[1]);
  q2s16 = vcombine_s16(v2tmp3.val[0], v2tmp3.val[1]);

  // dc_only_idct_add
  for (i = 0; i < 2; i++, q1s16 = q2s16) {
    d6u32 = vld1_lane_u32((const uint32_t *)pred_ptr, d6u32, 0);
    pred_ptr += pred_stride;
    d6u32 = vld1_lane_u32((const uint32_t *)pred_ptr, d6u32, 1);
    pred_ptr += pred_stride;

    q1u16 = vaddw_u8(vreinterpretq_u16_s16(q1s16), vreinterpret_u8_u32(d6u32));
    d1u8 = vqmovun_s16(vreinterpretq_s16_u16(q1u16));

    vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d1u8), 0);
    dst_ptr += dst_stride;
    vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d1u8), 1);
    dst_ptr += dst_stride;
  }
  return;
}
void aom_lpf_vertical_8_neon(uint8_t *src, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh) { int i; uint8_t *s; uint8x8_t dblimit, dlimit, dthresh; uint8x8_t d0u8, d1u8, d2u8, d3u8, d4u8, d5u8, d6u8, d7u8; uint8x8_t d16u8, d17u8, d18u8; uint32x2x2_t d2tmp0, d2tmp1, d2tmp2, d2tmp3; uint16x4x2_t d2tmp4, d2tmp5, d2tmp6, d2tmp7; uint8x8x2_t d2tmp8, d2tmp9, d2tmp10, d2tmp11; uint8x8x4_t d4Result; uint8x8x2_t d2Result; dblimit = vld1_u8(blimit); dlimit = vld1_u8(limit); dthresh = vld1_u8(thresh); for (i = 0; i < 1; i++) { s = src + (i * (pitch << 3)) - 4; d3u8 = vld1_u8(s); s += pitch; d4u8 = vld1_u8(s); s += pitch; d5u8 = vld1_u8(s); s += pitch; d6u8 = vld1_u8(s); s += pitch; d7u8 = vld1_u8(s); s += pitch; d16u8 = vld1_u8(s); s += pitch; d17u8 = vld1_u8(s); s += pitch; d18u8 = vld1_u8(s); d2tmp0 = vtrn_u32(vreinterpret_u32_u8(d3u8), vreinterpret_u32_u8(d7u8)); d2tmp1 = vtrn_u32(vreinterpret_u32_u8(d4u8), vreinterpret_u32_u8(d16u8)); d2tmp2 = vtrn_u32(vreinterpret_u32_u8(d5u8), vreinterpret_u32_u8(d17u8)); d2tmp3 = vtrn_u32(vreinterpret_u32_u8(d6u8), vreinterpret_u32_u8(d18u8)); d2tmp4 = vtrn_u16(vreinterpret_u16_u32(d2tmp0.val[0]), vreinterpret_u16_u32(d2tmp2.val[0])); d2tmp5 = vtrn_u16(vreinterpret_u16_u32(d2tmp1.val[0]), vreinterpret_u16_u32(d2tmp3.val[0])); d2tmp6 = vtrn_u16(vreinterpret_u16_u32(d2tmp0.val[1]), vreinterpret_u16_u32(d2tmp2.val[1])); d2tmp7 = vtrn_u16(vreinterpret_u16_u32(d2tmp1.val[1]), vreinterpret_u16_u32(d2tmp3.val[1])); d2tmp8 = vtrn_u8(vreinterpret_u8_u16(d2tmp4.val[0]), vreinterpret_u8_u16(d2tmp5.val[0])); d2tmp9 = vtrn_u8(vreinterpret_u8_u16(d2tmp4.val[1]), vreinterpret_u8_u16(d2tmp5.val[1])); d2tmp10 = vtrn_u8(vreinterpret_u8_u16(d2tmp6.val[0]), vreinterpret_u8_u16(d2tmp7.val[0])); d2tmp11 = vtrn_u8(vreinterpret_u8_u16(d2tmp6.val[1]), vreinterpret_u8_u16(d2tmp7.val[1])); d3u8 = d2tmp8.val[0]; d4u8 = d2tmp8.val[1]; d5u8 = d2tmp9.val[0]; d6u8 = d2tmp9.val[1]; d7u8 = d2tmp10.val[0]; d16u8 = d2tmp10.val[1]; d17u8 = d2tmp11.val[0]; d18u8 = d2tmp11.val[1]; mbloop_filter_neon(dblimit, dlimit, dthresh, d3u8, d4u8, d5u8, d6u8, d7u8, d16u8, d17u8, d18u8, &d0u8, &d1u8, &d2u8, &d3u8, &d4u8, &d5u8); d4Result.val[0] = d0u8; d4Result.val[1] = d1u8; d4Result.val[2] = d2u8; d4Result.val[3] = d3u8; d2Result.val[0] = d4u8; d2Result.val[1] = d5u8; s = src - 3; vst4_lane_u8(s, d4Result, 0); s += pitch; vst4_lane_u8(s, d4Result, 1); s += pitch; vst4_lane_u8(s, d4Result, 2); s += pitch; vst4_lane_u8(s, d4Result, 3); s += pitch; vst4_lane_u8(s, d4Result, 4); s += pitch; vst4_lane_u8(s, d4Result, 5); s += pitch; vst4_lane_u8(s, d4Result, 6); s += pitch; vst4_lane_u8(s, d4Result, 7); s = src + 1; vst2_lane_u8(s, d2Result, 0); s += pitch; vst2_lane_u8(s, d2Result, 1); s += pitch; vst2_lane_u8(s, d2Result, 2); s += pitch; vst2_lane_u8(s, d2Result, 3); s += pitch; vst2_lane_u8(s, d2Result, 4); s += pitch; vst2_lane_u8(s, d2Result, 5); s += pitch; vst2_lane_u8(s, d2Result, 6); s += pitch; vst2_lane_u8(s, d2Result, 7); } return; }
static INLINE void mbloop_filter_neon(uint8x8_t dblimit,   // mblimit
                                      uint8x8_t dlimit,    // limit
                                      uint8x8_t dthresh,   // thresh
                                      uint8x8_t d3u8,      // p3
                                      uint8x8_t d4u8,      // p2
                                      uint8x8_t d5u8,      // p1
                                      uint8x8_t d6u8,      // p0
                                      uint8x8_t d7u8,      // q0
                                      uint8x8_t d16u8,     // q1
                                      uint8x8_t d17u8,     // q2
                                      uint8x8_t d18u8,     // q3
                                      uint8x8_t *d0ru8,    // p2
                                      uint8x8_t *d1ru8,    // p1
                                      uint8x8_t *d2ru8,    // p0
                                      uint8x8_t *d3ru8,    // q0
                                      uint8x8_t *d4ru8,    // q1
                                      uint8x8_t *d5ru8) {  // q2
  uint32_t flat;
  uint8x8_t d0u8, d1u8, d2u8, d19u8, d20u8, d21u8, d22u8, d23u8, d24u8;
  uint8x8_t d25u8, d26u8, d27u8, d28u8, d29u8, d30u8, d31u8;
  int16x8_t q15s16;
  uint16x8_t q10u16, q14u16;
  int8x8_t d21s8, d24s8, d25s8, d26s8, d28s8, d29s8, d30s8;

  d19u8 = vabd_u8(d3u8, d4u8);
  d20u8 = vabd_u8(d4u8, d5u8);
  d21u8 = vabd_u8(d5u8, d6u8);
  d22u8 = vabd_u8(d16u8, d7u8);
  d23u8 = vabd_u8(d17u8, d16u8);
  d24u8 = vabd_u8(d18u8, d17u8);

  d19u8 = vmax_u8(d19u8, d20u8);
  d20u8 = vmax_u8(d21u8, d22u8);
  d25u8 = vabd_u8(d6u8, d4u8);
  d23u8 = vmax_u8(d23u8, d24u8);
  d26u8 = vabd_u8(d7u8, d17u8);
  d19u8 = vmax_u8(d19u8, d20u8);
  d24u8 = vabd_u8(d6u8, d7u8);
  d27u8 = vabd_u8(d3u8, d6u8);
  d28u8 = vabd_u8(d18u8, d7u8);
  d19u8 = vmax_u8(d19u8, d23u8);
  d23u8 = vabd_u8(d5u8, d16u8);
  d24u8 = vqadd_u8(d24u8, d24u8);

  d19u8 = vcge_u8(dlimit, d19u8);

  d25u8 = vmax_u8(d25u8, d26u8);
  d26u8 = vmax_u8(d27u8, d28u8);
  d23u8 = vshr_n_u8(d23u8, 1);
  d25u8 = vmax_u8(d25u8, d26u8);
  d24u8 = vqadd_u8(d24u8, d23u8);
  d20u8 = vmax_u8(d20u8, d25u8);

  d23u8 = vdup_n_u8(1);
  d24u8 = vcge_u8(dblimit, d24u8);
  d21u8 = vcgt_u8(d21u8, dthresh);
  d20u8 = vcge_u8(d23u8, d20u8);
  d19u8 = vand_u8(d19u8, d24u8);
  d23u8 = vcgt_u8(d22u8, dthresh);
  d20u8 = vand_u8(d20u8, d19u8);
  d22u8 = vdup_n_u8(0x80);
  d23u8 = vorr_u8(d21u8, d23u8);

  q10u16 = vcombine_u16(vreinterpret_u16_u8(d20u8), vreinterpret_u16_u8(d21u8));
  d30u8 = vshrn_n_u16(q10u16, 4);
  flat = vget_lane_u32(vreinterpret_u32_u8(d30u8), 0);

  if (flat == 0xffffffff) {  // Check for all 1's, power_branch_only
    d27u8 = vdup_n_u8(3);
    d21u8 = vdup_n_u8(2);
    q14u16 = vaddl_u8(d6u8, d7u8);
    q14u16 = vmlal_u8(q14u16, d3u8, d27u8);
    q14u16 = vmlal_u8(q14u16, d4u8, d21u8);
    q14u16 = vaddw_u8(q14u16, d5u8);
    *d0ru8 = vqrshrn_n_u16(q14u16, 3);

    q14u16 = vsubw_u8(q14u16, d3u8);
    q14u16 = vsubw_u8(q14u16, d4u8);
    q14u16 = vaddw_u8(q14u16, d5u8);
    q14u16 = vaddw_u8(q14u16, d16u8);
    *d1ru8 = vqrshrn_n_u16(q14u16, 3);

    q14u16 = vsubw_u8(q14u16, d3u8);
    q14u16 = vsubw_u8(q14u16, d5u8);
    q14u16 = vaddw_u8(q14u16, d6u8);
    q14u16 = vaddw_u8(q14u16, d17u8);
    *d2ru8 = vqrshrn_n_u16(q14u16, 3);

    q14u16 = vsubw_u8(q14u16, d3u8);
    q14u16 = vsubw_u8(q14u16, d6u8);
    q14u16 = vaddw_u8(q14u16, d7u8);
    q14u16 = vaddw_u8(q14u16, d18u8);
    *d3ru8 = vqrshrn_n_u16(q14u16, 3);

    q14u16 = vsubw_u8(q14u16, d4u8);
    q14u16 = vsubw_u8(q14u16, d7u8);
    q14u16 = vaddw_u8(q14u16, d16u8);
    q14u16 = vaddw_u8(q14u16, d18u8);
    *d4ru8 = vqrshrn_n_u16(q14u16, 3);

    q14u16 = vsubw_u8(q14u16, d5u8);
    q14u16 = vsubw_u8(q14u16, d16u8);
    q14u16 = vaddw_u8(q14u16, d17u8);
    q14u16 = vaddw_u8(q14u16, d18u8);
    *d5ru8 = vqrshrn_n_u16(q14u16, 3);
  } else {
    d21u8 = veor_u8(d7u8, d22u8);
    d24u8 = veor_u8(d6u8, d22u8);
    d25u8 = veor_u8(d5u8, d22u8);
    d26u8 = veor_u8(d16u8, d22u8);

    d27u8 = vdup_n_u8(3);

    d28s8 = vsub_s8(vreinterpret_s8_u8(d21u8), vreinterpret_s8_u8(d24u8));
    d29s8 = vqsub_s8(vreinterpret_s8_u8(d25u8), vreinterpret_s8_u8(d26u8));
    q15s16 = vmull_s8(d28s8, vreinterpret_s8_u8(d27u8));
    d29s8 = vand_s8(d29s8, vreinterpret_s8_u8(d23u8));
    q15s16 = vaddw_s8(q15s16, d29s8);

    d29u8 = vdup_n_u8(4);

    d28s8 = vqmovn_s16(q15s16);
    d28s8 = vand_s8(d28s8, vreinterpret_s8_u8(d19u8));

    d30s8 = vqadd_s8(d28s8, vreinterpret_s8_u8(d27u8));
    d29s8 = vqadd_s8(d28s8, vreinterpret_s8_u8(d29u8));
    d30s8 = vshr_n_s8(d30s8, 3);
    d29s8 = vshr_n_s8(d29s8, 3);

    d24s8 = vqadd_s8(vreinterpret_s8_u8(d24u8), d30s8);
    d21s8 = vqsub_s8(vreinterpret_s8_u8(d21u8), d29s8);

    d29s8 = vrshr_n_s8(d29s8, 1);
    d29s8 = vbic_s8(d29s8, vreinterpret_s8_u8(d23u8));

    d25s8 = vqadd_s8(vreinterpret_s8_u8(d25u8), d29s8);
    d26s8 = vqsub_s8(vreinterpret_s8_u8(d26u8), d29s8);

    if (flat == 0) {  // filter_branch_only
      *d0ru8 = d4u8;
      *d1ru8 = veor_u8(vreinterpret_u8_s8(d25s8), d22u8);
      *d2ru8 = veor_u8(vreinterpret_u8_s8(d24s8), d22u8);
      *d3ru8 = veor_u8(vreinterpret_u8_s8(d21s8), d22u8);
      *d4ru8 = veor_u8(vreinterpret_u8_s8(d26s8), d22u8);
      *d5ru8 = d17u8;
      return;
    }

    d21u8 = veor_u8(vreinterpret_u8_s8(d21s8), d22u8);
    d24u8 = veor_u8(vreinterpret_u8_s8(d24s8), d22u8);
    d25u8 = veor_u8(vreinterpret_u8_s8(d25s8), d22u8);
    d26u8 = veor_u8(vreinterpret_u8_s8(d26s8), d22u8);

    d23u8 = vdup_n_u8(2);
    q14u16 = vaddl_u8(d6u8, d7u8);
    q14u16 = vmlal_u8(q14u16, d3u8, d27u8);
    q14u16 = vmlal_u8(q14u16, d4u8, d23u8);

    d0u8 = vbsl_u8(d20u8, dblimit, d4u8);

    q14u16 = vaddw_u8(q14u16, d5u8);

    d1u8 = vbsl_u8(d20u8, dlimit, d25u8);

    d30u8 = vqrshrn_n_u16(q14u16, 3);

    q14u16 = vsubw_u8(q14u16, d3u8);
    q14u16 = vsubw_u8(q14u16, d4u8);
    q14u16 = vaddw_u8(q14u16, d5u8);
    q14u16 = vaddw_u8(q14u16, d16u8);

    d2u8 = vbsl_u8(d20u8, dthresh, d24u8);

    d31u8 = vqrshrn_n_u16(q14u16, 3);

    q14u16 = vsubw_u8(q14u16, d3u8);
    q14u16 = vsubw_u8(q14u16, d5u8);
    q14u16 = vaddw_u8(q14u16, d6u8);
    q14u16 = vaddw_u8(q14u16, d17u8);

    *d0ru8 = vbsl_u8(d20u8, d30u8, d0u8);

    d23u8 = vqrshrn_n_u16(q14u16, 3);

    q14u16 = vsubw_u8(q14u16, d3u8);
    q14u16 = vsubw_u8(q14u16, d6u8);
    q14u16 = vaddw_u8(q14u16, d7u8);

    *d1ru8 = vbsl_u8(d20u8, d31u8, d1u8);

    q14u16 = vaddw_u8(q14u16, d18u8);

    *d2ru8 = vbsl_u8(d20u8, d23u8, d2u8);

    d22u8 = vqrshrn_n_u16(q14u16, 3);

    q14u16 = vsubw_u8(q14u16, d4u8);
    q14u16 = vsubw_u8(q14u16, d7u8);
    q14u16 = vaddw_u8(q14u16, d16u8);

    d3u8 = vbsl_u8(d20u8, d3u8, d21u8);

    q14u16 = vaddw_u8(q14u16, d18u8);

    d4u8 = vbsl_u8(d20u8, d4u8, d26u8);

    d6u8 = vqrshrn_n_u16(q14u16, 3);

    q14u16 = vsubw_u8(q14u16, d5u8);
    q14u16 = vsubw_u8(q14u16, d16u8);
    q14u16 = vaddw_u8(q14u16, d17u8);
    q14u16 = vaddw_u8(q14u16, d18u8);

    d5u8 = vbsl_u8(d20u8, d5u8, d17u8);

    d7u8 = vqrshrn_n_u16(q14u16, 3);

    *d3ru8 = vbsl_u8(d20u8, d22u8, d3u8);
    *d4ru8 = vbsl_u8(d20u8, d6u8, d4u8);
    *d5ru8 = vbsl_u8(d20u8, d7u8, d5u8);
  }
  return;
}
void aom_idct4x4_16_add_neon(int16_t *input, uint8_t *dest, int dest_stride) {
  uint8x8_t d26u8, d27u8;
  uint32x2_t d26u32, d27u32;
  uint16x8_t q8u16, q9u16;
  int16x4_t d16s16, d17s16, d18s16, d19s16, d20s16, d21s16;
  int16x4_t d22s16, d23s16, d24s16, d26s16, d27s16, d28s16, d29s16;
  int16x8_t q8s16, q9s16, q13s16, q14s16;
  int32x4_t q1s32, q13s32, q14s32, q15s32;
  int16x4x2_t d0x2s16, d1x2s16;
  int32x4x2_t q0x2s32;
  uint8_t *d;

  d26u32 = d27u32 = vdup_n_u32(0);

  q8s16 = vld1q_s16(input);
  q9s16 = vld1q_s16(input + 8);

  d16s16 = vget_low_s16(q8s16);
  d17s16 = vget_high_s16(q8s16);
  d18s16 = vget_low_s16(q9s16);
  d19s16 = vget_high_s16(q9s16);

  d0x2s16 = vtrn_s16(d16s16, d17s16);
  d1x2s16 = vtrn_s16(d18s16, d19s16);
  q8s16 = vcombine_s16(d0x2s16.val[0], d0x2s16.val[1]);
  q9s16 = vcombine_s16(d1x2s16.val[0], d1x2s16.val[1]);

  d20s16 = vdup_n_s16((int16_t)cospi_8_64);
  d21s16 = vdup_n_s16((int16_t)cospi_16_64);

  q0x2s32 =
      vtrnq_s32(vreinterpretq_s32_s16(q8s16), vreinterpretq_s32_s16(q9s16));
  d16s16 = vget_low_s16(vreinterpretq_s16_s32(q0x2s32.val[0]));
  d17s16 = vget_high_s16(vreinterpretq_s16_s32(q0x2s32.val[0]));
  d18s16 = vget_low_s16(vreinterpretq_s16_s32(q0x2s32.val[1]));
  d19s16 = vget_high_s16(vreinterpretq_s16_s32(q0x2s32.val[1]));

  d22s16 = vdup_n_s16((int16_t)cospi_24_64);

  // stage 1
  d23s16 = vadd_s16(d16s16, d18s16);
  d24s16 = vsub_s16(d16s16, d18s16);

  q15s32 = vmull_s16(d17s16, d22s16);
  q1s32 = vmull_s16(d17s16, d20s16);
  q13s32 = vmull_s16(d23s16, d21s16);
  q14s32 = vmull_s16(d24s16, d21s16);
  q15s32 = vmlsl_s16(q15s32, d19s16, d20s16);
  q1s32 = vmlal_s16(q1s32, d19s16, d22s16);

  d26s16 = vqrshrn_n_s32(q13s32, 14);
  d27s16 = vqrshrn_n_s32(q14s32, 14);
  d29s16 = vqrshrn_n_s32(q15s32, 14);
  d28s16 = vqrshrn_n_s32(q1s32, 14);
  q13s16 = vcombine_s16(d26s16, d27s16);
  q14s16 = vcombine_s16(d28s16, d29s16);

  // stage 2
  q8s16 = vaddq_s16(q13s16, q14s16);
  q9s16 = vsubq_s16(q13s16, q14s16);

  d16s16 = vget_low_s16(q8s16);
  d17s16 = vget_high_s16(q8s16);
  d18s16 = vget_high_s16(q9s16);  // vswp d18 d19
  d19s16 = vget_low_s16(q9s16);

  d0x2s16 = vtrn_s16(d16s16, d17s16);
  d1x2s16 = vtrn_s16(d18s16, d19s16);
  q8s16 = vcombine_s16(d0x2s16.val[0], d0x2s16.val[1]);
  q9s16 = vcombine_s16(d1x2s16.val[0], d1x2s16.val[1]);

  q0x2s32 =
      vtrnq_s32(vreinterpretq_s32_s16(q8s16), vreinterpretq_s32_s16(q9s16));
  d16s16 = vget_low_s16(vreinterpretq_s16_s32(q0x2s32.val[0]));
  d17s16 = vget_high_s16(vreinterpretq_s16_s32(q0x2s32.val[0]));
  d18s16 = vget_low_s16(vreinterpretq_s16_s32(q0x2s32.val[1]));
  d19s16 = vget_high_s16(vreinterpretq_s16_s32(q0x2s32.val[1]));

  // do the transform on columns
  // stage 1
  d23s16 = vadd_s16(d16s16, d18s16);
  d24s16 = vsub_s16(d16s16, d18s16);

  q15s32 = vmull_s16(d17s16, d22s16);
  q1s32 = vmull_s16(d17s16, d20s16);
  q13s32 = vmull_s16(d23s16, d21s16);
  q14s32 = vmull_s16(d24s16, d21s16);
  q15s32 = vmlsl_s16(q15s32, d19s16, d20s16);
  q1s32 = vmlal_s16(q1s32, d19s16, d22s16);

  d26s16 = vqrshrn_n_s32(q13s32, 14);
  d27s16 = vqrshrn_n_s32(q14s32, 14);
  d29s16 = vqrshrn_n_s32(q15s32, 14);
  d28s16 = vqrshrn_n_s32(q1s32, 14);
  q13s16 = vcombine_s16(d26s16, d27s16);
  q14s16 = vcombine_s16(d28s16, d29s16);

  // stage 2
  q8s16 = vaddq_s16(q13s16, q14s16);
  q9s16 = vsubq_s16(q13s16, q14s16);

  q8s16 = vrshrq_n_s16(q8s16, 4);
  q9s16 = vrshrq_n_s16(q9s16, 4);

  d = dest;
  d26u32 = vld1_lane_u32((const uint32_t *)d, d26u32, 0);
  d += dest_stride;
  d26u32 = vld1_lane_u32((const uint32_t *)d, d26u32, 1);
  d += dest_stride;
  d27u32 = vld1_lane_u32((const uint32_t *)d, d27u32, 1);
  d += dest_stride;
  d27u32 = vld1_lane_u32((const uint32_t *)d, d27u32, 0);

  q8u16 = vaddw_u8(vreinterpretq_u16_s16(q8s16), vreinterpret_u8_u32(d26u32));
  q9u16 = vaddw_u8(vreinterpretq_u16_s16(q9s16), vreinterpret_u8_u32(d27u32));

  d26u8 = vqmovun_s16(vreinterpretq_s16_u16(q8u16));
  d27u8 = vqmovun_s16(vreinterpretq_s16_u16(q9u16));

  d = dest;
  vst1_lane_u32((uint32_t *)d, vreinterpret_u32_u8(d26u8), 0);
  d += dest_stride;
  vst1_lane_u32((uint32_t *)d, vreinterpret_u32_u8(d26u8), 1);
  d += dest_stride;
  vst1_lane_u32((uint32_t *)d, vreinterpret_u32_u8(d27u8), 1);
  d += dest_stride;
  vst1_lane_u32((uint32_t *)d, vreinterpret_u32_u8(d27u8), 0);
  return;
}