int64_t av1_block_error_fp_neon(const int16_t *coeff, const int16_t *dqcoeff, int block_size) { int64x2_t error = vdupq_n_s64(0); assert(block_size >= 8); assert((block_size % 8) == 0); do { const int16x8_t c = vld1q_s16(coeff); const int16x8_t d = vld1q_s16(dqcoeff); const int16x8_t diff = vsubq_s16(c, d); const int16x4_t diff_lo = vget_low_s16(diff); const int16x4_t diff_hi = vget_high_s16(diff); // diff is 15-bits, the squares 30, so we can store 2 in 31-bits before // accumulating them in 64-bits. const int32x4_t err0 = vmull_s16(diff_lo, diff_lo); const int32x4_t err1 = vmlal_s16(err0, diff_hi, diff_hi); const int64x2_t err2 = vaddl_s32(vget_low_s32(err1), vget_high_s32(err1)); error = vaddq_s64(error, err2); coeff += 8; dqcoeff += 8; block_size -= 8; } while (block_size != 0); return vgetq_lane_s64(error, 0) + vgetq_lane_s64(error, 1); }
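/* A scalar reference for av1_block_error_fp_neon above -- a minimal sketch for
 * clarity, not taken from the original source: the NEON loop vectorizes this
 * sum of squared coefficient differences eight lanes at a time. */
static int64_t block_error_fp_c(const int16_t *coeff, const int16_t *dqcoeff, int block_size) {
  int64_t error = 0;
  int i;
  for (i = 0; i < block_size; ++i) {
    const int diff = coeff[i] - dqcoeff[i]; /* 15-bit inputs, 30-bit square */
    error += (int64_t)diff * diff;
  }
  return error;
}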
/* s16x8 mv mul */ void mw_neon_mv_mul_s16x8(short * A, int Row, int T, short * B, short * C) { int i = 0; int k = 0; int16x8_t neon_b, neon_c; int16x8_t neon_a0, neon_a1, neon_a2, neon_a3, neon_a4, neon_a5, neon_a6, neon_a7; int16x8_t neon_b0, neon_b1, neon_b2, neon_b3, neon_b4, neon_b5, neon_b6, neon_b7; for (i = 0; i < Row; i+=8) { neon_c = vmovq_n_s16(0); for (k = 0; k < T; k+=8) { int j = k * T + i; neon_a0 = vld1q_s16(A + j); j+=Row; neon_a1 = vld1q_s16(A + j); j+=Row; neon_a2 = vld1q_s16(A + j); j+=Row; neon_a3 = vld1q_s16(A + j); j+=Row; neon_a4 = vld1q_s16(A + j); j+=Row; neon_a5 = vld1q_s16(A + j); j+=Row; neon_a6 = vld1q_s16(A + j); j+=Row; neon_a7 = vld1q_s16(A + j); neon_b = vld1q_s16(B + k); neon_b0 = vdupq_n_s16(vgetq_lane_s16(neon_b, 0)); neon_b1 = vdupq_n_s16(vgetq_lane_s16(neon_b, 1)); neon_b2 = vdupq_n_s16(vgetq_lane_s16(neon_b, 2)); neon_b3 = vdupq_n_s16(vgetq_lane_s16(neon_b, 3)); neon_b4 = vdupq_n_s16(vgetq_lane_s16(neon_b, 4)); neon_b5 = vdupq_n_s16(vgetq_lane_s16(neon_b, 5)); neon_b6 = vdupq_n_s16(vgetq_lane_s16(neon_b, 6)); neon_b7 = vdupq_n_s16(vgetq_lane_s16(neon_b, 7)); neon_c = vaddq_s16(vmulq_s16(neon_a0, neon_b0), neon_c); neon_c = vaddq_s16(vmulq_s16(neon_a1, neon_b1), neon_c); neon_c = vaddq_s16(vmulq_s16(neon_a2, neon_b2), neon_c); neon_c = vaddq_s16(vmulq_s16(neon_a3, neon_b3), neon_c); neon_c = vaddq_s16(vmulq_s16(neon_a4, neon_b4), neon_c); neon_c = vaddq_s16(vmulq_s16(neon_a5, neon_b5), neon_c); neon_c = vaddq_s16(vmulq_s16(neon_a6, neon_b6), neon_c); neon_c = vaddq_s16(vmulq_s16(neon_a7, neon_b7), neon_c); } vst1q_s16(C + i, neon_c); } }
void rfx_decode_YCbCr_to_RGB_NEON(sint16 * y_r_buffer, sint16 * cb_g_buffer, sint16 * cr_b_buffer) { int16x8_t zero = vdupq_n_s16(0); int16x8_t max = vdupq_n_s16(255); int16x8_t y_add = vdupq_n_s16(128); int16x8_t* y_r_buf = (int16x8_t*)y_r_buffer; int16x8_t* cb_g_buf = (int16x8_t*)cb_g_buffer; int16x8_t* cr_b_buf = (int16x8_t*)cr_b_buffer; int i; for (i = 0; i < 4096 / 8; i++) { int16x8_t y = vld1q_s16((sint16*)&y_r_buf[i]); y = vaddq_s16(y, y_add); int16x8_t cr = vld1q_s16((sint16*)&cr_b_buf[i]); // r = between((y + cr + (cr >> 2) + (cr >> 3) + (cr >> 5)), 0, 255); int16x8_t r = vaddq_s16(y, cr); r = vaddq_s16(r, vshrq_n_s16(cr, 2)); r = vaddq_s16(r, vshrq_n_s16(cr, 3)); r = vaddq_s16(r, vshrq_n_s16(cr, 5)); r = vminq_s16(vmaxq_s16(r, zero), max); vst1q_s16((sint16*)&y_r_buf[i], r); // cb = cb_g_buf[i]; int16x8_t cb = vld1q_s16((sint16*)&cb_g_buf[i]); // g = between(y - (cb >> 2) - (cb >> 4) - (cb >> 5) - (cr >> 1) - (cr >> 3) - (cr >> 4) - (cr >> 5), 0, 255); int16x8_t g = vsubq_s16(y, vshrq_n_s16(cb, 2)); g = vsubq_s16(g, vshrq_n_s16(cb, 4)); g = vsubq_s16(g, vshrq_n_s16(cb, 5)); g = vsubq_s16(g, vshrq_n_s16(cr, 1)); g = vsubq_s16(g, vshrq_n_s16(cr, 3)); g = vsubq_s16(g, vshrq_n_s16(cr, 4)); g = vsubq_s16(g, vshrq_n_s16(cr, 5)); g = vminq_s16(vmaxq_s16(g, zero), max); vst1q_s16((sint16*)&cb_g_buf[i], g); // b = between((y + cb + (cb >> 1) + (cb >> 2) + (cb >> 6)), 0, 255); int16x8_t b = vaddq_s16(y, cb); b = vaddq_s16(b, vshrq_n_s16(cb, 1)); b = vaddq_s16(b, vshrq_n_s16(cb, 2)); b = vaddq_s16(b, vshrq_n_s16(cb, 6)); b = vminq_s16(vmaxq_s16(b, zero), max); vst1q_s16((sint16*)&cr_b_buf[i], b); } }
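/* Scalar sketch of the per-pixel fixed-point conversion above (assumed
 * equivalent; int16_t stands in for the snippet's sint16 typedef). The shift
 * chains approximate the YCbCr-to-RGB coefficients, e.g. 1 + 1/4 + 1/8 + 1/32
 * = 1.40625 for the Cr contribution to R. */
static int16_t clamp255(int v) { return (int16_t)(v < 0 ? 0 : (v > 255 ? 255 : v)); }
static void ycbcr_to_rgb_px_c(int16_t y, int16_t cb, int16_t cr, int16_t *r, int16_t *g, int16_t *b) {
  y = (int16_t)(y + 128);
  *r = clamp255(y + cr + (cr >> 2) + (cr >> 3) + (cr >> 5));
  *g = clamp255(y - (cb >> 2) - (cb >> 4) - (cb >> 5) - (cr >> 1) - (cr >> 3) - (cr >> 4) - (cr >> 5));
  *b = clamp255(y + cb + (cb >> 1) + (cb >> 2) + (cb >> 6));
}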
void vpx_hadamard_8x8_neon(const int16_t *src_diff, int src_stride, int16_t *coeff) { int16x8_t a0 = vld1q_s16(src_diff); int16x8_t a1 = vld1q_s16(src_diff + src_stride); int16x8_t a2 = vld1q_s16(src_diff + 2 * src_stride); int16x8_t a3 = vld1q_s16(src_diff + 3 * src_stride); int16x8_t a4 = vld1q_s16(src_diff + 4 * src_stride); int16x8_t a5 = vld1q_s16(src_diff + 5 * src_stride); int16x8_t a6 = vld1q_s16(src_diff + 6 * src_stride); int16x8_t a7 = vld1q_s16(src_diff + 7 * src_stride); hadamard8x8_one_pass(&a0, &a1, &a2, &a3, &a4, &a5, &a6, &a7); transpose8x8(&a0, &a1, &a2, &a3, &a4, &a5, &a6, &a7); hadamard8x8_one_pass(&a0, &a1, &a2, &a3, &a4, &a5, &a6, &a7); // Skip the second transpose because it is not required. vst1q_s16(coeff + 0, a0); vst1q_s16(coeff + 8, a1); vst1q_s16(coeff + 16, a2); vst1q_s16(coeff + 24, a3); vst1q_s16(coeff + 32, a4); vst1q_s16(coeff + 40, a5); vst1q_s16(coeff + 48, a6); vst1q_s16(coeff + 56, a7); }
static inline void DotProductWithScaleNeon(int32_t* cross_correlation, const int16_t* vector1, const int16_t* vector2, size_t length, int scaling) { size_t i = 0; size_t len1 = length >> 3; size_t len2 = length & 7; int64x2_t sum0 = vdupq_n_s64(0); int64x2_t sum1 = vdupq_n_s64(0); for (i = len1; i > 0; i -= 1) { int16x8_t seq1_16x8 = vld1q_s16(vector1); int16x8_t seq2_16x8 = vld1q_s16(vector2); #if defined(WEBRTC_ARCH_ARM64) int32x4_t tmp0 = vmull_s16(vget_low_s16(seq1_16x8), vget_low_s16(seq2_16x8)); int32x4_t tmp1 = vmull_high_s16(seq1_16x8, seq2_16x8); #else int32x4_t tmp0 = vmull_s16(vget_low_s16(seq1_16x8), vget_low_s16(seq2_16x8)); int32x4_t tmp1 = vmull_s16(vget_high_s16(seq1_16x8), vget_high_s16(seq2_16x8)); #endif sum0 = vpadalq_s32(sum0, tmp0); sum1 = vpadalq_s32(sum1, tmp1); vector1 += 8; vector2 += 8; } // Calculate the rest of the samples. int64_t sum_res = 0; for (i = len2; i > 0; i -= 1) { sum_res += WEBRTC_SPL_MUL_16_16(*vector1, *vector2); vector1++; vector2++; } sum0 = vaddq_s64(sum0, sum1); #if defined(WEBRTC_ARCH_ARM64) int64_t sum2 = vaddvq_s64(sum0); *cross_correlation = (int32_t)((sum2 + sum_res) >> scaling); #else int64x1_t shift = vdup_n_s64(-scaling); int64x1_t sum2 = vadd_s64(vget_low_s64(sum0), vget_high_s64(sum0)); sum2 = vadd_s64(sum2, vdup_n_s64(sum_res)); sum2 = vshl_s64(sum2, shift); vst1_lane_s32(cross_correlation, vreinterpret_s32_s64(sum2), 0); #endif }
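/* Scalar sketch of DotProductWithScaleNeon (an assumption consistent with the
 * tail loop above, not WebRTC's reference implementation): a 64-bit dot
 * product, arithmetically shifted right by 'scaling' before narrowing. */
static int32_t DotProductWithScaleC(const int16_t *v1, const int16_t *v2, size_t length, int scaling) {
  int64_t sum = 0;
  size_t i;
  for (i = 0; i < length; ++i) sum += (int64_t)v1[i] * v2[i];
  return (int32_t)(sum >> scaling);
}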
static inline void ff_dct_unquantize_h263_neon(int qscale, int qadd, int nCoeffs, int16_t *block) { int16x8_t q0s16, q2s16, q3s16, q8s16, q10s16, q11s16, q13s16; int16x8_t q14s16, q15s16, qzs16; int16x4_t d0s16, d2s16, d3s16, dzs16; uint16x8_t q1u16, q9u16; uint16x4_t d1u16; dzs16 = vdup_n_s16(0); qzs16 = vdupq_n_s16(0); q15s16 = vdupq_n_s16(qscale << 1); q14s16 = vdupq_n_s16(qadd); q13s16 = vnegq_s16(q14s16); if (nCoeffs > 4) { for (; nCoeffs > 8; nCoeffs -= 16, block += 16) { q0s16 = vld1q_s16(block); q3s16 = vreinterpretq_s16_u16(vcltq_s16(q0s16, qzs16)); q8s16 = vld1q_s16(block + 8); q1u16 = vceqq_s16(q0s16, qzs16); q2s16 = vmulq_s16(q0s16, q15s16); q11s16 = vreinterpretq_s16_u16(vcltq_s16(q8s16, qzs16)); q10s16 = vmulq_s16(q8s16, q15s16); q3s16 = vbslq_s16(vreinterpretq_u16_s16(q3s16), q13s16, q14s16); q11s16 = vbslq_s16(vreinterpretq_u16_s16(q11s16), q13s16, q14s16); q2s16 = vaddq_s16(q2s16, q3s16); q9u16 = vceqq_s16(q8s16, qzs16); q10s16 = vaddq_s16(q10s16, q11s16); q0s16 = vbslq_s16(q1u16, q0s16, q2s16); q8s16 = vbslq_s16(q9u16, q8s16, q10s16); vst1q_s16(block, q0s16); vst1q_s16(block + 8, q8s16); } } if (nCoeffs <= 0) return; d0s16 = vld1_s16(block); d3s16 = vreinterpret_s16_u16(vclt_s16(d0s16, dzs16)); d1u16 = vceq_s16(d0s16, dzs16); d2s16 = vmul_s16(d0s16, vget_high_s16(q15s16)); d3s16 = vbsl_s16(vreinterpret_u16_s16(d3s16), vget_high_s16(q13s16), vget_high_s16(q14s16)); d2s16 = vadd_s16(d2s16, d3s16); d0s16 = vbsl_s16(d1u16, d0s16, d2s16); vst1_s16(block, d0s16); }
static INLINE void scaledconvolve_vert_w8( const uint8_t *src, const ptrdiff_t src_stride, uint8_t *dst, const ptrdiff_t dst_stride, const InterpKernel *const y_filters, const int y0_q4, const int y_step_q4, const int w, const int h) { int y; int y_q4 = y0_q4; src -= src_stride * (SUBPEL_TAPS / 2 - 1); y = h; do { const unsigned char *src_y = &src[(y_q4 >> SUBPEL_BITS) * src_stride]; if (y_q4 & SUBPEL_MASK) { const int16x8_t filters = vld1q_s16(y_filters[y_q4 & SUBPEL_MASK]); uint8x8_t s[8], d; load_u8_8x8(src_y, src_stride, &s[0], &s[1], &s[2], &s[3], &s[4], &s[5], &s[6], &s[7]); d = scale_filter_8(s, filters); vst1_u8(dst, d); } else { memcpy(dst, &src_y[3 * src_stride], w); } dst += dst_stride; y_q4 += y_step_q4; } while (--y); }
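/* For reference, a scalar sketch of the 8-tap sub-pixel filter that
 * scale_filter_8() applies per output pixel. The rounding follows the usual
 * libvpx convention (FILTER_BITS == 7); treat the helper name and details as
 * assumptions rather than the exact library code. */
static uint8_t convolve8_px_c(const uint8_t *src, int stride, const int16_t *filter) {
  int k, sum = 0;
  for (k = 0; k < 8; ++k) sum += filter[k] * src[k * stride];
  sum = (sum + 64) >> 7; /* round, then shift by FILTER_BITS */
  return (uint8_t)(sum < 0 ? 0 : (sum > 255 ? 255 : sum));
}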
static void SharpYUVUpdateRGB_NEON(const int16_t* ref, const int16_t* src, int16_t* dst, int len) { int i; for (i = 0; i + 8 <= len; i += 8) { const int16x8_t A = vld1q_s16(ref + i); const int16x8_t B = vld1q_s16(src + i); const int16x8_t C = vld1q_s16(dst + i); const int16x8_t D = vsubq_s16(A, B); // diff_uv const int16x8_t E = vaddq_s16(C, D); // new_uv vst1q_s16(dst + i, E); } for (; i < len; ++i) { const int diff_uv = ref[i] - src[i]; dst[i] += diff_uv; } }
void WebRtcAecm_StoreAdaptiveChannelNeon(AecmCore* aecm, const uint16_t* far_spectrum, int32_t* echo_est) { assert((uintptr_t)echo_est % 32 == 0); assert((uintptr_t)(aecm->channelStored) % 16 == 0); assert((uintptr_t)(aecm->channelAdapt16) % 16 == 0); // This is the C equivalent of the optimized code below. // During startup we store the channel every block. // memcpy(aecm->channelStored, // aecm->channelAdapt16, // sizeof(int16_t) * PART_LEN1); // Recalculate echo estimate // for (i = 0; i < PART_LEN; i += 4) { // echo_est[i] = WEBRTC_SPL_MUL_16_U16(aecm->channelStored[i], // far_spectrum[i]); // echo_est[i + 1] = WEBRTC_SPL_MUL_16_U16(aecm->channelStored[i + 1], // far_spectrum[i + 1]); // echo_est[i + 2] = WEBRTC_SPL_MUL_16_U16(aecm->channelStored[i + 2], // far_spectrum[i + 2]); // echo_est[i + 3] = WEBRTC_SPL_MUL_16_U16(aecm->channelStored[i + 3], // far_spectrum[i + 3]); // } // echo_est[i] = WEBRTC_SPL_MUL_16_U16(aecm->channelStored[i], // far_spectrum[i]); const uint16_t* far_spectrum_p = far_spectrum; int16_t* start_adapt_p = aecm->channelAdapt16; int16_t* start_stored_p = aecm->channelStored; const int16_t* end_stored_p = aecm->channelStored + PART_LEN; int32_t* echo_est_p = echo_est; uint16x8_t far_spectrum_v; int16x8_t adapt_v; uint32x4_t echo_est_v_low, echo_est_v_high; while (start_stored_p < end_stored_p) { far_spectrum_v = vld1q_u16(far_spectrum_p); adapt_v = vld1q_s16(start_adapt_p); vst1q_s16(start_stored_p, adapt_v); echo_est_v_low = vmull_u16(vget_low_u16(far_spectrum_v), vget_low_u16(vreinterpretq_u16_s16(adapt_v))); echo_est_v_high = vmull_u16(vget_high_u16(far_spectrum_v), vget_high_u16(vreinterpretq_u16_s16(adapt_v))); vst1q_s32(echo_est_p, vreinterpretq_s32_u32(echo_est_v_low)); vst1q_s32(echo_est_p + 4, vreinterpretq_s32_u32(echo_est_v_high)); far_spectrum_p += 8; start_adapt_p += 8; start_stored_p += 8; echo_est_p += 8; } aecm->channelStored[PART_LEN] = aecm->channelAdapt16[PART_LEN]; echo_est[PART_LEN] = WEBRTC_SPL_MUL_16_U16(aecm->channelStored[PART_LEN], far_spectrum[PART_LEN]); }
static INLINE void scaledconvolve_horiz_w8( const uint8_t *src, const ptrdiff_t src_stride, uint8_t *dst, const ptrdiff_t dst_stride, const InterpKernel *const x_filters, const int x0_q4, const int x_step_q4, const int w, const int h) { DECLARE_ALIGNED(16, uint8_t, temp[8 * 8]); int x, y, z; src -= SUBPEL_TAPS / 2 - 1; // This function processes 8x8 areas. The intermediate height is not always // a multiple of 8, so force it to be a multiple of 8 here. y = (h + 7) & ~7; do { int x_q4 = x0_q4; x = 0; do { uint8x8_t d[8]; // process 8 src_x steps for (z = 0; z < 8; ++z) { const uint8_t *const src_x = &src[x_q4 >> SUBPEL_BITS]; if (x_q4 & SUBPEL_MASK) { const int16x8_t filters = vld1q_s16(x_filters[x_q4 & SUBPEL_MASK]); uint8x8_t s[8]; load_u8_8x8(src_x, src_stride, &s[0], &s[1], &s[2], &s[3], &s[4], &s[5], &s[6], &s[7]); transpose_u8_8x8(&s[0], &s[1], &s[2], &s[3], &s[4], &s[5], &s[6], &s[7]); d[0] = scale_filter_8(s, filters); vst1_u8(&temp[8 * z], d[0]); } else { int i; for (i = 0; i < 8; ++i) { temp[z * 8 + i] = src_x[i * src_stride + 3]; } } x_q4 += x_step_q4; } // transpose the 8x8 filters values back to dst load_u8_8x8(temp, 8, &d[0], &d[1], &d[2], &d[3], &d[4], &d[5], &d[6], &d[7]); transpose_u8_8x8(&d[0], &d[1], &d[2], &d[3], &d[4], &d[5], &d[6], &d[7]); vst1_u8(&dst[x + 0 * dst_stride], d[0]); vst1_u8(&dst[x + 1 * dst_stride], d[1]); vst1_u8(&dst[x + 2 * dst_stride], d[2]); vst1_u8(&dst[x + 3 * dst_stride], d[3]); vst1_u8(&dst[x + 4 * dst_stride], d[4]); vst1_u8(&dst[x + 5 * dst_stride], d[5]); vst1_u8(&dst[x + 6 * dst_stride], d[6]); vst1_u8(&dst[x + 7 * dst_stride], d[7]); x += 8; } while (x < w); src += src_stride * 8; dst += dst_stride * 8; } while (y -= 8); }
static void SharpYUVFilterRow_NEON(const int16_t* A, const int16_t* B, int len, const uint16_t* best_y, uint16_t* out) { int i; const int16x8_t max = vdupq_n_s16(MAX_Y); const int16x8_t zero = vdupq_n_s16(0); for (i = 0; i + 8 <= len; i += 8) { const int16x8_t a0 = vld1q_s16(A + i + 0); const int16x8_t a1 = vld1q_s16(A + i + 1); const int16x8_t b0 = vld1q_s16(B + i + 0); const int16x8_t b1 = vld1q_s16(B + i + 1); const int16x8_t a0b1 = vaddq_s16(a0, b1); const int16x8_t a1b0 = vaddq_s16(a1, b0); const int16x8_t a0a1b0b1 = vaddq_s16(a0b1, a1b0); // A0+A1+B0+B1 const int16x8_t a0b1_2 = vaddq_s16(a0b1, a0b1); // 2*(A0+B1) const int16x8_t a1b0_2 = vaddq_s16(a1b0, a1b0); // 2*(A1+B0) const int16x8_t c0 = vshrq_n_s16(vaddq_s16(a0b1_2, a0a1b0b1), 3); const int16x8_t c1 = vshrq_n_s16(vaddq_s16(a1b0_2, a0a1b0b1), 3); const int16x8_t d0 = vaddq_s16(c1, a0); const int16x8_t d1 = vaddq_s16(c0, a1); const int16x8_t e0 = vrshrq_n_s16(d0, 1); const int16x8_t e1 = vrshrq_n_s16(d1, 1); const int16x8x2_t f = vzipq_s16(e0, e1); const int16x8_t g0 = vreinterpretq_s16_u16(vld1q_u16(best_y + 2 * i + 0)); const int16x8_t g1 = vreinterpretq_s16_u16(vld1q_u16(best_y + 2 * i + 8)); const int16x8_t h0 = vaddq_s16(g0, f.val[0]); const int16x8_t h1 = vaddq_s16(g1, f.val[1]); const int16x8_t i0 = vmaxq_s16(vminq_s16(h0, max), zero); const int16x8_t i1 = vmaxq_s16(vminq_s16(h1, max), zero); vst1q_u16(out + 2 * i + 0, vreinterpretq_u16_s16(i0)); vst1q_u16(out + 2 * i + 8, vreinterpretq_u16_s16(i1)); } for (; i < len; ++i) { const int a0b1 = A[i + 0] + B[i + 1]; const int a1b0 = A[i + 1] + B[i + 0]; const int a0a1b0b1 = a0b1 + a1b0 + 8; const int v0 = (8 * A[i + 0] + 2 * a1b0 + a0a1b0b1) >> 4; const int v1 = (8 * A[i + 1] + 2 * a0b1 + a0a1b0b1) >> 4; out[2 * i + 0] = clip_y(best_y[2 * i + 0] + v0); out[2 * i + 1] = clip_y(best_y[2 * i + 1] + v1); } }
// ref, src = [0, 510] - max diff = 16-bits // bwl = {2, 3, 4}, width = {16, 32, 64} int vp9_vector_var_neon(int16_t const *ref, int16_t const *src, const int bwl) { int width = 4 << bwl; int32x4_t sse = vdupq_n_s32(0); int16x8_t total = vdupq_n_s16(0); assert(width >= 8); assert((width % 8) == 0); do { const int16x8_t r = vld1q_s16(ref); const int16x8_t s = vld1q_s16(src); const int16x8_t diff = vsubq_s16(r, s); // [-510, 510], 10 bits. const int16x4_t diff_lo = vget_low_s16(diff); const int16x4_t diff_hi = vget_high_s16(diff); sse = vmlal_s16(sse, diff_lo, diff_lo); // dynamic range 26 bits. sse = vmlal_s16(sse, diff_hi, diff_hi); total = vaddq_s16(total, diff); // dynamic range 16 bits. ref += 8; src += 8; width -= 8; } while (width != 0); { // Note: 'total''s pairwise addition could be implemented similarly to // horizontal_add_u16x8(), but one less vpaddl with 'total' when paired // with the summation of 'sse' performed better on a Cortex-A15. const int32x4_t t0 = vpaddlq_s16(total); // cascading summation of 'total' const int32x2_t t1 = vadd_s32(vget_low_s32(t0), vget_high_s32(t0)); const int32x2_t t2 = vpadd_s32(t1, t1); const int t = vget_lane_s32(t2, 0); const int64x2_t s0 = vpaddlq_s32(sse); // cascading summation of 'sse'. const int32x2_t s1 = vadd_s32(vreinterpret_s32_s64(vget_low_s64(s0)), vreinterpret_s32_s64(vget_high_s64(s0))); const int s = vget_lane_s32(s1, 0); const int shift_factor = bwl + 2; return s - ((t * t) >> shift_factor); } }
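/* Scalar sketch of vp9_vector_var_neon (assumed from the comments above):
 * variance = sse - sum^2 / width, with the division folded into a shift
 * because width = 4 << bwl is a power of two, so sum^2 / width ==
 * (sum * sum) >> (bwl + 2). */
static int vector_var_c(const int16_t *ref, const int16_t *src, int bwl) {
  const int width = 4 << bwl;
  int i, sum = 0, sse = 0;
  for (i = 0; i < width; ++i) {
    const int diff = ref[i] - src[i]; /* [-510, 510] */
    sum += diff;
    sse += diff * diff;
  }
  return sse - ((sum * sum) >> (bwl + 2));
}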
/* s16x8 add */ void mw_neon_mm_add_s16x8(short * A, int Row, int Col, short * B, short * C) { int16x8_t neon_a, neon_b, neon_c; int size = Row * Col; int i = 0; int k = 0; for (i = 8; i <= size ; i+=8) { k = i - 8; neon_a = vld1q_s16(A + k); neon_b = vld1q_s16(B + k); neon_c = vaddq_s16(neon_a, neon_b); vst1q_s16(C + k, neon_c); } k = i - 8; for (i = 0; i < size % 8; i++) { C[k + i] = A[k + i] + B[k + i]; } }
void aom_hadamard_16x16_neon(const int16_t *src_diff, int src_stride, int16_t *coeff) { int i; /* Rearrange 16x16 to 8x32 and remove stride. * Top left first. */ aom_hadamard_8x8_neon(src_diff + 0 + 0 * src_stride, src_stride, coeff + 0); /* Top right. */ aom_hadamard_8x8_neon(src_diff + 8 + 0 * src_stride, src_stride, coeff + 64); /* Bottom left. */ aom_hadamard_8x8_neon(src_diff + 0 + 8 * src_stride, src_stride, coeff + 128); /* Bottom right. */ aom_hadamard_8x8_neon(src_diff + 8 + 8 * src_stride, src_stride, coeff + 192); for (i = 0; i < 64; i += 8) { const int16x8_t a0 = vld1q_s16(coeff + 0); const int16x8_t a1 = vld1q_s16(coeff + 64); const int16x8_t a2 = vld1q_s16(coeff + 128); const int16x8_t a3 = vld1q_s16(coeff + 192); const int16x8_t b0 = vhaddq_s16(a0, a1); const int16x8_t b1 = vhsubq_s16(a0, a1); const int16x8_t b2 = vhaddq_s16(a2, a3); const int16x8_t b3 = vhsubq_s16(a2, a3); const int16x8_t c0 = vaddq_s16(b0, b2); const int16x8_t c1 = vaddq_s16(b1, b3); const int16x8_t c2 = vsubq_s16(b0, b2); const int16x8_t c3 = vsubq_s16(b1, b3); vst1q_s16(coeff + 0, c0); vst1q_s16(coeff + 64, c1); vst1q_s16(coeff + 128, c2); vst1q_s16(coeff + 192, c3); coeff += 8; } }
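/* Scalar sketch of one lane of the combine stage above (illustration only,
 * not the library reference): vhaddq_s16/vhsubq_s16 halve while adding or
 * subtracting, which keeps the 16x16 result inside 16 bits at the cost of one
 * bit of precision. */
static void hadamard16_combine_lane_c(int16_t a0, int16_t a1, int16_t a2, int16_t a3, int16_t out[4]) {
  const int16_t b0 = (int16_t)((a0 + a1) >> 1); /* vhaddq_s16 */
  const int16_t b1 = (int16_t)((a0 - a1) >> 1); /* vhsubq_s16 */
  const int16_t b2 = (int16_t)((a2 + a3) >> 1);
  const int16_t b3 = (int16_t)((a2 - a3) >> 1);
  out[0] = (int16_t)(b0 + b2);
  out[1] = (int16_t)(b1 + b3);
  out[2] = (int16_t)(b0 - b2);
  out[3] = (int16_t)(b1 - b3);
}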
static INLINE void scaledconvolve_vert_w16( const uint8_t *src, const ptrdiff_t src_stride, uint8_t *dst, const ptrdiff_t dst_stride, const InterpKernel *const y_filters, const int y0_q4, const int y_step_q4, const int w, const int h) { int x, y; int y_q4 = y0_q4; src -= src_stride * (SUBPEL_TAPS / 2 - 1); y = h; do { const unsigned char *src_y = &src[(y_q4 >> SUBPEL_BITS) * src_stride]; if (y_q4 & SUBPEL_MASK) { x = 0; do { const int16x8_t filters = vld1q_s16(y_filters[y_q4 & SUBPEL_MASK]); uint8x16_t ss[8]; uint8x8_t s[8], d[2]; load_u8_16x8(src_y, src_stride, &ss[0], &ss[1], &ss[2], &ss[3], &ss[4], &ss[5], &ss[6], &ss[7]); s[0] = vget_low_u8(ss[0]); s[1] = vget_low_u8(ss[1]); s[2] = vget_low_u8(ss[2]); s[3] = vget_low_u8(ss[3]); s[4] = vget_low_u8(ss[4]); s[5] = vget_low_u8(ss[5]); s[6] = vget_low_u8(ss[6]); s[7] = vget_low_u8(ss[7]); d[0] = scale_filter_8(s, filters); s[0] = vget_high_u8(ss[0]); s[1] = vget_high_u8(ss[1]); s[2] = vget_high_u8(ss[2]); s[3] = vget_high_u8(ss[3]); s[4] = vget_high_u8(ss[4]); s[5] = vget_high_u8(ss[5]); s[6] = vget_high_u8(ss[6]); s[7] = vget_high_u8(ss[7]); d[1] = scale_filter_8(s, filters); vst1q_u8(&dst[x], vcombine_u8(d[0], d[1])); src_y += 16; x += 16; } while (x < w); } else { memcpy(dst, &src_y[3 * src_stride], w); } dst += dst_stride; y_q4 += y_step_q4; } while (--y); }
// coeff: 16 bits, dynamic range [-32640, 32640]. // length: value range {16, 64, 256, 1024}. int aom_satd_neon(const int16_t *coeff, int length) { const int16x4_t zero = vdup_n_s16(0); int32x4_t accum = vdupq_n_s32(0); do { const int16x8_t src0 = vld1q_s16(coeff); const int16x8_t src8 = vld1q_s16(coeff + 8); accum = vabal_s16(accum, vget_low_s16(src0), zero); accum = vabal_s16(accum, vget_high_s16(src0), zero); accum = vabal_s16(accum, vget_low_s16(src8), zero); accum = vabal_s16(accum, vget_high_s16(src8), zero); length -= 16; coeff += 16; } while (length != 0); { // satd: 26 bits, dynamic range [-32640 * 1024, 32640 * 1024] const int64x2_t s0 = vpaddlq_s32(accum); // cascading summation of 'accum'. const int32x2_t s1 = vadd_s32(vreinterpret_s32_s64(vget_low_s64(s0)), vreinterpret_s32_s64(vget_high_s64(s0))); const int satd = vget_lane_s32(s1, 0); return satd; } }
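/* Scalar sketch of aom_satd_neon (an assumption consistent with the dynamic
 * range comment): the sum of absolute coefficient values, which the
 * vabal_s16(accum, x, zero) calls accumulate as |x - 0|. */
static int satd_c(const int16_t *coeff, int length) {
  int i, satd = 0;
  for (i = 0; i < length; ++i) satd += coeff[i] < 0 ? -coeff[i] : coeff[i];
  return satd;
}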
static void rfx_quantization_decode_block_NEON(INT16 * buffer, const int buffer_size, const UINT32 factor) { int16x8_t quantFactors = vdupq_n_s16((int16_t) factor); int16x8_t* buf = (int16x8_t*)buffer; int16x8_t* buf_end = (int16x8_t*)(buffer + buffer_size); do { int16x8_t val = vld1q_s16((INT16*)buf); val = vshlq_s16(val, quantFactors); vst1q_s16((INT16*)buf, val); buf++; } while(buf < buf_end); }
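/* Scalar sketch of the block above (assumed from the vshlq_s16 semantics,
 * which left-shifts for a positive shift count): RemoteFX dequantization is a
 * plain left shift of every coefficient by 'factor'. */
static void rfx_quantization_decode_block_c(INT16 *buffer, int buffer_size, UINT32 factor) {
  int i;
  for (i = 0; i < buffer_size; ++i) buffer[i] = (INT16)(buffer[i] << factor);
}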
int test_vld1q_vst1q () { int16x8_t a; int i = 0; int16_t c[8] = { 0, 1, 2, 3, 4, 5, 6, 7 }; int16_t d[8]; a = vld1q_s16 (c); asm volatile ("":::"memory"); vst1q_s16 (d, a); asm volatile ("":::"memory"); for (; i < 8; i++) if (c[i] != d[i]) return 1; return 0; }
int test_vreinterpretq_f64_s16 () { int16x8_t a; float64x2_t b; int16_t c[8] = { 0x2D18, 0x5444, 0x21FB, 0x4009, 0x5769, 0x8B14, 0xBF0A, 0x4005 }; float64_t d[2] = { PI_F64, E_F64 }; float64_t e[2]; int i; a = vld1q_s16 (c); b = wrap_vreinterpretq_f64_s16 (a); vst1q_f64 (e, b); for (i = 0; i < 2; i++) if (!DOUBLE_EQUALS (d[i], e[i], __DBL_EPSILON__)) return 1; return 0; }
static INLINE void scaledconvolve_vert_w4( const uint8_t *src, const ptrdiff_t src_stride, uint8_t *dst, const ptrdiff_t dst_stride, const InterpKernel *const y_filters, const int y0_q4, const int y_step_q4, const int w, const int h) { int y; int y_q4 = y0_q4; src -= src_stride * (SUBPEL_TAPS / 2 - 1); y = h; do { const unsigned char *src_y = &src[(y_q4 >> SUBPEL_BITS) * src_stride]; if (y_q4 & SUBPEL_MASK) { const int16x8_t filters = vld1q_s16(y_filters[y_q4 & SUBPEL_MASK]); const int16x4_t filter3 = vdup_lane_s16(vget_low_s16(filters), 3); const int16x4_t filter4 = vdup_lane_s16(vget_high_s16(filters), 0); uint8x8_t s[8], d; int16x4_t t[8], tt; load_u8_8x8(src_y, src_stride, &s[0], &s[1], &s[2], &s[3], &s[4], &s[5], &s[6], &s[7]); t[0] = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(s[0]))); t[1] = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(s[1]))); t[2] = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(s[2]))); t[3] = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(s[3]))); t[4] = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(s[4]))); t[5] = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(s[5]))); t[6] = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(s[6]))); t[7] = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(s[7]))); tt = convolve8_4(t[0], t[1], t[2], t[3], t[4], t[5], t[6], t[7], filters, filter3, filter4); d = vqrshrun_n_s16(vcombine_s16(tt, tt), 7); vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(d), 0); } else { memcpy(dst, &src_y[3 * src_stride], w); } dst += dst_stride; y_q4 += y_step_q4; } while (--y); }
void WebRtcAecm_ResetAdaptiveChannelNeon(AecmCore* aecm) { assert((uintptr_t)(aecm->channelStored) % 16 == 0); assert((uintptr_t)(aecm->channelAdapt16) % 16 == 0); assert((uintptr_t)(aecm->channelAdapt32) % 32 == 0); // The C equivalent of the optimized code below. // for (i = 0; i < PART_LEN1; i++) { // aecm->channelAdapt16[i] = aecm->channelStored[i]; // aecm->channelAdapt32[i] = WEBRTC_SPL_LSHIFT_W32( // (int32_t)aecm->channelStored[i], 16); // } int16_t* start_stored_p = aecm->channelStored; int16_t* start_adapt16_p = aecm->channelAdapt16; int32_t* start_adapt32_p = aecm->channelAdapt32; const int16_t* end_stored_p = start_stored_p + PART_LEN; int16x8_t stored_v; int32x4_t adapt32_v_low, adapt32_v_high; while (start_stored_p < end_stored_p) { stored_v = vld1q_s16(start_stored_p); vst1q_s16(start_adapt16_p, stored_v); adapt32_v_low = vshll_n_s16(vget_low_s16(stored_v), 16); adapt32_v_high = vshll_n_s16(vget_high_s16(stored_v), 16); vst1q_s32(start_adapt32_p, adapt32_v_low); vst1q_s32(start_adapt32_p + 4, adapt32_v_high); start_stored_p += 8; start_adapt16_p += 8; start_adapt32_p += 8; } aecm->channelAdapt16[PART_LEN] = aecm->channelStored[PART_LEN]; aecm->channelAdapt32[PART_LEN] = (int32_t)aecm->channelStored[PART_LEN] << 16; }
void WebRtcAecm_CalcLinearEnergiesNeon(AecmCore* aecm, const uint16_t* far_spectrum, int32_t* echo_est, uint32_t* far_energy, uint32_t* echo_energy_adapt, uint32_t* echo_energy_stored) { int16_t* start_stored_p = aecm->channelStored; int16_t* start_adapt_p = aecm->channelAdapt16; int32_t* echo_est_p = echo_est; const int16_t* end_stored_p = aecm->channelStored + PART_LEN; const uint16_t* far_spectrum_p = far_spectrum; int16x8_t store_v, adapt_v; uint16x8_t spectrum_v; uint32x4_t echo_est_v_low, echo_est_v_high; uint32x4_t far_energy_v, echo_stored_v, echo_adapt_v; far_energy_v = vdupq_n_u32(0); echo_adapt_v = vdupq_n_u32(0); echo_stored_v = vdupq_n_u32(0); // Get energy for the delayed far end signal and estimated // echo using both stored and adapted channels. // The C code: // for (i = 0; i < PART_LEN1; i++) { // echo_est[i] = WEBRTC_SPL_MUL_16_U16(aecm->channelStored[i], // far_spectrum[i]); // (*far_energy) += (uint32_t)(far_spectrum[i]); // *echo_energy_adapt += aecm->channelAdapt16[i] * far_spectrum[i]; // (*echo_energy_stored) += (uint32_t)echo_est[i]; // } while (start_stored_p < end_stored_p) { spectrum_v = vld1q_u16(far_spectrum_p); adapt_v = vld1q_s16(start_adapt_p); store_v = vld1q_s16(start_stored_p); far_energy_v = vaddw_u16(far_energy_v, vget_low_u16(spectrum_v)); far_energy_v = vaddw_u16(far_energy_v, vget_high_u16(spectrum_v)); echo_est_v_low = vmull_u16(vreinterpret_u16_s16(vget_low_s16(store_v)), vget_low_u16(spectrum_v)); echo_est_v_high = vmull_u16(vreinterpret_u16_s16(vget_high_s16(store_v)), vget_high_u16(spectrum_v)); vst1q_s32(echo_est_p, vreinterpretq_s32_u32(echo_est_v_low)); vst1q_s32(echo_est_p + 4, vreinterpretq_s32_u32(echo_est_v_high)); echo_stored_v = vaddq_u32(echo_est_v_low, echo_stored_v); echo_stored_v = vaddq_u32(echo_est_v_high, echo_stored_v); echo_adapt_v = vmlal_u16(echo_adapt_v, vreinterpret_u16_s16(vget_low_s16(adapt_v)), vget_low_u16(spectrum_v)); echo_adapt_v = vmlal_u16(echo_adapt_v, vreinterpret_u16_s16(vget_high_s16(adapt_v)), vget_high_u16(spectrum_v)); start_stored_p += 8; start_adapt_p += 8; far_spectrum_p += 8; echo_est_p += 8; } AddLanes(far_energy, far_energy_v); AddLanes(echo_energy_stored, echo_stored_v); AddLanes(echo_energy_adapt, echo_adapt_v); echo_est[PART_LEN] = WEBRTC_SPL_MUL_16_U16(aecm->channelStored[PART_LEN], far_spectrum[PART_LEN]); *echo_energy_stored += (uint32_t)echo_est[PART_LEN]; *far_energy += (uint32_t)far_spectrum[PART_LEN]; *echo_energy_adapt += aecm->channelAdapt16[PART_LEN] * far_spectrum[PART_LEN]; }
void vpx_idct8x8_12_add_neon( int16_t *input, uint8_t *dest, int dest_stride) { uint8_t *d1, *d2; uint8x8_t d0u8, d1u8, d2u8, d3u8; int16x4_t d10s16, d11s16, d12s16, d13s16, d16s16; int16x4_t d26s16, d27s16, d28s16, d29s16; uint64x1_t d0u64, d1u64, d2u64, d3u64; int16x8_t q0s16, q1s16, q2s16, q3s16, q4s16, q5s16, q6s16, q7s16; int16x8_t q8s16, q9s16, q10s16, q11s16, q12s16, q13s16, q14s16, q15s16; uint16x8_t q8u16, q9u16, q10u16, q11u16; int32x4_t q9s32, q10s32, q11s32, q12s32; q8s16 = vld1q_s16(input); q9s16 = vld1q_s16(input + 8); q10s16 = vld1q_s16(input + 16); q11s16 = vld1q_s16(input + 24); q12s16 = vld1q_s16(input + 32); q13s16 = vld1q_s16(input + 40); q14s16 = vld1q_s16(input + 48); q15s16 = vld1q_s16(input + 56); TRANSPOSE8X8(&q8s16, &q9s16, &q10s16, &q11s16, &q12s16, &q13s16, &q14s16, &q15s16); // First transform rows // stage 1 q0s16 = vdupq_n_s16(cospi_28_64 * 2); q1s16 = vdupq_n_s16(cospi_4_64 * 2); q4s16 = vqrdmulhq_s16(q9s16, q0s16); q0s16 = vdupq_n_s16(-cospi_20_64 * 2); q7s16 = vqrdmulhq_s16(q9s16, q1s16); q1s16 = vdupq_n_s16(cospi_12_64 * 2); q5s16 = vqrdmulhq_s16(q11s16, q0s16); q0s16 = vdupq_n_s16(cospi_16_64 * 2); q6s16 = vqrdmulhq_s16(q11s16, q1s16); // stage 2 & stage 3 - even half q1s16 = vdupq_n_s16(cospi_24_64 * 2); q9s16 = vqrdmulhq_s16(q8s16, q0s16); q0s16 = vdupq_n_s16(cospi_8_64 * 2); q13s16 = vqrdmulhq_s16(q10s16, q1s16); q15s16 = vqrdmulhq_s16(q10s16, q0s16); // stage 3 - odd half q0s16 = vaddq_s16(q9s16, q15s16); q1s16 = vaddq_s16(q9s16, q13s16); q2s16 = vsubq_s16(q9s16, q13s16); q3s16 = vsubq_s16(q9s16, q15s16); // stage 2 - odd half q13s16 = vsubq_s16(q4s16, q5s16); q4s16 = vaddq_s16(q4s16, q5s16); q14s16 = vsubq_s16(q7s16, q6s16); q7s16 = vaddq_s16(q7s16, q6s16); d26s16 = vget_low_s16(q13s16); d27s16 = vget_high_s16(q13s16); d28s16 = vget_low_s16(q14s16); d29s16 = vget_high_s16(q14s16); d16s16 = vdup_n_s16(cospi_16_64); q9s32 = vmull_s16(d28s16, d16s16); q10s32 = vmull_s16(d29s16, d16s16); q11s32 = vmull_s16(d28s16, d16s16); q12s32 = vmull_s16(d29s16, d16s16); q9s32 = vmlsl_s16(q9s32, d26s16, d16s16); q10s32 = vmlsl_s16(q10s32, d27s16, d16s16); q11s32 = vmlal_s16(q11s32, d26s16, d16s16); q12s32 = vmlal_s16(q12s32, d27s16, d16s16); d10s16 = vqrshrn_n_s32(q9s32, 14); d11s16 = vqrshrn_n_s32(q10s32, 14); d12s16 = vqrshrn_n_s32(q11s32, 14); d13s16 = vqrshrn_n_s32(q12s32, 14); q5s16 = vcombine_s16(d10s16, d11s16); q6s16 = vcombine_s16(d12s16, d13s16); // stage 4 q8s16 = vaddq_s16(q0s16, q7s16); q9s16 = vaddq_s16(q1s16, q6s16); q10s16 = vaddq_s16(q2s16, q5s16); q11s16 = vaddq_s16(q3s16, q4s16); q12s16 = vsubq_s16(q3s16, q4s16); q13s16 = vsubq_s16(q2s16, q5s16); q14s16 = vsubq_s16(q1s16, q6s16); q15s16 = vsubq_s16(q0s16, q7s16); TRANSPOSE8X8(&q8s16, &q9s16, &q10s16, &q11s16, &q12s16, &q13s16, &q14s16, &q15s16); IDCT8x8_1D(&q8s16, &q9s16, &q10s16, &q11s16, &q12s16, &q13s16, &q14s16, &q15s16); q8s16 = vrshrq_n_s16(q8s16, 5); q9s16 = vrshrq_n_s16(q9s16, 5); q10s16 = vrshrq_n_s16(q10s16, 5); q11s16 = vrshrq_n_s16(q11s16, 5); q12s16 = vrshrq_n_s16(q12s16, 5); q13s16 = vrshrq_n_s16(q13s16, 5); q14s16 = vrshrq_n_s16(q14s16, 5); q15s16 = vrshrq_n_s16(q15s16, 5); d1 = d2 = dest; d0u64 = vld1_u64((uint64_t *)d1); d1 += dest_stride; d1u64 = vld1_u64((uint64_t *)d1); d1 += dest_stride; d2u64 = vld1_u64((uint64_t *)d1); d1 += dest_stride; d3u64 = vld1_u64((uint64_t *)d1); d1 += dest_stride; q8u16 = vaddw_u8(vreinterpretq_u16_s16(q8s16), vreinterpret_u8_u64(d0u64)); q9u16 = vaddw_u8(vreinterpretq_u16_s16(q9s16), vreinterpret_u8_u64(d1u64)); q10u16 = vaddw_u8(vreinterpretq_u16_s16(q10s16), vreinterpret_u8_u64(d2u64)); q11u16 = vaddw_u8(vreinterpretq_u16_s16(q11s16), vreinterpret_u8_u64(d3u64)); d0u8 = vqmovun_s16(vreinterpretq_s16_u16(q8u16)); d1u8 = vqmovun_s16(vreinterpretq_s16_u16(q9u16)); d2u8 = vqmovun_s16(vreinterpretq_s16_u16(q10u16)); d3u8 = vqmovun_s16(vreinterpretq_s16_u16(q11u16)); vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d0u8)); d2 += dest_stride; vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d1u8)); d2 += dest_stride; vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d2u8)); d2 += dest_stride; vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d3u8)); d2 += dest_stride; q8s16 = q12s16; q9s16 = q13s16; q10s16 = q14s16; q11s16 = q15s16; d0u64 = vld1_u64((uint64_t *)d1); d1 += dest_stride; d1u64 = vld1_u64((uint64_t *)d1); d1 += dest_stride; d2u64 = vld1_u64((uint64_t *)d1); d1 += dest_stride; d3u64 = vld1_u64((uint64_t *)d1); d1 += dest_stride; q8u16 = vaddw_u8(vreinterpretq_u16_s16(q8s16), vreinterpret_u8_u64(d0u64)); q9u16 = vaddw_u8(vreinterpretq_u16_s16(q9s16), vreinterpret_u8_u64(d1u64)); q10u16 = vaddw_u8(vreinterpretq_u16_s16(q10s16), vreinterpret_u8_u64(d2u64)); q11u16 = vaddw_u8(vreinterpretq_u16_s16(q11s16), vreinterpret_u8_u64(d3u64)); d0u8 = vqmovun_s16(vreinterpretq_s16_u16(q8u16)); d1u8 = vqmovun_s16(vreinterpretq_s16_u16(q9u16)); d2u8 = vqmovun_s16(vreinterpretq_s16_u16(q10u16)); d3u8 = vqmovun_s16(vreinterpretq_s16_u16(q11u16)); vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d0u8)); d2 += dest_stride; vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d1u8)); d2 += dest_stride; vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d2u8)); d2 += dest_stride; vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d3u8)); d2 += dest_stride; return; }
void vpx_idct8x8_64_add_neon( int16_t *input, uint8_t *dest, int dest_stride) { uint8_t *d1, *d2; uint8x8_t d0u8, d1u8, d2u8, d3u8; uint64x1_t d0u64, d1u64, d2u64, d3u64; int16x8_t q8s16, q9s16, q10s16, q11s16, q12s16, q13s16, q14s16, q15s16; uint16x8_t q8u16, q9u16, q10u16, q11u16; q8s16 = vld1q_s16(input); q9s16 = vld1q_s16(input + 8); q10s16 = vld1q_s16(input + 16); q11s16 = vld1q_s16(input + 24); q12s16 = vld1q_s16(input + 32); q13s16 = vld1q_s16(input + 40); q14s16 = vld1q_s16(input + 48); q15s16 = vld1q_s16(input + 56); TRANSPOSE8X8(&q8s16, &q9s16, &q10s16, &q11s16, &q12s16, &q13s16, &q14s16, &q15s16); IDCT8x8_1D(&q8s16, &q9s16, &q10s16, &q11s16, &q12s16, &q13s16, &q14s16, &q15s16); TRANSPOSE8X8(&q8s16, &q9s16, &q10s16, &q11s16, &q12s16, &q13s16, &q14s16, &q15s16); IDCT8x8_1D(&q8s16, &q9s16, &q10s16, &q11s16, &q12s16, &q13s16, &q14s16, &q15s16); q8s16 = vrshrq_n_s16(q8s16, 5); q9s16 = vrshrq_n_s16(q9s16, 5); q10s16 = vrshrq_n_s16(q10s16, 5); q11s16 = vrshrq_n_s16(q11s16, 5); q12s16 = vrshrq_n_s16(q12s16, 5); q13s16 = vrshrq_n_s16(q13s16, 5); q14s16 = vrshrq_n_s16(q14s16, 5); q15s16 = vrshrq_n_s16(q15s16, 5); d1 = d2 = dest; d0u64 = vld1_u64((uint64_t *)d1); d1 += dest_stride; d1u64 = vld1_u64((uint64_t *)d1); d1 += dest_stride; d2u64 = vld1_u64((uint64_t *)d1); d1 += dest_stride; d3u64 = vld1_u64((uint64_t *)d1); d1 += dest_stride; q8u16 = vaddw_u8(vreinterpretq_u16_s16(q8s16), vreinterpret_u8_u64(d0u64)); q9u16 = vaddw_u8(vreinterpretq_u16_s16(q9s16), vreinterpret_u8_u64(d1u64)); q10u16 = vaddw_u8(vreinterpretq_u16_s16(q10s16), vreinterpret_u8_u64(d2u64)); q11u16 = vaddw_u8(vreinterpretq_u16_s16(q11s16), vreinterpret_u8_u64(d3u64)); d0u8 = vqmovun_s16(vreinterpretq_s16_u16(q8u16)); d1u8 = vqmovun_s16(vreinterpretq_s16_u16(q9u16)); d2u8 = vqmovun_s16(vreinterpretq_s16_u16(q10u16)); d3u8 = vqmovun_s16(vreinterpretq_s16_u16(q11u16)); vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d0u8)); d2 += dest_stride; vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d1u8)); d2 += dest_stride; vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d2u8)); d2 += dest_stride; vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d3u8)); d2 += dest_stride; q8s16 = q12s16; q9s16 = q13s16; q10s16 = q14s16; q11s16 = q15s16; d0u64 = vld1_u64((uint64_t *)d1); d1 += dest_stride; d1u64 = vld1_u64((uint64_t *)d1); d1 += dest_stride; d2u64 = vld1_u64((uint64_t *)d1); d1 += dest_stride; d3u64 = vld1_u64((uint64_t *)d1); d1 += dest_stride; q8u16 = vaddw_u8(vreinterpretq_u16_s16(q8s16), vreinterpret_u8_u64(d0u64)); q9u16 = vaddw_u8(vreinterpretq_u16_s16(q9s16), vreinterpret_u8_u64(d1u64)); q10u16 = vaddw_u8(vreinterpretq_u16_s16(q10s16), vreinterpret_u8_u64(d2u64)); q11u16 = vaddw_u8(vreinterpretq_u16_s16(q11s16), vreinterpret_u8_u64(d3u64)); d0u8 = vqmovun_s16(vreinterpretq_s16_u16(q8u16)); d1u8 = vqmovun_s16(vreinterpretq_s16_u16(q9u16)); d2u8 = vqmovun_s16(vreinterpretq_s16_u16(q10u16)); d3u8 = vqmovun_s16(vreinterpretq_s16_u16(q11u16)); vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d0u8)); d2 += dest_stride; vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d1u8)); d2 += dest_stride; vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d2u8)); d2 += dest_stride; vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d3u8)); d2 += dest_stride; return; }
// Warn for incompatible pointer types used with vld/vst intrinsics. int16x8_t test5(int *p) { return vld1q_s16(p); // expected-warning {{incompatible pointer types}} }
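// The warning fires because vld1q_s16 expects a (const) int16_t pointer while
// p is an int *. A conforming call site loads through a correctly typed
// pointer instead; a minimal sketch:
int16x8_t test5_fixed(const int16_t *p) { return vld1q_s16(p); }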
void vp9_highbd_iht8x8_64_add_neon(const tran_low_t *input, uint16_t *dest, int stride, int tx_type, int bd) { int32x4_t a[16]; int16x8_t c[8]; a[0] = vld1q_s32(input); a[1] = vld1q_s32(input + 4); a[2] = vld1q_s32(input + 8); a[3] = vld1q_s32(input + 12); a[4] = vld1q_s32(input + 16); a[5] = vld1q_s32(input + 20); a[6] = vld1q_s32(input + 24); a[7] = vld1q_s32(input + 28); a[8] = vld1q_s32(input + 32); a[9] = vld1q_s32(input + 36); a[10] = vld1q_s32(input + 40); a[11] = vld1q_s32(input + 44); a[12] = vld1q_s32(input + 48); a[13] = vld1q_s32(input + 52); a[14] = vld1q_s32(input + 56); a[15] = vld1q_s32(input + 60); if (bd == 8) { c[0] = vcombine_s16(vmovn_s32(a[0]), vmovn_s32(a[1])); c[1] = vcombine_s16(vmovn_s32(a[2]), vmovn_s32(a[3])); c[2] = vcombine_s16(vmovn_s32(a[4]), vmovn_s32(a[5])); c[3] = vcombine_s16(vmovn_s32(a[6]), vmovn_s32(a[7])); c[4] = vcombine_s16(vmovn_s32(a[8]), vmovn_s32(a[9])); c[5] = vcombine_s16(vmovn_s32(a[10]), vmovn_s32(a[11])); c[6] = vcombine_s16(vmovn_s32(a[12]), vmovn_s32(a[13])); c[7] = vcombine_s16(vmovn_s32(a[14]), vmovn_s32(a[15])); switch (tx_type) { case DCT_DCT: { const int16x8_t cospis = vld1q_s16(kCospi); const int16x4_t cospis0 = vget_low_s16(cospis); // cospi 0, 8, 16, 24 const int16x4_t cospis1 = vget_high_s16(cospis); // cospi 4, 12, 20, 28 idct8x8_64_1d_bd8(cospis0, cospis1, c); idct8x8_64_1d_bd8(cospis0, cospis1, c); break; } case ADST_DCT: { const int16x8_t cospis = vld1q_s16(kCospi); const int16x4_t cospis0 = vget_low_s16(cospis); // cospi 0, 8, 16, 24 const int16x4_t cospis1 = vget_high_s16(cospis); // cospi 4, 12, 20, 28 idct8x8_64_1d_bd8(cospis0, cospis1, c); transpose_s16_8x8(&c[0], &c[1], &c[2], &c[3], &c[4], &c[5], &c[6], &c[7]); iadst8(c); break; } case DCT_ADST: { const int16x8_t cospis = vld1q_s16(kCospi); const int16x4_t cospis0 = vget_low_s16(cospis); // cospi 0, 8, 16, 24 const int16x4_t cospis1 = vget_high_s16(cospis); // cospi 4, 12, 20, 28 transpose_s16_8x8(&c[0], &c[1], &c[2], &c[3], &c[4], &c[5], &c[6], &c[7]); iadst8(c); idct8x8_64_1d_bd8(cospis0, cospis1, c); break; } default: { transpose_s16_8x8(&c[0], &c[1], &c[2], &c[3], &c[4], &c[5], &c[6], &c[7]); iadst8(c); transpose_s16_8x8(&c[0], &c[1], &c[2], &c[3], &c[4], &c[5], &c[6], &c[7]); iadst8(c); break; } } c[0] = vrshrq_n_s16(c[0], 5); c[1] = vrshrq_n_s16(c[1], 5); c[2] = vrshrq_n_s16(c[2], 5); c[3] = vrshrq_n_s16(c[3], 5); c[4] = vrshrq_n_s16(c[4], 5); c[5] = vrshrq_n_s16(c[5], 5); c[6] = vrshrq_n_s16(c[6], 5); c[7] = vrshrq_n_s16(c[7], 5); } else { switch (tx_type) { case DCT_DCT: { const int32x4_t cospis0 = vld1q_s32(kCospi32); // cospi 0, 8, 16, 24 const int32x4_t cospis1 = vld1q_s32(kCospi32 + 4); // cospi 4, 12, 20, 28 if (bd == 10) { idct8x8_64_half1d_bd10(cospis0, cospis1, &a[0], &a[1], &a[2], &a[3], &a[4], &a[5], &a[6], &a[7]); idct8x8_64_half1d_bd10(cospis0, cospis1, &a[8], &a[9], &a[10], &a[11], &a[12], &a[13], &a[14], &a[15]); idct8x8_64_half1d_bd10(cospis0, cospis1, &a[0], &a[8], &a[1], &a[9], &a[2], &a[10], &a[3], &a[11]); idct8x8_64_half1d_bd10(cospis0, cospis1, &a[4], &a[12], &a[5], &a[13], &a[6], &a[14], &a[7], &a[15]); } else { idct8x8_64_half1d_bd12(cospis0, cospis1, &a[0], &a[1], &a[2], &a[3], &a[4], &a[5], &a[6], &a[7]); idct8x8_64_half1d_bd12(cospis0, cospis1, &a[8], &a[9], &a[10], &a[11], &a[12], &a[13], &a[14], &a[15]); idct8x8_64_half1d_bd12(cospis0, cospis1, &a[0], &a[8], &a[1], &a[9], &a[2], &a[10], &a[3], &a[11]); idct8x8_64_half1d_bd12(cospis0, cospis1, &a[4], &a[12], &a[5], &a[13], &a[6], &a[14], &a[7], &a[15]); } break; } case ADST_DCT: { const int32x4_t cospis0 = vld1q_s32(kCospi32); // cospi 0, 8, 16, 24 const int32x4_t cospis1 = vld1q_s32(kCospi32 + 4); // cospi 4, 12, 20, 28 if (bd == 10) { idct8x8_64_half1d_bd10(cospis0, cospis1, &a[0], &a[1], &a[2], &a[3], &a[4], &a[5], &a[6], &a[7]); idct8x8_64_half1d_bd10(cospis0, cospis1, &a[8], &a[9], &a[10], &a[11], &a[12], &a[13], &a[14], &a[15]); transpose_s32_8x4(&a[0], &a[8], &a[1], &a[9], &a[2], &a[10], &a[3], &a[11]); iadst8_bd10(&a[0], &a[8], &a[1], &a[9], &a[2], &a[10], &a[3], &a[11]); transpose_s32_8x4(&a[4], &a[12], &a[5], &a[13], &a[6], &a[14], &a[7], &a[15]); iadst8_bd10(&a[4], &a[12], &a[5], &a[13], &a[6], &a[14], &a[7], &a[15]); } else { idct8x8_64_half1d_bd12(cospis0, cospis1, &a[0], &a[1], &a[2], &a[3], &a[4], &a[5], &a[6], &a[7]); idct8x8_64_half1d_bd12(cospis0, cospis1, &a[8], &a[9], &a[10], &a[11], &a[12], &a[13], &a[14], &a[15]); transpose_s32_8x4(&a[0], &a[8], &a[1], &a[9], &a[2], &a[10], &a[3], &a[11]); iadst8_bd12(&a[0], &a[8], &a[1], &a[9], &a[2], &a[10], &a[3], &a[11]); transpose_s32_8x4(&a[4], &a[12], &a[5], &a[13], &a[6], &a[14], &a[7], &a[15]); iadst8_bd12(&a[4], &a[12], &a[5], &a[13], &a[6], &a[14], &a[7], &a[15]); } break; } case DCT_ADST: { const int32x4_t cospis0 = vld1q_s32(kCospi32); // cospi 0, 8, 16, 24 const int32x4_t cospis1 = vld1q_s32(kCospi32 + 4); // cospi 4, 12, 20, 28 if (bd == 10) { transpose_s32_8x4(&a[0], &a[1], &a[2], &a[3], &a[4], &a[5], &a[6], &a[7]); iadst8_bd10(&a[0], &a[1], &a[2], &a[3], &a[4], &a[5], &a[6], &a[7]); transpose_s32_8x4(&a[8], &a[9], &a[10], &a[11], &a[12], &a[13], &a[14], &a[15]); iadst8_bd10(&a[8], &a[9], &a[10], &a[11], &a[12], &a[13], &a[14], &a[15]); idct8x8_64_half1d_bd10(cospis0, cospis1, &a[0], &a[8], &a[1], &a[9], &a[2], &a[10], &a[3], &a[11]); idct8x8_64_half1d_bd10(cospis0, cospis1, &a[4], &a[12], &a[5], &a[13], &a[6], &a[14], &a[7], &a[15]); } else { transpose_s32_8x4(&a[0], &a[1], &a[2], &a[3], &a[4], &a[5], &a[6], &a[7]); iadst8_bd12(&a[0], &a[1], &a[2], &a[3], &a[4], &a[5], &a[6], &a[7]); transpose_s32_8x4(&a[8], &a[9], &a[10], &a[11], &a[12], &a[13], &a[14], &a[15]); iadst8_bd12(&a[8], &a[9], &a[10], &a[11], &a[12], &a[13], &a[14], &a[15]); idct8x8_64_half1d_bd12(cospis0, cospis1, &a[0], &a[8], &a[1], &a[9], &a[2], &a[10], &a[3], &a[11]); idct8x8_64_half1d_bd12(cospis0, cospis1, &a[4], &a[12], &a[5], &a[13], &a[6], &a[14], &a[7], &a[15]); } break; } default: { assert(tx_type == ADST_ADST); if (bd == 10) { transpose_s32_8x4(&a[0], &a[1], &a[2], &a[3], &a[4], &a[5], &a[6], &a[7]); iadst8_bd10(&a[0], &a[1], &a[2], &a[3], &a[4], &a[5], &a[6], &a[7]); transpose_s32_8x4(&a[8], &a[9], &a[10], &a[11], &a[12], &a[13], &a[14], &a[15]); iadst8_bd10(&a[8], &a[9], &a[10], &a[11], &a[12], &a[13], &a[14], &a[15]); transpose_s32_8x4(&a[0], &a[8], &a[1], &a[9], &a[2], &a[10], &a[3], &a[11]); iadst8_bd10(&a[0], &a[8], &a[1], &a[9], &a[2], &a[10], &a[3], &a[11]); transpose_s32_8x4(&a[4], &a[12], &a[5], &a[13], &a[6], &a[14], &a[7], &a[15]); iadst8_bd10(&a[4], &a[12], &a[5], &a[13], &a[6], &a[14], &a[7], &a[15]); } else { transpose_s32_8x4(&a[0], &a[1], &a[2], &a[3], &a[4], &a[5], &a[6], &a[7]); iadst8_bd12(&a[0], &a[1], &a[2], &a[3], &a[4], &a[5], &a[6], &a[7]); transpose_s32_8x4(&a[8], &a[9], &a[10], &a[11], &a[12], &a[13], &a[14], &a[15]); iadst8_bd12(&a[8], &a[9], &a[10], &a[11], &a[12], &a[13], &a[14], &a[15]); transpose_s32_8x4(&a[0], &a[8], &a[1], &a[9], &a[2], &a[10], &a[3], &a[11]); iadst8_bd12(&a[0], &a[8], &a[1], &a[9], &a[2], &a[10], &a[3], &a[11]); transpose_s32_8x4(&a[4], &a[12], &a[5], &a[13], &a[6], &a[14], &a[7], &a[15]); iadst8_bd12(&a[4], &a[12], &a[5], &a[13], &a[6], &a[14], &a[7], &a[15]); } break; } } c[0] = vcombine_s16(vrshrn_n_s32(a[0], 5), vrshrn_n_s32(a[4], 5)); c[1] = vcombine_s16(vrshrn_n_s32(a[8], 5), vrshrn_n_s32(a[12], 5)); c[2] = vcombine_s16(vrshrn_n_s32(a[1], 5), vrshrn_n_s32(a[5], 5)); c[3] = vcombine_s16(vrshrn_n_s32(a[9], 5), vrshrn_n_s32(a[13], 5)); c[4] = vcombine_s16(vrshrn_n_s32(a[2], 5), vrshrn_n_s32(a[6], 5)); c[5] = vcombine_s16(vrshrn_n_s32(a[10], 5), vrshrn_n_s32(a[14], 5)); c[6] = vcombine_s16(vrshrn_n_s32(a[3], 5), vrshrn_n_s32(a[7], 5)); c[7] = vcombine_s16(vrshrn_n_s32(a[11], 5), vrshrn_n_s32(a[15], 5)); } highbd_add8x8(c, dest, stride, bd); }
static void rfx_dwt_2d_decode_block_horiz_NEON(INT16 * l, INT16 * h, INT16 * dst, int subband_width) { int y, n; INT16 * l_ptr = l; INT16 * h_ptr = h; INT16 * dst_ptr = dst; for (y = 0; y < subband_width; y++) { /* Even coefficients */ for (n = 0; n < subband_width; n+=8) { // dst[2n] = l[n] - ((h[n-1] + h[n] + 1) >> 1); int16x8_t l_n = vld1q_s16(l_ptr); int16x8_t h_n = vld1q_s16(h_ptr); int16x8_t h_n_m = vld1q_s16(h_ptr - 1); if (n == 0) { int16_t first = vgetq_lane_s16(h_n_m, 1); h_n_m = vsetq_lane_s16(first, h_n_m, 0); } int16x8_t tmp_n = vaddq_s16(h_n, h_n_m); tmp_n = vaddq_s16(tmp_n, vdupq_n_s16(1)); tmp_n = vshrq_n_s16(tmp_n, 1); int16x8_t dst_n = vsubq_s16(l_n, tmp_n); vst1q_s16(l_ptr, dst_n); l_ptr+=8; h_ptr+=8; } l_ptr -= subband_width; h_ptr -= subband_width; /* Odd coefficients */ for (n = 0; n < subband_width; n+=8) { // dst[2n + 1] = (h[n] << 1) + ((dst[2n] + dst[2n + 2]) >> 1); int16x8_t h_n = vld1q_s16(h_ptr); h_n = vshlq_n_s16(h_n, 1); int16x8x2_t dst_n; dst_n.val[0] = vld1q_s16(l_ptr); int16x8_t dst_n_p = vld1q_s16(l_ptr + 1); if (n == subband_width - 8) { int16_t last = vgetq_lane_s16(dst_n_p, 6); dst_n_p = vsetq_lane_s16(last, dst_n_p, 7); } dst_n.val[1] = vaddq_s16(dst_n_p, dst_n.val[0]); dst_n.val[1] = vshrq_n_s16(dst_n.val[1], 1); dst_n.val[1] = vaddq_s16(dst_n.val[1], h_n); vst2q_s16(dst_ptr, dst_n); l_ptr+=8; h_ptr+=8; dst_ptr+=16; } } }
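/* Scalar sketch of the inverse lifting steps vectorized above, assumed from
 * the in-code comments (INT16 is the snippet's typedef). The NEON version
 * writes the even samples back into l in place and interleaves them with the
 * odd samples via vst2q; this sketch writes straight into dst. Boundaries
 * mirror: h[-1] := h[0] on the left, and the final dst[2n + 2] falls back to
 * dst[2n] on the right. */
static void rfx_idwt_horiz_row_c(const INT16 *l, const INT16 *h, INT16 *dst, int subband_width) {
  int n;
  for (n = 0; n < subband_width; ++n) { /* even (low-pass) outputs */
    const INT16 h_m = h[n > 0 ? n - 1 : 0];
    dst[2 * n] = (INT16)(l[n] - ((h_m + h[n] + 1) >> 1));
  }
  for (n = 0; n < subband_width; ++n) { /* odd (high-pass) outputs */
    const INT16 d_p = dst[2 * (n + 1 < subband_width ? n + 1 : n)];
    dst[2 * n + 1] = (INT16)((h[n] << 1) + ((dst[2 * n] + d_p) >> 1));
  }
}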
static void rfx_dwt_2d_decode_block_vert_NEON(INT16 * l, INT16 * h, INT16 * dst, int subband_width) { int x, n; INT16 * l_ptr = l; INT16 * h_ptr = h; INT16 * dst_ptr = dst; int total_width = subband_width + subband_width; /* Even coefficients */ for (n = 0; n < subband_width; n++) { for (x = 0; x < total_width; x+=8) { // dst[2n] = l[n] - ((h[n-1] + h[n] + 1) >> 1); int16x8_t l_n = vld1q_s16(l_ptr); int16x8_t h_n = vld1q_s16(h_ptr); int16x8_t tmp_n = vaddq_s16(h_n, vdupq_n_s16(1)); if (n == 0) tmp_n = vaddq_s16(tmp_n, h_n); else { int16x8_t h_n_m = vld1q_s16((h_ptr - total_width)); tmp_n = vaddq_s16(tmp_n, h_n_m); } tmp_n = vshrq_n_s16(tmp_n, 1); int16x8_t dst_n = vsubq_s16(l_n, tmp_n); vst1q_s16(dst_ptr, dst_n); l_ptr+=8; h_ptr+=8; dst_ptr+=8; } dst_ptr+=total_width; } h_ptr = h; dst_ptr = dst + total_width; /* Odd coefficients */ for (n = 0; n < subband_width; n++) { for (x = 0; x < total_width; x+=8) { // dst[2n + 1] = (h[n] << 1) + ((dst[2n] + dst[2n + 2]) >> 1); int16x8_t h_n = vld1q_s16(h_ptr); int16x8_t dst_n_m = vld1q_s16(dst_ptr - total_width); h_n = vshlq_n_s16(h_n, 1); int16x8_t tmp_n = dst_n_m; if (n == subband_width - 1) tmp_n = vaddq_s16(tmp_n, dst_n_m); else { int16x8_t dst_n_p = vld1q_s16((dst_ptr + total_width)); tmp_n = vaddq_s16(tmp_n, dst_n_p); } tmp_n = vshrq_n_s16(tmp_n, 1); int16x8_t dst_n = vaddq_s16(tmp_n, h_n); vst1q_s16(dst_ptr, dst_n); h_ptr+=8; dst_ptr+=8; } dst_ptr+=total_width; } }
static INLINE void scaledconvolve_horiz_w4( const uint8_t *src, const ptrdiff_t src_stride, uint8_t *dst, const ptrdiff_t dst_stride, const InterpKernel *const x_filters, const int x0_q4, const int x_step_q4, const int w, const int h) { DECLARE_ALIGNED(16, uint8_t, temp[4 * 4]); int x, y, z; src -= SUBPEL_TAPS / 2 - 1; y = h; do { int x_q4 = x0_q4; x = 0; do { // process 4 src_x steps for (z = 0; z < 4; ++z) { const uint8_t *const src_x = &src[x_q4 >> SUBPEL_BITS]; if (x_q4 & SUBPEL_MASK) { const int16x8_t filters = vld1q_s16(x_filters[x_q4 & SUBPEL_MASK]); const int16x4_t filter3 = vdup_lane_s16(vget_low_s16(filters), 3); const int16x4_t filter4 = vdup_lane_s16(vget_high_s16(filters), 0); uint8x8_t s[8], d; int16x8_t ss[4]; int16x4_t t[8], tt; load_u8_8x4(src_x, src_stride, &s[0], &s[1], &s[2], &s[3]); transpose_u8_8x4(&s[0], &s[1], &s[2], &s[3]); ss[0] = vreinterpretq_s16_u16(vmovl_u8(s[0])); ss[1] = vreinterpretq_s16_u16(vmovl_u8(s[1])); ss[2] = vreinterpretq_s16_u16(vmovl_u8(s[2])); ss[3] = vreinterpretq_s16_u16(vmovl_u8(s[3])); t[0] = vget_low_s16(ss[0]); t[1] = vget_low_s16(ss[1]); t[2] = vget_low_s16(ss[2]); t[3] = vget_low_s16(ss[3]); t[4] = vget_high_s16(ss[0]); t[5] = vget_high_s16(ss[1]); t[6] = vget_high_s16(ss[2]); t[7] = vget_high_s16(ss[3]); tt = convolve8_4(t[0], t[1], t[2], t[3], t[4], t[5], t[6], t[7], filters, filter3, filter4); d = vqrshrun_n_s16(vcombine_s16(tt, tt), 7); vst1_lane_u32((uint32_t *)&temp[4 * z], vreinterpret_u32_u8(d), 0); } else { int i; for (i = 0; i < 4; ++i) { temp[z * 4 + i] = src_x[i * src_stride + 3]; } } x_q4 += x_step_q4; } // transpose the 4x4 filters values back to dst { const uint8x8x4_t d4 = vld4_u8(temp); vst1_lane_u32((uint32_t *)&dst[x + 0 * dst_stride], vreinterpret_u32_u8(d4.val[0]), 0); vst1_lane_u32((uint32_t *)&dst[x + 1 * dst_stride], vreinterpret_u32_u8(d4.val[1]), 0); vst1_lane_u32((uint32_t *)&dst[x + 2 * dst_stride], vreinterpret_u32_u8(d4.val[2]), 0); vst1_lane_u32((uint32_t *)&dst[x + 3 * dst_stride], vreinterpret_u32_u8(d4.val[3]), 0); } x += 4; } while (x < w); src += src_stride * 4; dst += dst_stride * 4; y -= 4; } while (y > 0); }
void aom_idct4x4_16_add_neon(int16_t *input, uint8_t *dest, int dest_stride) { uint8x8_t d26u8, d27u8; uint32x2_t d26u32, d27u32; uint16x8_t q8u16, q9u16; int16x4_t d16s16, d17s16, d18s16, d19s16, d20s16, d21s16; int16x4_t d22s16, d23s16, d24s16, d26s16, d27s16, d28s16, d29s16; int16x8_t q8s16, q9s16, q13s16, q14s16; int32x4_t q1s32, q13s32, q14s32, q15s32; int16x4x2_t d0x2s16, d1x2s16; int32x4x2_t q0x2s32; uint8_t *d; d26u32 = d27u32 = vdup_n_u32(0); q8s16 = vld1q_s16(input); q9s16 = vld1q_s16(input + 8); d16s16 = vget_low_s16(q8s16); d17s16 = vget_high_s16(q8s16); d18s16 = vget_low_s16(q9s16); d19s16 = vget_high_s16(q9s16); d0x2s16 = vtrn_s16(d16s16, d17s16); d1x2s16 = vtrn_s16(d18s16, d19s16); q8s16 = vcombine_s16(d0x2s16.val[0], d0x2s16.val[1]); q9s16 = vcombine_s16(d1x2s16.val[0], d1x2s16.val[1]); d20s16 = vdup_n_s16((int16_t)cospi_8_64); d21s16 = vdup_n_s16((int16_t)cospi_16_64); q0x2s32 = vtrnq_s32(vreinterpretq_s32_s16(q8s16), vreinterpretq_s32_s16(q9s16)); d16s16 = vget_low_s16(vreinterpretq_s16_s32(q0x2s32.val[0])); d17s16 = vget_high_s16(vreinterpretq_s16_s32(q0x2s32.val[0])); d18s16 = vget_low_s16(vreinterpretq_s16_s32(q0x2s32.val[1])); d19s16 = vget_high_s16(vreinterpretq_s16_s32(q0x2s32.val[1])); d22s16 = vdup_n_s16((int16_t)cospi_24_64); // stage 1 d23s16 = vadd_s16(d16s16, d18s16); d24s16 = vsub_s16(d16s16, d18s16); q15s32 = vmull_s16(d17s16, d22s16); q1s32 = vmull_s16(d17s16, d20s16); q13s32 = vmull_s16(d23s16, d21s16); q14s32 = vmull_s16(d24s16, d21s16); q15s32 = vmlsl_s16(q15s32, d19s16, d20s16); q1s32 = vmlal_s16(q1s32, d19s16, d22s16); d26s16 = vqrshrn_n_s32(q13s32, 14); d27s16 = vqrshrn_n_s32(q14s32, 14); d29s16 = vqrshrn_n_s32(q15s32, 14); d28s16 = vqrshrn_n_s32(q1s32, 14); q13s16 = vcombine_s16(d26s16, d27s16); q14s16 = vcombine_s16(d28s16, d29s16); // stage 2 q8s16 = vaddq_s16(q13s16, q14s16); q9s16 = vsubq_s16(q13s16, q14s16); d16s16 = vget_low_s16(q8s16); d17s16 = vget_high_s16(q8s16); d18s16 = vget_high_s16(q9s16); // vswp d18 d19 d19s16 = vget_low_s16(q9s16); d0x2s16 = vtrn_s16(d16s16, d17s16); d1x2s16 = vtrn_s16(d18s16, d19s16); q8s16 = vcombine_s16(d0x2s16.val[0], d0x2s16.val[1]); q9s16 = vcombine_s16(d1x2s16.val[0], d1x2s16.val[1]); q0x2s32 = vtrnq_s32(vreinterpretq_s32_s16(q8s16), vreinterpretq_s32_s16(q9s16)); d16s16 = vget_low_s16(vreinterpretq_s16_s32(q0x2s32.val[0])); d17s16 = vget_high_s16(vreinterpretq_s16_s32(q0x2s32.val[0])); d18s16 = vget_low_s16(vreinterpretq_s16_s32(q0x2s32.val[1])); d19s16 = vget_high_s16(vreinterpretq_s16_s32(q0x2s32.val[1])); // do the transform on columns // stage 1 d23s16 = vadd_s16(d16s16, d18s16); d24s16 = vsub_s16(d16s16, d18s16); q15s32 = vmull_s16(d17s16, d22s16); q1s32 = vmull_s16(d17s16, d20s16); q13s32 = vmull_s16(d23s16, d21s16); q14s32 = vmull_s16(d24s16, d21s16); q15s32 = vmlsl_s16(q15s32, d19s16, d20s16); q1s32 = vmlal_s16(q1s32, d19s16, d22s16); d26s16 = vqrshrn_n_s32(q13s32, 14); d27s16 = vqrshrn_n_s32(q14s32, 14); d29s16 = vqrshrn_n_s32(q15s32, 14); d28s16 = vqrshrn_n_s32(q1s32, 14); q13s16 = vcombine_s16(d26s16, d27s16); q14s16 = vcombine_s16(d28s16, d29s16); // stage 2 q8s16 = vaddq_s16(q13s16, q14s16); q9s16 = vsubq_s16(q13s16, q14s16); q8s16 = vrshrq_n_s16(q8s16, 4); q9s16 = vrshrq_n_s16(q9s16, 4); d = dest; d26u32 = vld1_lane_u32((const uint32_t *)d, d26u32, 0); d += dest_stride; d26u32 = vld1_lane_u32((const uint32_t *)d, d26u32, 1); d += dest_stride; d27u32 = vld1_lane_u32((const uint32_t *)d, d27u32, 1); d += dest_stride; d27u32 = vld1_lane_u32((const uint32_t *)d, d27u32, 0); q8u16 = vaddw_u8(vreinterpretq_u16_s16(q8s16), vreinterpret_u8_u32(d26u32)); q9u16 = vaddw_u8(vreinterpretq_u16_s16(q9s16), vreinterpret_u8_u32(d27u32)); d26u8 = vqmovun_s16(vreinterpretq_s16_u16(q8u16)); d27u8 = vqmovun_s16(vreinterpretq_s16_u16(q9u16)); d = dest; vst1_lane_u32((uint32_t *)d, vreinterpret_u32_u8(d26u8), 0); d += dest_stride; vst1_lane_u32((uint32_t *)d, vreinterpret_u32_u8(d26u8), 1); d += dest_stride; vst1_lane_u32((uint32_t *)d, vreinterpret_u32_u8(d27u8), 1); d += dest_stride; vst1_lane_u32((uint32_t *)d, vreinterpret_u32_u8(d27u8), 0); return; }