/* Intrinsic coverage test: vreinterpretq_u16_u32 performs a pure bit-level
   reinterpretation of a 128-bit vector (uint32x4_t -> uint16x8_t).  The
   source operand is deliberately left uninitialized -- this function exists
   only to exercise instruction selection, not to compute a value.  */
void test_vreinterpretQu16_u32 (void)
{
  uint16x8_t result_u16x8;
  uint32x4_t source_u32x4;

  result_u16x8 = vreinterpretq_u16_u32 (source_u32x4);
}
/*
 * NEON forward pass of a small 4-neuron first network stage.
 * NOTE(review): shape of the code (64 int16 inputs, 4 accumulators,
 * x/(1+|x|) activation, 4-byte 0/1 output) looks like an nnedi3-style
 * "network 0" prescreener -- confirm against the scalar reference.
 *
 * dataf    - input samples; reinterpreted as 64 packed int16 values.
 * weightsf - weight blob; first 512 bytes are int16 weights (4 neurons x
 *            64 taps, interleaved), followed by float scale/bias/second-
 *            layer data addressed below by byte offset / sizeof(float).
 * d        - output: 4 bytes, each 0 or 1.
 */
void computeNetwork0new_neon(const float *dataf, const float *weightsf, uint8_t *d) {
    /* The first portion of both blobs actually holds int16 data. */
    const int16_t *data = (const int16_t *)dataf;
    const int16_t *weights = (const int16_t *)weightsf;

    /* One 4-lane int32 accumulator per output neuron. */
    int32x4_t accum0 = { 0, 0, 0, 0 };
    int32x4_t accum1 = accum0;
    int32x4_t accum2 = accum0;
    int32x4_t accum3 = accum0;

    /* 64 input samples, 8 per iteration.  vld2_s16 de-interleaves into
       even (.val[0]) and odd (.val[1]) lanes; the weights were stored
       pre-interleaved the same way, so pairing val[0] with val[0] and
       val[1] with val[1] reconstitutes the ordinary dot product. */
    for (int i = 0; i < 128/2; i += 8) {
        int16x4x2_t d0 = vld2_s16(data + i);
        /* Weights for the 4 neurons are laid out in consecutive 8-element
           groups: neuron k's block for this iteration is at i*4 + 8*k. */
        int16x4x2_t w0 = vld2_s16(weights + i * 4);
        int16x4x2_t w1 = vld2_s16(weights + i * 4 + 8);
        int16x4x2_t w2 = vld2_s16(weights + i * 4 + 16);
        int16x4x2_t w3 = vld2_s16(weights + i * 4 + 24);
        /* Widening multiply-accumulate: int16 x int16 -> int32. */
        accum0 = vmlal_s16(accum0, d0.val[0], w0.val[0]);
        accum0 = vmlal_s16(accum0, d0.val[1], w0.val[1]);
        accum1 = vmlal_s16(accum1, d0.val[0], w1.val[0]);
        accum1 = vmlal_s16(accum1, d0.val[1], w1.val[1]);
        accum2 = vmlal_s16(accum2, d0.val[0], w2.val[0]);
        accum2 = vmlal_s16(accum2, d0.val[1], w2.val[1]);
        accum3 = vmlal_s16(accum3, d0.val[0], w3.val[0]);
        accum3 = vmlal_s16(accum3, d0.val[1], w3.val[1]);
    }

    /* Horizontal reduction: two rounds of pairwise adds collapse each
       4-lane accumulator to a single int32; the four results end up as
       the four lanes of `sum` (neuron 0..3 in lane order). */
    int32x2_t sum0 = vpadd_s32(vget_low_s32(accum0), vget_high_s32(accum0));
    int32x2_t sum1 = vpadd_s32(vget_low_s32(accum1), vget_high_s32(accum1));
    int32x2_t sum2 = vpadd_s32(vget_low_s32(accum2), vget_high_s32(accum2));
    int32x2_t sum3 = vpadd_s32(vget_low_s32(accum3), vget_high_s32(accum3));
    sum0 = vpadd_s32(sum0, sum1);
    sum1 = vpadd_s32(sum2, sum3);
    int32x4_t sum = vcombine_s32(sum0, sum1);

    /* Float stage: per-neuron scale (byte offset 512) and bias (528).
       Offsets are bytes / sizeof(float), i.e. float elements 128 and 132. */
    float32x4_t m0 = vcvtq_f32_s32(sum);
    m0 = vmulq_f32(m0, vld1q_f32(weightsf + 512/4));
    m0 = vaddq_f32(m0, vld1q_f32(weightsf + 528/4));

    float32x4_t m1, m2, m3, m4;
    m1 = m0;
    /* Soft activation x / (1 + |x|):
       |x| via AND with sign_bits_f (external; presumably 0x7FFFFFFF per
       lane, clearing the IEEE sign bit -- TODO confirm), add ones_f
       (external; presumably 1.0f lanes), then multiply the original value
       by the approximate reciprocal.  `reciprocal` is an external helper
       (presumably vrecpe + Newton-Raphson refinement -- confirm). */
    m0 = vreinterpretq_f32_u32(vandq_u32(vreinterpretq_u32_f32(m0), sign_bits_f));
    m0 = vaddq_f32(m0, ones_f);
    m0 = vmulq_f32(reciprocal(m0), m1);

    /* Second layer: broadcast each activated neuron output to all four
       lanes, multiply by its 4-float weight row (byte offsets 544..592),
       sum the rows, and add the bias row (offset 608). */
    m1 = vdupq_lane_f32(vget_low_f32(m0), 0);
    m2 = vdupq_lane_f32(vget_low_f32(m0), 1);
    m3 = vdupq_lane_f32(vget_high_f32(m0), 0);
    m4 = vdupq_lane_f32(vget_high_f32(m0), 1);
    m1 = vmulq_f32(m1, vld1q_f32(weightsf + 544/4));
    m2 = vmulq_f32(m2, vld1q_f32(weightsf + 560/4));
    m3 = vmulq_f32(m3, vld1q_f32(weightsf + 576/4));
    m4 = vmulq_f32(m4, vld1q_f32(weightsf + 592/4));
    m1 = vaddq_f32(m1, m2);
    m3 = vaddq_f32(m3, m4);
    m1 = vaddq_f32(m1, m3);
    m1 = vaddq_f32(m1, vld1q_f32(weightsf + 608/4));

    /* Threshold at zero: gte lanes are all-ones where m1 >= 0 (zeroes_f is
       external; presumably 0.0f lanes).  Narrow 32->16->8; the upper four
       bytes of gte_u8 are filled from sign_bits_f purely as filler -- only
       the low four bytes are ever stored.  Shifting right by 7 turns each
       0xFF byte into 1 and 0x00 into 0. */
    uint32x4_t gte = vcgeq_f32(m1, zeroes_f);
    uint16x4_t gte_u16 = vmovn_u32(gte);
    uint8x8_t gte_u8 = vmovn_u16(vcombine_u16(gte_u16, vget_low_u16(vreinterpretq_u16_u32(sign_bits_f))));
    gte_u8 = vshr_n_u8(gte_u8, 7);
    /* Store the four 0/1 bytes as a single 32-bit lane.
       NOTE(review): assumes d is 4-byte aligned for the u32 store -- verify
       against callers. */
    vst1_lane_u32((uint32_t *)d, vreinterpret_u32_u8(gte_u8), 0);
}
/*
 * Galois-field (w=32) constant multiply over a region, using the
 * "split" method: the 32-bit multiplicand is processed as eight 4-bit
 * nibbles, each looked up in a 16-entry table per output byte position,
 * with the partial products XORed together.  btable[n][b] is the 16-byte
 * lookup table for source nibble n contributing to output byte b; the
 * tables are assumed to be precomputed for `val` by the caller.
 *
 * NOTE(review): the `gf` and `val` parameters are never referenced in
 * this function (the tables already encode val) -- confirm they are kept
 * only for signature uniformity with other region-multiply kernels.
 *
 * src/dst/d_end - 16-byte-aligned word regions; processes 16 words
 *                 (64 bytes) per loop iteration until dst reaches d_end.
 * xor           - nonzero: XOR results into dst instead of overwriting.
 * altmap        - nonzero: data is already stored byte-planar
 *                 ("alternate mapping"), so the transpose steps are skipped.
 */
static void
neon_w32_split_4_32_multiply_region(gf_t *gf, uint32_t *src, uint32_t *dst,
                                    uint32_t *d_end, uint8_t btable[8][4][16],
                                    uint32_t val, int xor, int altmap)
{
  int i, j;
#ifdef ARCH_AARCH64
  /* AArch64: vqtbl1q_u8 takes a full 128-bit table register. */
  uint8x16_t tables[8][4];
#else
  /* ARMv7: tables held as a d-register pair.
     NOTE(review): vqtbl1q_u8 is an AArch64 intrinsic; on ARMv7 this code
     must rely on a compatibility macro mapping it onto vtbl2 -- confirm
     such a macro exists in this translation unit's headers. */
  uint8x8x2_t tables[8][4];
#endif
  uint32x4_t v0, v1, v2, v3, s0, s1, s2, s3;
  uint8x16_t p0, p1, p2, p3, si, mask1;
  uint16x8x2_t r0, r1;
  uint8x16x2_t q0, q1;

  /* Load all 8x4 nibble tables into vector registers up front. */
  for (i = 0; i < 8; i++) {
    for (j = 0; j < 4; j++) {
#ifdef ARCH_AARCH64
      tables[i][j] = vld1q_u8(btable[i][j]);
#else
      tables[i][j].val[0] = vld1_u8(btable[i][j]);
      tables[i][j].val[1] = vld1_u8(btable[i][j] + 8);
#endif
    }
  }

  mask1 = vdupq_n_u8(0xf);  /* low-nibble mask */

  while (dst < d_end) {
    /* Load 16 source words (64 bytes). */
    v0 = vld1q_u32(src); src += 4;
    v1 = vld1q_u32(src); src += 4;
    v2 = vld1q_u32(src); src += 4;
    v3 = vld1q_u32(src); src += 4;

    if (altmap) {
      /* Data already byte-planar: just reinterpret. */
      q0.val[0] = vreinterpretq_u8_u32(v0);
      q0.val[1] = vreinterpretq_u8_u32(v1);
      q1.val[0] = vreinterpretq_u8_u32(v2);
      q1.val[1] = vreinterpretq_u8_u32(v3);
    } else {
      /* Transpose 16 words into 4 byte-planes via vtrn at u16 then u8
         granularity, so q0.val[0] holds byte 0 of every word, q0.val[1]
         byte 1, q1.val[0] byte 2, q1.val[1] byte 3. */
      r0 = vtrnq_u16(vreinterpretq_u16_u32(v0), vreinterpretq_u16_u32(v2));
      r1 = vtrnq_u16(vreinterpretq_u16_u32(v1), vreinterpretq_u16_u32(v3));
      q0 = vtrnq_u8(vreinterpretq_u8_u16(r0.val[0]), vreinterpretq_u8_u16(r1.val[0]));
      q1 = vtrnq_u8(vreinterpretq_u8_u16(r0.val[1]), vreinterpretq_u8_u16(r1.val[1]));
    }

    /* Nibble 0 (low nibble of byte 0): initial table lookups into the
       four output byte-planes p0..p3. */
    si = vandq_u8(q0.val[0], mask1);
    p0 = vqtbl1q_u8(tables[0][0], si);
    p1 = vqtbl1q_u8(tables[0][1], si);
    p2 = vqtbl1q_u8(tables[0][2], si);
    p3 = vqtbl1q_u8(tables[0][3], si);

    /* Nibble 1 (high nibble of byte 0): lookup and XOR-accumulate. */
    si = vshrq_n_u8(q0.val[0], 4);
    p0 = veorq_u8(p0, vqtbl1q_u8(tables[1][0], si));
    p1 = veorq_u8(p1, vqtbl1q_u8(tables[1][1], si));
    p2 = veorq_u8(p2, vqtbl1q_u8(tables[1][2], si));
    p3 = veorq_u8(p3, vqtbl1q_u8(tables[1][3], si));

    /* Nibbles 2-3 (byte 1). */
    si = vandq_u8(q0.val[1], mask1);
    p0 = veorq_u8(p0, vqtbl1q_u8(tables[2][0], si));
    p1 = veorq_u8(p1, vqtbl1q_u8(tables[2][1], si));
    p2 = veorq_u8(p2, vqtbl1q_u8(tables[2][2], si));
    p3 = veorq_u8(p3, vqtbl1q_u8(tables[2][3], si));
    si = vshrq_n_u8(q0.val[1], 4);
    p0 = veorq_u8(p0, vqtbl1q_u8(tables[3][0], si));
    p1 = veorq_u8(p1, vqtbl1q_u8(tables[3][1], si));
    p2 = veorq_u8(p2, vqtbl1q_u8(tables[3][2], si));
    p3 = veorq_u8(p3, vqtbl1q_u8(tables[3][3], si));

    /* Nibbles 4-5 (byte 2). */
    si = vandq_u8(q1.val[0], mask1);
    p0 = veorq_u8(p0, vqtbl1q_u8(tables[4][0], si));
    p1 = veorq_u8(p1, vqtbl1q_u8(tables[4][1], si));
    p2 = veorq_u8(p2, vqtbl1q_u8(tables[4][2], si));
    p3 = veorq_u8(p3, vqtbl1q_u8(tables[4][3], si));
    si = vshrq_n_u8(q1.val[0], 4);
    p0 = veorq_u8(p0, vqtbl1q_u8(tables[5][0], si));
    p1 = veorq_u8(p1, vqtbl1q_u8(tables[5][1], si));
    p2 = veorq_u8(p2, vqtbl1q_u8(tables[5][2], si));
    p3 = veorq_u8(p3, vqtbl1q_u8(tables[5][3], si));

    /* Nibbles 6-7 (byte 3). */
    si = vandq_u8(q1.val[1], mask1);
    p0 = veorq_u8(p0, vqtbl1q_u8(tables[6][0], si));
    p1 = veorq_u8(p1, vqtbl1q_u8(tables[6][1], si));
    p2 = veorq_u8(p2, vqtbl1q_u8(tables[6][2], si));
    p3 = veorq_u8(p3, vqtbl1q_u8(tables[6][3], si));
    si = vshrq_n_u8(q1.val[1], 4);
    p0 = veorq_u8(p0, vqtbl1q_u8(tables[7][0], si));
    p1 = veorq_u8(p1, vqtbl1q_u8(tables[7][1], si));
    p2 = veorq_u8(p2, vqtbl1q_u8(tables[7][2], si));
    p3 = veorq_u8(p3, vqtbl1q_u8(tables[7][3], si));

    if (altmap) {
      /* Keep the byte-planar layout on output. */
      s0 = vreinterpretq_u32_u8(p0);
      s1 = vreinterpretq_u32_u8(p1);
      s2 = vreinterpretq_u32_u8(p2);
      s3 = vreinterpretq_u32_u8(p3);
    } else {
      /* Inverse transpose: byte-planes back into interleaved words. */
      q0 = vtrnq_u8(p0, p1);
      q1 = vtrnq_u8(p2, p3);
      r0 = vtrnq_u16(vreinterpretq_u16_u8(q0.val[0]), vreinterpretq_u16_u8(q1.val[0]));
      r1 = vtrnq_u16(vreinterpretq_u16_u8(q0.val[1]), vreinterpretq_u16_u8(q1.val[1]));
      s0 = vreinterpretq_u32_u16(r0.val[0]);
      s1 = vreinterpretq_u32_u16(r1.val[0]);
      s2 = vreinterpretq_u32_u16(r0.val[1]);
      s3 = vreinterpretq_u32_u16(r1.val[1]);
    }

    /* Optionally XOR with the existing destination contents. */
    if (xor) {
      v0 = vld1q_u32(dst);
      v1 = vld1q_u32(dst + 4);
      v2 = vld1q_u32(dst + 8);
      v3 = vld1q_u32(dst + 12);
      s0 = veorq_u32(s0, v0);
      s1 = veorq_u32(s1, v1);
      s2 = veorq_u32(s2, v2);
      s3 = veorq_u32(s3, v3);
    }

    vst1q_u32(dst, s0);
    vst1q_u32(dst + 4, s1);
    vst1q_u32(dst + 8, s2);
    vst1q_u32(dst + 12, s3);
    dst += 16;
  }
}
/*
 * VP8 macroblock loop filter applied to a VERTICAL edge of the two chroma
 * planes at once: the U rows are packed into the low 64-bit halves and the
 * V rows into the high halves of each 128-bit register, so one filter call
 * processes both planes.
 *
 * u, v    - pointers to the pixel column AT the edge in each plane; the
 *           function reads/writes the 8 pixels straddling it (u-4 .. u+3).
 * pitch   - row stride in bytes for both planes.
 * blimit, limit, thresh - scalar filter parameters, broadcast to all lanes.
 *
 * Since the edge is vertical, the 8x16 pixel block (8 columns x 8 rows per
 * plane) is transposed into per-column registers q3..q10 before filtering
 * and transposed back before the stores.
 */
void vp8_mbloop_filter_vertical_edge_uv_neon(
        unsigned char *u,
        int pitch,
        unsigned char blimit,
        unsigned char limit,
        unsigned char thresh,
        unsigned char *v) {
    unsigned char *us, *ud;
    unsigned char *vs, *vd;
    uint8x16_t qblimit, qlimit, qthresh, q3, q4;
    uint8x16_t q5, q6, q7, q8, q9, q10;
    uint8x8_t d6, d7, d8, d9, d10, d11, d12, d13, d14;
    uint8x8_t d15, d16, d17, d18, d19, d20, d21;
    uint32x4x2_t q2tmp0, q2tmp1, q2tmp2, q2tmp3;
    uint16x8x2_t q2tmp4, q2tmp5, q2tmp6, q2tmp7;
    uint8x16x2_t q2tmp8, q2tmp9, q2tmp10, q2tmp11;

    /* Broadcast the scalar thresholds across all 16 byte lanes. */
    qblimit = vdupq_n_u8(blimit);
    qlimit = vdupq_n_u8(limit);
    qthresh = vdupq_n_u8(thresh);

    /* Load 8 rows x 8 bytes from each plane, starting 4 pixels left of
       the edge; U rows land in even d-registers, V rows in odd ones. */
    us = u - 4;
    vs = v - 4;
    d6 = vld1_u8(us);
    us += pitch;
    d7 = vld1_u8(vs);
    vs += pitch;
    d8 = vld1_u8(us);
    us += pitch;
    d9 = vld1_u8(vs);
    vs += pitch;
    d10 = vld1_u8(us);
    us += pitch;
    d11 = vld1_u8(vs);
    vs += pitch;
    d12 = vld1_u8(us);
    us += pitch;
    d13 = vld1_u8(vs);
    vs += pitch;
    d14 = vld1_u8(us);
    us += pitch;
    d15 = vld1_u8(vs);
    vs += pitch;
    d16 = vld1_u8(us);
    us += pitch;
    d17 = vld1_u8(vs);
    vs += pitch;
    d18 = vld1_u8(us);
    us += pitch;
    d19 = vld1_u8(vs);
    vs += pitch;
    d20 = vld1_u8(us);
    d21 = vld1_u8(vs);

    /* Pair rows: U in the low half, corresponding V row in the high half. */
    q3 = vcombine_u8(d6, d7);
    q4 = vcombine_u8(d8, d9);
    q5 = vcombine_u8(d10, d11);
    q6 = vcombine_u8(d12, d13);
    q7 = vcombine_u8(d14, d15);
    q8 = vcombine_u8(d16, d17);
    q9 = vcombine_u8(d18, d19);
    q10 = vcombine_u8(d20, d21);

    /* Transpose rows -> columns with vtrn at 32-, 16-, then 8-bit
       granularity, so each qN afterwards holds one pixel COLUMN
       (q3 = 4 left of edge ... q10 = 3 right of edge). */
    q2tmp0 = vtrnq_u32(vreinterpretq_u32_u8(q3), vreinterpretq_u32_u8(q7));
    q2tmp1 = vtrnq_u32(vreinterpretq_u32_u8(q4), vreinterpretq_u32_u8(q8));
    q2tmp2 = vtrnq_u32(vreinterpretq_u32_u8(q5), vreinterpretq_u32_u8(q9));
    q2tmp3 = vtrnq_u32(vreinterpretq_u32_u8(q6), vreinterpretq_u32_u8(q10));

    q2tmp4 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[0]),
                       vreinterpretq_u16_u32(q2tmp2.val[0]));
    q2tmp5 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[0]),
                       vreinterpretq_u16_u32(q2tmp3.val[0]));
    q2tmp6 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[1]),
                       vreinterpretq_u16_u32(q2tmp2.val[1]));
    q2tmp7 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[1]),
                       vreinterpretq_u16_u32(q2tmp3.val[1]));

    q2tmp8 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[0]),
                      vreinterpretq_u8_u16(q2tmp5.val[0]));
    q2tmp9 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[1]),
                      vreinterpretq_u8_u16(q2tmp5.val[1]));
    q2tmp10 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[0]),
                       vreinterpretq_u8_u16(q2tmp7.val[0]));
    q2tmp11 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[1]),
                       vreinterpretq_u8_u16(q2tmp7.val[1]));

    q3 = q2tmp8.val[0];
    q4 = q2tmp8.val[1];
    q5 = q2tmp9.val[0];
    q6 = q2tmp9.val[1];
    q7 = q2tmp10.val[0];
    q8 = q2tmp10.val[1];
    q9 = q2tmp11.val[0];
    q10 = q2tmp11.val[1];

    /* Run the MB loop filter on the transposed columns.  Only q4..q9 are
       written back by the filter; the outermost columns q3 and q10 are
       read-only inputs and pass through unchanged. */
    vp8_mbloop_filter_neon(qblimit, qlimit, qthresh, q3, q4,
                           q5, q6, q7, q8, q9, q10,
                           &q4, &q5, &q6, &q7, &q8, &q9);

    /* Transpose back: columns -> rows (same three-level vtrn sequence). */
    q2tmp0 = vtrnq_u32(vreinterpretq_u32_u8(q3), vreinterpretq_u32_u8(q7));
    q2tmp1 = vtrnq_u32(vreinterpretq_u32_u8(q4), vreinterpretq_u32_u8(q8));
    q2tmp2 = vtrnq_u32(vreinterpretq_u32_u8(q5), vreinterpretq_u32_u8(q9));
    q2tmp3 = vtrnq_u32(vreinterpretq_u32_u8(q6), vreinterpretq_u32_u8(q10));

    q2tmp4 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[0]),
                       vreinterpretq_u16_u32(q2tmp2.val[0]));
    q2tmp5 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[0]),
                       vreinterpretq_u16_u32(q2tmp3.val[0]));
    q2tmp6 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[1]),
                       vreinterpretq_u16_u32(q2tmp2.val[1]));
    q2tmp7 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[1]),
                       vreinterpretq_u16_u32(q2tmp3.val[1]));

    q2tmp8 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[0]),
                      vreinterpretq_u8_u16(q2tmp5.val[0]));
    q2tmp9 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[1]),
                      vreinterpretq_u8_u16(q2tmp5.val[1]));
    q2tmp10 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[0]),
                       vreinterpretq_u8_u16(q2tmp7.val[0]));
    q2tmp11 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[1]),
                       vreinterpretq_u8_u16(q2tmp7.val[1]));

    q3 = q2tmp8.val[0];
    q4 = q2tmp8.val[1];
    q5 = q2tmp9.val[0];
    q6 = q2tmp9.val[1];
    q7 = q2tmp10.val[0];
    q8 = q2tmp10.val[1];
    q9 = q2tmp11.val[0];
    q10 = q2tmp11.val[1];

    /* Write back: low halves are the 8 U rows, high halves the 8 V rows,
       each stored at 4 pixels left of the edge. */
    ud = u - 4;
    vst1_u8(ud, vget_low_u8(q3));
    ud += pitch;
    vst1_u8(ud, vget_low_u8(q4));
    ud += pitch;
    vst1_u8(ud, vget_low_u8(q5));
    ud += pitch;
    vst1_u8(ud, vget_low_u8(q6));
    ud += pitch;
    vst1_u8(ud, vget_low_u8(q7));
    ud += pitch;
    vst1_u8(ud, vget_low_u8(q8));
    ud += pitch;
    vst1_u8(ud, vget_low_u8(q9));
    ud += pitch;
    vst1_u8(ud, vget_low_u8(q10));

    vd = v - 4;
    vst1_u8(vd, vget_high_u8(q3));
    vd += pitch;
    vst1_u8(vd, vget_high_u8(q4));
    vd += pitch;
    vst1_u8(vd, vget_high_u8(q5));
    vd += pitch;
    vst1_u8(vd, vget_high_u8(q6));
    vd += pitch;
    vst1_u8(vd, vget_high_u8(q7));
    vd += pitch;
    vst1_u8(vd, vget_high_u8(q8));
    vd += pitch;
    vst1_u8(vd, vget_high_u8(q9));
    vd += pitch;
    vst1_u8(vd, vget_high_u8(q10));
    return;
}