void vp9_quantize_fp_neon(const tran_low_t *coeff_ptr, intptr_t count,
                          int skip_block, const int16_t *zbin_ptr,
                          const int16_t *round_ptr, const int16_t *quant_ptr,
                          const int16_t *quant_shift_ptr,
                          tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
                          const int16_t *dequant_ptr, uint16_t *eob_ptr,
                          const int16_t *scan, const int16_t *iscan) {
  // TODO(jingning) Decide the need of these arguments after the
  // quantization process is completed.
  (void)zbin_ptr;
  (void)quant_shift_ptr;
  (void)scan;

  if (!skip_block) {
    // Quantization pass: All coefficients with index >= zero_flag are
    // skippable. Note: zero_flag can be zero.
    int i;
    const int16x8_t v_zero = vdupq_n_s16(0);
    const int16x8_t v_one = vdupq_n_s16(1);
    int16x8_t v_eobmax_76543210 = vdupq_n_s16(-1);
    int16x8_t v_round = vmovq_n_s16(round_ptr[1]);
    int16x8_t v_quant = vmovq_n_s16(quant_ptr[1]);
    int16x8_t v_dequant = vmovq_n_s16(dequant_ptr[1]);
    // adjust for dc
    v_round = vsetq_lane_s16(round_ptr[0], v_round, 0);
    v_quant = vsetq_lane_s16(quant_ptr[0], v_quant, 0);
    v_dequant = vsetq_lane_s16(dequant_ptr[0], v_dequant, 0);
    // process dc and the first seven ac coeffs
    {
      const int16x8_t v_iscan = vld1q_s16(&iscan[0]);
      const int16x8_t v_coeff = load_tran_low_to_s16q(coeff_ptr);
      const int16x8_t v_coeff_sign = vshrq_n_s16(v_coeff, 15);
      const int16x8_t v_tmp = vabaq_s16(v_round, v_coeff, v_zero);
      const int32x4_t v_tmp_lo =
          vmull_s16(vget_low_s16(v_tmp), vget_low_s16(v_quant));
      const int32x4_t v_tmp_hi =
          vmull_s16(vget_high_s16(v_tmp), vget_high_s16(v_quant));
      const int16x8_t v_tmp2 =
          vcombine_s16(vshrn_n_s32(v_tmp_lo, 16), vshrn_n_s32(v_tmp_hi, 16));
      const uint16x8_t v_nz_mask = vceqq_s16(v_tmp2, v_zero);
      const int16x8_t v_iscan_plus1 = vaddq_s16(v_iscan, v_one);
      const int16x8_t v_nz_iscan = vbslq_s16(v_nz_mask, v_zero, v_iscan_plus1);
      const int16x8_t v_qcoeff_a = veorq_s16(v_tmp2, v_coeff_sign);
      const int16x8_t v_qcoeff = vsubq_s16(v_qcoeff_a, v_coeff_sign);
      const int16x8_t v_dqcoeff = vmulq_s16(v_qcoeff, v_dequant);
      v_eobmax_76543210 = vmaxq_s16(v_eobmax_76543210, v_nz_iscan);
      store_s16q_to_tran_low(qcoeff_ptr, v_qcoeff);
      store_s16q_to_tran_low(dqcoeff_ptr, v_dqcoeff);
      v_round = vmovq_n_s16(round_ptr[1]);
      v_quant = vmovq_n_s16(quant_ptr[1]);
      v_dequant = vmovq_n_s16(dequant_ptr[1]);
    }
    // now process the rest of the ac coeffs
    for (i = 8; i < count; i += 8) {
      const int16x8_t v_iscan = vld1q_s16(&iscan[i]);
      const int16x8_t v_coeff = load_tran_low_to_s16q(coeff_ptr + i);
      const int16x8_t v_coeff_sign = vshrq_n_s16(v_coeff, 15);
      const int16x8_t v_tmp = vabaq_s16(v_round, v_coeff, v_zero);
      const int32x4_t v_tmp_lo =
          vmull_s16(vget_low_s16(v_tmp), vget_low_s16(v_quant));
      const int32x4_t v_tmp_hi =
          vmull_s16(vget_high_s16(v_tmp), vget_high_s16(v_quant));
      const int16x8_t v_tmp2 =
          vcombine_s16(vshrn_n_s32(v_tmp_lo, 16), vshrn_n_s32(v_tmp_hi, 16));
      const uint16x8_t v_nz_mask = vceqq_s16(v_tmp2, v_zero);
      const int16x8_t v_iscan_plus1 = vaddq_s16(v_iscan, v_one);
      const int16x8_t v_nz_iscan = vbslq_s16(v_nz_mask, v_zero, v_iscan_plus1);
      const int16x8_t v_qcoeff_a = veorq_s16(v_tmp2, v_coeff_sign);
      const int16x8_t v_qcoeff = vsubq_s16(v_qcoeff_a, v_coeff_sign);
      const int16x8_t v_dqcoeff = vmulq_s16(v_qcoeff, v_dequant);
      v_eobmax_76543210 = vmaxq_s16(v_eobmax_76543210, v_nz_iscan);
      store_s16q_to_tran_low(qcoeff_ptr + i, v_qcoeff);
      store_s16q_to_tran_low(dqcoeff_ptr + i, v_dqcoeff);
    }
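    // Reduce the eight per-lane maxima of (iscan + 1) to a single scalar eob:
    // fold the two halves with vmax_s16, then use 64-bit shifts to bring the
    // surviving maxima down into lane 0.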
    {
      const int16x4_t v_eobmax_3210 = vmax_s16(
          vget_low_s16(v_eobmax_76543210), vget_high_s16(v_eobmax_76543210));
      const int64x1_t v_eobmax_xx32 =
          vshr_n_s64(vreinterpret_s64_s16(v_eobmax_3210), 32);
      const int16x4_t v_eobmax_tmp =
          vmax_s16(v_eobmax_3210, vreinterpret_s16_s64(v_eobmax_xx32));
      const int64x1_t v_eobmax_xxx3 =
          vshr_n_s64(vreinterpret_s64_s16(v_eobmax_tmp), 16);
      const int16x4_t v_eobmax_final =
          vmax_s16(v_eobmax_tmp, vreinterpret_s16_s64(v_eobmax_xxx3));

      *eob_ptr = (uint16_t)vget_lane_s16(v_eobmax_final, 0);
    }
  } else {
    memset(qcoeff_ptr, 0, count * sizeof(*qcoeff_ptr));
    memset(dqcoeff_ptr, 0, count * sizeof(*dqcoeff_ptr));
    *eob_ptr = 0;
  }
}
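// For reference, a scalar sketch of the per-coefficient math vectorized above.
// This helper is hypothetical (not part of libvpx); it assumes the same 16-bit
// round/quant/dequant tables and the fast-path convention that eob tracks the
// largest (iscan + 1) among coefficients that quantize to non-zero.
static inline void quantize_fp_scalar_sketch(int16_t coeff, int16_t round,
                                             int16_t quant, int16_t dequant,
                                             int16_t iscan, int16_t *qcoeff,
                                             int16_t *dqcoeff, int *eob) {
  const int abs_coeff = coeff < 0 ? -coeff : coeff;
  // vabaq_s16 + vmull_s16 + vshrn_n_s32: (|coeff| + round) * quant >> 16.
  const int tmp = ((abs_coeff + round) * quant) >> 16;
  // veorq_s16/vsubq_s16 against the sign mask restores the original sign.
  *qcoeff = (int16_t)(coeff < 0 ? -tmp : tmp);
  *dqcoeff = (int16_t)(*qcoeff * dequant);
  // vbslq_s16 + vmaxq_s16: coefficients quantized to zero do not advance eob.
  if (tmp != 0 && iscan + 1 > *eob) *eob = iscan + 1;
}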
// vp9/common/vp9_scan.c:vp9_default_iscan_32x32 arranges the first 34 non-zero
// coefficients as follows:
//    0  1  2  3  4  5  6  7
// 0  0  2  5 10 17 25
// 1  1  4  8 15 22 30
// 2  3  7 12 18 28
// 3  6 11 16 23 31
// 4  9 14 19 29
// 5 13 20 26
// 6 21 27 33
// 7 24 32
void vpx_idct32_6_neon(const tran_low_t *input, int16_t *output) {
  int16x8_t in[8], s1[32], s2[32], s3[32];

  in[0] = load_tran_low_to_s16q(input);
  input += 32;
  in[1] = load_tran_low_to_s16q(input);
  input += 32;
  in[2] = load_tran_low_to_s16q(input);
  input += 32;
  in[3] = load_tran_low_to_s16q(input);
  input += 32;
  in[4] = load_tran_low_to_s16q(input);
  input += 32;
  in[5] = load_tran_low_to_s16q(input);
  input += 32;
  in[6] = load_tran_low_to_s16q(input);
  input += 32;
  in[7] = load_tran_low_to_s16q(input);
  transpose_s16_8x8(&in[0], &in[1], &in[2], &in[3], &in[4], &in[5], &in[6],
                    &in[7]);

  // stage 1
  // input[1] * cospi_31_64 - input[31] * cospi_1_64 (but input[31] == 0)
  s1[16] = multiply_shift_and_narrow_s16(in[1], cospi_31_64);
  // input[1] * cospi_1_64 + input[31] * cospi_31_64 (but input[31] == 0)
  s1[31] = multiply_shift_and_narrow_s16(in[1], cospi_1_64);

  s1[20] = multiply_shift_and_narrow_s16(in[5], cospi_27_64);
  s1[27] = multiply_shift_and_narrow_s16(in[5], cospi_5_64);

  s1[23] = multiply_shift_and_narrow_s16(in[3], -cospi_29_64);
  s1[24] = multiply_shift_and_narrow_s16(in[3], cospi_3_64);

  // stage 2
  s2[8] = multiply_shift_and_narrow_s16(in[2], cospi_30_64);
  s2[15] = multiply_shift_and_narrow_s16(in[2], cospi_2_64);

  // stage 3
  s1[4] = multiply_shift_and_narrow_s16(in[4], cospi_28_64);
  s1[7] = multiply_shift_and_narrow_s16(in[4], cospi_4_64);

  s1[17] = multiply_accumulate_shift_and_narrow_s16(s1[16], -cospi_4_64,
                                                    s1[31], cospi_28_64);
  s1[30] = multiply_accumulate_shift_and_narrow_s16(s1[16], cospi_28_64,
                                                    s1[31], cospi_4_64);

  s1[21] = multiply_accumulate_shift_and_narrow_s16(s1[20], -cospi_20_64,
                                                    s1[27], cospi_12_64);
  s1[26] = multiply_accumulate_shift_and_narrow_s16(s1[20], cospi_12_64,
                                                    s1[27], cospi_20_64);

  s1[22] = multiply_accumulate_shift_and_narrow_s16(s1[23], -cospi_12_64,
                                                    s1[24], -cospi_20_64);
  s1[25] = multiply_accumulate_shift_and_narrow_s16(s1[23], -cospi_20_64,
                                                    s1[24], cospi_12_64);

  // stage 4
  s1[0] = multiply_shift_and_narrow_s16(in[0], cospi_16_64);

  s2[9] = multiply_accumulate_shift_and_narrow_s16(s2[8], -cospi_8_64, s2[15],
                                                   cospi_24_64);
  s2[14] = multiply_accumulate_shift_and_narrow_s16(s2[8], cospi_24_64, s2[15],
                                                    cospi_8_64);

  s2[20] = vsubq_s16(s1[23], s1[20]);
  s2[21] = vsubq_s16(s1[22], s1[21]);
  s2[22] = vaddq_s16(s1[21], s1[22]);
  s2[23] = vaddq_s16(s1[20], s1[23]);
  s2[24] = vaddq_s16(s1[24], s1[27]);
  s2[25] = vaddq_s16(s1[25], s1[26]);
  s2[26] = vsubq_s16(s1[25], s1[26]);
  s2[27] = vsubq_s16(s1[24], s1[27]);

  // stage 5
  s1[5] = sub_multiply_shift_and_narrow_s16(s1[7], s1[4], cospi_16_64);
  s1[6] = add_multiply_shift_and_narrow_s16(s1[4], s1[7], cospi_16_64);

  s1[18] = multiply_accumulate_shift_and_narrow_s16(s1[17], -cospi_8_64,
                                                    s1[30], cospi_24_64);
  s1[29] = multiply_accumulate_shift_and_narrow_s16(s1[17], cospi_24_64,
                                                    s1[30], cospi_8_64);

  s1[19] = multiply_accumulate_shift_and_narrow_s16(s1[16], -cospi_8_64,
                                                    s1[31], cospi_24_64);
  s1[28] = multiply_accumulate_shift_and_narrow_s16(s1[16], cospi_24_64,
                                                    s1[31], cospi_8_64);

  s1[20] = multiply_accumulate_shift_and_narrow_s16(s2[20], -cospi_24_64,
                                                    s2[27], -cospi_8_64);
  s1[27] = multiply_accumulate_shift_and_narrow_s16(s2[20], -cospi_8_64,
                                                    s2[27], cospi_24_64);

  s1[21] = multiply_accumulate_shift_and_narrow_s16(s2[21], -cospi_24_64,
                                                    s2[26], -cospi_8_64);
  s1[26] = multiply_accumulate_shift_and_narrow_s16(s2[21], -cospi_8_64,
                                                    s2[26], cospi_24_64);
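  // Note: multiply_accumulate_shift_and_narrow_s16(a, c0, b, c1), used
  // heavily above, computes the rounded (a * c0 + b * c1) >> DCT_CONST_BITS
  // per lane; see vpx_dsp/arm/idct_neon.h for the helper definitions.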
  // stage 6
  s2[0] = vaddq_s16(s1[0], s1[7]);
  s2[1] = vaddq_s16(s1[0], s1[6]);
  s2[2] = vaddq_s16(s1[0], s1[5]);
  s2[3] = vaddq_s16(s1[0], s1[4]);
  s2[4] = vsubq_s16(s1[0], s1[4]);
  s2[5] = vsubq_s16(s1[0], s1[5]);
  s2[6] = vsubq_s16(s1[0], s1[6]);
  s2[7] = vsubq_s16(s1[0], s1[7]);

  s2[10] = sub_multiply_shift_and_narrow_s16(s2[14], s2[9], cospi_16_64);
  s2[13] = add_multiply_shift_and_narrow_s16(s2[9], s2[14], cospi_16_64);

  s2[11] = sub_multiply_shift_and_narrow_s16(s2[15], s2[8], cospi_16_64);
  s2[12] = add_multiply_shift_and_narrow_s16(s2[8], s2[15], cospi_16_64);

  s2[16] = vaddq_s16(s1[16], s2[23]);
  s2[17] = vaddq_s16(s1[17], s2[22]);
  s2[18] = vaddq_s16(s1[18], s1[21]);
  s2[19] = vaddq_s16(s1[19], s1[20]);
  s2[20] = vsubq_s16(s1[19], s1[20]);
  s2[21] = vsubq_s16(s1[18], s1[21]);
  s2[22] = vsubq_s16(s1[17], s2[22]);
  s2[23] = vsubq_s16(s1[16], s2[23]);
  s3[24] = vsubq_s16(s1[31], s2[24]);
  s3[25] = vsubq_s16(s1[30], s2[25]);
  s3[26] = vsubq_s16(s1[29], s1[26]);
  s3[27] = vsubq_s16(s1[28], s1[27]);
  s2[28] = vaddq_s16(s1[27], s1[28]);
  s2[29] = vaddq_s16(s1[26], s1[29]);
  s2[30] = vaddq_s16(s2[25], s1[30]);
  s2[31] = vaddq_s16(s2[24], s1[31]);

  // stage 7
  s1[0] = vaddq_s16(s2[0], s2[15]);
  s1[1] = vaddq_s16(s2[1], s2[14]);
  s1[2] = vaddq_s16(s2[2], s2[13]);
  s1[3] = vaddq_s16(s2[3], s2[12]);
  s1[4] = vaddq_s16(s2[4], s2[11]);
  s1[5] = vaddq_s16(s2[5], s2[10]);
  s1[6] = vaddq_s16(s2[6], s2[9]);
  s1[7] = vaddq_s16(s2[7], s2[8]);
  s1[8] = vsubq_s16(s2[7], s2[8]);
  s1[9] = vsubq_s16(s2[6], s2[9]);
  s1[10] = vsubq_s16(s2[5], s2[10]);
  s1[11] = vsubq_s16(s2[4], s2[11]);
  s1[12] = vsubq_s16(s2[3], s2[12]);
  s1[13] = vsubq_s16(s2[2], s2[13]);
  s1[14] = vsubq_s16(s2[1], s2[14]);
  s1[15] = vsubq_s16(s2[0], s2[15]);

  s1[20] = sub_multiply_shift_and_narrow_s16(s3[27], s2[20], cospi_16_64);
  s1[27] = add_multiply_shift_and_narrow_s16(s2[20], s3[27], cospi_16_64);

  s1[21] = sub_multiply_shift_and_narrow_s16(s3[26], s2[21], cospi_16_64);
  s1[26] = add_multiply_shift_and_narrow_s16(s2[21], s3[26], cospi_16_64);

  s1[22] = sub_multiply_shift_and_narrow_s16(s3[25], s2[22], cospi_16_64);
  s1[25] = add_multiply_shift_and_narrow_s16(s2[22], s3[25], cospi_16_64);

  s1[23] = sub_multiply_shift_and_narrow_s16(s3[24], s2[23], cospi_16_64);
  s1[24] = add_multiply_shift_and_narrow_s16(s2[23], s3[24], cospi_16_64);

  // final stage
  vst1q_s16(output, vaddq_s16(s1[0], s2[31]));
  output += 8;
  vst1q_s16(output, vaddq_s16(s1[1], s2[30]));
  output += 8;
  vst1q_s16(output, vaddq_s16(s1[2], s2[29]));
  output += 8;
  vst1q_s16(output, vaddq_s16(s1[3], s2[28]));
  output += 8;
  vst1q_s16(output, vaddq_s16(s1[4], s1[27]));
  output += 8;
  vst1q_s16(output, vaddq_s16(s1[5], s1[26]));
  output += 8;
  vst1q_s16(output, vaddq_s16(s1[6], s1[25]));
  output += 8;
  vst1q_s16(output, vaddq_s16(s1[7], s1[24]));
  output += 8;
  vst1q_s16(output, vaddq_s16(s1[8], s1[23]));
  output += 8;
  vst1q_s16(output, vaddq_s16(s1[9], s1[22]));
  output += 8;
  vst1q_s16(output, vaddq_s16(s1[10], s1[21]));
  output += 8;
  vst1q_s16(output, vaddq_s16(s1[11], s1[20]));
  output += 8;
  vst1q_s16(output, vaddq_s16(s1[12], s2[19]));
  output += 8;
  vst1q_s16(output, vaddq_s16(s1[13], s2[18]));
  output += 8;
  vst1q_s16(output, vaddq_s16(s1[14], s2[17]));
  output += 8;
  vst1q_s16(output, vaddq_s16(s1[15], s2[16]));
  output += 8;
  vst1q_s16(output, vsubq_s16(s1[15], s2[16]));
  output += 8;
  vst1q_s16(output, vsubq_s16(s1[14], s2[17]));
  output += 8;
  vst1q_s16(output, vsubq_s16(s1[13], s2[18]));
  output += 8;
  vst1q_s16(output, vsubq_s16(s1[12], s2[19]));
  output += 8;
  vst1q_s16(output, vsubq_s16(s1[11], s1[20]));
  output += 8;
  vst1q_s16(output, vsubq_s16(s1[10], s1[21]));
  output += 8;
  vst1q_s16(output, vsubq_s16(s1[9], s1[22]));
  output += 8;
  vst1q_s16(output, vsubq_s16(s1[8], s1[23]));
  output += 8;
  vst1q_s16(output, vsubq_s16(s1[7], s1[24]));
  output += 8;
  vst1q_s16(output, vsubq_s16(s1[6], s1[25]));
  output += 8;
  vst1q_s16(output, vsubq_s16(s1[5], s1[26]));
  output += 8;
  vst1q_s16(output, vsubq_s16(s1[4], s1[27]));
  output += 8;
  vst1q_s16(output, vsubq_s16(s1[3], s2[28]));
  output += 8;
  vst1q_s16(output, vsubq_s16(s1[2], s2[29]));
  output += 8;
  vst1q_s16(output, vsubq_s16(s1[1], s2[30]));
  output += 8;
  vst1q_s16(output, vsubq_s16(s1[0], s2[31]));
}
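// The arithmetic helpers used throughout vpx_idct32_6_neon come from
// vpx_dsp/arm/idct_neon.h. As an illustrative sketch (hypothetical name,
// assuming DCT_CONST_BITS == 14, the rounding shift of dct_const_round_shift
// in vpx_dsp/txfm_common.h), the single-constant variant amounts to:
static inline int16x8_t multiply_shift_and_narrow_s16_sketch(
    const int16x8_t a, const int16_t a_const) {
  // Widen to 32 bits, multiply by the cosine constant, then round, shift by
  // DCT_CONST_BITS and narrow each lane back to 16 bits.
  const int32x4_t lo = vmull_n_s16(vget_low_s16(a), a_const);
  const int32x4_t hi = vmull_n_s16(vget_high_s16(a), a_const);
  return vcombine_s16(vrshrn_n_s32(lo, 14), vrshrn_n_s32(hi, 14));
}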