Example #1
/* s16x8 matrix-vector multiply (mv mul) */
void mw_neon_mv_mul_s16x8(short * A, int Row, int T, short * B, short * C)
{
	int i = 0;
	int k = 0;

	int16x8_t neon_b, neon_c;
	int16x8_t neon_a0, neon_a1, neon_a2, neon_a3, neon_a4, neon_a5, neon_a6, neon_a7;
	int16x8_t neon_b0, neon_b1, neon_b2, neon_b3, neon_b4, neon_b5, neon_b6, neon_b7;

	for (i = 0; i < Row; i+=8)
	{
		neon_c = vmovq_n_s16(0); /* zero the 8-lane accumulator for output rows i..i+7 */

		for (k = 0; k < T; k+=8)
		{
			int j = k * T + i;

			/* load eight 8-element slices of A, stepping the offset by Row between loads */
			neon_a0 = vld1q_s16(A + j);
			j+=Row;
			neon_a1 = vld1q_s16(A + j);
			j+=Row;
			neon_a2 = vld1q_s16(A + j);
			j+=Row;
			neon_a3 = vld1q_s16(A + j);
			j+=Row;
			neon_a4 = vld1q_s16(A + j);
			j+=Row;
			neon_a5 = vld1q_s16(A + j);
			j+=Row;
			neon_a6 = vld1q_s16(A + j);
			j+=Row;
			neon_a7 = vld1q_s16(A + j);

			/* load eight elements of B and broadcast each one across all lanes */
			neon_b = vld1q_s16(B + k);
			neon_b0 = vdupq_n_s16(vgetq_lane_s16(neon_b, 0));
			neon_b1 = vdupq_n_s16(vgetq_lane_s16(neon_b, 1));
			neon_b2 = vdupq_n_s16(vgetq_lane_s16(neon_b, 2));
			neon_b3 = vdupq_n_s16(vgetq_lane_s16(neon_b, 3));
			neon_b4 = vdupq_n_s16(vgetq_lane_s16(neon_b, 4));
			neon_b5 = vdupq_n_s16(vgetq_lane_s16(neon_b, 5));
			neon_b6 = vdupq_n_s16(vgetq_lane_s16(neon_b, 6));
			neon_b7 = vdupq_n_s16(vgetq_lane_s16(neon_b, 7));

			/* multiply-accumulate: neon_c += neon_aN * neon_bN */
			neon_c = vaddq_s16(vmulq_s16(neon_a0, neon_b0), neon_c);
			neon_c = vaddq_s16(vmulq_s16(neon_a1, neon_b1), neon_c);
			neon_c = vaddq_s16(vmulq_s16(neon_a2, neon_b2), neon_c);
			neon_c = vaddq_s16(vmulq_s16(neon_a3, neon_b3), neon_c);
			neon_c = vaddq_s16(vmulq_s16(neon_a4, neon_b4), neon_c);
			neon_c = vaddq_s16(vmulq_s16(neon_a5, neon_b5), neon_c);
			neon_c = vaddq_s16(vmulq_s16(neon_a6, neon_b6), neon_c);
			neon_c = vaddq_s16(vmulq_s16(neon_a7, neon_b7), neon_c);

		}

		vst1q_s16(C + i, neon_c); /* store the eight accumulated sums into C[i..i+7] */
	}
}
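
A minimal harness for the kernel above might look like this. This is a sketch, not part of the original source: it assumes an ARM/AArch64 target with NEON, that mw_neon_mv_mul_s16x8 from Example #1 is compiled into the same program, and an 8x8 problem size, for which the kernel's indexing reads A column-major (element (r, c) at A[c * Row + r]).

#include <stdio.h>

/* declared here for illustration; the definition is Example #1 above */
void mw_neon_mv_mul_s16x8(short *A, int Row, int T, short *B, short *C);

int main(void)
{
	enum { ROWS = 8, COLS = 8 };
	short A[ROWS * COLS], B[COLS], C[ROWS];
	int r, c;

	/* A = identity (column-major), B = 0..7, so C should come back equal to B */
	for (c = 0; c < COLS; c++)
		for (r = 0; r < ROWS; r++)
			A[c * ROWS + r] = (short)(r == c);
	for (c = 0; c < COLS; c++)
		B[c] = (short)c;

	mw_neon_mv_mul_s16x8(A, ROWS, COLS, B, C);

	for (r = 0; r < ROWS; r++)
		printf("%d ", C[r]);
	printf("\n");
	return 0;
}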
Example #2
int16x8_t test_vmovq_n_s16(int16_t v1) {
  // CHECK: test_vmovq_n_s16
  return vmovq_n_s16(v1);
  // CHECK: dup {{v[0-9]+}}.8h, {{w[0-9]+}}
}
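
The test above only checks the generated instruction (a single dup). Semantically, vmovq_n_s16 replicates one int16 scalar into all eight lanes, the same splat that Examples #1 and #4 build with vgetq_lane_s16 + vdupq_n_s16. A minimal sketch, assuming <arm_neon.h> is available on the target:

#include <arm_neon.h>
#include <assert.h>

/* sketch: verify that vmovq_n_s16 broadcasts the scalar into every lane */
static void check_vmovq_n_s16(void) {
  int16x8_t v = vmovq_n_s16(42);
  int16_t out[8];
  int i;

  vst1q_s16(out, v);
  for (i = 0; i < 8; i++)
    assert(out[i] == 42);
}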
Example #3
void vp9_quantize_fp_neon(const tran_low_t *coeff_ptr, intptr_t count,
                          int skip_block, const int16_t *zbin_ptr,
                          const int16_t *round_ptr, const int16_t *quant_ptr,
                          const int16_t *quant_shift_ptr,
                          tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
                          const int16_t *dequant_ptr, uint16_t *eob_ptr,
                          const int16_t *scan, const int16_t *iscan) {
  // TODO(jingning) Decide the need of these arguments after the
  // quantization process is completed.
  (void)zbin_ptr;
  (void)quant_shift_ptr;
  (void)scan;

  if (!skip_block) {
    // Quantization pass: All coefficients with index >= zero_flag are
    // skippable. Note: zero_flag can be zero.
    int i;
    const int16x8_t v_zero = vdupq_n_s16(0);
    const int16x8_t v_one = vdupq_n_s16(1);
    int16x8_t v_eobmax_76543210 = vdupq_n_s16(-1);
    int16x8_t v_round = vmovq_n_s16(round_ptr[1]);
    int16x8_t v_quant = vmovq_n_s16(quant_ptr[1]);
    int16x8_t v_dequant = vmovq_n_s16(dequant_ptr[1]);
    // adjust for dc
    v_round = vsetq_lane_s16(round_ptr[0], v_round, 0);
    v_quant = vsetq_lane_s16(quant_ptr[0], v_quant, 0);
    v_dequant = vsetq_lane_s16(dequant_ptr[0], v_dequant, 0);
    // process dc and the first seven ac coeffs
    {
      const int16x8_t v_iscan = vld1q_s16(&iscan[0]);
      const int16x8_t v_coeff = load_tran_low_to_s16q(coeff_ptr);
      const int16x8_t v_coeff_sign = vshrq_n_s16(v_coeff, 15);
      const int16x8_t v_tmp = vabaq_s16(v_round, v_coeff, v_zero);
      const int32x4_t v_tmp_lo =
          vmull_s16(vget_low_s16(v_tmp), vget_low_s16(v_quant));
      const int32x4_t v_tmp_hi =
          vmull_s16(vget_high_s16(v_tmp), vget_high_s16(v_quant));
      const int16x8_t v_tmp2 =
          vcombine_s16(vshrn_n_s32(v_tmp_lo, 16), vshrn_n_s32(v_tmp_hi, 16));
      const uint16x8_t v_nz_mask = vceqq_s16(v_tmp2, v_zero);
      const int16x8_t v_iscan_plus1 = vaddq_s16(v_iscan, v_one);
      const int16x8_t v_nz_iscan = vbslq_s16(v_nz_mask, v_zero, v_iscan_plus1);
      const int16x8_t v_qcoeff_a = veorq_s16(v_tmp2, v_coeff_sign);
      const int16x8_t v_qcoeff = vsubq_s16(v_qcoeff_a, v_coeff_sign);
      const int16x8_t v_dqcoeff = vmulq_s16(v_qcoeff, v_dequant);
      v_eobmax_76543210 = vmaxq_s16(v_eobmax_76543210, v_nz_iscan);
      store_s16q_to_tran_low(qcoeff_ptr, v_qcoeff);
      store_s16q_to_tran_low(dqcoeff_ptr, v_dqcoeff);
      v_round = vmovq_n_s16(round_ptr[1]);
      v_quant = vmovq_n_s16(quant_ptr[1]);
      v_dequant = vmovq_n_s16(dequant_ptr[1]);
    }
    // now process the rest of the ac coeffs
    for (i = 8; i < count; i += 8) {
      const int16x8_t v_iscan = vld1q_s16(&iscan[i]);
      const int16x8_t v_coeff = load_tran_low_to_s16q(coeff_ptr + i);
      const int16x8_t v_coeff_sign = vshrq_n_s16(v_coeff, 15);
      const int16x8_t v_tmp = vabaq_s16(v_round, v_coeff, v_zero);
      const int32x4_t v_tmp_lo =
          vmull_s16(vget_low_s16(v_tmp), vget_low_s16(v_quant));
      const int32x4_t v_tmp_hi =
          vmull_s16(vget_high_s16(v_tmp), vget_high_s16(v_quant));
      const int16x8_t v_tmp2 =
          vcombine_s16(vshrn_n_s32(v_tmp_lo, 16), vshrn_n_s32(v_tmp_hi, 16));
      const uint16x8_t v_nz_mask = vceqq_s16(v_tmp2, v_zero);
      const int16x8_t v_iscan_plus1 = vaddq_s16(v_iscan, v_one);
      const int16x8_t v_nz_iscan = vbslq_s16(v_nz_mask, v_zero, v_iscan_plus1);
      const int16x8_t v_qcoeff_a = veorq_s16(v_tmp2, v_coeff_sign);
      const int16x8_t v_qcoeff = vsubq_s16(v_qcoeff_a, v_coeff_sign);
      const int16x8_t v_dqcoeff = vmulq_s16(v_qcoeff, v_dequant);
      v_eobmax_76543210 = vmaxq_s16(v_eobmax_76543210, v_nz_iscan);
      store_s16q_to_tran_low(qcoeff_ptr + i, v_qcoeff);
      store_s16q_to_tran_low(dqcoeff_ptr + i, v_dqcoeff);
    }
    {
      const int16x4_t v_eobmax_3210 = vmax_s16(
          vget_low_s16(v_eobmax_76543210), vget_high_s16(v_eobmax_76543210));
      const int64x1_t v_eobmax_xx32 =
          vshr_n_s64(vreinterpret_s64_s16(v_eobmax_3210), 32);
      const int16x4_t v_eobmax_tmp =
          vmax_s16(v_eobmax_3210, vreinterpret_s16_s64(v_eobmax_xx32));
      const int64x1_t v_eobmax_xxx3 =
          vshr_n_s64(vreinterpret_s64_s16(v_eobmax_tmp), 16);
      const int16x4_t v_eobmax_final =
          vmax_s16(v_eobmax_tmp, vreinterpret_s16_s64(v_eobmax_xxx3));

      *eob_ptr = (uint16_t)vget_lane_s16(v_eobmax_final, 0);
    }
  } else {
    memset(qcoeff_ptr, 0, count * sizeof(*qcoeff_ptr));
    memset(dqcoeff_ptr, 0, count * sizeof(*dqcoeff_ptr));
    *eob_ptr = 0;
  }
}
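
Restated per coefficient, the arithmetic each 8-lane block above performs is roughly the scalar sketch below. It is for illustration only: the libvpx helpers load_tran_low_to_s16q / store_s16q_to_tran_low (which convert between tran_low_t and int16_t) are not shown, and the sketch mirrors the code above rather than the full libvpx quantizer.

#include <stdint.h>

/* Scalar sketch of one coefficient of the quantization pass above. */
static void quantize_one_sketch(int16_t coeff, int16_t round, int16_t quant,
                                int16_t dequant, int16_t iscan,
                                int16_t *qcoeff, int16_t *dqcoeff,
                                int16_t *eobmax) {
  const int16_t sign = (int16_t)(coeff < 0 ? -1 : 0);        /* vshrq_n_s16(v_coeff, 15) */
  const int16_t abs_coeff = (int16_t)(coeff < 0 ? -coeff : coeff);
  const int16_t tmp = (int16_t)(round + abs_coeff);          /* vabaq_s16(v_round, v_coeff, 0) */
  const int16_t tmp2 = (int16_t)(((int32_t)tmp * quant) >> 16);  /* vmull_s16 + vshrn_n_s32 */
  /* positions whose quantized value is zero do not advance the eob */
  const int16_t nz_iscan = (int16_t)(tmp2 == 0 ? 0 : iscan + 1);

  *qcoeff = (int16_t)((tmp2 ^ sign) - sign);                 /* reapply the sign */
  *dqcoeff = (int16_t)(*qcoeff * dequant);
  if (nz_iscan > *eobmax) *eobmax = nz_iscan;                 /* running max -> *eob_ptr */
}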
Example #4
/* s16x8 matrix-matrix multiply (mm mul) */
void mw_neon_mm_mul_s16x8(short * A, int Row, int T, short * B, int Col, short * C)
{
	int i, k, j;

	int16x8_t neon_b, neon_c;
	int16x8_t neon_a0, neon_a1, neon_a2, neon_a3, neon_a4, neon_a5, neon_a6, neon_a7;
	int16x8_t neon_b0, neon_b1, neon_b2, neon_b3, neon_b4, neon_b5, neon_b6, neon_b7;

	for (i = 0; i < Row; i+=8)
	{

		for (k = 0; k < Col; k+=1)
		{
			neon_c = vmovq_n_s16(0); /* zero the accumulator for output elements (i..i+7, k) */

			for (j = 0; j < T; j+=8)
			{

				int j_T = j * T + i;
				int k_Row = k * Row;

				/* load eight 8-element slices of A, stepping the offset by Row between loads */
				neon_a0 = vld1q_s16(A + j_T);
				j_T+=Row;
				neon_a1 = vld1q_s16(A + j_T);
				j_T+=Row;
				neon_a2 = vld1q_s16(A + j_T);
				j_T+=Row;
				neon_a3 = vld1q_s16(A + j_T);
				j_T+=Row;
				neon_a4 = vld1q_s16(A + j_T);
				j_T+=Row;
				neon_a5 = vld1q_s16(A + j_T);
				j_T+=Row;
				neon_a6 = vld1q_s16(A + j_T);
				j_T+=Row;
				neon_a7 = vld1q_s16(A + j_T);

				/* load eight elements of B and broadcast each one across all lanes */
				neon_b = vld1q_s16(B + k_Row + j);
				neon_b0 = vdupq_n_s16(vgetq_lane_s16(neon_b, 0));
				neon_b1 = vdupq_n_s16(vgetq_lane_s16(neon_b, 1));
				neon_b2 = vdupq_n_s16(vgetq_lane_s16(neon_b, 2));
				neon_b3 = vdupq_n_s16(vgetq_lane_s16(neon_b, 3));
				neon_b4 = vdupq_n_s16(vgetq_lane_s16(neon_b, 4));
				neon_b5 = vdupq_n_s16(vgetq_lane_s16(neon_b, 5));
				neon_b6 = vdupq_n_s16(vgetq_lane_s16(neon_b, 6));
				neon_b7 = vdupq_n_s16(vgetq_lane_s16(neon_b, 7));

				/* multiply-accumulate: neon_c += neon_aN * neon_bN */
				neon_c = vaddq_s16(vmulq_s16(neon_a0, neon_b0), neon_c);
				neon_c = vaddq_s16(vmulq_s16(neon_a1, neon_b1), neon_c);
				neon_c = vaddq_s16(vmulq_s16(neon_a2, neon_b2), neon_c);
				neon_c = vaddq_s16(vmulq_s16(neon_a3, neon_b3), neon_c);
				neon_c = vaddq_s16(vmulq_s16(neon_a4, neon_b4), neon_c);
				neon_c = vaddq_s16(vmulq_s16(neon_a5, neon_b5), neon_c);
				neon_c = vaddq_s16(vmulq_s16(neon_a6, neon_b6), neon_c);
				neon_c = vaddq_s16(vmulq_s16(neon_a7, neon_b7), neon_c);

				/* store the running sums lane by lane; the store from the last j iteration holds the final result */
				vst1q_lane_s16(C + k_Row + i, neon_c, 0);
				vst1q_lane_s16(C + k_Row + i + 1, neon_c, 1);
				vst1q_lane_s16(C + k_Row + i + 2, neon_c, 2);
				vst1q_lane_s16(C + k_Row + i + 3, neon_c, 3);
				vst1q_lane_s16(C + k_Row + i + 4, neon_c, 4);
				vst1q_lane_s16(C + k_Row + i + 5, neon_c, 5);
				vst1q_lane_s16(C + k_Row + i + 6, neon_c, 6);
				vst1q_lane_s16(C + k_Row + i + 7, neon_c, 7);

			}
		}
	}
}
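
Restated as plain scalar code, the kernel above computes the following. This is a sketch under the assumptions that A, B and C are stored column-major, that Row, T and Col are multiples of 8, and that Row == T (the kernel mixes Row and T as load strides, which only line up for square blocks).

/* Scalar reference for the s16x8 matrix-matrix multiply above (sketch only).
   A is Row x T, B is T x Col, C is Row x Col; element (r, c) of an M-row
   matrix lives at index c * M + r. The final cast to short truncates to
   16 bits, matching the wrapping vmulq_s16/vaddq_s16 arithmetic. */
void mm_mul_s16_ref(const short *A, int Row, int T, const short *B, int Col, short *C)
{
	int i, k, j;

	for (k = 0; k < Col; k++)
	{
		for (i = 0; i < Row; i++)
		{
			int acc = 0;

			for (j = 0; j < T; j++)
				acc += A[j * Row + i] * B[k * T + j];

			C[k * Row + i] = (short)acc;
		}
	}
}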