Example #1
File: quant-c.c Project: xing2fan/x264
static int32_t avc_coeff_last16_msa( int16_t *p_src )
{
    uint32_t u_res;
    v8i16 src0, src1;
    v8u16 tmp_h0;
    v16u8 tmp0;
    v8i16 out0, out1;
    v16i8 res0;
    v16u8 mask = { 1, 2, 4, 8, 16, 32, 64, 128, 1, 2, 4, 8, 16, 32, 64, 128 };

    LD_SH2( p_src, 8, src0, src1 );

    /* 0xFFFF where the coefficient is zero, 0 where it is non-zero */
    out0 = __msa_ceqi_h( src0, 0 );
    out1 = __msa_ceqi_h( src1, 0 );

    /* Pack the flags to bytes and keep one bit per coefficient, then fold the
       bytes together with repeated horizontal adds until a single 16-bit mask
       remains, with bit i set when coefficient i is zero. */
    tmp0 = ( v16u8 ) __msa_pckev_b( ( v16i8 ) out1, ( v16i8 ) out0 );
    tmp0 = tmp0 & mask;
    tmp_h0 = __msa_hadd_u_h( tmp0, tmp0 );
    tmp0 = ( v16u8 ) __msa_pckev_b( ( v16i8 ) tmp_h0, ( v16i8 ) tmp_h0 );
    tmp_h0 = __msa_hadd_u_h( tmp0, tmp0 );
    tmp0 = ( v16u8 ) __msa_pckev_b( ( v16i8 ) tmp_h0, ( v16i8 ) tmp_h0 );
    tmp_h0 = __msa_hadd_u_h( tmp0, tmp0 );
    res0 = __msa_pckev_b( ( v16i8 ) tmp_h0, ( v16i8 ) tmp_h0 );
    /* Leading ones in the mask = number of trailing zero coefficients */
    out0 = __msa_nloc_h( ( v8i16 ) res0 );
    u_res = __msa_copy_u_h( out0, 0 );

    return ( 15 - u_res ); /* index of the last non-zero coefficient, -1 if none */
}
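For reference, a minimal scalar sketch of what this routine computes: the index of the last non-zero coefficient in a 16-entry block, or -1 when every coefficient is zero. The helper name below is illustrative and not part of x264.

static int32_t coeff_last16_ref( const int16_t *p_src )
{
    /* Walk backwards and stop at the first non-zero coefficient. */
    int32_t i = 15;

    while( i >= 0 && p_src[i] == 0 )
        i--;

    return i;
}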
Example #2
void vpx_fdct4x4_msa(const int16_t *input, int16_t *output,
                     int32_t src_stride) {
  v8i16 in0, in1, in2, in3;

  LD_SH4(input, src_stride, in0, in1, in2, in3);

  /* fdct4 pre-process */
  {
    v8i16 vec, mask;
    v16i8 zero = { 0 };
    v16i8 one = __msa_ldi_b(1);

    /* mask = { 1, 0, 0, ... }: selects only the first (DC) halfword */
    mask = (v8i16)__msa_sldi_b(zero, one, 15);
    SLLI_4V(in0, in1, in2, in3, 4); /* scale all inputs by 16 */
    vec = __msa_ceqi_h(in0, 0);
    vec = vec ^ 255;
    vec = mask & vec; /* 1 in lane 0 iff in0[0] is non-zero */
    in0 += vec;       /* DC term gets +1 when it is non-zero */
  }

  VP9_FDCT4(in0, in1, in2, in3, in0, in1, in2, in3);
  TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
  VP9_FDCT4(in0, in1, in2, in3, in0, in1, in2, in3);
  TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
  /* rounding: (x + 1) >> 2 */
  ADD4(in0, 1, in1, 1, in2, 1, in3, 1, in0, in1, in2, in3);
  SRA_4V(in0, in1, in2, in3, 2);
  PCKEV_D2_SH(in1, in0, in3, in2, in0, in2);
  ST_SH2(in0, in2, output, 8);
}
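The pre-process block above scales every coefficient by 16 and adds 1 to the DC term when it is non-zero, the same convention as the scalar vpx_fdct4x4_c reference. A minimal scalar sketch of that step alone; the helper name and the 4x4 output layout are illustrative assumptions, not libvpx code.

/* Illustrative scalar equivalent of the "fdct4 pre-process" block above. */
static void fdct4x4_preprocess_ref(const int16_t *input, int16_t out[4][4],
                                   int32_t src_stride) {
  int r, c;

  for (r = 0; r < 4; ++r) {
    for (c = 0; c < 4; ++c) {
      out[r][c] = (int16_t)(input[r * src_stride + c] * 16); /* << 4 */
    }
  }
  if (out[0][0]) out[0][0] += 1; /* DC term gets +1 when non-zero */
}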
Example #3
void vp9_fht4x4_msa(const int16_t *input, int16_t *output, int32_t stride,
                    int32_t tx_type) {
  v8i16 in0, in1, in2, in3;

  LD_SH4(input, stride, in0, in1, in2, in3);

  /* fdct4 pre-process */
  {
    v8i16 temp, mask;
    v16i8 zero = { 0 };
    v16i8 one = __msa_ldi_b(1);

    mask = (v8i16)__msa_sldi_b(zero, one, 15);
    SLLI_4V(in0, in1, in2, in3, 4);
    temp = __msa_ceqi_h(in0, 0);
    temp = (v8i16)__msa_xori_b((v16u8)temp, 255);
    temp = mask & temp;
    in0 += temp;
  }

  switch (tx_type) {
    case DCT_DCT:
      VP9_FDCT4(in0, in1, in2, in3, in0, in1, in2, in3);
      TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
      VP9_FDCT4(in0, in1, in2, in3, in0, in1, in2, in3);
      break;
    case ADST_DCT:
      VP9_FADST4(in0, in1, in2, in3, in0, in1, in2, in3);
      TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
      VP9_FDCT4(in0, in1, in2, in3, in0, in1, in2, in3);
      break;
    case DCT_ADST:
      VP9_FDCT4(in0, in1, in2, in3, in0, in1, in2, in3);
      TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
      VP9_FADST4(in0, in1, in2, in3, in0, in1, in2, in3);
      break;
    case ADST_ADST:
      VP9_FADST4(in0, in1, in2, in3, in0, in1, in2, in3);
      TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
      VP9_FADST4(in0, in1, in2, in3, in0, in1, in2, in3);
      break;
    default:
      assert(0);
      break;
  }

  TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
  ADD4(in0, 1, in1, 1, in2, 1, in3, 1, in0, in1, in2, in3);
  SRA_4V(in0, in1, in2, in3, 2);
  PCKEV_D2_SH(in1, in0, in3, in2, in0, in2);
  ST_SH2(in0, in2, output, 8);
}
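With tx_type == DCT_DCT this runs the same FDCT/transpose/FDCT sequence as vpx_fdct4x4_msa above, so the two paths should produce identical output. A small illustrative check of that equivalence; the harness itself is ours and assumes the two prototypes above plus the DCT_DCT enumerator are visible through the usual libvpx headers.

#include <stdio.h>
#include <string.h>

/* Hypothetical harness: compare vp9_fht4x4_msa(DCT_DCT) with vpx_fdct4x4_msa. */
static void check_dct_dct_matches_fdct(const int16_t input[16]) {
  int16_t out_fdct[16], out_fht[16];

  vpx_fdct4x4_msa(input, out_fdct, 4); /* stride 4 = packed 4x4 block */
  vp9_fht4x4_msa(input, out_fht, 4, DCT_DCT);

  printf("%s\n",
         memcmp(out_fdct, out_fht, sizeof(out_fdct)) ? "differ" : "match");
}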
Example #4
File: quant-c.c Project: xing2fan/x264
static int32_t avc_coeff_last64_msa( int16_t *p_src )
{
    uint32_t u_res;
    v8i16 src0, src1, src2, src3, src4, src5, src6, src7;
    v8i16 tmp_h0, tmp_h1, tmp_h2, tmp_h3, tmp_h4, tmp_h5, tmp_h6, tmp_h7;
    v16u8 tmp0, tmp1, tmp2, tmp3;
    v8u16 vec0, vec1, vec2, vec3;
    v4i32 out0;
    v16u8 mask = { 1, 2, 4, 8, 16, 32, 64, 128, 1, 2, 4, 8, 16, 32, 64, 128 };

    LD_SH8( p_src, 8, src0, src1, src2, src3, src4, src5, src6, src7 );

    /* 0xFFFF where the coefficient is zero, 0 where it is non-zero */
    tmp_h0 = __msa_ceqi_h( src0, 0 );
    tmp_h1 = __msa_ceqi_h( src1, 0 );
    tmp_h2 = __msa_ceqi_h( src2, 0 );
    tmp_h3 = __msa_ceqi_h( src3, 0 );
    tmp_h4 = __msa_ceqi_h( src4, 0 );
    tmp_h5 = __msa_ceqi_h( src5, 0 );
    tmp_h6 = __msa_ceqi_h( src6, 0 );
    tmp_h7 = __msa_ceqi_h( src7, 0 );

    /* Pack the flags to bytes: tmp0..tmp3 cover coefficients 0-15, 16-31,
       32-47 and 48-63 respectively. */
    PCKEV_B4_UB( tmp_h1, tmp_h0, tmp_h3, tmp_h2, tmp_h5, tmp_h4, tmp_h7, tmp_h6,
                 tmp0, tmp1, tmp2, tmp3 );

    /* Keep one flag bit per coefficient (bit i & 7 within each byte) */
    tmp0 = tmp0 & mask;
    tmp1 = tmp1 & mask;
    tmp2 = tmp2 & mask;
    tmp3 = tmp3 & mask;

    /* Fold the flag bytes with repeated horizontal adds until doubleword 0
       holds a 64-bit mask with bit i set when coefficient i is zero, then
       count its leading ones (= trailing zero coefficients). */
    HADD_UB4_UH( tmp0, tmp1, tmp2, tmp3, vec0, vec1, vec2, vec3 );
    PCKEV_B2_UB( vec1, vec0, vec3, vec2, tmp0, tmp1 );
    HADD_UB2_UH( tmp0, tmp1, vec0, vec1 );

    tmp0 = ( v16u8 ) __msa_pckev_b( ( v16i8 ) vec1, ( v16i8 ) vec0 );
    vec0 = __msa_hadd_u_h( tmp0, tmp0 );
    tmp0 = ( v16u8 ) __msa_pckev_b( ( v16i8 ) vec0, ( v16i8 ) vec0 );
    out0 = ( v4i32 ) __msa_nloc_d( ( v2i64 ) tmp0 );
    u_res = __msa_copy_u_w( out0, 0 );

    return ( 63 - u_res ); /* index of the last non-zero coefficient, -1 if none */
}
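This is the same bitmask idea as the 16-coefficient version, widened to 64 bits: build a mask with bit i set when coefficient i is zero, count its leading ones with nloc on a doubleword, and subtract from 63, which yields -1 when the whole block is zero. A portable sketch of that idea; the helper is illustrative and not shipped by x264.

#include <stdint.h>

static int32_t coeff_last64_bitmask_ref( const int16_t *p_src )
{
    uint64_t zero_mask = 0;
    int32_t i, leading_ones = 0;

    /* Bit i of zero_mask is set when coefficient i is zero. */
    for( i = 0; i < 64; i++ )
        zero_mask |= (uint64_t)( p_src[i] == 0 ) << i;

    /* Count the run of ones starting at bit 63: trailing zero coefficients. */
    for( i = 63; i >= 0 && ( ( zero_mask >> i ) & 1 ); i-- )
        leading_ones++;

    return 63 - leading_ones;
}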