static void pred16x16_top_dc_mxu(uint8_t *dst, uint8_t *src, uint8_t *top){
  uint8_t *src_top;  // top address
  unsigned int i;
  src_top = top;
  // load top
  S32LDD(xr1, src_top, 0x0);
  S32LDD(xr2, src_top, 0x4);
  S32LDD(xr3, src_top, 0x8);
  S32LDD(xr4, src_top, 0xc);
  // AVG
  D8SUMC(xr1, xr1, xr2);            // byte sums of top[0..7]
  D8SUMC(xr2, xr3, xr4);            // byte sums of top[8..15]
  Q16ADD_AA_WW(xr5, xr1, xr2, xr0);
  Q16ADD_AA_XW(xr7, xr5, xr5, xr0); // total sum of the 16 top pixels
  D32SLR(xr8, xr7, xr0, xr0, 0x4);  // dc = sum >> 4
  S32SFL(xr9, xr8, xr8, xr0, ptn0);
  S32SFL(xr0, xr9, xr9, xr1, ptn3); // xr1: dc replicated into all 4 bytes
  // store
  dst -= MB_LUMA_EDGED_WIDTH;
  for(i=0; i<16; i++){
    S32SDIV(xr1, dst, MB_LUMA_EDGED_WIDTH, 0x0);
    S32STD(xr1, dst, 0x4);
    S32STD(xr1, dst, 0x8);
    S32STD(xr1, dst, 0xc);
  }
}
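/* Plain-C sketch of what the MXU routine above computes, for reference
   (illustrative only; assumes MB_LUMA_EDGED_WIDTH is the edged luma
   stride used throughout this file): */
static void pred16x16_top_dc_c(uint8_t *dst, uint8_t *top){
  unsigned int i, j, sum = 0;
  uint8_t dc;
  for (i = 0; i < 16; i++)
    sum += top[i];                 // sum the 16 neighbours above the block
  dc = (sum + 8) >> 4;             // rounded average
  for (i = 0; i < 16; i++, dst += MB_LUMA_EDGED_WIDTH)
    for (j = 0; j < 16; j++)
      dst[j] = dc;                 // fill the whole block with the DC value
}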
// MODE 2
static void pred16x16_dc_mxu(uint8_t *dst, uint8_t *src, uint8_t *top){
  uint8_t *src_top;  // top address
  uint8_t *src_left; // left address
  unsigned int i;
  src_top = top;
  src_left = src - 0x4;
  // load top
  S32LDD(xr11, src_top, 0x0);
  S32LDD(xr12, src_top, 0x4);
  S32LDD(xr13, src_top, 0x8);
  S32LDD(xr14, src_top, 0xc);
  // load left (4 x 7 = 28 instructions)
  S32LDD(xr1, src_left, 0x0);          // xr1[31:24] <- src_left[3] (l0) ;
  S32LDIV(xr2, src_left, MB_LUMA_EDGED_WIDTH, 0x0); // xr2[31:24] <- src_left[stride+3] (l1) ;
  S32LDIV(xr3, src_left, MB_LUMA_EDGED_WIDTH, 0x0); // xr3[31:24] <- src_left[2*stride+3] (l2) ;
  S32LDIV(xr4, src_left, MB_LUMA_EDGED_WIDTH, 0x0); // xr4[31:24] <- src_left[3*stride+3] (l3) ;
  S32SFL(xr5, xr2, xr1, xr0, ptn2);    // xr5[31:16] <- l1, l0 ;
  S32SFL(xr6, xr4, xr3, xr0, ptn2);    // xr6[31:16] <- l3, l2 ;
  S32SFL(xr7, xr6, xr5, xr0, ptn3);    // xr7[31: 0] <- l3, l2, l1, l0 ;
  S32LDIV(xr1, src_left, MB_LUMA_EDGED_WIDTH, 0x0);
  S32LDIV(xr2, src_left, MB_LUMA_EDGED_WIDTH, 0x0);
  S32LDIV(xr3, src_left, MB_LUMA_EDGED_WIDTH, 0x0);
  S32LDIV(xr4, src_left, MB_LUMA_EDGED_WIDTH, 0x0);
  S32SFL(xr5, xr2, xr1, xr0, ptn2);
  S32SFL(xr6, xr4, xr3, xr0, ptn2);
  S32SFL(xr8, xr6, xr5, xr0, ptn3); // xr8[31:0] <- l7, l6, l5, l4 ;
  S32LDIV(xr1, src_left, MB_LUMA_EDGED_WIDTH, 0x0);
  S32LDIV(xr2, src_left, MB_LUMA_EDGED_WIDTH, 0x0);
  S32LDIV(xr3, src_left, MB_LUMA_EDGED_WIDTH, 0x0);
  S32LDIV(xr4, src_left, MB_LUMA_EDGED_WIDTH, 0x0);
  S32SFL(xr5, xr2, xr1, xr0, ptn2);
  S32SFL(xr6, xr4, xr3, xr0, ptn2);
  S32SFL(xr9, xr6, xr5, xr0, ptn3); // xr9[31:0] <- l11, l10, l9, l8 ;
  S32LDIV(xr1, src_left, MB_LUMA_EDGED_WIDTH, 0x0);
  S32LDIV(xr2, src_left, MB_LUMA_EDGED_WIDTH, 0x0);
  S32LDIV(xr3, src_left, MB_LUMA_EDGED_WIDTH, 0x0);
  S32LDIV(xr4, src_left, MB_LUMA_EDGED_WIDTH, 0x0);
  S32SFL(xr5, xr2, xr1, xr0, ptn2);
  S32SFL(xr6, xr4, xr3, xr0, ptn2);
  S32SFL(xr10, xr6, xr5, xr0, ptn3); // xr10[31:0] <- l15, l14, l13, l12 ;
  // AVG
  D8SUMC(xr1, xr11, xr12);
  D8SUMC(xr2, xr13, xr14);
  D8SUMC(xr3, xr7, xr8);
  D8SUMC(xr4, xr9, xr10);
  Q16ADD_AA_WW(xr5, xr1, xr2, xr0);
  Q16ACC_AA(xr5, xr3, xr4, xr0);
  Q16ADD_AA_XW(xr7, xr5, xr5, xr0); // total sum of the 16 top and 16 left pixels
  D32SLR(xr8, xr7, xr0, xr0, 0x5);  // dc = sum >> 5
  S32SFL(xr9, xr8, xr8, xr0, ptn0);
  S32SFL(xr0, xr9, xr9, xr1, ptn3); // xr1: dc replicated into all 4 bytes
  // store
  dst -= MB_LUMA_EDGED_WIDTH;
  for(i=0; i<16; i++){
    S32SDIV(xr1, dst, MB_LUMA_EDGED_WIDTH, 0x0);
    S32STD(xr1, dst, 0x4);
    S32STD(xr1, dst, 0x8);
    S32STD(xr1, dst, 0xc);
  }
}
static void pred8x8_top_dc_mxu(uint8_t *dst, uint8_t *src, uint8_t *top){
  uint8_t *src_top;  // top address
  src_top = top;
  // load top
  S32LDD(xr7, src_top, 0x0);
  S32LDD(xr8, src_top, 0x4);
  // AVG
  D8SUMC(xr1, xr7, xr8);             // byte sums of top[0..3] and top[4..7]
  D32SLR(xr2, xr1, xr0, xr0, 0x2);   // dc of each 4-pixel group (sum >> 2)
  S32SFL(xr3, xr2, xr2, xr4, ptn0);
  S32SFL(xr0, xr3, xr3, xr5, ptn3);  // xr5: dc of one group replicated
  S32SFL(xr0, xr4, xr4, xr6, ptn3);  // xr6: dc of the other group replicated
  // store
  S32STD(xr5, dst, 0x0);
  S32STD(xr6, dst, 0x4);
  S32SDIV(xr5, dst, MB_CHROM_EDGED_WIDTH, 0x0);
  S32STD(xr6, dst, 0x4);
  S32SDIV(xr5, dst, MB_CHROM_EDGED_WIDTH, 0x0);
  S32STD(xr6, dst, 0x4);
  S32SDIV(xr5, dst, MB_CHROM_EDGED_WIDTH, 0x0);
  S32STD(xr6, dst, 0x4);
  S32SDIV(xr5, dst, MB_CHROM_EDGED_WIDTH, 0x0);
  S32STD(xr6, dst, 0x4);
  S32SDIV(xr5, dst, MB_CHROM_EDGED_WIDTH, 0x0);
  S32STD(xr6, dst, 0x4);
  S32SDIV(xr5, dst, MB_CHROM_EDGED_WIDTH, 0x0);
  S32STD(xr6, dst, 0x4);
  S32SDIV(xr5, dst, MB_CHROM_EDGED_WIDTH, 0x0);
  S32STD(xr6, dst, 0x4);
}
// MODE 2
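// H.264 chroma DC prediction computes four DC values, one per 4x4 quadrant:
// top-left averages top[0..3] and left[0..3]; top-right averages top[4..7];
// bottom-left averages left[4..7]; bottom-right averages top[4..7] and
// left[4..7]. The routine below builds all four from packed byte sums and
// stores the left and right halves of each row separately.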
static void pred8x8_dc_mxu(uint8_t *dst, uint8_t *src, uint8_t *top){
  uint8_t *src_left; // left address
  src_left = src - 0x4;
  // load top
  S32LDD(xr11, top, 0x0);
  S32LDD(xr12, top, 0x4);
  // load left (2 x 7 = 14 instructions)
  S32LDD(xr1, src_left, 0x0);          // xr1[31:24] <- src_left[3] (l0) ;
  S32LDIV(xr2, src_left, MB_CHROM_EDGED_WIDTH, 0x0); // xr2[31:24] <- src_left[stride+3] (l1) ;
  S32LDIV(xr3, src_left, MB_CHROM_EDGED_WIDTH, 0x0); // xr3[31:24] <- src_left[2*stride+3] (l2) ;
  S32LDIV(xr4, src_left, MB_CHROM_EDGED_WIDTH, 0x0); // xr4[31:24] <- src_left[3*stride+3] (l3) ;
  S32SFL(xr5, xr2, xr1, xr0, ptn2);    // xr5[31:16] <- l1, l0 ;
  S32SFL(xr6, xr4, xr3, xr0, ptn2);    // xr6[31:16] <- l3, l2 ;
  S32SFL(xr7, xr6, xr5, xr0, ptn3);    // xr7[31: 0] <- l3, l2, l1, l0 ;
  S32LDIV(xr1, src_left, MB_CHROM_EDGED_WIDTH, 0x0);
  S32LDIV(xr2, src_left, MB_CHROM_EDGED_WIDTH, 0x0);
  S32LDIV(xr3, src_left, MB_CHROM_EDGED_WIDTH, 0x0);
  S32LDIV(xr4, src_left, MB_CHROM_EDGED_WIDTH, 0x0);
  S32SFL(xr5, xr2, xr1, xr0, ptn2);
  S32SFL(xr6, xr4, xr3, xr0, ptn2);
  S32SFL(xr8, xr6, xr5, xr0, ptn3); // xr8[31:0] <- l7, l6, l5, l4 ;
  // AVG
  D8SUMC(xr1, xr11, xr7);
  Q16ADD_AA_XW(xr2, xr1, xr1, xr0);
  D32SLR(xr3, xr2, xr0, xr0, 0x3);
  S32SFL(xr4, xr3, xr3, xr0, ptn0);
  S32SFL(xr0, xr4, xr4, xr5, ptn3);

  D8SUMC(xr1, xr12, xr8);
  Q16ADD_AA_XW(xr2, xr1, xr1, xr0);
  D32SLR(xr3, xr2, xr0, xr0, 0x3);
  S32SFL(xr4, xr3, xr3, xr0, ptn0);
  S32SFL(xr0, xr4, xr4, xr6, ptn3);

  D32SLR(xr2, xr1, xr0, xr0, 0x2);
  S32SFL(xr3, xr2, xr2, xr4, ptn0);
  S32SFL(xr0, xr3, xr3, xr8, ptn3);
  S32SFL(xr0, xr4, xr4, xr9, ptn3);
  // store
  S32STD(xr5, dst, 0x0);
  S32STD(xr8, dst, 0x4);
  S32SDIV(xr5, dst, MB_CHROM_EDGED_WIDTH, 0x0);
  S32STD(xr8, dst, 0x4);
  S32SDIV(xr5, dst, MB_CHROM_EDGED_WIDTH, 0x0);
  S32STD(xr8, dst, 0x4);
  S32SDIV(xr5, dst, MB_CHROM_EDGED_WIDTH, 0x0);
  S32STD(xr8, dst, 0x4);

  S32SDIV(xr9, dst, MB_CHROM_EDGED_WIDTH, 0x0);
  S32STD(xr6, dst, 0x4);
  S32SDIV(xr9, dst, MB_CHROM_EDGED_WIDTH, 0x0);
  S32STD(xr6, dst, 0x4);
  S32SDIV(xr9, dst, MB_CHROM_EDGED_WIDTH, 0x0);
  S32STD(xr6, dst, 0x4);
  S32SDIV(xr9, dst, MB_CHROM_EDGED_WIDTH, 0x0);
  S32STD(xr6, dst, 0x4);

}
static void pred16x16_128_dc_mxu(uint8_t *dst, uint8_t *src, uint8_t *top){
  int i;
  //load
  S32LUI(xr1, 0x80, ptn7);
  // store
  dst -= MB_LUMA_EDGED_WIDTH;
  for(i=0; i<16; i++){
    S32SDIV(xr1, dst, MB_LUMA_EDGED_WIDTH, 0x0);
    S32STD(xr1, dst, 0x4);
    S32STD(xr1, dst, 0x8);
    S32STD(xr1, dst, 0xc);
  }
}
static inline void ifft2(FFTComplex_fix *buf)
{
  S32LDD(xr1, buf, 0);   // buf[0].re
  S32LDD(xr3, buf, 8);   // buf[1].re
  S32LDD(xr2, buf, 4);   // buf[0].im
  S32LDD(xr4, buf, 12);  // buf[1].im

  D32ADD_AS(xr5, xr1, xr3, xr7);  // xr5 = re0+re1, xr7 = re0-re1
  D32ADD_AS(xr6, xr2, xr4, xr8);  // xr6 = im0+im1, xr8 = im0-im1

  S32STD(xr5, buf, 0);
  S32STD(xr7, buf, 8);
  S32STD(xr6, buf, 4);
  S32STD(xr8, buf, 12);
}
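/* Scalar view of the same radix-2 butterfly (illustrative, assuming
   FFTComplex_fix holds 32-bit .re/.im fields as used by
   fft_calc_fix_inverse below): */
static inline void ifft2_c(FFTComplex_fix *buf)
{
  FFTSample_fix re0 = buf[0].re, im0 = buf[0].im;
  FFTSample_fix re1 = buf[1].re, im1 = buf[1].im;
  buf[0].re = re0 + re1;  buf[0].im = im0 + im1;  // sum term
  buf[1].re = re0 - re1;  buf[1].im = im0 - im1;  // difference term
}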
// MODE 5
static void pred4x4_vertical_right_mxu(uint8_t *dst, uint8_t *src, uint8_t *topright,
				       uint8_t *top, uint8_t *topleft){
  uint8_t *src_left;  // left address
  src_left = src - 0x4;
  // load right
  S32LDD(xr8, top, 0x0); // xr8: t3, t2, t1, t0 ;  high -> low, [31->0];
  // load left
  S32LDD(xr1, topleft, -0x4); // xr1[31:24] <- topleft[-1] (lt) ;
  S32LDD(xr2, src_left, 0x0); // xr2[31:24] <- src_left[3] (l0) ;
  S32LDIV(xr3, src_left, MB_LUMA_EDGED_WIDTH, 0x0); // xr3[31:24] <- src_left[stride+3] (l1) ;
  S32LDIV(xr4, src_left, MB_LUMA_EDGED_WIDTH, 0x0); // xr4[31:24] <- src_left[2*stride+3] (l2) ;
  S32SFL(xr5, xr2, xr1, xr0, ptn2);       // xr5[31:16] <- l0, lt ;
  S32SFL(xr6, xr4, xr3, xr0, ptn2);       // xr6[31:16] <- l2, l1 ;
  S32SFL(xr7, xr3, xr2, xr0, ptn2);       // xr7[31:16] <- l1, l0 ;
  // alni
  S32ALNI(xr3, xr8, xr1, ptn1); // xr3: t2, t1, t0, lt ;
  S32ALNI(xr4, xr3, xr2, ptn1); // xr4: t1, t0, lt, l0 ;
  // cal
  Q8AVGR(xr1, xr3, xr8);   // xr1: row 0, (t[i-1]+t[i]+1)>>1 ;
  Q8AVG(xr9, xr4, xr8);
  Q8AVGR(xr2, xr9, xr3);   // xr2: row 1, (t[i-2]+2*t[i-1]+t[i]+2)>>2 ;
  Q8AVG(xr10, xr5, xr6);
  Q8AVGR(xr11, xr10, xr7); // xr11: src[0,3], src[0,2], ~, ~ ;
  // alni
  S32ALNI(xr12, xr2, xr11, ptn1);
  D32SLL(xr13, xr11, xr0, xr0, 0x8);
  S32ALNI(xr14, xr1, xr13, ptn1);
  // store
  S32STD(xr1, dst, 0x0);
  S32SDIV(xr2, dst, MB_LUMA_EDGED_WIDTH, 0x0);
  S32SDIV(xr14, dst, MB_LUMA_EDGED_WIDTH, 0x0);
  S32SDIV(xr12, dst, MB_LUMA_EDGED_WIDTH, 0x0);
}
// MODE 1
static void pred16x16_horizontal_mxu(uint8_t *dst, uint8_t *src, uint8_t *top){
  uint8_t *src_left;  // left address
  unsigned int i;
  src_left = src - 0x1;

  dst -= MB_LUMA_EDGED_WIDTH;
  for (i=0; i<16; i++) {
    S8LDD(xr1, src_left, 0x0, ptn7);
    src_left = src_left + MB_LUMA_EDGED_WIDTH;
    S32SDIV(xr1, dst, MB_LUMA_EDGED_WIDTH, 0x0);
    S32STD(xr1, dst, 0x4);
    S32STD(xr1, dst, 0x8);
    S32STD(xr1, dst, 0xc);
  }

}
// MODE 2
static void pred4x4_dc_mxu(uint8_t *dst, uint8_t *src, uint8_t *topright,
			   uint8_t *top, uint8_t *topleft){
  uint8_t *src_left; // left address
  src_left = src - 0x4;
  //load
  S32LDD(xr8, top, 0x0); //xr8 <- src_top[0] ;
  // xr8: t3, t2, t1, t0 ;  high -> low, [31->0];
  S32LDD(xr1, src_left, 0x0);          // xr1[31:24] <- src_left[3] (l0) ;
  S32LDIV(xr2, src_left, MB_LUMA_EDGED_WIDTH, 0x0); // xr2[31:24] <- src_left[stride+3] (l1) ;
  S32LDIV(xr3, src_left, MB_LUMA_EDGED_WIDTH, 0x0); // xr3[31:24] <- src_left[2*stride+3] (l2) ;
  S32LDIV(xr4, src_left, MB_LUMA_EDGED_WIDTH, 0x0); // xr4[31:24] <- src_left[3*stride+3] (l3) ;
  S32SFL(xr5, xr2, xr1, xr0, ptn2);    // xr5[31:16] <- l1, l0 ;
  S32SFL(xr6, xr4, xr3, xr0, ptn2);    // xr6[31:16] <- l3, l2 ;
  S32SFL(xr1, xr6, xr5, xr0, ptn3);    // xr1[31: 0] <- l3, l2, l1, l0 ;
  //avg
  D8SUMC(xr2, xr1, xr8);
  Q16ADD_AA_XW(xr3, xr2, xr2, xr0);
  D32SLR(xr4, xr3, xr0, xr0, 0x3);
  S32SFL(xr6, xr4, xr4, xr0, ptn0);
  S32SFL(xr0, xr6, xr6, xr7, ptn3);
  //store
  S32STD(xr7, dst, 0x0);
  S32SDIV(xr7, dst, MB_LUMA_EDGED_WIDTH, 0x0);
  S32SDIV(xr7, dst, MB_LUMA_EDGED_WIDTH, 0x0);
  S32SDIV(xr7, dst, MB_LUMA_EDGED_WIDTH, 0x0);
}
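/* Plain-C sketch of the 4x4 DC prediction above (illustrative; left_col is
   a hypothetical pointer to the first left neighbour, i.e. src - 1 in the
   caller's layout): */
static void pred4x4_dc_c(uint8_t *dst, uint8_t *left_col, uint8_t *top){
  unsigned int i, j, sum = 4;  // +4 rounds the final >>3
  uint8_t dc;
  for (i = 0; i < 4; i++)
    sum += top[i] + left_col[i * MB_LUMA_EDGED_WIDTH];
  dc = sum >> 3;               // rounded mean of 4 top + 4 left neighbours
  for (i = 0; i < 4; i++, dst += MB_LUMA_EDGED_WIDTH)
    for (j = 0; j < 4; j++)
      dst[j] = dc;
}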
// MODE 3
static void pred4x4_down_left_mxu(uint8_t *dst, uint8_t *src, uint8_t *topright,
				  uint8_t *top, uint8_t *topleft){
  //load
  S32LDD(xr1, top, 0x0);        // xr1 <- t3, t2, t1, t0 ;
  S32LDD(xr2, topright, 0x0);   // xr2 <- t7, t6, t5, t4 ;
  S32LDDR(xr15, topright, 0x0); //xr15 <- t4, t5, t6, t7 ;

  S32ALNI(xr3, xr2, xr1, ptn2); // xr3: t5, t4, t3, t2 ;
  Q8AVG(xr4, xr1, xr3);
  S32ALNI(xr5, xr2, xr1, ptn3); // xr5: t4, t3, t2, t1 ;
  Q8AVGR(xr6, xr4, xr5);

  S32ALNI(xr7, xr2, xr1, ptn1); // xr7: t6, t5, t4, t3 ;
  S32ALNI(xr8, xr15, xr2, ptn3);// xr8: t7, t7, t6, t5 ;
  Q8AVG(xr9, xr7, xr8);
  Q8AVGR(xr10, xr9, xr2);

  D32SLL(xr11, xr6, xr0, xr0, 0x8);
  S32ALNI(xr12, xr10, xr11, ptn1);
  S32ALNI(xr13, xr10, xr11, ptn2);

  //store
  S32STD(xr6, dst, 0x0);
  S32SDIV(xr13, dst, MB_LUMA_EDGED_WIDTH, 0x0);
  S32SDIV(xr12, dst, MB_LUMA_EDGED_WIDTH, 0x0);
  S32SDIV(xr10, dst, MB_LUMA_EDGED_WIDTH, 0x0);
}
// luma4x4
// MODE 0
static void pred4x4_vertical_mxu(uint8_t *dst, uint8_t *src, uint8_t *topright,
				 uint8_t *top, uint8_t *topleft){
  //load
  S32LDD(xr1, top, 0x0);
  //store
  S32STD(xr1, dst, 0x0);
  S32SDIV(xr1, dst, MB_LUMA_EDGED_WIDTH, 0x0);
  S32SDIV(xr1, dst, MB_LUMA_EDGED_WIDTH, 0x0);
  S32SDIV(xr1, dst, MB_LUMA_EDGED_WIDTH, 0x0);
}
static void pred4x4_128_dc_mxu(uint8_t *dst, uint8_t *src, uint8_t *topright,
			       uint8_t *top, uint8_t *topleft){
  // load
  S32LUI(xr1, 0x80, ptn7);
  // store
  S32STD(xr1, dst, 0x0);
  S32SDIV(xr1, dst, MB_LUMA_EDGED_WIDTH, 0x0);
  S32SDIV(xr1, dst, MB_LUMA_EDGED_WIDTH, 0x0);
  S32SDIV(xr1, dst, MB_LUMA_EDGED_WIDTH, 0x0);
}
//---------- luma16x16 ----------
// MODE 0
static void pred16x16_vertical_mxu(uint8_t *dst, uint8_t *src, uint8_t *top){
  uint8_t *src_top;  // top address
  unsigned int i;
  src_top = top;
  //load
  S32LDD(xr1, src_top, 0x0);
  S32LDD(xr2, src_top, 0x4);
  S32LDD(xr3, src_top, 0x8);
  S32LDD(xr4, src_top, 0xc);
  //store
  dst -= MB_LUMA_EDGED_WIDTH;
  for (i=0; i<16; i++) {
    S32SDIV(xr1, dst, MB_LUMA_EDGED_WIDTH, 0x0);
    S32STD(xr2, dst, 0x4);
    S32STD(xr3, dst, 0x8);
    S32STD(xr4, dst, 0xc);
  }

}
uint32_t
dequant_mpeg_intra_mxu(int16_t * data,
		       // const int16_t * coeff,
		       const uint32_t quant,
		       const uint32_t dcscalar,
		       const uint16_t * mpeg_quant_matrices)
{
	const uint16_t *intra_matrix = mpeg_quant_matrices;
	int32_t i = 0;
	/* deal with data[0] then save to xr6  */
	
	S32I2M(xr3, -2048);
	S32I2M(xr4, 2047);
	S32I2M(xr5, quant);

	S32MUL(xr0,xr6,(int32_t)data[0],dcscalar);
	S32LUI(xr9,1,0);
	D16MUL_WW(xr0,xr6,xr9,xr6);
	S32MIN(xr6,xr6,xr4);
	S32MAX(xr6,xr6,xr3);

	data -= 2;
	intra_matrix -= 2;

	for (i = 0; i < 32; i++) {
	    S32LDI(xr1,data,4);
	    S32LDI(xr2,intra_matrix,4);

	    D16MUL_LW(xr13,xr9,xr1,xr14); // keep the original signed values of data[i] and data[i+1]
	    D16CPS(xr1,xr1,xr1);         

	    /* (abs(level) * intra_matrix[i] * quant) >> 3 */
	    D16MUL_LW(xr7,xr5,xr2,xr8);
	    S32SFL(xr15,xr7,xr8,xr2,3);
	    D16MUL_WW(xr7,xr1,xr2,xr8);
	    D32SLR(xr7,xr7,xr8,xr8,3); 
	   
	    /* clamp data[i+1] to [-2048, 2047] */
	    S32CPS(xr7,xr7,xr13);
	    S32MAX(xr10,xr7,xr3);
	    S32MIN(xr10,xr10,xr4);

	    /* clamp data[i] to [-2048, 2047] */
	    S32CPS(xr8,xr8,xr14);
	    S32MAX(xr11,xr8,xr3);
	    S32MIN(xr11,xr11,xr4);

	    S32SFL(xr0,xr10,xr11,xr12,3);
	    S32STD(xr12,data,0);
	}

	S16STD(xr6,data,-62*2,0); // store xr6 back to data[0]
	return(0);
}
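/* Per-coefficient view of the vector loop above, written out in plain C
   (illustrative; the sign restore mirrors S32CPS and the clamp mirrors
   S32MAX/S32MIN): */
static int16_t dequant_mpeg_coeff_c(int16_t level, uint16_t matrix, uint32_t quant)
{
	int32_t a = level < 0 ? -level : level;         // abs(level), as D16CPS does
	int32_t v = (a * (int32_t)(matrix * quant)) >> 3;
	if (level < 0) v = -v;                          // restore the original sign
	if (v < -2048) v = -2048;                       // clamp to the 12-bit signed range
	if (v > 2047)  v = 2047;
	return (int16_t)v;
}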
static void pred8x8_128_dc_mxu(uint8_t *dst, uint8_t *src, uint8_t *top){
  int i;
  //load
  S32LUI(xr1, 0x80, ptn7);
  // store
  dst -= MB_CHROM_EDGED_WIDTH;
  for(i=0; i<8; i++){
    S32SDIV(xr1, dst, MB_CHROM_EDGED_WIDTH, 0x0);
    S32STD(xr1, dst, 0x4);
  }
}
//------------- chroma8x8 --------------
// MODE 0
static void pred8x8_vertical_mxu(uint8_t *dst, uint8_t *src, uint8_t *top){
  unsigned int i;
  //load
  S32LDD(xr1, top, 0x0);
  S32LDD(xr2, top, 0x4);
  //store
  dst -= MB_CHROM_EDGED_WIDTH;
  for(i=0; i<8; i++){
    S32SDIV(xr1, dst, MB_CHROM_EDGED_WIDTH, 0x0);
    S32STD(xr2, dst, 0x4);
  }
}
static void MC_put_o_12_c (uint8_t *dest, const uint8_t *ref, const int stride, int height)
{
    uint32_t  ref_aln, ref_rs;
    ref_aln = ((uint32_t)ref - stride) & 0xfffffffc;
    ref_rs  = 4 - (((uint32_t)ref) & 3);
    dest -= stride;
    do {
        S32LDIV(xr1, ref_aln, stride, 0x0);
        S32LDD(xr2, ref_aln, 0x4);
        S32LDD(xr4, ref_aln, 0x8);
        S32LDD(xr6, ref_aln, 0xc);

        S32ALN(xr3, xr2, xr1, ref_rs);  // realign the aligned loads by ref_rs bytes
        S32ALN(xr5, xr4, xr2, ref_rs);
        S32ALN(xr7, xr6, xr4, ref_rs);

        S32SDIV(xr3, dest, stride, 0x0);
        S32STD(xr5, dest, 0x4);
        S32STD(xr7, dest, 0x8);
    } while (--height);
}
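/* Byte-wise equivalent of the routine above (illustrative; needs
   <string.h>). The MXU version gets the same effect without unaligned
   accesses: ref is rounded down to a word boundary, 16 aligned bytes are
   loaded per row, and S32ALN shifts them back into place. */
static void MC_put_o_12_ref(uint8_t *dest, const uint8_t *ref, const int stride, int height)
{
    do {
        memcpy(dest, ref, 12);  // copy one 12-pixel row
        dest += stride;
        ref += stride;
    } while (--height);
}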
static void pred4x4_top_dc_mxu(uint8_t *dst, uint8_t *src, uint8_t *topright,
			       uint8_t *top, uint8_t *topleft){
  //load
  S32LDD(xr1, top, 0x0); // xr1[31:24] <- src_top[0] ;
  //avg
  D8SUMC(xr2, xr0, xr1);
  D32SLR(xr3, xr2, xr0, xr0, 0x2);
  S32SFL(xr0, xr3, xr3, xr4, ptn0);
  S32SFL(xr0, xr4, xr4, xr7, ptn3);
  //store
  S32STD(xr7, dst, 0x0);
  S32SDIV(xr7, dst, MB_LUMA_EDGED_WIDTH, 0x0);
  S32SDIV(xr7, dst, MB_LUMA_EDGED_WIDTH, 0x0);
  S32SDIV(xr7, dst, MB_LUMA_EDGED_WIDTH, 0x0);
}
// MODE 1
static void pred4x4_horizontal_mxu(uint8_t *dst, uint8_t *src, uint8_t *topright,
				   uint8_t *top, uint8_t *topleft){
  uint8_t *src_left;  // left address
  src_left = src - 0x1;
  S8LDD(xr1, src_left, 0x0, ptn7);
  src_left = src_left + MB_LUMA_EDGED_WIDTH;
  S8LDD(xr2, src_left, 0x0, ptn7);
  src_left = src_left + MB_LUMA_EDGED_WIDTH;
  S8LDD(xr3, src_left, 0x0, ptn7);
  src_left = src_left + MB_LUMA_EDGED_WIDTH;
  S8LDD(xr4, src_left, 0x0, ptn7);
  // store
  S32STD(xr1, dst, 0x0);
  S32SDIV(xr2, dst, MB_LUMA_EDGED_WIDTH, 0x0);
  S32SDIV(xr3, dst, MB_LUMA_EDGED_WIDTH, 0x0);
  S32SDIV(xr4, dst, MB_LUMA_EDGED_WIDTH, 0x0);
}
// MODE 8
static void pred4x4_horizontal_up_mxu(uint8_t *dst, uint8_t *src, uint8_t *topright,
				      uint8_t *top, uint8_t *topleft){
  uint8_t *src_left; // left address
  src_left = src - 0x4;
  //load
  S32LDD(xr1, src_left, 0x0);          // xr1[31:24] <- src_left[3] (l0) ;
  S32LDIV(xr2, src_left, MB_LUMA_EDGED_WIDTH, 0x0); // xr2[31:24] <- src_left[stride+3] (l1) ;
  S32LDIV(xr3, src_left, MB_LUMA_EDGED_WIDTH, 0x0); // xr3[31:24] <- src_left[2*stride+3] (l2) ;
  S32LDIV(xr4, src_left, MB_LUMA_EDGED_WIDTH, 0x0); // xr4[31:24] <- src_left[3*stride+3] (l3) ;
  S32SFL(xr5, xr2, xr1, xr0, ptn2);    // xr5[31:16] <- l1, l0 ;
  S32SFL(xr6, xr4, xr3, xr0, ptn2);    // xr6[31:16] <- l3, l2 ;
  S32SFL(xr1, xr6, xr5, xr0, ptn3);    // xr1[31: 0] <- l3, l2, l1, l0 ;

  D32SLL(xr2, xr1, xr0, xr0, 0x8);  // xr2: l2, l1, l0, 0 ;
  S32SFL(xr3, xr1, xr1, xr0, ptn0); // xr3: l3, l3, l2, l2;

  Q8AVGR(xr4, xr1, xr2); // xr4: src[2,1]/src[0,2], src[2,0]/src[0,1], src[0,0], ~ ;

  Q8AVG(xr5, xr2, xr3);
  Q8AVGR(xr6, xr5, xr1); // xr6: src[3,1]/src[1,2], src[3,0]/src[1,1], src[1,0], ~ ;

  S32SFL(xr7, xr6, xr4, xr0, ptn0); // xr7: src[3,1]/src[1,2], src[2,1]/src[0,2],
                                    //      src[3,0]/src[1,1], src[2,0]/src[0,1];

  D32SLR(xr8, xr4, xr6, xr9, 0x8); // xr8: 0, src[2,1]/src[0,2], src[2,0]/src[0,1], src[0,0] ;
                                   // xr9: 0, src[3,1]/src[1,2], src[3,0]/src[1,1], src[1,0] ;
  S32SFL(xr0, xr9, xr8, xr10, ptn0); // xr10: src[3,0], src[2,0], src[1,0], src[0,0] ;

  S32SFL(xr11, xr3, xr7, xr0, ptn3); // xr11: l3, l3, src[3,1]/src[1,2], src[2,1]/src[0,2] ;

  S32SFL(xr12, xr3, xr3, xr0, ptn3); // xr12: l3, l3, l3, l3 ;

  //store
  S32STD(xr10, dst, 0x0);
  S32SDIV(xr7, dst, MB_LUMA_EDGED_WIDTH, 0x0);
  S32SDIV(xr11, dst, MB_LUMA_EDGED_WIDTH, 0x0);
  S32SDIV(xr12, dst, MB_LUMA_EDGED_WIDTH, 0x0);
}
//---------- other DC modes ------------
static void pred4x4_left_dc_mxu(uint8_t *dst, uint8_t *src, uint8_t *topright,
				uint8_t *top, uint8_t *topleft){
  uint8_t *src_left; // left address
  src_left = src - 0x4;
  //load
  S32LDD(xr1, src_left, 0x0);          // xr1[31:24] <- src_left[3] (l0) ;
  S32LDIV(xr2, src_left, MB_LUMA_EDGED_WIDTH, 0x0); // xr2[31:24] <- src_left[stride+3] (l1) ;
  S32LDIV(xr3, src_left, MB_LUMA_EDGED_WIDTH, 0x0); // xr3[31:24] <- src_left[2*stride+3] (l2) ;
  S32LDIV(xr4, src_left, MB_LUMA_EDGED_WIDTH, 0x0); // xr4[31:24] <- src_left[3*stride+3] (l3) ;
  S32SFL(xr5, xr2, xr1, xr0, ptn2);    // xr5[31:16] <- l1, l0 ;
  S32SFL(xr6, xr4, xr3, xr0, ptn2);    // xr6[31:16] <- l3, l2 ;
  S32SFL(xr7, xr6, xr5, xr0, ptn3);    // xr7[31: 0] <- l3, l2, l1, l0 ;
  //avg
  D8SUMC(xr2, xr0, xr7);
  D32SLR(xr8, xr2, xr0, xr0, 0x2);
  S32SFL(xr0, xr8, xr8, xr9, ptn0);
  S32SFL(xr0, xr9, xr9, xr1, ptn3);
  //store
  S32STD(xr1, dst, 0x0);
  S32SDIV(xr1, dst, MB_LUMA_EDGED_WIDTH, 0x0);
  S32SDIV(xr1, dst, MB_LUMA_EDGED_WIDTH, 0x0);
  S32SDIV(xr1, dst, MB_LUMA_EDGED_WIDTH, 0x0);
}
// MODE 7
static void pred4x4_vertical_left_mxu(uint8_t *dst, uint8_t *src, uint8_t *topright,
				      uint8_t *top, uint8_t *topleft){
  //load
  S32LDD(xr1, top, 0x0);      // xr1 <- t3, t2, t1, t0 ;
  S32LDD(xr2, topright, 0x0); // xr2 <- t7, t6, t5, t4 ;

  S32ALNI(xr3, xr2, xr1, ptn3); // xr3: t4, t3, t2, t1 ;
  S32ALNI(xr4, xr2, xr1, ptn2); // xr4: t5, t4, t3, t2 ;
  Q8AVGR(xr11, xr1, xr3);
  Q8AVGR(xr5, xr4, xr3);

  Q8AVG(xr7, xr1, xr4);
  Q8AVGR(xr8, xr7, xr3);

  S32ALNI(xr6, xr2, xr1, ptn1); // xr6: t6, t5, t4, t3 ;
  Q8AVG(xr9, xr3, xr6);
  Q8AVGR(xr10, xr9, xr4);

  //store
  S32STD(xr11, dst, 0x0);
  S32SDIV(xr8, dst, MB_LUMA_EDGED_WIDTH, 0x0);
  S32SDIV(xr5, dst, MB_LUMA_EDGED_WIDTH, 0x0);
  S32SDIV(xr10, dst, MB_LUMA_EDGED_WIDTH, 0x0);
}
static void ff_vp3_idct_add_mxu(uint8_t *src, int stride, DCTELEM *input, uint8_t idct_row)
{
    int i;
    DCTELEM *blk;
    int32_t wf = (int32_t)whirl_idct;

    S32LDD(xr5, wf, 0x0);         // xr5(w7, w3)
    S32LDD(xr6, wf, 0x4);         // xr6(w9, w8)
    S32LDD(xr7, wf, 0x8);         // xr7(w11,w10)
    S32LDD(xr8, wf, 0xc);         // xr8(w13,w12)
    S32LDD(xr9, wf, 0x10);        // xr9(w6, w0)
    S32LDD(xr10,wf, 0x14);
    blk = input - 8;
    /* Inverse DCT on the rows now */
    for (i=0; i<idct_row; i++) {
        S32LDI(xr1, blk, 0x10);       //  xr1 (x4, x0)
	S32LDD(xr2, blk, 0x4);        //  xr2 (x7, x3)
	S32LDD(xr3, blk, 0x8);        //  xr3 (x6, x1)
	S32LDD(xr4, blk, 0xc);        //  xr4 (x5, x2)
	S32OR(xr12, xr2,xr3);
	S32OR(xr11,xr12,xr4);
	S32OR(xr12,xr11,xr1);
	if (S32M2I(xr12) == 0) {
	    continue;            //blk[0]= blk[1]=blk[2]=blk[3]=blk[4]=blk[5]=blk[6]=blk[7]=0
	}
	S32SFL(xr12,xr0,xr1,xr13,ptn3);
	S32OR(xr11,xr11,xr12);
	if (S32M2I(xr11) == 0 && S32M2I(xr13) != 0) {
	    D16MUL_HW(xr0,xr5,xr13,xr13);
	    D32SAR(xr0,xr0,xr13,xr13,15);
	    S32SFL(xr0,xr13,xr13,xr13,ptn3);
	    S32STD(xr13,blk, 0x0);
	    S32STD(xr13,blk, 0x4);
	    S32STD(xr13,blk, 0x8);
	    S32STD(xr13,blk, 0xc);
	    continue;            //blk[0]!=0, and blk[1]=blk[2]=blk[3]=blk[4]=blk[5]=blk[6]=blk[7]=0
	}

	S32SFL(xr1,xr1,xr2,xr2, ptn3);  //xr1:s1, s3, xr2: s0, s2
	S32SFL(xr3,xr3,xr4,xr4, ptn3);  //xr3:s5, s7, xr4: s4, s6

	D16MUL_WW(xr11, xr2, xr5, xr12);//xr11: s0*c4, xr12: s2*c2
	D16MAC_AA_WW(xr11,xr4,xr6,xr12);//xr11: s0*c4+s4*c4, xr12: s2*c2+s6*c6

	D16MUL_WW(xr13, xr2, xr6, xr14);//xr13: s0*c4, xr14: s2*c6
	D16MAC_SS_WW(xr13,xr4,xr5,xr14);//xr13: s0*c4 - s4*c4, xr14: s2*c6-s6*c2

	D16MUL_HW(xr2, xr1, xr7, xr4);  //xr2: s1*c1, xr4: s1*c3 
	D16MAC_AS_LW(xr2,xr1,xr9,xr4);  //xr2: s1*c1+s3*c3, xr4: s1*c3-s3*c7
	D16MAC_AS_HW(xr2,xr3,xr10,xr4); //xr2: s1*c1+s3*c3+s5*c5,
                                      // xr4: s1*c3-s3*c7-s5*c1
	D16MAC_AS_LW(xr2,xr3,xr8,xr4);  //xr2: s1*c1+s3*c3+s5*c5+s7*c7,
                                      //xr4: s1*c3-s3*c7-s5*c1-s7*c5
	D32SAR(xr11, xr11,xr13,xr13,15);
	S32SFL(xr0, xr11,xr13,xr11,ptn3);
	D32SAR(xr12,xr12,xr14,xr14,15);
	S32SFL(xr0, xr12,xr14,xr12,ptn3);
	D32SAR(xr2, xr2,xr4,xr4,15);
	S32SFL(xr0, xr2,xr4,xr2,ptn3);
      
	D16MUL_HW(xr4, xr1, xr8, xr15);     //xr4: s1*c7, xr15:s1*c5
	D16MAC_SS_LW(xr4,xr1,xr10,xr15);    //xr4: s1*c7-s3*c5, xr15: s1*c5-s3*c1
	D16MAC_AA_HW(xr4,xr3,xr9,xr15);     //xr4: s1*c7-s3*c5+s5*c3, xr15: s1*c5-s3*c1+s5*c7
	D16MAC_SA_LW(xr4,xr3,xr7,xr15);     //xr4: s1*c7-s3*c5+s5*c3-s7*c1
	                                    //xr15: s1*c5-s3*c1+s5*c7+s7*c3
	Q16ADD_AS_WW(xr11,xr11,xr12,xr12);  //xr11: rnd(s0*c4+s4*c4)>>15+rnd(s2*c2+s6*c6)>>15
                                          //      rnd(s0*c4-s4*c4)>>15+rnd(s2*c6-s6*c2)>>15
                                          //xr12: rnd(s0*c4+s4*c4)>>15-rnd(s2*c2+s6*c6)>>15
                                          //      rnd(s0*c4-s4*c4)>>15-rnd(s2*c6-s6*c2)>>15
	D32SAR(xr15,xr15,xr4,xr4,15);
	S32SFL(xr0,xr15,xr4,xr15,ptn3);
	Q16ADD_AS_WW(xr11, xr11, xr2, xr2);
              //xr11: rnd(s0*c4+s4*c4)>>15+rnd(s2*c2+s6*c6)>>15 + rnd(s1*c1+s3*c3+s5*c5+s7*c7)>>15
              //    : rnd(s0*c4-s4*c4)>>15+rnd(s2*c6-s6*c2)>>15 + rnd(s1*c3-s3*c7-s5*c1-s7*c5)>>15
              //xr2: rnd(s0*c4+s4*c4)>>15+rnd(s2*c2+s6*c6)>>15 - rnd(s1*c1+s3*c3+s5*c5+s7*c7)>>15
              //   : rnd(s0*c4-s4*c4)>>15+rnd(s2*c6-s6*c2)>>15 - rnd(s1*c3-s3*c7-s5*c1-s7*c5)>>15

	Q16ADD_AS_XW(xr12, xr12, xr15, xr15);
              //xr12: rnd(s0*c4+s4*c4)>>15-rnd(s2*c2+s6*c6)>>15+rnd(s1*c5-s3*c1+s5*c7+s7*c3)>>15
              //    : rnd(s0*c4-s4*c4)>>15+rnd(s2*c6-s6*c2)>>15+rnd(s1*c7-s3*c5+s5*c3-s7*c1)>>15
              //xr15: rnd(s0*c4+s4*c4)>>15-rnd(s2*c2+s6*c6)>>15-rnd(s1*c5-s3*c1+s5*c7+s7*c3)>>15
              //    : rnd(s0*c4-s4*c4)>>15+rnd(s2*c6-s6*c2)>>15-rnd(s1*c7-s3*c5+s5*c3-s7*c1)>>15

	S32SFL(xr11,xr11,xr12,xr12, ptn3);
              //xr11: rnd(s0*c4+s4*c4)>>15+rnd(s2*c2+s6*c6)>>15 + rnd(s1*c1+s3*c3+s5*c5+s7*c7)>>15
              //    : rnd(s0*c4+s4*c4)>>15-rnd(s2*c2+s6*c6)>>15+rnd(s1*c5-s3*c1+s5*c7+s7*c3)>>15
              //xr12: rnd(s0*c4-s4*c4)>>15+rnd(s2*c6-s6*c2)>>15 + rnd(s1*c3-s3*c7-s5*c1-s7*c5)>>15
              //    : rnd(s0*c4-s4*c4)>>15+rnd(s2*c6-s6*c2)>>15+rnd(s1*c7-s3*c5+s5*c3-s7*c1)>>15
	S32SFL(xr12,xr12,xr11,xr11, ptn3);

              //xr12: rnd(s0*c4-s4*c4)>>15+rnd(s2*c6-s6*c2)>>15 + rnd(s1*c3-s3*c7-s5*c1-s7*c5)>>15
              //    : rnd(s0*c4+s4*c4)>>15+rnd(s2*c2+s6*c6)>>15 + rnd(s1*c1+s3*c3+s5*c5+s7*c7)>>15
              //xr11: rnd(s0*c4-s4*c4)>>15+rnd(s2*c6-s6*c2)>>15+rnd(s1*c7-s3*c5+s5*c3-s7*c1)>>15
              //    : rnd(s0*c4+s4*c4)>>15-rnd(s2*c2+s6*c6)>>15+rnd(s1*c5-s3*c1+s5*c7+s7*c3)>>15
	S32STD(xr12, blk, 0x0);
	S32STD(xr11, blk, 0x4);
	S32STD(xr15, blk, 0x8);
	S32STD(xr2, blk, 0xc);
    }
      
    blk = input - 2;
    for (i=0; i<4; i++)               /* idct columns */
    {
        S32I2M(xr5,wxr5);        //xr5: c4 , c2
	S32I2M(xr6,wxr6);        //xr6: -- , c6
	S32LDI(xr1, blk, 0x4);   //xr1: ss0, s0
	S32LDD(xr3, blk, 0x20);  //xr3: ss2, s2
	S32LDD(xr11, blk, 0x40); //xr11: ss4, s4
	S32LDD(xr13, blk, 0x60); //xr13: ss6, s6

	D16MUL_HW(xr15, xr5, xr1, xr2);    //xr15: ss0*c4, xr2: s0*c4
	D16MAC_AA_HW(xr15,xr5,xr11,xr2);   //xr15: ss0*c4+ss4*c4, xr2: s0*c4+s4*c4
	D16MUL_LW(xr10, xr5, xr3, xr9);    //xr10: ss2*c2, xr9: s2*c2
	D16MAC_AA_LW(xr10,xr6,xr13,xr9);   //xr10: ss2*c2+ss6*c6, xr9: s2*c2+s6*c6
	D32SAR(xr15,xr15,xr2,xr2,15);      
	S32SFL(xr0,xr15,xr2,xr15,ptn3);    //xr15: (ss0*c4+ss4*c4)>>15
	D32SAR(xr10,xr10,xr9,xr9,15);      
	S32SFL(xr0,xr10,xr9,xr10,ptn3);    //xr10: (ss2*c2+ss6*c6)>>15

	S32LDD(xr2, blk, 0x10);            //xr2: ss1, s1
	S32LDD(xr4, blk, 0x30);            //xr4: ss3, s3
	Q16ADD_AS_WW(xr15,xr15,xr10,xr9);  //xr15: rnd(ss0*c4+ss4*c4)>>15+rnd(ss2*c2+ss6*c6)>>15
                                         //    :rnd(s0*c4+s4*c4)>>15 + rnd(s2*c2 + s6*c6)>>15
                                         //xr9: rnd(ss0*c4+ss4*c4)>>15 - rnd(ss2*c2+ss6*c6)>>15
                                         //   : rnd(s0*c4+s4*c4)>>15 - rnd(s2*c2 + s6*c6)>>15
	D16MUL_HW(xr10, xr5, xr1, xr1);    //xr10: ss0*c4, xr1: s0*c4
	D16MAC_SS_HW(xr10,xr5,xr11,xr1);   //xr10: ss0*c4-ss4*c4, xr1: s0*c4 - s4*c4
	D16MUL_LW(xr11, xr6, xr3, xr12);    //xr11: ss2*c6, xr12: s2*c6
	D16MAC_SS_LW(xr11,xr5,xr13,xr12);   //xr11: ss2*c6-ss6*c2, xr12: s2*c6-s6*c2
	D32SAR(xr10,xr10,xr1,xr1,15);
	S32SFL(xr0,xr10,xr1,xr10,ptn3);    //xr10: (ss0*c4-ss4*c4)>>15 //    : (s0*c4 - s4*c4)>>15
	D32SAR(xr11,xr11,xr12,xr12,15);      
	S32SFL(xr0,xr11,xr12,xr11,ptn3);    //xr11:(ss2*c6-ss6*c2)>>15
                                         //    :(s2*c6-s6*c2)>>15

	S32LDD(xr12, blk, 0x50);           //xr12: ss5, s5
	S32LDD(xr14, blk, 0x70);           //xr14: ss7, s7
	Q16ADD_AS_WW(xr10,xr10,xr11,xr1);  //xr10: rnd(ss0*c4-ss4*c4)>>15)+rnd(ss2*c6-ss6*c2)>>15
                                         //    : rnd(s0*c4 - s4*c4)>>15 +rnd(s2*c6 - s6*c2)>>15
                                         //xr1 : rnd(ss0*c4-ss4*c4)>>15-rnd(ss2*c6-ss6*c2)>>15
                                         //    : rnd(s0*c4 - s4*c4)>>15-rnd(s2*c6 - s6*c2)>>15

	D16MUL_HW(xr11, xr7, xr2, xr13);   //xr11: ss1*c1, xr13: s1*c1
	D16MAC_AA_LW(xr11,xr7,xr4,xr13);   //xr11: ss1*c1+ss3*c3, xr13: s1*c1+s3*c3
	D16MAC_AA_LW(xr11,xr8,xr12,xr13);  //xr11: ss1*c1+ss3*c3+ss5*c5 //xr13: s1*c1+s3*c3+s5*c5
	D16MAC_AA_HW(xr11,xr8,xr14,xr13);  //xr11: ss1*c1+ss3*c3+ss5*c5+ss7*c7
                                         //xr13: s1*c1+s3*c3+s5*c5+s7*c7
	D16MUL_LW(xr3, xr7, xr2, xr5);    //xr3: ss1*c3, xr5: s1*c3
	D16MAC_SS_HW(xr3,xr8,xr4,xr5);    //xr3: ss1*c3-ss3*c7, xr5: s1*c3-s3*c7
	D16MAC_SS_HW(xr3,xr7,xr12,xr5);   //xr3: ss1*c3-ss3*c7-ss5*c1
                                         //xr5: s1*c3-s3*c7-s5*c1
	D16MAC_SS_LW(xr3,xr8,xr14,xr5);   //xr3: ss1*c3-ss3*c7-ss5*c1-ss7*c5
                                         //xr5: s1*c3-s3*c7-s5*c1-s7*c5
	D32SAR(xr11,xr11,xr13,xr13,15); 
	S32SFL(xr0,xr11,xr13,xr11,ptn3);   //xr11: (ss1*c1+ss3*c3+ss5*c5+ss7*c7)>>15 //    : (s1*c1+s3*c3+s5*c5+s7*c7)>>15
	D32SAR(xr3,xr3,xr5,xr5,15);
	S32SFL(xr0,xr3,xr5,xr3,ptn3);     //xr3: (ss1*c3-ss3*c7-ss5*c1-ss7*c5)>>15
                                         //   : (s1*c3-s3*c7-s5*c1-s7*c5)>>15
	D16MUL_LW(xr5, xr8, xr2, xr13);    //xr5: ss1*c5, xr13:s1*c5
	D16MAC_SS_HW(xr5,xr7,xr4,xr13);    //xr5: ss1*c5-ss3*c1, xr13:s1*c5-s3*c1
	D16MAC_AA_HW(xr5,xr8,xr12,xr13);   //xr5: ss1*c5-ss3*c1+ss5*c7
                                         //   : s1*c5 - s3*c1+ s5*c7
	D16MAC_AA_LW(xr5,xr7,xr14,xr13);   //xr5: ss1*c5-ss3*c1+ss5*c7+ss7*c3
                                         //xr13: s1*c5-s3*c1+s5*c7+s7*c3
	D16MUL_HW(xr2, xr8, xr2, xr6);    //xr2: ss1*c7, xr6: s1*c7
	D16MAC_SS_LW(xr2,xr8,xr4,xr6);    //xr2: ss1*c7-ss3*c5, xr6: s1*c7-s3*c5
	D16MAC_AA_LW(xr2,xr7,xr12,xr6);   //xr2: ss1*c7-ss3*c5+ss5*c1, xr6: s1*c7-s3*c5+s5*c1
	D16MAC_SS_HW(xr2,xr7,xr14,xr6);   //xr2: ss1*c7-ss3*c5+ss5*c1-ss7*c3
                                         //xr6: s1*c7-s3*c5+s5*c1-s7*c3
	D32SAR(xr5,xr5,xr13,xr13,15);
	S32SFL(xr0,xr5,xr13,xr5,ptn3);     //xr5: (ss1*c5-ss3*c1+ss5*c7+ss7*c3)>>15
                                         //   : (s1*c5-s3*c1+s5*c7+s7*c3)>>15
	D32SAR(xr2,xr2,xr6,xr6,15);
	S32SFL(xr0,xr2,xr6,xr2,ptn3);     //xr2:(ss1*c7-ss3*c5+ss5*c1-ss7*c3)>>15
                                         //   :(s1*c7-s3*c5+s5*c1-s7*c3)>>15

	S32I2M(xr4, 0x00080008);//round value 8;
	Q16ADD_AS_WW(xr15,xr15,xr11,xr11); //xr15:rnd(ss0*c4+ss4*c4)>>16+rnd(ss2*c2+ss6*c6)>>16+
                                         //     rnd(ss1*c1+ss3*c3+ss5*c5+ss7*c7)>>16
                                         //     rnd(s0*c4+s4*c4)>>16 + rnd(s2*c2 + s6*c6)>>16+
                                         //     rnd(s1*c1+s3*c3+s5*c5+s7*c7)>>16

                                         //xr11:rnd(ss0*c4+ss4*c4)>>16+rnd(ss2*c2+ss6*c6)>>16-
                                         //     rnd(ss1*c1+ss3*c3+ss5*c5+ss7*c7)>>16
                                         //     rnd(s0*c4+s4*c4)>>16 + rnd(s2*c2 + s6*c6)>>16-
                                         //     rnd(s1*c1+s3*c3+s5*c5+s7*c7)>>16
	Q16ADD_AS_WW(xr10,xr10,xr3,xr3);   //xr10:rnd(ss0*c4-ss4*c4)>>16)+rnd(ss2*c6-ss6*c2)>>16+
                                         //     rnd(ss1*c3-ss3*c7-ss5*c1-ss7*c5)>>16
                                         //     rnd(s0*c4 - s4*c4)>>16 +rnd(s2*c6 - s6*c2)>>16+
                                         //     rnd(s1*c3-s3*c7-s7*c5)>>16
                                         //xr10:rnd(ss0*c4-ss4*c4)>>16)+rnd(ss2*c6-ss6*c2)>>16-
                                         //     rnd(ss1*c3-ss3*c7-ss5*c1-ss7*c5)>>16
                                         //     rnd(s0*c4 - s4*c4)>>16 +rnd(s2*c6 - s6*c2)>>16-
                                         //     rnd(s1*c3-s3*c7-s7*c5)>>16
	Q16ADD_AS_WW(xr1,xr1,xr5,xr5);     //xr1: rnd(ss0*c4-ss4*c4)>>16-rnd(ss2*c6-ss6*c2)>>16+
                                         //     rnd(ss1*c5-ss3*c1+ss5*c7+ss7*c1)>>16
                                         //     rnd(s0*c4 - s4*c4)>>16 +rnd(s2*c6 - s6*c2)>>16+
                                         //     rnd(s1*c5 - s3*c1+ s5*c7+ s7*c1)>>16
                                         //xr1: rnd(ss0*c4-ss4*c4)>>16-rnd(ss2*c6-ss6*c2)>>16-
                                         //     rnd(ss1*c5-ss3*c1+ss5*c7+ss7*c1)>>16
                                         //     rnd(s0*c4 - s4*c4)>>16 +rnd(s2*c6 - s6*c2)>>16-
                                         //     rnd(s1*c5 - s3*c1+ s5*c7+ s7*c1)>>16
	Q16ADD_AS_WW(xr9,xr9,xr2,xr2);     //xr9: rnd(ss0*c4+ss4*c4)>>16 - rnd(ss2*c2+ss6*c6)>>16+
                                         //     rnd(ss1*c7-ss3*c5+ss5*c1-ss7*c3)>>16
                                         //     rnd(s0*c4+s4*c4)>>16 - rnd(s2*c2 + s6*c6)>>16+
                                         //     rnd(s1*c7-s3*c5+s5*c1-s7*c3)>>16
                                         //xr9: rnd(ss0*c4+ss4*c4)>>16 - rnd(ss2*c2+ss6*c6)>>16-
                                         //     rnd(ss1*c7-ss3*c5+ss5*c1-ss7*c3)>>16
                                         //     rnd(s0*c4+s4*c4)>>16 - rnd(s2*c2 + s6*c6)>>16-
                                         //     rnd(s1*c7-s3*c5+s5*c1-s7*c3)>>16

	Q16ACCM_AA(xr15,xr4,xr4,xr10);
	Q16ACCM_AA(xr11,xr4,xr4,xr1);
	Q16ACCM_AA(xr9,xr4,xr4,xr2);
	Q16ACCM_AA(xr5,xr4,xr4,xr3);
	Q16SAR(xr15,xr15,xr10,xr10,4);
	Q16SAR(xr11,xr11,xr1,xr1,4);
	Q16SAR(xr9,xr9,xr2,xr2,4);
	Q16SAR(xr5,xr5,xr3,xr3,4);
	
	S32STD(xr15, blk, 0x00);
	S32STD(xr10, blk, 0x10);
	S32STD(xr1, blk, 0x20);
	S32STD(xr9, blk, 0x30);
	S32STD(xr2, blk, 0x40);
	S32STD(xr5, blk, 0x50);
	S32STD(xr3, blk, 0x60);
	S32STD(xr11, blk, 0x70);
    }

    blk = input - 8;
    src -= stride;
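    /* final pass: add the 8x8 IDCT residual to the source pixels and pack
       back to bytes with saturation (Q8ACCE_AA widens and accumulates,
       Q16SAT clamps to the 8-bit range) */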
    for (i=0; i<8; i++) {
        S32LDIV(xr1, src, stride, 0x0);
	S32LDI(xr3, blk, 0x10);
	S32LDD(xr4, blk, 0x4);
	Q8ACCE_AA(xr4, xr1, xr0, xr3);
	S32LDD(xr2, src, 0x4);
	S32LDD(xr5, blk, 0x8);
	S32LDD(xr6, blk, 0xc);
	Q8ACCE_AA(xr6, xr2, xr0, xr5);
	Q16SAT(xr1, xr4, xr3);
	S32STD(xr1, src, 0x0);
	Q16SAT(xr2, xr6, xr5);
	S32STD(xr2, src, 0x4);
    }
}
void fft_calc_fix_inverse(FFTContext_fix *s, FFTComplex_fix *z)
{
    int ln = s->nbits;
    int j, np, np2;
    int nblocks, nloops;
    register FFTComplex_fix *p, *q;
    FFTComplex_fix *exptab = s->exptab;
    int l;
    FFTSample_fix tmp_re, tmp_im;
    np = 1 << ln;
      /* butterfly passes over all log2(N) stages; e.g. N = 16 gives 4 passes */
    /* pass 0 */
#if 0
    p=&z[0];
    j=(np >> 1);
    do {
      /*
	 X(k) = G(k) + H(k)*W^0   (W^0 = e^(j*0) = 1)
      */
        FFT_BF_fix(p[0].re, p[0].im, p[1].re, p[1].im,
           p[0].re, p[0].im, p[1].re, p[1].im);

        p+=2;
    } while (--j);
#endif

    /* pass 1 */
    p=&z[0];
    j=np >> 2;
    do {
#if 1
      S32LDD(xr1,p,0);
      S32LDD(xr2,p,4);
      S32LDD(xr3,p,8);
      S32LDD(xr4,p,12);
      S32LDD(xr5,p,16);
      S32LDD(xr6,p,20);
      S32LDD(xr7,p,24);
      S32LDD(xr8,p,28);
      D32ADD_AS(xr1,xr1,xr3,xr3);
      D32ADD_AS(xr2,xr2,xr4,xr4);
      D32ADD_AS(xr5,xr5,xr7,xr7);
      D32ADD_AS(xr6,xr6,xr8,xr8);
      D32ADD_AS(xr1,xr1,xr5,xr5);
      D32ADD_AS(xr2,xr2,xr6,xr6);
      D32ADD_SA(xr3,xr3,xr8,xr9);
      D32ADD_AS(xr4,xr4,xr7,xr8);
      S32STD(xr1,p,0);
      S32STD(xr2,p,4);
      S32STD(xr3,p,8);
      S32STD(xr4,p,12);
      S32STD(xr5,p,16);
      S32STD(xr6,p,20);
      S32STD(xr9,p,24);
      S32STD(xr8,p,28);
#else
      FFT_BF_fix(p[0].re, p[0].im, p[1].re, p[1].im,
		 p[0].re, p[0].im, p[1].re, p[1].im);
      FFT_BF_fix(p[2].re, p[2].im, p[3].re, p[3].im,
		 p[2].re, p[2].im, p[3].re, p[3].im);

      FFT_BF_fix(p[0].re, p[0].im, p[2].re, p[2].im,
		 p[0].re, p[0].im, p[2].re, p[2].im);
      FFT_BF_fix(p[1].re, p[1].im, p[3].re, p[3].im,
		 p[1].re, p[1].im, -p[3].im, p[3].re);
#endif
      p+=4;
    } while (--j);

    /* pass 2 .. ln-1 */
    nblocks = np >> 3;
    nloops = 1 << 2;
    np2 = np >> 1;
    do {
        p = z;
        q = z + nloops;
        for (j = 0; j < nblocks; ++j) {
#if 1
	  S32LDD(xr1,p,0);
	  S32LDD(xr2,p,4);
	  S32LDD(xr3,q,0);
	  S32LDD(xr4,q,4);
	  D32ADD_AS(xr1,xr1,xr3,xr3);
	  D32ADD_AS(xr2,xr2,xr4,xr4);
	  S32STD(xr1,p,0);
	  S32STD(xr2,p,4);
	  S32STD(xr3,q,0);
	  S32STD(xr4,q,4);
#else
	  FFT_BF_fix(p->re, p->im, q->re, q->im,
		     p->re, p->im, q->re, q->im);
#endif

	  p++;
	  q++;
	  for(l = nblocks; l < np2; l += nblocks) {
	    /* FFT_CMUL_fix() computes the twiddle multiply:
	       H(i) * e^(-j*2*PI*k*m/N)
	    */
#if 1
	    FFTSample_fix _are = exptab[l].re;
	    FFTSample_fix _bre = q->re;
	    FFTSample_fix _aim = exptab[l].im;
	    FFTSample_fix _bim = q->im;

	    S32MUL(xr1, xr2, _are, _bre);	    
            S32MUL(xr5, xr6, _are, _bim);
	    S32LDD(xr7,p,0);	    
            S32MSUB(xr1, xr2, _aim, _bim);	
	    S32MADD(xr5, xr6, _aim, _bre);	
	    S32LDD(xr8,p,4);
	    D32SLL(xr1, xr1, xr5, xr5, 1);	

	    D32ADD_AS(xr7,xr7,xr1,xr1);
	    D32ADD_AS(xr8,xr8,xr5,xr5);
	    S32STD(xr7,p,0);
	    S32STD(xr8,p,4);
	    S32STD(xr1,q,0);
	    S32STD(xr5,q,4);

#else
	    FFT_CMUL_fix(tmp_re, tmp_im, exptab[l].re, exptab[l].im, q->re, q->im);
	    FFT_BF_fix(p->re, p->im, q->re, q->im,
		       p->re, p->im, tmp_re, tmp_im);
#endif
	    p++;
	    q++;
	  }
	  p += nloops;
	  q += nloops;
        }
        nblocks = nblocks >> 1;
        nloops = nloops << 1;
    } while (nblocks);
}
uint32_t
dequant_h263_intra_mxu(int16_t * data, uint8_t yuv_len,
		       const uint32_t quant,
		       const uint32_t dcscalar,
		       const uint16_t * mpeg_quant_matrices)
{
	uint32_t i = 0;
	
	S32LUI(xr9,1,0);
	S32I2M(xr1,quant);
	
	D32SLL(xr5,xr1,xr0,xr0,1);// quant_m_2

	/* quant_add  */
	S32AND(xr15,xr1,xr9);
	S32MOVN(xr2,xr15,xr1);
	D32ADD_SS(xr1,xr1,xr9,xr3);
	S32MOVZ(xr2,xr15,xr1);

	S32I2M(xr3,-2048);
	S32I2M(xr4,2047);

	/* part1 */
	//S32MUL(xr4,xr6,*data,dcscalar);
	S32MUL(xr0,xr6,(int32_t)data[0],dcscalar);
	D16MUL_WW(xr0,xr6,xr9,xr6);

	S32MIN(xr6,xr6,xr4);
	S32MAX(xr6,xr6,xr3);  

	/* part2 */
	yuv_len = ((yuv_len&~1)+3)>>1;
	data-=2;    
	for (i = 0; i < yuv_len; i++) {
	    S32LDI(xr1,data,4);
      
	    D16MUL_LW(xr13,xr9,xr1,xr14); // keep the original signed values of data[i] and data[i+1]
	    D16CPS(xr1,xr1,xr1); 

	    /*  quant_m_2 * acLevel + quant_add */
	    D16MUL_LW(xr7,xr5,xr1,xr8);
            D32ADD_AA(xr7,xr7,xr2,xr0);
	    D32ADD_AA(xr8,xr8,xr2,xr0);

#if 0	   
	    /* -2048 < data[i+1] <2047  */
            S32CPS(xr7,xr7,xr13);
	    S32MAX(xr10,xr7,xr3);
	    S32MIN(xr10,xr10,xr4);
	    S32MOVZ(xr10,xr13,xr13);
	    
	    /* -2048 < data[i] <2047  */
	    S32CPS(xr8,xr8,xr14);
	    S32MAX(xr11,xr8,xr3);
	    S32MIN(xr11,xr11,xr4);
	    S32MOVZ(xr11,xr14,xr14);
#else

	    /* -2048 < data[i+1] < 2047 */
	    S32AND(xr7,xr7,xr4);
	    S32CPS(xr10,xr7,xr13);
	    S32MOVZ(xr10,xr13,xr13);

	    /* -2048 < data[i] < 2047 */
	    S32AND(xr8,xr8,xr4);
	    S32CPS(xr11,xr8,xr14);
	    S32MOVZ(xr11,xr14,xr14);

#endif
	   
	    S32SFL(xr0,xr10,xr11,xr12,3);
	    S32STD(xr12,data,0);
	}
	S16STD(xr6,data-(yuv_len*2-2),0,0); // store xr6 back to data[0]

	return(0);
}
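/* Per-coefficient view of the loop above in plain C (illustrative): H.263
   inverse quantisation with the even/odd quant_add rule; note the MXU path
   masks with 2047 (xr4) rather than doing a full signed clamp. */
static int16_t dequant_h263_coeff_c(int16_t level, uint32_t quant)
{
	uint32_t quant_m_2 = quant << 1;
	uint32_t quant_add = (quant & 1) ? quant : quant - 1;
	int32_t a, v;
	if (level == 0)
		return 0;            // zeros stay zero (S32MOVZ)
	a = level < 0 ? -level : level;
	v = quant_m_2 * a + quant_add;
	if (v > 2047)
		v = 2047;            // saturate to the positive 12-bit limit
	return level < 0 ? (int16_t)-v : (int16_t)v;
}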
void
add_acdc(MACROBLOCK * pMB,
		 uint32_t block,
		 int16_t dct_codes[64],
		 uint32_t iDcScaler,
		 int16_t predictors[8],
		 const int bsversion)
{
	uint8_t acpred_direction = pMB->acpred_directions[block];
	int16_t *pCurrent = (int16_t*)pMB->pred_values[block];
	uint32_t i;

	DPRINTF(XVID_DEBUG_COEFF,"predictor[0] %i\n", predictors[0]);

	dct_codes[0] += predictors[0];	/* dc prediction */
#if 0
       	pCurrent[0] = dct_codes[0]*iDcScaler;
	if (!bsversion || bsversion > BS_VERSION_BUGGY_DC_CLIPPING) {
		pCurrent[0] = CLIP(pCurrent[0], -2048, 2047);
	}
#endif

	if (acpred_direction == 1) {
		for (i = 1; i < 8; i++) {
			int level = dct_codes[i] + predictors[i];

			DPRINTF(XVID_DEBUG_COEFF,"predictor[%i] %i\n",i, predictors[i]);

			dct_codes[i] = level;
			//	pCurrent[i] = level;
			//	pCurrent[i + 7] = dct_codes[i * 8];
		}
	} else if (acpred_direction == 2) {
		for (i = 1; i < 8; i++) {
			int level = dct_codes[i * 8] + predictors[i];
			DPRINTF(XVID_DEBUG_COEFF,"predictor[%i] %i\n",i*8, predictors[i]);

			dct_codes[i * 8] = level;
			//	pCurrent[i + 7] = level;
			//	pCurrent[i] = dct_codes[i];
		}
	} //else {
	  //	for (i = 1; i < 8; i++) {
		  //	pCurrent[i] = dct_codes[i];
	  //		pCurrent[i + 7] = dct_codes[i * 8];
	  //		}
	  //	}
	{
	  /* copy the first row (dct_codes[0..7]) and first column
	     (dct_codes[8*i]) of the block into the prediction buffer */
	  S32LDD(xr1,dct_codes,0);
	  S32LDD(xr2,dct_codes,4);
	  S32LDD(xr3,dct_codes,8);
	  S32LDD(xr4,dct_codes,12);

	  S16LDD(xr5,dct_codes,16,0);
	  S16LDD(xr6,dct_codes,48,0);
	  S16LDD(xr7,dct_codes,80,0);
	  S16LDD(xr8,dct_codes,112,0);

	  S16LDD(xr5,dct_codes,32,1);
	  S16LDD(xr6,dct_codes,64,1);
	  S16LDD(xr7,dct_codes,96,1);

	  S32STD(xr1,pCurrent,0);
	  S32STD(xr2,pCurrent,4);
	  S32STD(xr3,pCurrent,8);
	  S32STD(xr4,pCurrent,12);
	  S32STD(xr5,pCurrent,16);
	  S32STD(xr6,pCurrent,20);
	  S32STD(xr7,pCurrent,24);
	  S32STD(xr8,pCurrent,28);
	 
	}
	
#if 1
	pCurrent[0] = dct_codes[0]*iDcScaler;
	if (!bsversion || bsversion > BS_VERSION_BUGGY_DC_CLIPPING) {
	  pCurrent[0] = CLIP(pCurrent[0], -2048, 2047);
	}
#endif
	
}
static void rv40_dequant4x4(DCTELEM *block,uint32_t *dst, int n)
{
  int i;
  uint32_t src = (uint32_t)(block - 4);
  uint32_t dst_t = (uint32_t)(dst - 4);
#if 0
  for(i = 0; i < n; i++){   
    S32LDI(xr1,src,0x8);
    S32LDD(xr2,src,0x4);
    S32LDI(xr7,src,0x8);
    S32LDD(xr8,src,0x4);

    D16MUL_LW(xr4,xr12,xr1,xr3);
    D16MUL_LW(xr10,xr12,xr7,xr9);
    D16MUL_LW(xr14,xr12,xr8,xr15);
    D32ASUM_AA(xr3,xr13,xr13,xr4);
    D16MUL_LW(xr6,xr12,xr2,xr5);
    D32SLR(xr3,xr3,xr4,xr4,4);
    D32ASUM_AA(xr5,xr13,xr13,xr6);
    D32ASUM_AA(xr9,xr13,xr13,xr10);
    D32SLR(xr5,xr5,xr6,xr6,4);
    D32SLR(xr9,xr9,xr10,xr10,4);
    D32ASUM_AA(xr15,xr13,xr13,xr14);

    S32SDI(xr3,dst_t,0x10);
    S32STD(xr4,dst_t,0x4);
    S32STD(xr5,dst_t,0x8);
    S32STD(xr6,dst_t,0xc);

    D32SLR(xr15,xr15,xr14,xr14,4);
    S32SDI(xr9,dst_t,0x10);
    S32STD(xr10,dst_t,0x4);
    S32STD(xr15,dst_t,0x8);
    S32STD(xr14,dst_t,0xc);      
  }
#else
  /////////////////////     
  if(n == 1){
    S32LDI(xr1,src,0x8);
    S32LDI(xr2,src,0x8);
    S32LDI(xr7,src,0x8);
    S32LDI(xr8,src,0x8);
	  
    D16MUL_XW(xr4,xr12,xr1,xr3);
    D16MUL_LW(xr10,xr12,xr7,xr9);
    D16MUL_LW(xr14,xr12,xr8,xr15);
    D32ASUM_AA(xr3,xr13,xr13,xr4);
    D16MUL_LW(xr6,xr12,xr2,xr5);
    D32SLR(xr3,xr3,xr4,xr4,4);
    D32ASUM_AA(xr5,xr13,xr13,xr6);
    D32ASUM_AA(xr9,xr13,xr13,xr10);
    D32SLR(xr5,xr5,xr6,xr6,4);
    D32SLR(xr9,xr9,xr10,xr10,4);
    D32ASUM_AA(xr15,xr13,xr13,xr14);

    S32SDI(xr3,dst_t,0x10);
    S32STD(xr4,dst_t,0x4);
    S32STD(xr0,dst_t,0x8);
    S32STD(xr0,dst_t,0xc);

    S32SDI(xr5,dst_t,0x10);
    S32STD(xr6,dst_t,0x4);
    S32STD(xr0,dst_t,0x8);
    S32STD(xr0,dst_t,0xc);

    D32SLR(xr15,xr15,xr14,xr14,4);
    S32SDI(xr9,dst_t,0x10);
    S32STD(xr10,dst_t,0x4);
    S32STD(xr0,dst_t,0x8);
    S32STD(xr0,dst_t,0xc);
    //S32STD(xr15,dst_t,0x8);
    //S32STD(xr14,dst_t,0xc);
    S32SDI(xr15,dst_t,0x10);
    S32STD(xr14,dst_t,0x4);
    S32STD(xr0,dst_t,0x8);
    S32STD(xr0,dst_t,0xc);
  }
  else if(n==2)
    {
      S32LDI(xr1,src,0x8);
      S32LDD(xr2,src,0x4);
      S32LDI(xr7,src,0x8);
      S32LDD(xr8,src,0x4);
	  
      D16MUL_XW(xr4,xr12,xr1,xr3);
      D16MUL_LW(xr10,xr12,xr7,xr9);
      D16MUL_LW(xr14,xr12,xr8,xr15);
      D32ASUM_AA(xr3,xr13,xr13,xr4);
      D16MUL_LW(xr6,xr12,xr2,xr5);
      D32SLR(xr3,xr3,xr4,xr4,4);
      D32ASUM_AA(xr5,xr13,xr13,xr6);
      D32ASUM_AA(xr9,xr13,xr13,xr10);
      D32SLR(xr5,xr5,xr6,xr6,4);
      D32SLR(xr9,xr9,xr10,xr10,4);
      D32ASUM_AA(xr15,xr13,xr13,xr14);

      S32SDI(xr3,dst_t,0x10);
      S32STD(xr4,dst_t,0x4);
      S32STD(xr5,dst_t,0x8);
      S32STD(xr6,dst_t,0xc);

      D32SLR(xr15,xr15,xr14,xr14,4);
      S32SDI(xr9,dst_t,0x10);
      S32STD(xr10,dst_t,0x4);
      S32STD(xr15,dst_t,0x8);
      S32STD(xr14,dst_t,0xc);

      S32SDI(xr0,dst_t,0x10);
      S32STD(xr0,dst_t,0x4);
      S32STD(xr0,dst_t,0x8);
      S32STD(xr0,dst_t,0xc);
	  
      S32SDI(xr0,dst_t,0x10);
      S32STD(xr0,dst_t,0x4);
      S32STD(xr0,dst_t,0x8);
      S32STD(xr0,dst_t,0xc);	  
    }

  else
    {
      S32LDI(xr1,src,0x8);
      S32LDD(xr2,src,0x4);
      S32LDI(xr7,src,0x8);
      S32LDD(xr8,src,0x4);

      D16MUL_XW(xr4,xr12,xr1,xr3);
      D16MUL_LW(xr10,xr12,xr7,xr9);
      D16MUL_LW(xr14,xr12,xr8,xr15);
      D32ASUM_AA(xr3,xr13,xr13,xr4);
      D16MUL_LW(xr6,xr12,xr2,xr5);
      D32SLR(xr3,xr3,xr4,xr4,4);
      D32ASUM_AA(xr5,xr13,xr13,xr6);
      D32ASUM_AA(xr9,xr13,xr13,xr10);
      D32SLR(xr5,xr5,xr6,xr6,4);
      D32SLR(xr9,xr9,xr10,xr10,4);
      D32ASUM_AA(xr15,xr13,xr13,xr14);

      S32SDI(xr3,dst_t,0x10);
      S32STD(xr4,dst_t,0x4);
      S32STD(xr5,dst_t,0x8);
      S32STD(xr6,dst_t,0xc);

      D32SLR(xr15,xr15,xr14,xr14,4);
      S32SDI(xr9,dst_t,0x10);
      S32STD(xr10,dst_t,0x4);
      S32STD(xr15,dst_t,0x8);
      S32STD(xr14,dst_t,0xc);

      S32LDI(xr1,src,0x8);
      S32LDD(xr2,src,0x4);
      S32LDI(xr7,src,0x8);
      S32LDD(xr8,src,0x4);

      D16MUL_LW(xr4,xr12,xr1,xr3);
      D16MUL_LW(xr10,xr12,xr7,xr9);
      D16MUL_LW(xr14,xr12,xr8,xr15);
      D32ASUM_AA(xr3,xr13,xr13,xr4);
      D16MUL_LW(xr6,xr12,xr2,xr5);
      D32SLR(xr3,xr3,xr4,xr4,4);
      D32ASUM_AA(xr5,xr13,xr13,xr6);
      D32ASUM_AA(xr9,xr13,xr13,xr10);
      D32SLR(xr5,xr5,xr6,xr6,4);
      D32SLR(xr9,xr9,xr10,xr10,4);
      D32ASUM_AA(xr15,xr13,xr13,xr14);

      S32SDI(xr3,dst_t,0x10);
      S32STD(xr4,dst_t,0x4);
      S32STD(xr5,dst_t,0x8);
      S32STD(xr6,dst_t,0xc);

      D32SLR(xr15,xr15,xr14,xr14,4);
      S32SDI(xr9,dst_t,0x10);
      S32STD(xr10,dst_t,0x4);
      S32STD(xr15,dst_t,0x8);
      S32STD(xr14,dst_t,0xc);      
    }
#endif

}