Example 1
0
// MODE 3
/*
 * pred16x16_plane_mxu() - H.264 Intra_16x16 PLANE luma prediction,
 * implemented with Ingenic MXU (media extension unit) SIMD intrinsics.
 *
 * dst : base of the 16x16 destination block; rows are
 *       MB_LUMA_EDGED_WIDTH bytes apart (stored with S32SDIVR/S32STDR).
 * src : pointer used to reach the left-neighbour column; src - 4 is the
 *       word holding the first left sample, successive rows are stepped
 *       with S32LDIV by MB_LUMA_EDGED_WIDTH.
 * top : pointer to the 16 top-neighbour samples t0..t15; top - 0x14
 *       addresses the word containing the top-left corner sample (lt).
 *
 * The code accumulates the horizontal gradient H from the top row and
 * the vertical gradient V from the left column, then fills the block
 * with the saturated plane values entirely in XR registers.
 * NOTE(review): the H/V weighting (1..8 via MUL_12/MUL_34/MUL_56/MUL_78)
 * and the a = 16*(t15+l15+1), b = (5*H+32)>>6, c = (5*V+32)>>6 terms
 * match the standard H.264 plane-prediction formulas as far as the
 * register comments show -- confirm against the scalar reference.
 */
static void pred16x16_plane_mxu(uint8_t *dst, uint8_t *src, uint8_t *top){
  int i;  /* row counter for the store loop (unused j, k, a removed) */
  uint8_t *src_top;  // top address
  uint8_t *src_topleft, *src_left;  // left address
  src_top = top;
  src_topleft = src_top - 0x14;
  src_left = src - 0x4;

  //----- H, LOAD -----
  S32LDD(xr1, src_top, -0x14);  // xr1 <- src_top[-4];  xr1: lt, 0, 0, 0 ;
  S32LDD(xr5, src_top, 0x0);   // xr5 <- src_top[0] ;  xr5: t3, t2, t1, t0 ;
  S32LDD(xr2, src_top, 0x4);   // xr2 <- src_top[4] ;  xr2: t7, t6, t5, t4 ;
  S32LDDR(xr3, src_top, 0x8);  // xr3 <- src_top[8] ;  xr3: t8, t9, t10, t11 ;
  S32LDDR(xr4, src_top, 0xc);  // xr4 <- src_top[12];  xr4: t12, t13, t14, t15 ;
  S32ALNI(xr1, xr5, xr1, ptn1);  //                    xr1: t2, t1, t0, lt ;
  S32ALNI(xr2, xr2, xr5, ptn1);  //                    xr2: t6, t5, t4, t3 ;   ---xr5 is free to use ;
  S32I2M(xr9, MUL_12);  // xr9 : 0x00010002 ;
  S32I2M(xr10, MUL_34); // xr10: 0x00030004 ;

  //----- H, SUM -----
  Q8ADDE_SS(xr5, xr3, xr2, xr6);  // xr5[31:16] <- t8-t6 ;  xr5[15:0] <- t9-t5 ;
                                  // xr6[31:16] <- t10-t4;  xr6[15:0] <- t11-t3;

  S32I2M(xr11, MUL_56); // xr11: 0x00050006 ;

  D16MUL_WW(xr13, xr9, xr5, xr14);     // xr13 <- 1*(t8-t6) ;  xr14 <- 2*(t9-t5) ;
  D16MAC_AA_WW(xr13, xr10, xr6, xr14); // xr13 <- 1*(t8-t6)+3*(t10-t4) ; xr14 <- 2*(t9-t5)+4*(t11-t3) ;
  Q8ADDE_SS(xr5, xr4, xr1, xr6);  // xr5[31:16] <- t12-t2;  xr5[15:0] <- t13-t1;
                                  // xr6[31:16] <- t14-t0;  xr6[15:0] <- t15-lt;

  S32I2M(xr12, MUL_78); // xr12: 0x00070008 ;

  D16MAC_AA_WW(xr13, xr11, xr5, xr14); // xr13 <- 1*(t8-t6)+3*(t10-t4)+5*(t12-t2) ;
                                       // xr14 <- 2*(t9-t5)+4*(t11-t3)+6*(t13-t1) ;
  D16MAC_AA_WW(xr13, xr12, xr6, xr14); // xr13 <- 1*(t8-t6)+3*(t10-t4)+5*(t12-t2)+7*(t14-t0) ;
                                       // xr14 <- 2*(t9-t5)+4*(t11-t3)+6*(t13-t1)+8*(t15-lt) ;
  S32LDD(xr1, src_topleft, 0x0);          // xr1[31:24] <- src_topleft[3] (lt) ;
  S32LDD(xr2, src_left, 0x0); // xr2[31:24] <- src_topleft[stride+3] (l0) ;
  D32ADD_AA(xr15, xr13, xr14, xr0); // xr15 <- 1*(t8-t6)+3*(t10-t4)+5*(t12-t2)+7*(t14-t0)
                                    //       + 2*(t9-t5)+4*(t11-t3)+6*(t13-t1)+8*(t15-lt) ;
  //----- V, LOAD -----
  //  S32LDD(xr1, src_topleft, 0x0);          // xr1[31:24] <- src_topleft[3] (lt) ;
  //  S32LDIV(xr2, src_topleft, stride, 0x0); // xr2[31:24] <- src_topleft[stride+3] (l0) ;
  S32LDIV(xr3, src_left, MB_LUMA_EDGED_WIDTH, 0x0); // xr3[31:24] <- src_topleft[2*stride+3] (l1) ;
  S32LDIV(xr8, src_left, MB_LUMA_EDGED_WIDTH, 0x0); // xr8[31:24] <- src_topleft[3*stride+3] (l2) ;
  S32SFL(xr5, xr2, xr1, xr0, ptn2);       // xr5[31:16] <- l0, lt ;
  S32SFL(xr6, xr8, xr3, xr0, ptn2);       // xr6[31:16] <- l2, l1 ;
  S32SFL(xr7, xr6, xr5, xr0, ptn3);       // xr7[31: 0] <- l2, l1, l0, lt ;

  S32LDIV(xr1, src_left, MB_LUMA_EDGED_WIDTH, 0x0);
  S32LDIV(xr2, src_left, MB_LUMA_EDGED_WIDTH, 0x0);
  S32LDIV(xr3, src_left, MB_LUMA_EDGED_WIDTH, 0x0);
  S32LDIV(xr8, src_left, MB_LUMA_EDGED_WIDTH, 0x0);
  S32SFL(xr5, xr2, xr1, xr0, ptn2);
  S32SFL(xr6, xr8, xr3, xr0, ptn2);
  S32SFL(xr13, xr6, xr5, xr0, ptn3); // xr13[31:0] <- l6, l5, l4, l3 ;

  src_left += MB_LUMA_EDGED_WIDTH;   // skip row 7: l7 is not used by the gradient

  S32LDIV(xr8, src_left, MB_LUMA_EDGED_WIDTH, 0x0);
  S32LDIV(xr3, src_left, MB_LUMA_EDGED_WIDTH, 0x0);
  S32LDIV(xr2, src_left, MB_LUMA_EDGED_WIDTH, 0x0);
  S32LDIV(xr1, src_left, MB_LUMA_EDGED_WIDTH, 0x0);
  S32SFL(xr6, xr8, xr3, xr0, ptn2);
  S32SFL(xr5, xr2, xr1, xr0, ptn2);
  S32SFL(xr14, xr6, xr5, xr0, ptn3); // xr14[31:0] <- l8, l9, l10, l11 ;

  S32LDIV(xr8, src_left, MB_LUMA_EDGED_WIDTH, 0x0);
  S32LDIV(xr3, src_left, MB_LUMA_EDGED_WIDTH, 0x0);
  S32LDIV(xr2, src_left, MB_LUMA_EDGED_WIDTH, 0x0);
  S32LDIV(xr1, src_left, MB_LUMA_EDGED_WIDTH, 0x0);
  S32SFL(xr6, xr8, xr3, xr0, ptn2);
  S32SFL(xr5, xr2, xr1, xr0, ptn2);
  S32SFL(xr1, xr6, xr5, xr0, ptn3); // xr1[31: 0] <- l12, l13, l14, l15 ;

  //----- V, SUM -----
  Q8ADDE_SS(xr5, xr14, xr13, xr6);
  Q8ADDE_SS(xr2, xr1, xr7, xr3);

  D16MUL_WW(xr13, xr9, xr5, xr14);
  D16MAC_AA_WW(xr13, xr10, xr6, xr14);

  D16MAC_AA_WW(xr13, xr11, xr2, xr14);
  D16MAC_AA_WW(xr13, xr12, xr3, xr14);

  D32SLR(xr2, xr11, xr12, xr3, 0x8); // xr2: 0x00000500 ;  xr3: 0x00000700 ;
  D32SLR(xr11, xr2, xr3, xr12, 0x8); //xr11: 0x00000005 ; xr12: 0x00000007 ;

  D32ADD_AA(xr14, xr13, xr14, xr0); // xr14 <- 1*(l8-l6)+3*(l10-l4)+5*(l12-l2)+7*(l14-l0)
                                    //       + 2*(l9-l5)+4*(l11-l3)+6*(l13-l1)+8*(l15-lt) ;
  //----- P, CAL -----
  //  D32SLR(xr2, xr11, xr12, xr3, 0x8); // xr2: 0x00000500 ;  xr3: 0x00000700 ;
  //  D32SLR(xr11, xr2, xr3, xr12, 0x8); //xr11: 0x00000005 ; xr12: 0x00000007 ;

  D16MUL_WW(xr0, xr15, xr11, xr2); // xr2: 5*H ;
  D16MUL_WW(xr0, xr14, xr11, xr3); // xr3: 5*V ;

  D32SLR(xr8, xr11, xr0, xr0, 0x2); // xr8: 0x00000001 ;
  D32SLL(xr13, xr8, xr0, xr0, 0x5); //xr13: 0x00000020 ;

  Q8ACCE_AA(xr0, xr1, xr4, xr8);   // xr8[15:0]: src1[0] + src2[16] + 1

  D32ADD_AA(xr5, xr2, xr13, xr0); // xr5: 5*H+32 ;
  D32ADD_AA(xr6, xr3, xr13, xr0); // xr6: 5*V+32 ;

  D32SLR(xr2, xr5, xr6, xr3, 0x6); // xr2: ( 5*H+32 ) >> 6 ;  xr3: ( 5*V+32 ) >> 6 ;

  //  Q8ACCE_AA(xr0, xr1, xr4, xr8);   // xr8[15:0]: src1[0] + src2[16] + 1
  D32SLL(xr5, xr8, xr0, xr0, 0x4); // xr5[15:0]: 16*(src1[0] + src2[16] + 1)

  Q16ADD_AA_WW(xr7, xr2, xr3, xr0); // xr7: V+H
  //  S32NOR(xr0, xr0, xr0); // idle
  S32I2M(xr4, MUX_H16); // xr4: 0x0000ffff ;
  D16MUL_WW(xr0, xr7, xr12, xr8);   // xr8: 7*(V+H)

  S32SFL(xr0, xr3, xr3, xr14, ptn3); // xr14[31:16]: V ;  xr14[15:0]: V ;
  D32SLL(xr7, xr2, xr0, xr0, 0x1);

  Q16ADD_SS_WW(xr9, xr5, xr8, xr0); // xr9: 16*(src1[0] + src2[16] + 1) - 7*(V+H)
  S32SFL(xr0, xr9, xr9, xr5, ptn3); // xr5[31:16]: a ;  xr5[15:0]: a ;
  //  S32SFL(xr0, xr3, xr3, xr14, ptn3); // xr14[31:16]: V ;  xr14[15:0]: V ;
  //  D32SLL(xr7, xr2, xr0, xr0, 0x1);
  S32SFL(xr0, xr7, xr7, xr8, ptn3);  // xr8[31:16]: 2H ;  xr8[15:0]: 2H ;

  S32AND(xr2, xr4, xr2);             // keep only the low halfword (H) of xr2

  Q16ADD_AA_WW(xr15, xr5, xr2, xr0); // xr15[31:16]: a ;  xr15[15:0]: a + H ;

  dst -= MB_LUMA_EDGED_WIDTH;
  //----- SRC, STORE -----
  /* Each iteration: build the 16 pixels of one row as eight halfword
   * pairs stepping by 2H, arithmetic-shift them down by 5, saturate to
   * unsigned 8 bits (Q16SAT) and store 16 bytes; then advance the row
   * accumulator xr15 by V for the next row. */
  for (i=0; i<16; i++) {
    Q16ADD_AA_WW(xr1, xr15, xr8, xr0);
    Q16ADD_AA_WW(xr2, xr1, xr8, xr0);
    Q16SAR(xr9, xr15, xr1, xr1, 0x5);
    Q16ADD_AA_WW(xr3, xr2, xr8, xr0);
    Q16SAT(xr10, xr9, xr1);
    Q16ADD_AA_WW(xr4, xr3, xr8, xr0);
    Q16SAR(xr2, xr2, xr3, xr3, 0x5);
    Q16ADD_AA_WW(xr5, xr4, xr8, xr0);
    Q16SAT(xr11, xr2, xr3);
    Q16ADD_AA_WW(xr6, xr5, xr8, xr0);
    Q16SAR(xr4, xr4, xr5, xr5, 0x5);
    Q16ADD_AA_WW(xr7, xr6, xr8, xr0);
    Q16SAR(xr6, xr6, xr7, xr7, 0x5);
    Q16SAT(xr12, xr4, xr5);
    Q16SAT(xr13, xr6, xr7);

    S32SDIVR(xr10, dst, MB_LUMA_EDGED_WIDTH, 0x0);
    S32STDR(xr11, dst, 0x4);
    S32STDR(xr12, dst, 0x8);
    //    S32STDR(xr13, dst, 0xc);

    Q16ADD_AA_WW(xr15, xr15, xr14, xr0);  // advance row accumulator by V

    S32STDR(xr13, dst, 0xc);
  }

}
Example 2
0
/*
 * ff_vp3_idct_add_mxu() - 8x8 inverse DCT plus add-to-predictor for the
 * VP3/Theora decoder, implemented with Ingenic MXU SIMD intrinsics.
 *
 * src      : 8x8 destination/predictor block in the frame; rows are
 *            `stride` bytes apart.  The final loop adds the IDCT result
 *            to these pixels with unsigned 8-bit saturation (Q16SAT).
 * stride   : row pitch of src in bytes.
 * input    : 8x8 block of DCTELEM coefficients, transformed in place.
 * idct_row : number of rows to run the row transform on (rows beyond
 *            this are assumed zero by the caller).
 *
 * Structure: (1) row pass with early-outs for all-zero rows and
 * DC-only rows, (2) column pass over 4 coefficient-pair columns with
 * rounding (+8, >>4), (3) add-and-saturate into src.
 * Register roles are documented on each instruction; several original
 * comments mislabeled destination registers and have been corrected.
 */
static void ff_vp3_idct_add_mxu(uint8_t *src, int stride, DCTELEM *input, uint8_t idct_row)
{
    int i;
    DCTELEM *blk;
    /* NOTE(review): pointer truncated to 32 bits -- valid only on a
     * 32-bit target (the MXU platform); confirm if ever ported. */
    int32_t wf = (int32_t)whirl_idct;

    S32LDD(xr5, wf, 0x0);         // xr5(w7, w3)
    S32LDD(xr6, wf, 0x4);         // xr6(w9, w8)
    S32LDD(xr7, wf, 0x8);         // xr7(w11,w10)
    S32LDD(xr8, wf, 0xc);         // xr8(w13,w12)
    S32LDD(xr9, wf, 0x10);        // xr9(w6, w0)
    S32LDD(xr10,wf, 0x14);
    blk = input - 8;
    /* Inverse DCT on the rows now */
    for (i=0; i<idct_row; i++) {
        S32LDI(xr1, blk, 0x10);       //  xr1 (x4, x0)
	S32LDD(xr2, blk, 0x4);        //  xr2 (x7, x3)
	S32LDD(xr3, blk, 0x8);        //  xr3 (x6, x1)
	S32LDD(xr4, blk, 0xc);        //  xr4 (x5, x2)
	S32OR(xr12, xr2,xr3);         // OR all coefficients together to
	S32OR(xr11,xr12,xr4);         // detect an all-zero row cheaply
	S32OR(xr12,xr11,xr1);
	if (S32M2I(xr12) == 0) {
	    continue;            //blk[0]= blk[1]=blk[2]=blk[3]=blk[4]=blk[5]=blk[6]=blk[7]=0
	}
	S32SFL(xr12,xr0,xr1,xr13,ptn3);
	S32OR(xr11,xr11,xr12);
	if (S32M2I(xr11) == 0 && S32M2I(xr13) != 0) {
	    /* DC-only row: scale the DC term and broadcast it across
	     * all eight outputs. */
	    D16MUL_HW(xr0,xr5,xr13,xr13);
	    D32SAR(xr0,xr0,xr13,xr13,15);
	    S32SFL(xr0,xr13,xr13,xr13,ptn3);
	    S32STD(xr13,blk, 0x0);
	    S32STD(xr13,blk, 0x4);
	    S32STD(xr13,blk, 0x8);
	    S32STD(xr13,blk, 0xc);
	    continue;            //blk[0]!=0, and blk[1]=blk[2]=blk[3]=blk[4]=blk[5]=blk[6]=blk[7]=0
	}

	S32SFL(xr1,xr1,xr2,xr2, ptn3);  //xr1:s1, s3, xr2: s0, s2
	S32SFL(xr3,xr3,xr4,xr4, ptn3);  //xr3:s5, s7, xr4: s4, s6

	D16MUL_WW(xr11, xr2, xr5, xr12);//xr11: s0*c4, xr12: s2*c2
	D16MAC_AA_WW(xr11,xr4,xr6,xr12);//xr11: s0*c4+s4*c4, xr12: s2*c2+s6*c6

	D16MUL_WW(xr13, xr2, xr6, xr14);//xr13: s0*c4, xr14: s2*c6
	D16MAC_SS_WW(xr13,xr4,xr5,xr14);//xr13: s0*c4 - s4*c4, xr14: s2*c6-s6*c2

	D16MUL_HW(xr2, xr1, xr7, xr4);  //xr2: s1*c1, xr4: s1*c3 
	D16MAC_AS_LW(xr2,xr1,xr9,xr4);  //xr2: s1*c1+s3*c3, xr4: s1*c3-s3*c7
	D16MAC_AS_HW(xr2,xr3,xr10,xr4); //xr2: s1*c1+s3*c3+s5*c5,
                                      // xr4: s1*c3-s3*c7-s5*c1
	D16MAC_AS_LW(xr2,xr3,xr8,xr4);  //xr2: s1*c1+s3*c3+s5*c5+s7*c7,
                                      //xr4: s1*c3-s3*c7-s5*c1-s7*c5
	D32SAR(xr11, xr11,xr13,xr13,15);
	S32SFL(xr0, xr11,xr13,xr11,ptn3);
	D32SAR(xr12,xr12,xr14,xr14,15);
	S32SFL(xr0, xr12,xr14,xr12,ptn3);
	D32SAR(xr2, xr2,xr4,xr4,15);
	S32SFL(xr0, xr2,xr4,xr2,ptn3);
      
	D16MUL_HW(xr4, xr1, xr8, xr15);     //xr4: s1*c7, xr15:s1*c5
	D16MAC_SS_LW(xr4,xr1,xr10,xr15);    //xr4: s1*c7-s3*c5, xr15: s1*c5-s3*c1
	D16MAC_AA_HW(xr4,xr3,xr9,xr15);     //xr4: s1*c7-s3*c5+s5*c3, xr15: s1*c5-s3*c1+s5*c7
	D16MAC_SA_LW(xr4,xr3,xr7,xr15);     //xr4: s1*c7-s3*c5+s5*c3-s7*c1
	                                    //xr15: s1*c5-s3*c1+s5*c7+s7*c3
	Q16ADD_AS_WW(xr11,xr11,xr12,xr12);  //xr11: rnd(s0*c4+s4*c4)>>15+rnd(s2*c2+s6*c6)>>15
                                          //      rnd(s0*c4-s4*c4)>>15+rnd(s2*c6-s6*c2)>>15
                                          //xr12: rnd(s0*c4+s4*c4)>>15-rnd(s2*c2+s6*c6)>>15
                                          //      rnd(s0*c4-s4*c4)>>15-rnd(s2*c6-s6*c2)>>15
	D32SAR(xr15,xr15,xr4,xr4,15);
	S32SFL(xr0,xr15,xr4,xr15,ptn3);
	Q16ADD_AS_WW(xr11, xr11, xr2, xr2);
              //xr11: rnd(s0*c4+s4*c4)>>15+rnd(s2*c2+s6*c6)>>15 + rnd(s1*c1+s3*c3+s5*c5+s7*c7)>>15
              //    : rnd(s0*c4-s4*c4)>>15+rnd(s2*c6-s6*c2)>>15 + rnd(s1*c3-s3*c7-s5*c1-s7*c5)>>15
              //xr2: rnd(s0*c4+s4*c4)>>15+rnd(s2*c2+s6*c6)>>15 - rnd(s1*c1+s3*c3+s5*c5+s7*c7)>>15
              //   : rnd(s0*c4-s4*c4)>>15+rnd(s2*c6-s6*c2)>>15 - rnd(s1*c3-s3*c7-s5*c1-s7*c5)>>15

	Q16ADD_AS_XW(xr12, xr12, xr15, xr15);
              //xr12: rnd(s0*c4+s4*c4)>>15-rnd(s2*c2+s6*c6)>>15+rnd(s1*c5-s3*c1+s5*c7+s7*c3)>>15
              //    : rnd(s0*c4-s4*c4)>>15+rnd(s2*c6-s6*c2)>>15+rnd(s1*c7-s3*c5+s5*c3-s7*c1)>>15
              //xr15: rnd(s0*c4+s4*c4)>>15-rnd(s2*c2+s6*c6)>>15-rnd(s1*c5-s3*c1+s5*c7+s7*c3)>>15
              //    : rnd(s0*c4-s4*c4)>>15+rnd(s2*c6-s6*c2)>>15-rnd(s1*c7-s3*c5+s5*c3-s7*c1)>>15

	S32SFL(xr11,xr11,xr12,xr12, ptn3);
              //xr11: rnd(s0*c4+s4*c4)>>15+rnd(s2*c2+s6*c6)>>15 + rnd(s1*c1+s3*c3+s5*c5+s7*c7)>>15
              //    : rnd(s0*c4+s4*c4)>>15-rnd(s2*c2+s6*c6)>>15+rnd(s1*c5-s3*c1+s5*c7+s7*c3)>>15
              //xr12: rnd(s0*c4-s4*c4)>>15+rnd(s2*c6-s6*c2)>>15 + rnd(s1*c3-s3*c7-s5*c1-s7*c5)>>15
              //    : rnd(s0*c4-s4*c4)>>15+rnd(s2*c6-s6*c2)>>15+rnd(s1*c7-s3*c5+s5*c3-s7*c1)>>15
	S32SFL(xr12,xr12,xr11,xr11, ptn3);

              //xr12: rnd(s0*c4-s4*c4)>>16+rnd(s2*c6-s6*c2)>>16 + rnd(s1*c3-s3*c7-s5*c1-s7*c5)>>16
              //    : rnd(s0*c4+s4*c4)>>16+rnd(s2*c2+s6*c6)>>16 + rnd(s1*c1+s3*c3+s5*c5+s7*c7)>>16
              //xr11: rnd(s0*c4-s4*c4)>>16+rnd(s2*c6-s6*c2)>>16+rnd(s1*c7-s3*c5+s5*c3-s7*c1)>>16
              //    : rnd(s0*c4+s4*c4)>>16-rnd(s2*c2+s6*c6)>>16+rnd(s1*c5-s3*c1+s5*c7+s7*c3)>>16
	S32STD(xr12, blk, 0x0);
	S32STD(xr11, blk, 0x4);
	S32STD(xr15, blk, 0x8);
	S32STD(xr2, blk, 0xc);
    }
      
    blk = input - 2;
    for (i=0; i<4; i++)               /* idct columns */
    {
        S32I2M(xr5,wxr5);        //xr5: c4 , c2
	S32I2M(xr6,wxr6);        //xr6: wxr6 constant pair (original comment mislabeled this as xr5)
	S32LDI(xr1, blk, 0x4);   //xr1: ss0, s0
	S32LDD(xr3, blk, 0x20);  //xr3: ss2, s2
	S32LDD(xr11, blk, 0x40); //xr11: ss4, s4
	S32LDD(xr13, blk, 0x60); //xr13: ss6, s6

	D16MUL_HW(xr15, xr5, xr1, xr2);    //xr15: ss0*c4, xr2: s0*c4
	D16MAC_AA_HW(xr15,xr5,xr11,xr2);   //xr15: ss0*c4+ss4*c4, xr2: s0*c4+s4*c4
	D16MUL_LW(xr10, xr5, xr3, xr9);    //xr10: ss2*c2, xr9: s2*c2
	D16MAC_AA_LW(xr10,xr6,xr13,xr9);   //xr10: ss2*c2+ss6*c6, xr9: s2*c2+s6*c6
	D32SAR(xr15,xr15,xr2,xr2,15);      
	S32SFL(xr0,xr15,xr2,xr15,ptn3);    //xr15: (ss0*c4+ss4*c4)>>15
	D32SAR(xr10,xr10,xr9,xr9,15);      
	S32SFL(xr0,xr10,xr9,xr10,ptn3);    //xr10: (ss2*c2+ss6*c6)>>15

	S32LDD(xr2, blk, 0x10);            //xr2: ss1, s1
	S32LDD(xr4, blk, 0x30);            //xr4: ss3, s3
	Q16ADD_AS_WW(xr15,xr15,xr10,xr9);  //xr15: rnd(ss0*c4+ss4*c4)>>15+rnd(ss2*c2+ss6*c6)>>15
                                         //    :rnd(s0*c4+s4*c4)>>15 + rnd(s2*c2 + s6*c6)>>15
                                         //xr9: rnd(ss0*c4+ss4*c4)>>15 - rnd(ss2*c2+ss6*c6)>>15
                                         //   : rnd(s0*c4+s4*c4)>>15 - rnd(s2*c2 + s6*c6)>>15
	D16MUL_HW(xr10, xr5, xr1, xr1);    //xr10: ss0*c4, xr1: s0*c4
	D16MAC_SS_HW(xr10,xr5,xr11,xr1);   //xr10: ss0*c4-ss4*c4, xr1: s0*c4 - s4*c4
	D16MUL_LW(xr11, xr6, xr3, xr12);    //xr11: ss2*c6, xr12: s2*c6
	D16MAC_SS_LW(xr11,xr5,xr13,xr12);   //xr11: ss2*c6-ss6*c2, xr12: s2*c6-s6*c2
	D32SAR(xr10,xr10,xr1,xr1,15);
	S32SFL(xr0,xr10,xr1,xr10,ptn3);    //xr10: (ss0*c4-ss4*c4)>>15 //    : (s0*c4 - s4*c4)>>15
	D32SAR(xr11,xr11,xr12,xr12,15);      
	S32SFL(xr0,xr11,xr12,xr11,ptn3);    //xr11:(ss2*c6-ss6*c2)>>15
                                         //    :(s2*c6-s6*c2)>>15

	S32LDD(xr12, blk, 0x50);           //xr12: ss5, s5
	S32LDD(xr14, blk, 0x70);           //xr14: ss7, s7
	Q16ADD_AS_WW(xr10,xr10,xr11,xr1);  //xr10: rnd(ss0*c4-ss4*c4)>>15)+rnd(ss2*c6-ss6*c2)>>15
                                         //    : rnd(s0*c4 - s4*c4)>>15 +rnd(s2*c6 - s6*c2)>>15
                                         //xr1 : rnd(ss0*c4-ss4*c4)>>15-rnd(ss2*c6-ss6*c2)>>15
                                         //    : rnd(s0*c4 - s4*c4)>>15-rnd(s2*c6 - s6*c2)>>15

	D16MUL_HW(xr11, xr7, xr2, xr13);   //xr11: ss1*c1, xr13: s1*c1
	D16MAC_AA_LW(xr11,xr7,xr4,xr13);   //xr11: ss1*c1+ss3*c3, xr13: s1*c1+s3*c3
	D16MAC_AA_LW(xr11,xr8,xr12,xr13);  //xr11: ss1*c1+ss3*c3+ss5*c5 //xr13: s1*c1+s3*c3+s5*c5
	D16MAC_AA_HW(xr11,xr8,xr14,xr13);  //xr11: ss1*c1+ss3*c3+ss5*c5+ss7*c7
                                         //xr13: s1*c1+s3*c3+s5*c5+s7*c7
	D16MUL_LW(xr3, xr7, xr2, xr5);    //xr3: ss1*c3, xr5: s1*c3
	D16MAC_SS_HW(xr3,xr8,xr4,xr5);    //xr3: ss1*c3-ss3*c7, xr5: s1*c3-s3*c7
	D16MAC_SS_HW(xr3,xr7,xr12,xr5);   //xr3: ss1*c3-ss3*c7-ss5*c1
                                         //xr5: s1*c3-s3*c7-s5*c1
	D16MAC_SS_LW(xr3,xr8,xr14,xr5);   //xr3: ss1*c3-ss3*c7-ss5*c1-ss7*c5
                                         //xr5: s1*c3-s3*c7-s7*c5
	D32SAR(xr11,xr11,xr13,xr13,15); 
	S32SFL(xr0,xr11,xr13,xr11,ptn3);   //xr11: (ss1*c1+ss3*c3+ss5*c5+ss7*c7)>>15 //    : (s1*c1+s3*c3+s5*c5+s7*c7)>>15
	D32SAR(xr3,xr3,xr5,xr5,15);
	S32SFL(xr0,xr3,xr5,xr3,ptn3);     //xr3: (ss1*c3-ss3*c7-ss5*c1-ss7*c5)>>15
                                         //   : (s1*c3-s3*c7-s7*c5)>>15
	D16MUL_LW(xr5, xr8, xr2, xr13);    //xr5: ss1*c5, xr13:s1*c5
	D16MAC_SS_HW(xr5,xr7,xr4,xr13);    //xr5: ss1*c5-ss3*c1, xr13:s1*c5-s3*c1
	D16MAC_AA_HW(xr5,xr8,xr12,xr13);   //xr5: ss1*c5-ss3*c1+ss5*c7
                                         //   : s1*c5 - s3*c1+ s5*c7
	D16MAC_AA_LW(xr5,xr7,xr14,xr13);   //xr5: ss1*c5-ss3*c1+ss5*c7+ss7*c1
                                         //   : s1*c5 - s3*c1+ s5*c7+ s7*c1
	D16MUL_HW(xr2, xr8, xr2, xr6);    //xr2: ss1*c7, xr6: s1*c7
	D16MAC_SS_LW(xr2,xr8,xr4,xr6);    //xr2: ss1*c7-ss3*c5, xr6: s1*c7-s3*c5
	D16MAC_AA_LW(xr2,xr7,xr12,xr6);   //xr2: ss1*c7-ss3*c5+ss5*c1 //xr6: s1*c7-s3*c5+s5*c1
	D16MAC_SS_HW(xr2,xr7,xr14,xr6);   //xr2: ss1*c7-ss3*c5+ss5*c1-ss7*c3
                                         //xr6: s1*c7-s3*c5+s5*c1-s7*c3
	D32SAR(xr5,xr5,xr13,xr13,15);
	S32SFL(xr0,xr5,xr13,xr5,ptn3);     //xr5: (ss1*c5-ss3*c1+ss5*c7+ss7*c1)>>15 //  :(s1*c5 - s3*c1+ s5*c7+ s7*c1)>>15
	D32SAR(xr2,xr2,xr6,xr6,15);
	S32SFL(xr0,xr2,xr6,xr2,ptn3);     //xr2:(ss1*c7-ss3*c5+ss5*c1-ss7*c3)>>15
                                         //   :(s1*c7-s3*c5+s5*c1-s7*c3)>>15

	S32I2M(xr4, 0x00080008);//round value 8;
	Q16ADD_AS_WW(xr15,xr15,xr11,xr11); //xr15:rnd(ss0*c4+ss4*c4)>>16+rnd(ss2*c2+ss6*c6)>>16+
                                         //     rnd(ss1*c1+ss3*c3+ss5*c5+ss7*c7)>>16
                                         //     rnd(s0*c4+s4*c4)>>16 + rnd(s2*c2 + s6*c6)>>16+
                                         //     rnd(s1*c1+s3*c3+s5*c5+s7*c7)>>16

                                         //xr11:rnd(ss0*c4+ss4*c4)>>16+rnd(ss2*c2+ss6*c6)>>16-
                                         //     rnd(ss1*c1+ss3*c3+ss5*c5+ss7*c7)>>16
                                         //     rnd(s0*c4+s4*c4)>>16 + rnd(s2*c2 + s6*c6)>>16-
                                         //     rnd(s1*c1+s3*c3+s5*c5+s7*c7)>>16
	Q16ADD_AS_WW(xr10,xr10,xr3,xr3);   //xr10:rnd(ss0*c4-ss4*c4)>>16)+rnd(ss2*c6-ss6*c2)>>16+
                                         //     rnd(ss1*c3-ss3*c7-ss5*c1-ss7*c5)>>16
                                         //     rnd(s0*c4 - s4*c4)>>16 +rnd(s2*c6 - s6*c2)>>16+
                                         //     rnd(s1*c3-s3*c7-s7*c5)>>16
                                         //xr3: rnd(ss0*c4-ss4*c4)>>16)+rnd(ss2*c6-ss6*c2)>>16-
                                         //     rnd(ss1*c3-ss3*c7-ss5*c1-ss7*c5)>>16
                                         //     rnd(s0*c4 - s4*c4)>>16 +rnd(s2*c6 - s6*c2)>>16-
                                         //     rnd(s1*c3-s3*c7-s7*c5)>>16
	Q16ADD_AS_WW(xr1,xr1,xr5,xr5);     //xr1: rnd(ss0*c4-ss4*c4)>>16-rnd(ss2*c6-ss6*c2)>>16+
                                         //     rnd(ss1*c5-ss3*c1+ss5*c7+ss7*c1)>>16
                                         //     rnd(s0*c4 - s4*c4)>>16 +rnd(s2*c6 - s6*c2)>>16+
                                         //     rnd(s1*c5 - s3*c1+ s5*c7+ s7*c1)>>16
                                         //xr5: rnd(ss0*c4-ss4*c4)>>16-rnd(ss2*c6-ss6*c2)>>16-
                                         //     rnd(ss1*c5-ss3*c1+ss5*c7+ss7*c1)>>16
                                         //     rnd(s0*c4 - s4*c4)>>16 +rnd(s2*c6 - s6*c2)>>16-
                                         //     rnd(s1*c5 - s3*c1+ s5*c7+ s7*c1)>>16
	Q16ADD_AS_WW(xr9,xr9,xr2,xr2);     //xr9: rnd(ss0*c4+ss4*c4)>>16 - rnd(ss2*c2+ss6*c6)>>16+
                                         //     rnd(ss1*c7-ss3*c5+ss5*c1-ss7*c3)>>16
                                         //     rnd(s0*c4+s4*c4)>>16 - rnd(s2*c2 + s6*c6)>>16+
                                         //     rnd(s1*c7-s3*c5+s5*c1-s7*c3)>>16
                                         //xr2: rnd(ss0*c4+ss4*c4)>>16 - rnd(ss2*c2+ss6*c6)>>16-
                                         //     rnd(ss1*c7-ss3*c5+ss5*c1-ss7*c3)>>16
                                         //     rnd(s0*c4+s4*c4)>>16 - rnd(s2*c2 + s6*c6)>>16-
                                         //     rnd(s1*c7-s3*c5+s5*c1-s7*c3)>>16

	/* Round (+8) and shift (>>4) the eight column outputs, then
	 * store them back into the coefficient block in column order. */
	Q16ACCM_AA(xr15,xr4,xr4,xr10);
	Q16ACCM_AA(xr11,xr4,xr4,xr1);
	Q16ACCM_AA(xr9,xr4,xr4,xr2);
	Q16ACCM_AA(xr5,xr4,xr4,xr3);
	Q16SAR(xr15,xr15,xr10,xr10,4);
	Q16SAR(xr11,xr11,xr1,xr1,4);
	Q16SAR(xr9,xr9,xr2,xr2,4);
	Q16SAR(xr5,xr5,xr3,xr3,4);
	
	S32STD(xr15, blk, 0x00);
	S32STD(xr10, blk, 0x10);
	S32STD(xr1, blk, 0x20);
	S32STD(xr9, blk, 0x30);
	S32STD(xr2, blk, 0x40);
	S32STD(xr5, blk, 0x50);
	S32STD(xr3, blk, 0x60);
	S32STD(xr11, blk, 0x70);
    }

    /* Add the transformed residual to the predictor pixels with
     * unsigned 8-bit saturation, 8 pixels per row. */
    blk = input - 8;
    src -= stride;
    for (i=0; i<8; i++) {
        S32LDIV(xr1, src, stride, 0x0);
	S32LDI(xr3, blk, 0x10);
	S32LDD(xr4, blk, 0x4);
	Q8ACCE_AA(xr4, xr1, xr0, xr3);
	S32LDD(xr2, src, 0x4);
	S32LDD(xr5, blk, 0x8);
	S32LDD(xr6, blk, 0xc);
	Q8ACCE_AA(xr6, xr2, xr0, xr5);
	Q16SAT(xr1, xr4, xr3);
	S32STD(xr1, src, 0x0);
	Q16SAT(xr2, xr6, xr5);
	S32STD(xr2, src, 0x4);
    }
}
Example 3
0
// MODE 3
/*
 * pred8x8_plane_mxu() - H.264 Intra_8x8 chroma PLANE prediction,
 * implemented with Ingenic MXU SIMD intrinsics.  Chroma counterpart of
 * pred16x16_plane_mxu with half the taps (weights 1..4) and scaling
 * b = (17*H+16)>>5, c = (17*V+16)>>5, a = 16*(t7+l7+1).
 *
 * dst : base of the 8x8 destination block; rows are
 *       MB_CHROM_EDGED_WIDTH bytes apart.
 * src : pointer used to reach the left-neighbour column (src - 4 is the
 *       word holding the first left sample).
 * top : pointer to the 8 top-neighbour samples t0..t7; top - 0x1c
 *       addresses the word containing the top-left corner sample (lt).
 *
 * NOTE(review): xr2 fed into the Q8ACCE_AA "src1[0] + src2[8] + 1" sum
 * still holds the reversed top word loaded at the start (t4..t7) --
 * this matches the xr4 usage in the 16x16 version; confirm the register
 * has not been clobbered in between.
 */
static void pred8x8_plane_mxu(uint8_t *dst, uint8_t *src, uint8_t *top){
  unsigned int i;  // row counter for the store loop
  uint8_t *src_top;  // top address
  uint8_t *src_topleft, *src_left;  // left address
  src_top = top;
  src_topleft = src_top - 0x1c;
  src_left = src - 0x4;

  //----- H, LOAD -----
  S32LDD(xr1, src_top, -0x1c);  // xr1 <- src_top[-4];  xr1: lt, 0, 0, 0 ;
  S32LDD(xr3, src_top, 0x0);   // xr3 <- src_top[0] ;  xr3: t3, t2, t1, t0 ;
  S32LDDR(xr2, src_top, 0x4);  // xr2 <- src_top[4] ;  xr2: t4, t5, t6, t7 ;
  S32ALNI(xr1, xr3, xr1, ptn1);//                      xr1: t2, t1, t0, lt ;
  S32I2M(xr8, MUL_12); // xr8: 0x00010002 ;
  S32I2M(xr9, MUL_34); // xr9: 0x00030004 ;
  //----- H, SUM -----
  Q8ADDE_SS(xr3, xr2, xr1, xr4);  // xr3[31:16] <- t4-t2 ;  xr3[15:0] <- t5-t1 ;
                                  // xr4[31:16] <- t6-t0 ;  xr4[15:0] <- t7-lt;

  S32LDD(xr1, src_topleft, 0x0);          // xr1[31:24] <- src_topleft[3] (lt) ;

  D16MUL_WW(xr5, xr8, xr3, xr6);    // xr5 <- 1*(t4-t2) ;  xr6 <- 2*(t5-t1) ;
  D16MAC_AA_WW(xr5, xr9, xr4, xr6); // xr5 <- 1*(t4-t2)+3*(t6-t0) ; xr6 <- 2*(t5-t1)+4*(t7-lt) ;

  S32LDD(xr12, src_left, 0x0);//xr12[31:24] <- src_topleft[stride+3] (l0) ;
  S32LDIV(xr3, src_left, MB_CHROM_EDGED_WIDTH, 0x0); // xr3[31:24] <- src_topleft[2*stride+3] (l1) ;

  D32ADD_AA(xr7, xr5, xr6, xr0); // xr7 <- 1*(t4-t2)+3*(t6-t0)+2*(t5-t1)+4*(t7-lt) ;
  //----- V, LOAD -----
  //  S32LDD(xr1, src_topleft, 0x0);          // xr1[31:24] <- src_topleft[3] (lt) ;
  //  S32LDIV(xr12, src_topleft, stride, 0x0);//xr12[31:24] <- src_topleft[stride+3] (l0) ;
  //  S32LDIV(xr3, src_topleft, stride, 0x0); // xr3[31:24] <- src_topleft[2*stride+3] (l1) ;
  S32LDIV(xr4, src_left, MB_CHROM_EDGED_WIDTH, 0x0); // xr4[31:24] <- src_topleft[3*stride+3] (l2) ;
  S32SFL(xr5, xr12, xr1, xr0, ptn2);      // xr5[31:16] <- l0, lt ;
  S32SFL(xr6, xr4, xr3, xr0, ptn2);       // xr6[31:16] <- l2, l1 ;
  S32SFL(xr10, xr6, xr5, xr0, ptn3);      // xr10[31:0] <- l2, l1, l0, lt ;
  src_left += MB_CHROM_EDGED_WIDTH;       // skip row 3: l3 is not used by the gradient
  S32LDIV(xr4, src_left, MB_CHROM_EDGED_WIDTH, 0x0);
  S32LDIV(xr3, src_left, MB_CHROM_EDGED_WIDTH, 0x0);
  S32LDIV(xr12, src_left, MB_CHROM_EDGED_WIDTH, 0x0);
  S32LDIV(xr1, src_left, MB_CHROM_EDGED_WIDTH, 0x0);
  S32SFL(xr6, xr4, xr3, xr0, ptn2);
  S32SFL(xr5, xr12, xr1, xr0, ptn2);
  S32SFL(xr11, xr6, xr5, xr0, ptn3); // xr11[31:0] <- l4, l5, l6, l7 ;
  //----- V, SUM -----
  Q8ADDE_SS(xr3, xr11, xr10, xr4);

  S32LUI(xr1, 0x1, ptn0); // xr1[31:0]: 0x00000001 ;

  D16MUL_WW(xr5, xr8, xr3, xr6);
  D16MAC_AA_WW(xr5, xr9, xr4, xr6);

  D32ADD_AA(xr13, xr5, xr6, xr0); // xr13 <- 1*(l4-l2)+3*(l6-l0)+2*(l5-l1)+4*(l7-lt) ;

  //----- P, CAL ----- useful XRs:xr13, xr7, xr2, xr11;
  //  S32LUI(xr1, 0x1, ptn0); // xr1[31:0]: 0x00000001 ;
  D32SLL(xr5, xr1, xr1, xr6, 0x4); // xr5: 0x00000010;  xr6: 0x00000010; 
  D32SLL(xr3, xr13, xr7, xr4, 0x4);
  D32ACC_AA(xr5, xr13, xr3, xr0); // xr5: 17*V+16
  D32ACC_AA(xr6, xr7, xr4, xr0);  // xr6: 17*H+16

  Q8ACCE_AA(xr0, xr2, xr11, xr1);  // xr1[15:0]: src1[0] + src2[8] + 1

  D32SLR(xr8, xr5, xr6, xr9, 0x5); // xr8: (17*V+16) >> 5 ;  xr9: (17*H+16) >> 5 ;

  //  Q8ACCE_AA(xr0, xr2, xr11, xr1);  // xr1[15:0]: src1[0] + src2[8] + 1
  D32SLL(xr2, xr1, xr0, xr0, 0x4); // xr2[15:0]: 16*(src1[0] + src2[16] + 1)

  Q16ADD_AA_WW(xr7, xr8, xr9, xr0); // xr7: V+H
  S32I2M(xr4, MUX_H16); // xr4: 0x0000ffff ;
  D32SLL(xr12, xr7, xr0, xr0, 0x1);
  D32ADD_AA(xr5, xr12, xr7, xr0);   // xr5: 3*(V+H)
  //  S32LUI(xr12, 0x3, ptn0); // xr12[31:0]: 0x00000003 ;
  //  D16MUL_WW(xr0, xr7, xr12, xr5);   // xr5: 3*(V+H)

  //  S32I2M(xr4, MUX_H16); // xr4: 0x0000ffff ;

  Q16ADD_SS_WW(xr6, xr2, xr5, xr0); // xr6: 16*(src1[0] + src2[16] + 1) - 3*(V+H)

  //  S32I2M(xr4, MUX_H16); // xr4: 0x0000ffff ;

  S32SFL(xr0, xr8, xr8, xr14, ptn3);// xr14[31:16]: V ;  xr14[15:0]: V ;
  S32SFL(xr0, xr6, xr6, xr5, ptn3); // xr5[31:16]: a ;  xr5[15:0]: a ;
  D32SLL(xr7, xr9, xr0, xr0, 0x1);
  S32SFL(xr0, xr7, xr7, xr8, ptn3); // xr8[31:16]: 2H ;  xr8[15:0]: 2H ;

  //  S32I2M(xr4, MUX_H16); // xr4: 0x0000ffff ;
  S32AND(xr9, xr4, xr9);            // keep only the low halfword (H) of xr9

  Q16ADD_AA_WW(xr15, xr5, xr9, xr0);   // xr15[31:16]: a ;  xr15[15:0]: a + H ;

  dst -= MB_CHROM_EDGED_WIDTH;
  //----- SRC, STORE -----
  /* Each iteration: build the 8 pixels of one row as four halfword
   * pairs stepping by 2H, arithmetic-shift by 5, saturate to unsigned
   * 8 bits and store 8 bytes; then advance xr15 by V for the next row. */
  for (i=0; i<8; i++) {
    Q16ADD_AA_WW(xr1, xr15, xr8, xr0);
    Q16ADD_AA_WW(xr2, xr1, xr8, xr0);
    Q16SAR(xr9, xr15, xr1, xr1, 0x5);
    Q16ADD_AA_WW(xr3, xr2, xr8, xr0);

    Q16SAT(xr10, xr9, xr1);
    //    Q16SAR(xr9, xr15, xr1, xr1, 0x5);
    Q16SAR(xr2, xr2, xr3, xr3, 0x5);

    //    Q16SAT(xr10, xr9, xr1);
    Q16SAT(xr11, xr2, xr3);

    S32SDIVR(xr10, dst, MB_CHROM_EDGED_WIDTH, 0x0);

    Q16ADD_AA_WW(xr15, xr15, xr14, xr0);  // advance row accumulator by V

    S32STDR(xr11, dst, 0x4);
  }

}