// MODE 5
static void pred4x4_vertical_right_mxu(uint8_t *dst, uint8_t *src, uint8_t *topright, uint8_t *top, uint8_t *topleft)
{
    uint8_t *src_left;    // left address
    src_left = src - 0x4;

    // load top
    S32LDD(xr8, top, 0x0);               // xr8: t3, t2, t1, t0 ; high -> low, [31->0];
    // load left
    S32LDD(xr1, topleft, -0x4);          // xr1[31:24] <- src_topleft[3] (lt) ;
    S32LDD(xr2, src_left, 0x0);          // xr2[31:24] <- src_topleft[stride+3] (l0) ;
    S32LDIV(xr3, src_left, MB_LUMA_EDGED_WIDTH, 0x0);  // xr3[31:24] <- src_topleft[2*stride+3] (l1) ;
    S32LDIV(xr4, src_left, MB_LUMA_EDGED_WIDTH, 0x0);  // xr4[31:24] <- src_topleft[3*stride+3] (l2) ;
    S32SFL(xr5, xr2, xr1, xr0, ptn2);    // xr5[31:16] <- l0, lt ;
    S32SFL(xr6, xr4, xr3, xr0, ptn2);    // xr6[31:16] <- l2, l1 ;
    S32SFL(xr7, xr3, xr2, xr0, ptn2);    // xr7[31:16] <- l1, l0 ;
    // alni
    S32ALNI(xr3, xr8, xr1, ptn1);        // xr3: t2, t1, t0, lt ;
    S32ALNI(xr4, xr3, xr2, ptn1);        // xr4: t1, t0, lt, l0 ;
    // cal
    Q8AVGR(xr1, xr3, xr8);
    Q8AVG(xr9, xr4, xr8);
    Q8AVGR(xr2, xr9, xr3);
    Q8AVG(xr10, xr5, xr6);
    Q8AVGR(xr11, xr10, xr7);             // xr11: src[0,3], src[0,2], ~, ~ ;
    // alni
    S32ALNI(xr12, xr2, xr11, ptn1);
    D32SLL(xr13, xr11, xr0, xr0, 0x8);
    S32ALNI(xr14, xr1, xr13, ptn1);
    // store
    S32STD(xr1, dst, 0x0);
    S32SDIV(xr2, dst, MB_LUMA_EDGED_WIDTH, 0x0);
    S32SDIV(xr14, dst, MB_LUMA_EDGED_WIDTH, 0x0);
    S32SDIV(xr12, dst, MB_LUMA_EDGED_WIDTH, 0x0);
}
// MODE 2
static void pred4x4_dc_mxu(uint8_t *dst, uint8_t *src, uint8_t *topright, uint8_t *top, uint8_t *topleft)
{
    uint8_t *src_left;    // left address
    src_left = src - 0x4;

    // load
    S32LDD(xr8, top, 0x0);               // xr8 <- src_top[0]; xr8: t3, t2, t1, t0 ; high -> low, [31->0];
    S32LDD(xr1, src_left, 0x0);          // xr1[31:24] <- src_left[3] (l0) ;
    S32LDIV(xr2, src_left, MB_LUMA_EDGED_WIDTH, 0x0);  // xr2[31:24] <- src_left[stride+3] (l1) ;
    S32LDIV(xr3, src_left, MB_LUMA_EDGED_WIDTH, 0x0);  // xr3[31:24] <- src_left[2*stride+3] (l2) ;
    S32LDIV(xr4, src_left, MB_LUMA_EDGED_WIDTH, 0x0);  // xr4[31:24] <- src_left[3*stride+3] (l3) ;
    S32SFL(xr5, xr2, xr1, xr0, ptn2);    // xr5[31:16] <- l1, l0 ;
    S32SFL(xr6, xr4, xr3, xr0, ptn2);    // xr6[31:16] <- l3, l2 ;
    S32SFL(xr1, xr6, xr5, xr0, ptn3);    // xr1[31: 0] <- l3, l2, l1, l0 ;
    // avg
    D8SUMC(xr2, xr1, xr8);
    Q16ADD_AA_XW(xr3, xr2, xr2, xr0);
    D32SLR(xr4, xr3, xr0, xr0, 0x3);
    S32SFL(xr6, xr4, xr4, xr0, ptn0);
    S32SFL(xr0, xr6, xr6, xr7, ptn3);
    // store
    S32STD(xr7, dst, 0x0);
    S32SDIV(xr7, dst, MB_LUMA_EDGED_WIDTH, 0x0);
    S32SDIV(xr7, dst, MB_LUMA_EDGED_WIDTH, 0x0);
    S32SDIV(xr7, dst, MB_LUMA_EDGED_WIDTH, 0x0);
}
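/*
 * Scalar reference for the 4x4 DC mode above (illustrative sketch only, not
 * the shipping path). Each D8SUMC sums four unsigned bytes plus a rounding
 * bias of 2, so the pair of sums followed by the >>3 realizes the usual
 * (l0+..+l3 + t0+..+t3 + 4) >> 3. The hypothetical helper below assumes the
 * same edged-buffer layout as the MXU loads, with the left column at src[-1].
 */
static void pred4x4_dc_ref(uint8_t *dst, uint8_t *src, uint8_t *top)
{
    uint8_t *left = src - 4;      /* left[i*stride + 3] is the l(i) pixel */
    unsigned int sum = 4, i, j;
    for (i = 0; i < 4; i++)
        sum += top[i] + left[i * MB_LUMA_EDGED_WIDTH + 3];
    for (i = 0; i < 4; i++)
        for (j = 0; j < 4; j++)
            dst[i * MB_LUMA_EDGED_WIDTH + j] = sum >> 3;
}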
static void pred16x16_top_dc_mxu(uint8_t *dst, uint8_t *src, uint8_t *top)
{
    uint8_t *src_top;    // top address
    unsigned int i;
    src_top = top;

    // load top
    S32LDD(xr1, src_top, 0x0);
    S32LDD(xr2, src_top, 0x4);
    S32LDD(xr3, src_top, 0x8);
    S32LDD(xr4, src_top, 0xc);
    // AVG
    D8SUMC(xr1, xr1, xr2);
    D8SUMC(xr2, xr3, xr4);
    Q16ADD_AA_WW(xr5, xr1, xr2, xr0);
    Q16ADD_AA_XW(xr7, xr5, xr5, xr0);
    D32SLR(xr8, xr7, xr0, xr0, 0x4);
    S32SFL(xr9, xr8, xr8, xr0, ptn0);
    S32SFL(xr0, xr9, xr9, xr1, ptn3);
    // store
    dst -= MB_LUMA_EDGED_WIDTH;
    for (i = 0; i < 16; i++) {
        S32SDIV(xr1, dst, MB_LUMA_EDGED_WIDTH, 0x0);
        S32STD(xr1, dst, 0x4);
        S32STD(xr1, dst, 0x8);
        S32STD(xr1, dst, 0xc);
    }
}
static void pred8x8_top_dc_mxu(uint8_t *dst, uint8_t *src, uint8_t *top)
{
    uint8_t *src_top;    // top address
    src_top = top;

    // load top
    S32LDD(xr7, src_top, 0x0);
    S32LDD(xr8, src_top, 0x4);
    // AVG
    D8SUMC(xr1, xr7, xr8);
    D32SLR(xr2, xr1, xr0, xr0, 0x2);
    S32SFL(xr3, xr2, xr2, xr4, ptn0);
    S32SFL(xr0, xr3, xr3, xr5, ptn3);
    S32SFL(xr0, xr4, xr4, xr6, ptn3);
    // store (8 rows, unrolled)
    S32STD(xr5, dst, 0x0);
    S32STD(xr6, dst, 0x4);
    S32SDIV(xr5, dst, MB_CHROM_EDGED_WIDTH, 0x0);
    S32STD(xr6, dst, 0x4);
    S32SDIV(xr5, dst, MB_CHROM_EDGED_WIDTH, 0x0);
    S32STD(xr6, dst, 0x4);
    S32SDIV(xr5, dst, MB_CHROM_EDGED_WIDTH, 0x0);
    S32STD(xr6, dst, 0x4);
    S32SDIV(xr5, dst, MB_CHROM_EDGED_WIDTH, 0x0);
    S32STD(xr6, dst, 0x4);
    S32SDIV(xr5, dst, MB_CHROM_EDGED_WIDTH, 0x0);
    S32STD(xr6, dst, 0x4);
    S32SDIV(xr5, dst, MB_CHROM_EDGED_WIDTH, 0x0);
    S32STD(xr6, dst, 0x4);
    S32SDIV(xr5, dst, MB_CHROM_EDGED_WIDTH, 0x0);
    S32STD(xr6, dst, 0x4);
}
// MODE 2
static void pred16x16_dc_mxu(uint8_t *dst, uint8_t *src, uint8_t *top)
{
    uint8_t *src_top;     // top address
    uint8_t *src_left;    // left address
    unsigned int i;
    src_top = top;
    src_left = src - 0x4;

    // load top
    S32LDD(xr11, src_top, 0x0);
    S32LDD(xr12, src_top, 0x4);
    S32LDD(xr13, src_top, 0x8);
    S32LDD(xr14, src_top, 0xc);
    // load left (4 x 7 = 28 instructions)
    S32LDD(xr1, src_left, 0x0);          // xr1[31:24] <- src_left[3] (l0) ;
    S32LDIV(xr2, src_left, MB_LUMA_EDGED_WIDTH, 0x0);  // xr2[31:24] <- src_left[stride+3] (l1) ;
    S32LDIV(xr3, src_left, MB_LUMA_EDGED_WIDTH, 0x0);  // xr3[31:24] <- src_left[2*stride+3] (l2) ;
    S32LDIV(xr4, src_left, MB_LUMA_EDGED_WIDTH, 0x0);  // xr4[31:24] <- src_left[3*stride+3] (l3) ;
    S32SFL(xr5, xr2, xr1, xr0, ptn2);    // xr5[31:16] <- l1, l0 ;
    S32SFL(xr6, xr4, xr3, xr0, ptn2);    // xr6[31:16] <- l3, l2 ;
    S32SFL(xr7, xr6, xr5, xr0, ptn3);    // xr7[31: 0] <- l3, l2, l1, l0 ;

    S32LDIV(xr1, src_left, MB_LUMA_EDGED_WIDTH, 0x0);
    S32LDIV(xr2, src_left, MB_LUMA_EDGED_WIDTH, 0x0);
    S32LDIV(xr3, src_left, MB_LUMA_EDGED_WIDTH, 0x0);
    S32LDIV(xr4, src_left, MB_LUMA_EDGED_WIDTH, 0x0);
    S32SFL(xr5, xr2, xr1, xr0, ptn2);
    S32SFL(xr6, xr4, xr3, xr0, ptn2);
    S32SFL(xr8, xr6, xr5, xr0, ptn3);    // xr8[31:0] <- l7, l6, l5, l4 ;

    S32LDIV(xr1, src_left, MB_LUMA_EDGED_WIDTH, 0x0);
    S32LDIV(xr2, src_left, MB_LUMA_EDGED_WIDTH, 0x0);
    S32LDIV(xr3, src_left, MB_LUMA_EDGED_WIDTH, 0x0);
    S32LDIV(xr4, src_left, MB_LUMA_EDGED_WIDTH, 0x0);
    S32SFL(xr5, xr2, xr1, xr0, ptn2);
    S32SFL(xr6, xr4, xr3, xr0, ptn2);
    S32SFL(xr9, xr6, xr5, xr0, ptn3);    // xr9[31:0] <- l11, l10, l9, l8 ;

    S32LDIV(xr1, src_left, MB_LUMA_EDGED_WIDTH, 0x0);
    S32LDIV(xr2, src_left, MB_LUMA_EDGED_WIDTH, 0x0);
    S32LDIV(xr3, src_left, MB_LUMA_EDGED_WIDTH, 0x0);
    S32LDIV(xr4, src_left, MB_LUMA_EDGED_WIDTH, 0x0);
    S32SFL(xr5, xr2, xr1, xr0, ptn2);
    S32SFL(xr6, xr4, xr3, xr0, ptn2);
    S32SFL(xr10, xr6, xr5, xr0, ptn3);   // xr10[31:0] <- l15, l14, l13, l12 ;

    // AVG
    D8SUMC(xr1, xr11, xr12);
    D8SUMC(xr2, xr13, xr14);
    D8SUMC(xr3, xr7, xr8);
    D8SUMC(xr4, xr9, xr10);
    Q16ADD_AA_WW(xr5, xr1, xr2, xr0);
    Q16ACC_AA(xr5, xr3, xr4, xr0);
    Q16ADD_AA_XW(xr7, xr5, xr5, xr0);
    D32SLR(xr8, xr7, xr0, xr0, 0x5);
    S32SFL(xr9, xr8, xr8, xr0, ptn0);
    S32SFL(xr0, xr9, xr9, xr1, ptn3);
    // store
    dst -= MB_LUMA_EDGED_WIDTH;
    for (i = 0; i < 16; i++) {
        S32SDIV(xr1, dst, MB_LUMA_EDGED_WIDTH, 0x0);
        S32STD(xr1, dst, 0x4);
        S32STD(xr1, dst, 0x8);
        S32STD(xr1, dst, 0xc);
    }
}
// MODE 3
static void pred4x4_down_left_mxu(uint8_t *dst, uint8_t *src, uint8_t *topright, uint8_t *top, uint8_t *topleft)
{
    // load
    S32LDD(xr1, top, 0x0);         // xr1  <- t3, t2, t1, t0 ;
    S32LDD(xr2, topright, 0x0);    // xr2  <- t7, t6, t5, t4 ;
    S32LDDR(xr15, topright, 0x0);  // xr15 <- t4, t5, t6, t7 ;
    S32ALNI(xr3, xr2, xr1, ptn2);  // xr3: t5, t4, t3, t2 ;
    Q8AVG(xr4, xr1, xr3);
    S32ALNI(xr5, xr2, xr1, ptn3);  // xr5: t4, t3, t2, t1 ;
    Q8AVGR(xr6, xr4, xr5);
    S32ALNI(xr7, xr2, xr1, ptn1);  // xr7: t6, t5, t4, t3 ;
    S32ALNI(xr8, xr15, xr2, ptn3); // xr8: t7, t7, t6, t5 ;
    Q8AVG(xr9, xr7, xr8);
    Q8AVGR(xr10, xr9, xr2);
    D32SLL(xr11, xr6, xr0, xr0, 0x8);
    S32ALNI(xr12, xr10, xr11, ptn1);
    S32ALNI(xr13, xr10, xr11, ptn2);
    // store
    S32STD(xr6, dst, 0x0);
    S32SDIV(xr13, dst, MB_LUMA_EDGED_WIDTH, 0x0);
    S32SDIV(xr12, dst, MB_LUMA_EDGED_WIDTH, 0x0);
    S32SDIV(xr10, dst, MB_LUMA_EDGED_WIDTH, 0x0);
}
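/*
 * Scalar reference for down-left (mode 3), as a cross-check of the ALNI
 * shuffles above (sketch only): with t0..t7 taken from top/topright and t7
 * repeated past the edge, dst[y][x] = (t[x+y] + 2*t[x+y+1] + t[x+y+2] + 2) >> 2.
 * The Q8AVG/Q8AVGR pair computes exactly this: avgr(avg(a, c), b) equals
 * (a + 2*b + c + 2) >> 2 for all byte inputs.
 */
static void pred4x4_down_left_ref(uint8_t *dst, const uint8_t *top, const uint8_t *topright)
{
    uint8_t t[9];
    int x, y, i;
    for (i = 0; i < 4; i++) {
        t[i] = top[i];
        t[i + 4] = topright[i];
    }
    t[8] = topright[3];    /* t7 repeated past the edge */
    for (y = 0; y < 4; y++)
        for (x = 0; x < 4; x++)
            dst[y * MB_LUMA_EDGED_WIDTH + x] =
                (t[x + y] + 2 * t[x + y + 1] + t[x + y + 2] + 2) >> 2;
}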
// MODE 2
static void pred8x8_dc_mxu(uint8_t *dst, uint8_t *src, uint8_t *top)
{
    uint8_t *src_left;    // left address
    src_left = src - 0x4;

    // load top
    S32LDD(xr11, top, 0x0);
    S32LDD(xr12, top, 0x4);
    // load left (4 x 7 = 28 instructions)
    S32LDD(xr1, src_left, 0x0);          // xr1[31:24] <- src_left[3] (l0) ;
    S32LDIV(xr2, src_left, MB_CHROM_EDGED_WIDTH, 0x0);  // xr2[31:24] <- src_left[stride+3] (l1) ;
    S32LDIV(xr3, src_left, MB_CHROM_EDGED_WIDTH, 0x0);  // xr3[31:24] <- src_left[2*stride+3] (l2) ;
    S32LDIV(xr4, src_left, MB_CHROM_EDGED_WIDTH, 0x0);  // xr4[31:24] <- src_left[3*stride+3] (l3) ;
    S32SFL(xr5, xr2, xr1, xr0, ptn2);    // xr5[31:16] <- l1, l0 ;
    S32SFL(xr6, xr4, xr3, xr0, ptn2);    // xr6[31:16] <- l3, l2 ;
    S32SFL(xr7, xr6, xr5, xr0, ptn3);    // xr7[31: 0] <- l3, l2, l1, l0 ;

    S32LDIV(xr1, src_left, MB_CHROM_EDGED_WIDTH, 0x0);
    S32LDIV(xr2, src_left, MB_CHROM_EDGED_WIDTH, 0x0);
    S32LDIV(xr3, src_left, MB_CHROM_EDGED_WIDTH, 0x0);
    S32LDIV(xr4, src_left, MB_CHROM_EDGED_WIDTH, 0x0);
    S32SFL(xr5, xr2, xr1, xr0, ptn2);
    S32SFL(xr6, xr4, xr3, xr0, ptn2);
    S32SFL(xr8, xr6, xr5, xr0, ptn3);    // xr8[31:0] <- l7, l6, l5, l4 ;

    // AVG
    D8SUMC(xr1, xr11, xr7);
    Q16ADD_AA_XW(xr2, xr1, xr1, xr0);
    D32SLR(xr3, xr2, xr0, xr0, 0x3);
    S32SFL(xr4, xr3, xr3, xr0, ptn0);
    S32SFL(xr0, xr4, xr4, xr5, ptn3);
    D8SUMC(xr1, xr12, xr8);
    Q16ADD_AA_XW(xr2, xr1, xr1, xr0);
    D32SLR(xr3, xr2, xr0, xr0, 0x3);
    S32SFL(xr4, xr3, xr3, xr0, ptn0);
    S32SFL(xr0, xr4, xr4, xr6, ptn3);
    D32SLR(xr2, xr1, xr0, xr0, 0x2);
    S32SFL(xr3, xr2, xr2, xr4, ptn0);
    S32SFL(xr0, xr3, xr3, xr8, ptn3);
    S32SFL(xr0, xr4, xr4, xr9, ptn3);
    // store
    S32STD(xr5, dst, 0x0);
    S32STD(xr8, dst, 0x4);
    S32SDIV(xr5, dst, MB_CHROM_EDGED_WIDTH, 0x0);
    S32STD(xr8, dst, 0x4);
    S32SDIV(xr5, dst, MB_CHROM_EDGED_WIDTH, 0x0);
    S32STD(xr8, dst, 0x4);
    S32SDIV(xr5, dst, MB_CHROM_EDGED_WIDTH, 0x0);
    S32STD(xr8, dst, 0x4);
    S32SDIV(xr9, dst, MB_CHROM_EDGED_WIDTH, 0x0);
    S32STD(xr6, dst, 0x4);
    S32SDIV(xr9, dst, MB_CHROM_EDGED_WIDTH, 0x0);
    S32STD(xr6, dst, 0x4);
    S32SDIV(xr9, dst, MB_CHROM_EDGED_WIDTH, 0x0);
    S32STD(xr6, dst, 0x4);
    S32SDIV(xr9, dst, MB_CHROM_EDGED_WIDTH, 0x0);
    S32STD(xr6, dst, 0x4);
}
//------------- chroma8x8 --------------
// MODE 0
static void pred8x8_vertical_mxu(uint8_t *dst, uint8_t *src, uint8_t *top)
{
    unsigned int i;
    // load
    S32LDD(xr1, top, 0x0);
    S32LDD(xr2, top, 0x4);
    // store
    dst -= MB_CHROM_EDGED_WIDTH;
    for (i = 0; i < 8; i++) {
        S32SDIV(xr1, dst, MB_CHROM_EDGED_WIDTH, 0x0);
        S32STD(xr2, dst, 0x4);
    }
}
static inline void ifft2(FFTComplex_fix *buf)
{
    // 2-point butterfly: buf[0] += buf[1], buf[1] = old buf[0] - buf[1],
    // done per component (re at offsets 0/8, im at 4/12).
    S32LDD(xr1, buf, 0);
    S32LDD(xr3, buf, 8);
    S32LDD(xr2, buf, 4);
    S32LDD(xr4, buf, 12);
    D32ADD_AS(xr5, xr1, xr3, xr7);   // xr5 = re0+re1, xr7 = re0-re1
    D32ADD_AS(xr6, xr2, xr4, xr8);   // xr6 = im0+im1, xr8 = im0-im1
    S32STD(xr5, buf, 0);
    S32STD(xr7, buf, 8);
    S32STD(xr6, buf, 4);
    S32STD(xr8, buf, 12);
}
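/*
 * Scalar equivalent of ifft2() above (sketch; FFTComplex_fix/FFTSample_fix
 * as declared elsewhere in this file): a 2-point DFT, X0 = x0 + x1,
 * X1 = x0 - x1, per component. Each D32ADD_AS yields the sum and the
 * difference of one component pair in a single instruction.
 */
static inline void ifft2_ref(FFTComplex_fix *z)
{
    FFTSample_fix re = z[0].re, im = z[0].im;
    z[0].re = re + z[1].re;
    z[0].im = im + z[1].im;
    z[1].re = re - z[1].re;
    z[1].im = im - z[1].im;
}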
static void MC_put_o_8_c(uint8_t *dest, const uint8_t *ref, const int stride, int height)
{
    uint32_t ref_aln, ref_rs;
    ref_aln = (uint32_t)(ref - stride) & 0xfffffffc;  // word-aligned load address
    ref_rs  = 4 - ((uint32_t)(ref - stride) & 3);     // realignment shift for S32ALN
    dest -= stride;
    do {
        S32LDIV(xr1, ref_aln, stride, 0x0);
        S32LDD(xr2, ref_aln, 0x4);
        S32LDD(xr4, ref_aln, 0x8);
        S32ALN(xr3, xr2, xr1, ref_rs);
        S32ALN(xr5, xr4, xr2, ref_rs);
        S32SDIV(xr3, dest, stride, 0x0);
        S32STD(xr5, dest, 0x4);
    } while (--height);
}
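/*
 * What the alignment dance above implements, in plain C: a copy of one
 * 8-pixel row per line from a possibly unaligned source. ref_aln is ref
 * rounded down to a word boundary; ref_rs is the byte count S32ALN uses to
 * splice the three aligned loads back into two correctly aligned words.
 * Hypothetical scalar reference with the same signature:
 */
static void MC_put_o_8_ref(uint8_t *dest, const uint8_t *ref, const int stride, int height)
{
    do {
        int i;
        for (i = 0; i < 8; i++)
            dest[i] = ref[i];   /* byte copy: no alignment requirement */
        ref += stride;
        dest += stride;
    } while (--height);
}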
//---------- luma16x16 ----------
// MODE 0
static void pred16x16_vertical_mxu(uint8_t *dst, uint8_t *src, uint8_t *top)
{
    uint8_t *src_top;    // top address
    unsigned int i;
    src_top = top;
    // load
    S32LDD(xr1, src_top, 0x0);
    S32LDD(xr2, src_top, 0x4);
    S32LDD(xr3, src_top, 0x8);
    S32LDD(xr4, src_top, 0xc);
    // store
    dst -= MB_LUMA_EDGED_WIDTH;
    for (i = 0; i < 16; i++) {
        S32SDIV(xr1, dst, MB_LUMA_EDGED_WIDTH, 0x0);
        S32STD(xr2, dst, 0x4);
        S32STD(xr3, dst, 0x8);
        S32STD(xr4, dst, 0xc);
    }
}
// luma4x4
// MODE 0
static void pred4x4_vertical_mxu(uint8_t *dst, uint8_t *src, uint8_t *topright, uint8_t *top, uint8_t *topleft)
{
    // load
    S32LDD(xr1, top, 0x0);
    // store
    S32STD(xr1, dst, 0x0);
    S32SDIV(xr1, dst, MB_LUMA_EDGED_WIDTH, 0x0);
    S32SDIV(xr1, dst, MB_LUMA_EDGED_WIDTH, 0x0);
    S32SDIV(xr1, dst, MB_LUMA_EDGED_WIDTH, 0x0);
}
// MODE 4
static void pred4x4_down_right_mxu(uint8_t *dst, uint8_t *src, uint8_t *topright, uint8_t *top, uint8_t *topleft)
{
    uint8_t *src_left;    // left address
    src_left = src - 0x4;

    // load top (byte-reversed)
    S32LDDR(xr8, top, 0x0);       // xr8: t0, t1, t2, t3 ; high -> low, [31->0];
    S32LDDR(xr9, topleft, -0x4);  // xr9[7:0]: lt ;
    // load left
    S32LDD(xr7, topleft, -0x4);   // xr7[31:24]: lt ;
    S32LDD(xr1, src_left, 0x0);   // xr1[31:24] <- src_left[3] (l0) ;
    S32LDIV(xr2, src_left, MB_LUMA_EDGED_WIDTH, 0x0);  // xr2[31:24] <- src_left[stride+3] (l1) ;
    S32LDIV(xr3, src_left, MB_LUMA_EDGED_WIDTH, 0x0);  // xr3[31:24] <- src_left[2*stride+3] (l2) ;
    S32LDIV(xr4, src_left, MB_LUMA_EDGED_WIDTH, 0x0);  // xr4[31:24] <- src_left[3*stride+3] (l3) ;
    S32SFL(xr5, xr2, xr1, xr0, ptn2);  // xr5[31:16] <- l1, l0 ;
    S32SFL(xr6, xr4, xr3, xr0, ptn2);  // xr6[31:16] <- l3, l2 ;
    S32SFL(xr1, xr6, xr5, xr0, ptn3);  // xr1[31: 0] <- l3, l2, l1, l0 ;
    // alni
    S32ALNI(xr10, xr9, xr8, ptn3);   // xr10: lt, t0, t1, t2 ;
    S32ALNI(xr11, xr1, xr7, ptn1);   // xr11: l2, l1, l0, lt ;
    S32ALNI(xr12, xr11, xr8, ptn2);  // xr12: l0, lt, t0, t1 ;
    S32ALNI(xr13, xr1, xr10, ptn2);  // xr13: l1, l0, lt, t0 ;
    // cal
    Q8AVG(xr3, xr1, xr13);
    Q8AVGR(xr4, xr3, xr11);  // xr4: src[0,3], src[0,2]/src[1,3], src[0,1]/src[1,2]/src[2,3],
                             //      src[0,0]/src[1,1]/src[2,2]/src[3,3] ;
    Q8AVG(xr5, xr8, xr12);
    Q8AVGR(xr6, xr5, xr10);  // xr6: src[0,0]/src[1,1]/src[2,2]/src[3,3],
                             //      src[1,0]/src[2,1]/src[3,2], src[2,0]/src[3,1], src[3,0] ;
    // alni for store
    D32SLL(xr7, xr6, xr0, xr0, 0x8);  // xr7: src[1,0]/src[2,1]/src[3,2], src[2,0]/src[3,1], src[3,0] ;
    S32ALNI(xr8, xr4, xr7, ptn1);
    S32ALNI(xr9, xr4, xr7, ptn2);
    // store
    S32STDR(xr6, dst, 0x0);
    S32SDIVR(xr9, dst, MB_LUMA_EDGED_WIDTH, 0x0);
    S32SDIVR(xr8, dst, MB_LUMA_EDGED_WIDTH, 0x0);
    S32SDIVR(xr4, dst, MB_LUMA_EDGED_WIDTH, 0x0);
}
// MODE 7
static void pred4x4_vertical_left_mxu(uint8_t *dst, uint8_t *src, uint8_t *topright, uint8_t *top, uint8_t *topleft)
{
    // load
    S32LDD(xr1, top, 0x0);         // xr1 <- t3, t2, t1, t0 ;
    S32LDD(xr2, topright, 0x0);    // xr2 <- t7, t6, t5, t4 ;
    S32ALNI(xr3, xr2, xr1, ptn3);  // xr3: t4, t3, t2, t1 ;
    S32ALNI(xr4, xr2, xr1, ptn2);  // xr4: t5, t4, t3, t2 ;
    Q8AVGR(xr11, xr1, xr3);
    Q8AVGR(xr5, xr4, xr3);
    Q8AVG(xr7, xr1, xr4);
    Q8AVGR(xr8, xr7, xr3);
    S32ALNI(xr6, xr2, xr1, ptn1);  // xr6: t6, t5, t4, t3 ;
    Q8AVG(xr9, xr3, xr6);
    Q8AVGR(xr10, xr9, xr4);
    // store
    S32STD(xr11, dst, 0x0);
    S32SDIV(xr8, dst, MB_LUMA_EDGED_WIDTH, 0x0);
    S32SDIV(xr5, dst, MB_LUMA_EDGED_WIDTH, 0x0);
    S32SDIV(xr10, dst, MB_LUMA_EDGED_WIDTH, 0x0);
}
static void pred4x4_top_dc_mxu(uint8_t *dst, uint8_t *src, uint8_t *topright, uint8_t *top, uint8_t *topleft)
{
    // load
    S32LDD(xr1, top, 0x0);    // xr1 <- t3, t2, t1, t0 ;
    // avg
    D8SUMC(xr2, xr0, xr1);
    D32SLR(xr3, xr2, xr0, xr0, 0x2);
    S32SFL(xr0, xr3, xr3, xr4, ptn0);
    S32SFL(xr0, xr4, xr4, xr7, ptn3);
    // store
    S32STD(xr7, dst, 0x0);
    S32SDIV(xr7, dst, MB_LUMA_EDGED_WIDTH, 0x0);
    S32SDIV(xr7, dst, MB_LUMA_EDGED_WIDTH, 0x0);
    S32SDIV(xr7, dst, MB_LUMA_EDGED_WIDTH, 0x0);
}
// MODE 6
static void pred4x4_horizontal_down_mxu(uint8_t *dst, uint8_t *src, uint8_t *topright, uint8_t *top, uint8_t *topleft)
{
    uint8_t *src_left;    // left address
    src_left = src - 0x4;

    // load TOP
    S32LDDR(xr8, top, 0x0);          // xr8[31:0]: t0, t1, t2, t3 ;
    S32LDDR(xr15, topleft, -0x4);    // xr15[7:0]: lt ;
    S32LDD(xr9, topleft, -0x4);      // xr9[31:24]: lt ;
    S32ALNI(xr10, xr15, xr8, ptn3);  // xr10[31:0]: lt, t0, t1, t2 ;
    // load LEFT
    S32LDDR(xr1, src_left, 0x0);     // xr1[7:0] <- src_left[3] (l0) ;
    S32LDIVR(xr2, src_left, MB_LUMA_EDGED_WIDTH, 0x0);  // xr2[7:0] <- src_left[stride+3] (l1) ;
    S32LDIVR(xr3, src_left, MB_LUMA_EDGED_WIDTH, 0x0);  // xr3[7:0] <- src_left[2*stride+3] (l2) ;
    S32LDIVR(xr4, src_left, MB_LUMA_EDGED_WIDTH, 0x0);  // xr4[7:0] <- src_left[3*stride+3] (l3) ;
    S32SFL(xr0, xr2, xr1, xr5, ptn2);  // xr5[15:0] <- l1, l0 ;
    S32SFL(xr0, xr4, xr3, xr6, ptn2);  // xr6[15:0] <- l3, l2 ;
    S32SFL(xr0, xr6, xr5, xr7, ptn3);  // xr7[31:0] <- l3, l2, l1, l0 ;
    // ALNI for CAL
    S32ALNI(xr11, xr7, xr9, ptn1);      // xr11: l2, l1, l0, lt ;
    S32ALNI(xr12, xr1, xr10, ptn3);     // xr12: l0, lt, t0, t1 ;
    D32SLL(xr0, xr0, xr11, xr13, 0x8);  // xr13: l1, l0, lt, 0 ;
    // CAL
    Q8AVGR(xr1, xr11, xr7);  // xr1: src[0,3], src[0,2]/src[2,3], src[0,1]/src[2,2], src[0,0]/src[2,1] ;
    Q8AVG(xr2, xr12, xr8);
    Q8AVGR(xr3, xr2, xr10);  // xr3: src[1,0]/src[3,1], src[2,0], src[3,0], ~ ;
    Q8AVG(xr4, xr13, xr7);
    Q8AVGR(xr5, xr4, xr11);  // xr5: src[1,3], src[1,2]/src[3,3], src[1,1]/src[3,2], ~ ;
    // ALNI for STORE
    S32ALNI(xr8, xr1, xr3, ptn3);       // xr8: src[0,0]/src[2,1], src[1,0]/src[3,1], src[2,0], src[3,0] ;
    S32SFL(xr9, xr1, xr5, xr10, ptn0);  // xr9:  src[0,3], src[1,3], src[0,2]/src[2,3], src[1,2]/src[3,3] ;
                                        // xr10: src[0,1]/src[2,2], src[1,1]/src[3,2], src[0,0]/src[2,1], ~ ;
    S32SFL(xr11, xr10, xr8, xr0, ptn3); // xr11: src[0,1]/src[2,2], src[1,1]/src[3,2],
                                        //       src[0,0]/src[2,1], src[1,0]/src[3,1] ;
    S32ALNI(xr12, xr9, xr10, ptn2);     // xr12: src[0,2]/src[2,3], src[1,2]/src[3,3],
                                        //       src[0,1]/src[2,2], src[1,1]/src[3,2] ;
    // STORE
    S32STDR(xr8, dst, 0x0);
    S32SDIVR(xr11, dst, MB_LUMA_EDGED_WIDTH, 0x0);
    S32SDIVR(xr12, dst, MB_LUMA_EDGED_WIDTH, 0x0);
    S32SDIVR(xr9, dst, MB_LUMA_EDGED_WIDTH, 0x0);
}
// MODE 8
static void pred4x4_horizontal_up_mxu(uint8_t *dst, uint8_t *src, uint8_t *topright, uint8_t *top, uint8_t *topleft)
{
    uint8_t *src_left;    // left address
    src_left = src - 0x4;

    // load
    S32LDD(xr1, src_left, 0x0);  // xr1[31:24] <- src_left[3] (l0) ;
    S32LDIV(xr2, src_left, MB_LUMA_EDGED_WIDTH, 0x0);  // xr2[31:24] <- src_left[stride+3] (l1) ;
    S32LDIV(xr3, src_left, MB_LUMA_EDGED_WIDTH, 0x0);  // xr3[31:24] <- src_left[2*stride+3] (l2) ;
    S32LDIV(xr4, src_left, MB_LUMA_EDGED_WIDTH, 0x0);  // xr4[31:24] <- src_left[3*stride+3] (l3) ;
    S32SFL(xr5, xr2, xr1, xr0, ptn2);  // xr5[31:16] <- l1, l0 ;
    S32SFL(xr6, xr4, xr3, xr0, ptn2);  // xr6[31:16] <- l3, l2 ;
    S32SFL(xr1, xr6, xr5, xr0, ptn3);  // xr1[31: 0] <- l3, l2, l1, l0 ;
    D32SLL(xr2, xr1, xr0, xr0, 0x8);   // xr2: l2, l1, l0, 0 ;
    S32SFL(xr3, xr1, xr1, xr0, ptn0);  // xr3: l3, l3, l2, l2 ;
    Q8AVGR(xr4, xr1, xr2);  // xr4: src[2,1]/src[0,2], src[2,0]/src[0,1], src[0,0], ~ ;
    Q8AVG(xr5, xr2, xr3);
    Q8AVGR(xr6, xr5, xr1);  // xr6: src[3,1]/src[1,2], src[3,0]/src[1,1], src[1,0], ~ ;
    S32SFL(xr7, xr6, xr4, xr0, ptn0);  // xr7: src[3,1]/src[1,2], src[2,1]/src[0,2],
                                       //      src[3,0]/src[1,1], src[2,0]/src[0,1] ;
    D32SLR(xr8, xr4, xr6, xr9, 0x8);   // xr8: 0, src[2,1]/src[0,2], src[2,0]/src[0,1], src[0,0] ;
                                       // xr9: 0, src[3,1]/src[1,2], src[3,0]/src[1,1], src[1,0] ;
    S32SFL(xr0, xr9, xr8, xr10, ptn0); // xr10: src[3,0], src[2,0], src[1,0], src[0,0] ;
    S32SFL(xr11, xr3, xr7, xr0, ptn3); // xr11: l3, l3, src[3,1]/src[1,2], src[2,1]/src[0,2] ;
    S32SFL(xr12, xr3, xr3, xr0, ptn3); // xr12: l3, l3, l3, l3 ;
    // store
    S32STD(xr10, dst, 0x0);
    S32SDIV(xr7, dst, MB_LUMA_EDGED_WIDTH, 0x0);
    S32SDIV(xr11, dst, MB_LUMA_EDGED_WIDTH, 0x0);
    S32SDIV(xr12, dst, MB_LUMA_EDGED_WIDTH, 0x0);
}
//---------- other DC modes ------------
static void pred4x4_left_dc_mxu(uint8_t *dst, uint8_t *src, uint8_t *topright, uint8_t *top, uint8_t *topleft)
{
    uint8_t *src_left;    // left address
    src_left = src - 0x4;

    // load
    S32LDD(xr1, src_left, 0x0);  // xr1[31:24] <- src_left[3] (l0) ;
    S32LDIV(xr2, src_left, MB_LUMA_EDGED_WIDTH, 0x0);  // xr2[31:24] <- src_left[stride+3] (l1) ;
    S32LDIV(xr3, src_left, MB_LUMA_EDGED_WIDTH, 0x0);  // xr3[31:24] <- src_left[2*stride+3] (l2) ;
    S32LDIV(xr4, src_left, MB_LUMA_EDGED_WIDTH, 0x0);  // xr4[31:24] <- src_left[3*stride+3] (l3) ;
    S32SFL(xr5, xr2, xr1, xr0, ptn2);  // xr5[31:16] <- l1, l0 ;
    S32SFL(xr6, xr4, xr3, xr0, ptn2);  // xr6[31:16] <- l3, l2 ;
    S32SFL(xr7, xr6, xr5, xr0, ptn3);  // xr7[31: 0] <- l3, l2, l1, l0 ;
    // avg
    D8SUMC(xr2, xr0, xr7);
    D32SLR(xr8, xr2, xr0, xr0, 0x2);
    S32SFL(xr0, xr8, xr8, xr9, ptn0);
    S32SFL(xr0, xr9, xr9, xr1, ptn3);
    // store
    S32STD(xr1, dst, 0x0);
    S32SDIV(xr1, dst, MB_LUMA_EDGED_WIDTH, 0x0);
    S32SDIV(xr1, dst, MB_LUMA_EDGED_WIDTH, 0x0);
    S32SDIV(xr1, dst, MB_LUMA_EDGED_WIDTH, 0x0);
}
// MODE 3
static void pred16x16_plane_mxu(uint8_t *dst, uint8_t *src, uint8_t *top)
{
    int i;
    uint8_t *src_top;                 // top address
    uint8_t *src_topleft, *src_left;  // left address
    src_top = top;
    src_topleft = src_top - 0x14;
    src_left = src - 0x4;

    //----- H, LOAD -----
    S32LDD(xr1, src_top, -0x14);   // xr1 <- src_top[-4]; xr1: lt, 0, 0, 0 ;
    S32LDD(xr5, src_top, 0x0);     // xr5 <- src_top[0] ; xr5: t3, t2, t1, t0 ;
    S32LDD(xr2, src_top, 0x4);     // xr2 <- src_top[4] ; xr2: t7, t6, t5, t4 ;
    S32LDDR(xr3, src_top, 0x8);    // xr3 <- src_top[8] ; xr3: t8, t9, t10, t11 ;
    S32LDDR(xr4, src_top, 0xc);    // xr4 <- src_top[12]; xr4: t12, t13, t14, t15 ;
    S32ALNI(xr1, xr5, xr1, ptn1);  // xr1: t2, t1, t0, lt ;
    S32ALNI(xr2, xr2, xr5, ptn1);  // xr2: t6, t5, t4, t3 ; --- xr5 is free to use ;
    S32I2M(xr9, MUL_12);           // xr9 : 0x00010002 ;
    S32I2M(xr10, MUL_34);          // xr10: 0x00030004 ;
    //----- H, SUM -----
    Q8ADDE_SS(xr5, xr3, xr2, xr6);  // xr5[31:16] <- t8-t6 ; xr5[15:0] <- t9-t5 ;
                                    // xr6[31:16] <- t10-t4; xr6[15:0] <- t11-t3;
    S32I2M(xr11, MUL_56);           // xr11: 0x00050006 ;
    D16MUL_WW(xr13, xr9, xr5, xr14);     // xr13 <- 1*(t8-t6) ; xr14 <- 2*(t9-t5) ;
    D16MAC_AA_WW(xr13, xr10, xr6, xr14); // xr13 <- 1*(t8-t6)+3*(t10-t4) ; xr14 <- 2*(t9-t5)+4*(t11-t3) ;
    Q8ADDE_SS(xr5, xr4, xr1, xr6);  // xr5[31:16] <- t12-t2; xr5[15:0] <- t13-t1;
                                    // xr6[31:16] <- t14-t0; xr6[15:0] <- t15-lt;
    S32I2M(xr12, MUL_78);           // xr12: 0x00070008 ;
    D16MAC_AA_WW(xr13, xr11, xr5, xr14); // xr13 <- 1*(t8-t6)+3*(t10-t4)+5*(t12-t2) ;
                                         // xr14 <- 2*(t9-t5)+4*(t11-t3)+6*(t13-t1) ;
    D16MAC_AA_WW(xr13, xr12, xr6, xr14); // xr13 <- 1*(t8-t6)+3*(t10-t4)+5*(t12-t2)+7*(t14-t0) ;
                                         // xr14 <- 2*(t9-t5)+4*(t11-t3)+6*(t13-t1)+8*(t15-lt) ;
    S32LDD(xr1, src_topleft, 0x0);  // xr1[31:24] <- src_topleft[3] (lt) ;
    S32LDD(xr2, src_left, 0x0);     // xr2[31:24] <- src_topleft[stride+3] (l0) ;
    D32ADD_AA(xr15, xr13, xr14, xr0);  // xr15 <- 1*(t8-t6)+3*(t10-t4)+5*(t12-t2)+7*(t14-t0)
                                       //       + 2*(t9-t5)+4*(t11-t3)+6*(t13-t1)+8*(t15-lt) ;
    //----- V, LOAD -----
    S32LDIV(xr3, src_left, MB_LUMA_EDGED_WIDTH, 0x0);  // xr3[31:24] <- src_topleft[2*stride+3] (l1) ;
    S32LDIV(xr8, src_left, MB_LUMA_EDGED_WIDTH, 0x0);  // xr8[31:24] <- src_topleft[3*stride+3] (l2) ;
    S32SFL(xr5, xr2, xr1, xr0, ptn2);  // xr5[31:16] <- l0, lt ;
    S32SFL(xr6, xr8, xr3, xr0, ptn2);  // xr6[31:16] <- l2, l1 ;
    S32SFL(xr7, xr6, xr5, xr0, ptn3);  // xr7[31: 0] <- l2, l1, l0, lt ;
    S32LDIV(xr1, src_left, MB_LUMA_EDGED_WIDTH, 0x0);
    S32LDIV(xr2, src_left, MB_LUMA_EDGED_WIDTH, 0x0);
    S32LDIV(xr3, src_left, MB_LUMA_EDGED_WIDTH, 0x0);
    S32LDIV(xr8, src_left, MB_LUMA_EDGED_WIDTH, 0x0);
    S32SFL(xr5, xr2, xr1, xr0, ptn2);
    S32SFL(xr6, xr8, xr3, xr0, ptn2);
    S32SFL(xr13, xr6, xr5, xr0, ptn3);  // xr13[31:0] <- l6, l5, l4, l3 ;
    src_left += MB_LUMA_EDGED_WIDTH;    // skip l7: the plane sums only use l[7 +/- i]
    S32LDIV(xr8, src_left, MB_LUMA_EDGED_WIDTH, 0x0);
    S32LDIV(xr3, src_left, MB_LUMA_EDGED_WIDTH, 0x0);
    S32LDIV(xr2, src_left, MB_LUMA_EDGED_WIDTH, 0x0);
    S32LDIV(xr1, src_left, MB_LUMA_EDGED_WIDTH, 0x0);
    S32SFL(xr6, xr8, xr3, xr0, ptn2);
    S32SFL(xr5, xr2, xr1, xr0, ptn2);
    S32SFL(xr14, xr6, xr5, xr0, ptn3);  // xr14[31:0] <- l8, l9, l10, l11 ;
    S32LDIV(xr8, src_left, MB_LUMA_EDGED_WIDTH, 0x0);
    S32LDIV(xr3, src_left, MB_LUMA_EDGED_WIDTH, 0x0);
    S32LDIV(xr2, src_left, MB_LUMA_EDGED_WIDTH, 0x0);
    S32LDIV(xr1, src_left, MB_LUMA_EDGED_WIDTH, 0x0);
    S32SFL(xr6, xr8, xr3, xr0, ptn2);
    S32SFL(xr5, xr2, xr1, xr0, ptn2);
    S32SFL(xr1, xr6, xr5, xr0, ptn3);   // xr1[31: 0] <- l12, l13, l14, l15 ;
    //----- V, SUM -----
    Q8ADDE_SS(xr5, xr14, xr13, xr6);
    Q8ADDE_SS(xr2, xr1, xr7, xr3);
    D16MUL_WW(xr13, xr9, xr5, xr14);
    D16MAC_AA_WW(xr13, xr10, xr6, xr14);
    D16MAC_AA_WW(xr13, xr11, xr2, xr14);
    D16MAC_AA_WW(xr13, xr12, xr3, xr14);
    D32SLR(xr2, xr11, xr12, xr3, 0x8);  // xr2:  0x00000500 ; xr3:  0x00000700 ;
    D32SLR(xr11, xr2, xr3, xr12, 0x8);  // xr11: 0x00000005 ; xr12: 0x00000007 ;
    D32ADD_AA(xr14, xr13, xr14, xr0);   // xr14 <- 1*(l8-l6)+3*(l10-l4)+5*(l12-l2)+7*(l14-l0)
                                        //       + 2*(l9-l5)+4*(l11-l3)+6*(l13-l1)+8*(l15-lt) ;
    //----- P, CAL -----
    D16MUL_WW(xr0, xr15, xr11, xr2);   // xr2: 5*H ;
    D16MUL_WW(xr0, xr14, xr11, xr3);   // xr3: 5*V ;
    D32SLR(xr8, xr11, xr0, xr0, 0x2);  // xr8: 0x00000001 ;
    D32SLL(xr13, xr8, xr0, xr0, 0x5);  // xr13: 0x00000020 ;
    Q8ACCE_AA(xr0, xr1, xr4, xr8);     // xr8[15:0]: src1[0] + src2[16] + 1
    D32ADD_AA(xr5, xr2, xr13, xr0);    // xr5: 5*H+32 ;
    D32ADD_AA(xr6, xr3, xr13, xr0);    // xr6: 5*V+32 ;
    D32SLR(xr2, xr5, xr6, xr3, 0x6);   // xr2: (5*H+32) >> 6 ; xr3: (5*V+32) >> 6 ;
    D32SLL(xr5, xr8, xr0, xr0, 0x4);   // xr5[15:0]: 16*(src1[0] + src2[16] + 1)
    Q16ADD_AA_WW(xr7, xr2, xr3, xr0);  // xr7: V+H
    S32I2M(xr4, MUX_H16);              // xr4: 0x0000ffff ;
    D16MUL_WW(xr0, xr7, xr12, xr8);    // xr8: 7*(V+H)
    S32SFL(xr0, xr3, xr3, xr14, ptn3); // xr14[31:16]: V ; xr14[15:0]: V ;
    D32SLL(xr7, xr2, xr0, xr0, 0x1);
    Q16ADD_SS_WW(xr9, xr5, xr8, xr0);  // xr9: 16*(src1[0] + src2[16] + 1) - 7*(V+H)
    S32SFL(xr0, xr9, xr9, xr5, ptn3);  // xr5[31:16]: a ; xr5[15:0]: a ;
    S32SFL(xr0, xr7, xr7, xr8, ptn3);  // xr8[31:16]: 2H ; xr8[15:0]: 2H ;
    S32AND(xr2, xr4, xr2);
    Q16ADD_AA_WW(xr15, xr5, xr2, xr0); // xr15[31:16]: a ; xr15[15:0]: a + H ;
    dst -= MB_LUMA_EDGED_WIDTH;
    //----- SRC, STORE -----
    for (i = 0; i < 16; i++) {
        Q16ADD_AA_WW(xr1, xr15, xr8, xr0);
        Q16ADD_AA_WW(xr2, xr1, xr8, xr0);
        Q16SAR(xr9, xr15, xr1, xr1, 0x5);
        Q16ADD_AA_WW(xr3, xr2, xr8, xr0);
        Q16SAT(xr10, xr9, xr1);
        Q16ADD_AA_WW(xr4, xr3, xr8, xr0);
        Q16SAR(xr2, xr2, xr3, xr3, 0x5);
        Q16ADD_AA_WW(xr5, xr4, xr8, xr0);
        Q16SAT(xr11, xr2, xr3);
        Q16ADD_AA_WW(xr6, xr5, xr8, xr0);
        Q16SAR(xr4, xr4, xr5, xr5, 0x5);
        Q16ADD_AA_WW(xr7, xr6, xr8, xr0);
        Q16SAR(xr6, xr6, xr7, xr7, 0x5);
        Q16SAT(xr12, xr4, xr5);
        Q16SAT(xr13, xr6, xr7);
        S32SDIVR(xr10, dst, MB_LUMA_EDGED_WIDTH, 0x0);
        S32STDR(xr11, dst, 0x4);
        S32STDR(xr12, dst, 0x8);
        Q16ADD_AA_WW(xr15, xr15, xr14, xr0);  // step a by V for the next row
        S32STDR(xr13, dst, 0xc);
    }
}
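/*
 * Scalar reference for the 16x16 plane math above (sketch only; fetching
 * the edges from the real edged buffers is omitted). t[0..15] is the top
 * row, l[0..15] the left column, lt the corner pixel. Note the +16
 * rounding term of the standard formula is folded into "a", exactly as
 * xr9 = 16*(t15+l15+1) - 7*(V+H) does above.
 */
static void pred16x16_plane_ref(uint8_t *dst, int stride,
                                const uint8_t t[16], const uint8_t l[16], int lt)
{
    int i, x, y, H = 0, V = 0;
    for (i = 1; i <= 8; i++) {
        H += i * (t[7 + i] - (i == 8 ? lt : t[7 - i]));
        V += i * (l[7 + i] - (i == 8 ? lt : l[7 - i]));
    }
    {
        int b = (5 * H + 32) >> 6;
        int c = (5 * V + 32) >> 6;
        int a = 16 * (t[15] + l[15] + 1) - 7 * (b + c);
        for (y = 0; y < 16; y++)
            for (x = 0; x < 16; x++) {
                int v = (a + b * x + c * y) >> 5;
                dst[y * stride + x] = v < 0 ? 0 : v > 255 ? 255 : v;
            }
    }
}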
void fft_calc_fix_inverse(FFTContext_fix *s, FFTComplex_fix *z)
{
    int ln = s->nbits;
    int j, np, np2;
    int nblocks, nloops;
    register FFTComplex_fix *p, *q;
    FFTComplex_fix *exptab = s->exptab;
    int l;
    FFTSample_fix tmp_re, tmp_im;

    np = 1 << ln;
    /* butterfly all 4 passes, N = 16 */

    /* pass 0 */
#if 0
    p = &z[0];
    j = (np >> 1);
    do {
        /* X(k) = G(k) + H(k)*W (W = e^(j*0)) */
        FFT_BF_fix(p[0].re, p[0].im, p[1].re, p[1].im,
                   p[0].re, p[0].im, p[1].re, p[1].im);
        p += 2;
    } while (--j);
#endif

    /* pass 1 */
    p = &z[0];
    j = np >> 2;
    do {
#if 1
        S32LDD(xr1, p, 0);
        S32LDD(xr2, p, 4);
        S32LDD(xr3, p, 8);
        S32LDD(xr4, p, 12);
        S32LDD(xr5, p, 16);
        S32LDD(xr6, p, 20);
        S32LDD(xr7, p, 24);
        S32LDD(xr8, p, 28);
        D32ADD_AS(xr1, xr1, xr3, xr3);
        D32ADD_AS(xr2, xr2, xr4, xr4);
        D32ADD_AS(xr5, xr5, xr7, xr7);
        D32ADD_AS(xr6, xr6, xr8, xr8);
        D32ADD_AS(xr1, xr1, xr5, xr5);
        D32ADD_AS(xr2, xr2, xr6, xr6);
        D32ADD_SA(xr3, xr3, xr8, xr9);
        D32ADD_AS(xr4, xr4, xr7, xr8);
        S32STD(xr1, p, 0);
        S32STD(xr2, p, 4);
        S32STD(xr3, p, 8);
        S32STD(xr4, p, 12);
        S32STD(xr5, p, 16);
        S32STD(xr6, p, 20);
        S32STD(xr9, p, 24);
        S32STD(xr8, p, 28);
#else
        FFT_BF_fix(p[0].re, p[0].im, p[1].re, p[1].im,
                   p[0].re, p[0].im, p[1].re, p[1].im);
        FFT_BF_fix(p[2].re, p[2].im, p[3].re, p[3].im,
                   p[2].re, p[2].im, p[3].re, p[3].im);
        FFT_BF_fix(p[0].re, p[0].im, p[2].re, p[2].im,
                   p[0].re, p[0].im, p[2].re, p[2].im);
        FFT_BF_fix(p[1].re, p[1].im, p[3].re, p[3].im,
                   p[1].re, p[1].im, -p[3].im, p[3].re);
#endif
        p += 4;
    } while (--j);

    /* pass 2 .. ln-1 */
    nblocks = np >> 3;
    nloops = 1 << 2;
    np2 = np >> 1;
    do {
        p = z;
        q = z + nloops;
        for (j = 0; j < nblocks; ++j) {
#if 1
            S32LDD(xr1, p, 0);
            S32LDD(xr2, p, 4);
            S32LDD(xr3, q, 0);
            S32LDD(xr4, q, 4);
            D32ADD_AS(xr1, xr1, xr3, xr3);
            D32ADD_AS(xr2, xr2, xr4, xr4);
            S32STD(xr1, p, 0);
            S32STD(xr2, p, 4);
            S32STD(xr3, q, 0);
            S32STD(xr4, q, 4);
#else
            FFT_BF_fix(p->re, p->im, q->re, q->im,
                       p->re, p->im, q->re, q->im);
#endif
            p++;
            q++;
            for (l = nblocks; l < np2; l += nblocks) {
                /* FFT_CMUL_fix(): H(i) * e^(-j*2*PI*k*m/N) */
#if 1
                FFTSample_fix _are = exptab[l].re;
                FFTSample_fix _bre = q->re;
                FFTSample_fix _aim = exptab[l].im;
                FFTSample_fix _bim = q->im;
                S32MUL(xr1, xr2, _are, _bre);
                S32MUL(xr5, xr6, _are, _bim);
                S32LDD(xr7, p, 0);
                S32MSUB(xr1, xr2, _aim, _bim);
                S32MADD(xr5, xr6, _aim, _bre);
                S32LDD(xr8, p, 4);
                D32SLL(xr1, xr1, xr5, xr5, 1);
                D32ADD_AS(xr7, xr7, xr1, xr1);
                D32ADD_AS(xr8, xr8, xr5, xr5);
                S32STD(xr7, p, 0);
                S32STD(xr8, p, 4);
                S32STD(xr1, q, 0);
                S32STD(xr5, q, 4);
#else
                FFT_CMUL_fix(tmp_re, tmp_im, exptab[l].re, exptab[l].im, q->re, q->im);
                FFT_BF_fix(p->re, p->im, q->re, q->im, p->re, p->im, tmp_re, tmp_im);
#endif
                p++;
                q++;
            }
            p += nloops;
            q += nloops;
        }
        nblocks = nblocks >> 1;
        nloops = nloops << 1;
    } while (nblocks);
}
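/*
 * The S32MUL/S32MSUB/S32MADD group in the inner loop above is the complex
 * multiply-and-butterfly pair FFT_CMUL_fix + FFT_BF_fix. Scalar sketch with
 * twiddle w = exptab[l]; the >>31 rescale is an assumption matching the
 * D32SLL(...,1) of the 64-bit product before its high word is stored.
 */
static inline void fft_bf_cmul_ref(FFTComplex_fix *p, FFTComplex_fix *q, FFTComplex_fix w)
{
    FFTSample_fix tre = (FFTSample_fix)
        (((int64_t)w.re * q->re - (int64_t)w.im * q->im) >> 31);
    FFTSample_fix tim = (FFTSample_fix)
        (((int64_t)w.re * q->im + (int64_t)w.im * q->re) >> 31);
    q->re = p->re - tre;  /* butterfly: q = p - w*q */
    q->im = p->im - tim;
    p->re += tre;         /*            p = p + w*q */
    p->im += tim;
}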
// MODE 3
static void pred8x8_plane_mxu(uint8_t *dst, uint8_t *src, uint8_t *top)
{
    unsigned int i;
    uint8_t *src_top;                 // top address
    uint8_t *src_topleft, *src_left;  // left address
    src_top = top;
    src_topleft = src_top - 0x1c;
    src_left = src - 0x4;

    //----- H, LOAD -----
    S32LDD(xr1, src_top, -0x1c);   // xr1 <- src_top[-4]; xr1: lt, 0, 0, 0 ;
    S32LDD(xr3, src_top, 0x0);     // xr3 <- src_top[0] ; xr3: t3, t2, t1, t0 ;
    S32LDDR(xr2, src_top, 0x4);    // xr2 <- src_top[4] ; xr2: t4, t5, t6, t7 ;
    S32ALNI(xr1, xr3, xr1, ptn1);  // xr1: t2, t1, t0, lt ;
    S32I2M(xr8, MUL_12);           // xr8: 0x00010002 ;
    S32I2M(xr9, MUL_34);           // xr9: 0x00030004 ;
    //----- H, SUM -----
    Q8ADDE_SS(xr3, xr2, xr1, xr4);  // xr3[31:16] <- t4-t2 ; xr3[15:0] <- t5-t1 ;
                                    // xr4[31:16] <- t6-t0 ; xr4[15:0] <- t7-lt;
    S32LDD(xr1, src_topleft, 0x0);  // xr1[31:24] <- src_topleft[3] (lt) ;
    D16MUL_WW(xr5, xr8, xr3, xr6);    // xr5 <- 1*(t4-t2) ; xr6 <- 2*(t5-t1) ;
    D16MAC_AA_WW(xr5, xr9, xr4, xr6); // xr5 <- 1*(t4-t2)+3*(t6-t0) ; xr6 <- 2*(t5-t1)+4*(t7-lt) ;
    S32LDD(xr12, src_left, 0x0);    // xr12[31:24] <- src_topleft[stride+3] (l0) ;
    S32LDIV(xr3, src_left, MB_CHROM_EDGED_WIDTH, 0x0);  // xr3[31:24] <- src_topleft[2*stride+3] (l1) ;
    D32ADD_AA(xr7, xr5, xr6, xr0);  // xr7 <- 1*(t4-t2)+3*(t6-t0)+2*(t5-t1)+4*(t7-lt) ;
    //----- V, LOAD -----
    S32LDIV(xr4, src_left, MB_CHROM_EDGED_WIDTH, 0x0);  // xr4[31:24] <- src_topleft[3*stride+3] (l2) ;
    S32SFL(xr5, xr12, xr1, xr0, ptn2);  // xr5[31:16] <- l0, lt ;
    S32SFL(xr6, xr4, xr3, xr0, ptn2);   // xr6[31:16] <- l2, l1 ;
    S32SFL(xr10, xr6, xr5, xr0, ptn3);  // xr10[31:0] <- l2, l1, l0, lt ;
    src_left += MB_CHROM_EDGED_WIDTH;   // skip l3: the plane sums only use l[3 +/- i]
    S32LDIV(xr4, src_left, MB_CHROM_EDGED_WIDTH, 0x0);
    S32LDIV(xr3, src_left, MB_CHROM_EDGED_WIDTH, 0x0);
    S32LDIV(xr12, src_left, MB_CHROM_EDGED_WIDTH, 0x0);
    S32LDIV(xr1, src_left, MB_CHROM_EDGED_WIDTH, 0x0);
    S32SFL(xr6, xr4, xr3, xr0, ptn2);
    S32SFL(xr5, xr12, xr1, xr0, ptn2);
    S32SFL(xr11, xr6, xr5, xr0, ptn3);  // xr11[31:0] <- l4, l5, l6, l7 ;
    //----- V, SUM -----
    Q8ADDE_SS(xr3, xr11, xr10, xr4);
    S32LUI(xr1, 0x1, ptn0);             // xr1[31:0]: 0x00000001 ;
    D16MUL_WW(xr5, xr8, xr3, xr6);
    D16MAC_AA_WW(xr5, xr9, xr4, xr6);
    D32ADD_AA(xr13, xr5, xr6, xr0);     // xr13 <- 1*(l4-l2)+3*(l6-l0)+2*(l5-l1)+4*(l7-lt) ;
    //----- P, CAL ----- useful XRs: xr13, xr7, xr2, xr11 ;
    D32SLL(xr5, xr1, xr1, xr6, 0x4);    // xr5: 0x00000010 ; xr6: 0x00000010 ;
    D32SLL(xr3, xr13, xr7, xr4, 0x4);
    D32ACC_AA(xr5, xr13, xr3, xr0);     // xr5: 17*V+16
    D32ACC_AA(xr6, xr7, xr4, xr0);      // xr6: 17*H+16
    Q8ACCE_AA(xr0, xr2, xr11, xr1);     // xr1[15:0]: src1[0] + src2[8] + 1
    D32SLR(xr8, xr5, xr6, xr9, 0x5);    // xr8: (17*V+16) >> 5 ; xr9: (17*H+16) >> 5 ;
    D32SLL(xr2, xr1, xr0, xr0, 0x4);    // xr2[15:0]: 16*(src1[0] + src2[8] + 1)
    Q16ADD_AA_WW(xr7, xr8, xr9, xr0);   // xr7: V+H
    S32I2M(xr4, MUX_H16);               // xr4: 0x0000ffff ;
    D32SLL(xr12, xr7, xr0, xr0, 0x1);
    D32ADD_AA(xr5, xr12, xr7, xr0);     // xr5: 3*(V+H)
    Q16ADD_SS_WW(xr6, xr2, xr5, xr0);   // xr6: 16*(src1[0] + src2[8] + 1) - 3*(V+H)
    S32SFL(xr0, xr8, xr8, xr14, ptn3);  // xr14[31:16]: V ; xr14[15:0]: V ;
    S32SFL(xr0, xr6, xr6, xr5, ptn3);   // xr5[31:16]: a ; xr5[15:0]: a ;
    D32SLL(xr7, xr9, xr0, xr0, 0x1);
    S32SFL(xr0, xr7, xr7, xr8, ptn3);   // xr8[31:16]: 2H ; xr8[15:0]: 2H ;
    S32AND(xr9, xr4, xr9);
    Q16ADD_AA_WW(xr15, xr5, xr9, xr0);  // xr15[31:16]: a ; xr15[15:0]: a + H ;
    dst -= MB_CHROM_EDGED_WIDTH;
    //----- SRC, STORE -----
    for (i = 0; i < 8; i++) {
        Q16ADD_AA_WW(xr1, xr15, xr8, xr0);
        Q16ADD_AA_WW(xr2, xr1, xr8, xr0);
        Q16SAR(xr9, xr15, xr1, xr1, 0x5);
        Q16ADD_AA_WW(xr3, xr2, xr8, xr0);
        Q16SAT(xr10, xr9, xr1);
        Q16SAR(xr2, xr2, xr3, xr3, 0x5);
        Q16SAT(xr11, xr2, xr3);
        S32SDIVR(xr10, dst, MB_CHROM_EDGED_WIDTH, 0x0);
        Q16ADD_AA_WW(xr15, xr15, xr14, xr0);  // step a by V for the next row
        S32STDR(xr11, dst, 0x4);
    }
}
static void ff_vp3_idct_add_mxu(uint8_t *src, int stride, DCTELEM *input, uint8_t idct_row)
{
    int i;
    DCTELEM *blk;
    int32_t wf = (int32_t)whirl_idct;

    S32LDD(xr5, wf, 0x0);    // xr5(w7, w3)
    S32LDD(xr6, wf, 0x4);    // xr6(w9, w8)
    S32LDD(xr7, wf, 0x8);    // xr7(w11,w10)
    S32LDD(xr8, wf, 0xc);    // xr8(w13,w12)
    S32LDD(xr9, wf, 0x10);   // xr9(w6, w0)
    S32LDD(xr10, wf, 0x14);

    blk = input - 8;
    /* Inverse DCT on the rows now */
    for (i = 0; i < idct_row; i++) {
        S32LDI(xr1, blk, 0x10);  // xr1 (x4, x0)
        S32LDD(xr2, blk, 0x4);   // xr2 (x7, x3)
        S32LDD(xr3, blk, 0x8);   // xr3 (x6, x1)
        S32LDD(xr4, blk, 0xc);   // xr4 (x5, x2)
        S32OR(xr12, xr2, xr3);
        S32OR(xr11, xr12, xr4);
        S32OR(xr12, xr11, xr1);
        if (S32M2I(xr12) == 0) {
            // blk[0]=blk[1]=blk[2]=blk[3]=blk[4]=blk[5]=blk[6]=blk[7]=0
            continue;
        }
        S32SFL(xr12, xr0, xr1, xr13, ptn3);
        S32OR(xr11, xr11, xr12);
        if (S32M2I(xr11) == 0 && S32M2I(xr13) != 0) {
            // blk[0]!=0, and blk[1]=blk[2]=blk[3]=blk[4]=blk[5]=blk[6]=blk[7]=0
            D16MUL_HW(xr0, xr5, xr13, xr13);
            D32SAR(xr0, xr0, xr13, xr13, 15);
            S32SFL(xr0, xr13, xr13, xr13, ptn3);
            S32STD(xr13, blk, 0x0);
            S32STD(xr13, blk, 0x4);
            S32STD(xr13, blk, 0x8);
            S32STD(xr13, blk, 0xc);
            continue;
        }
        S32SFL(xr1, xr1, xr2, xr2, ptn3);    // xr1: s1, s3 ; xr2: s0, s2
        S32SFL(xr3, xr3, xr4, xr4, ptn3);    // xr3: s5, s7 ; xr4: s4, s6
        D16MUL_WW(xr11, xr2, xr5, xr12);     // xr11: s0*c4, xr12: s2*c2
        D16MAC_AA_WW(xr11, xr4, xr6, xr12);  // xr11: s0*c4+s4*c4, xr12: s2*c2+s6*c6
        D16MUL_WW(xr13, xr2, xr6, xr14);     // xr13: s0*c4, xr14: s2*c6
        D16MAC_SS_WW(xr13, xr4, xr5, xr14);  // xr13: s0*c4-s4*c4, xr14: s2*c6-s6*c2
        D16MUL_HW(xr2, xr1, xr7, xr4);       // xr2: s1*c1, xr4: s1*c3
        D16MAC_AS_LW(xr2, xr1, xr9, xr4);    // xr2: s1*c1+s3*c3, xr4: s1*c3-s3*c7
        D16MAC_AS_HW(xr2, xr3, xr10, xr4);   // xr2: s1*c1+s3*c3+s5*c5,
                                             // xr4: s1*c3-s3*c7-s5*c1
        D16MAC_AS_LW(xr2, xr3, xr8, xr4);    // xr2: s1*c1+s3*c3+s5*c5+s7*c7,
                                             // xr4: s1*c3-s3*c7-s5*c1-s7*c5
        D32SAR(xr11, xr11, xr13, xr13, 15);
        S32SFL(xr0, xr11, xr13, xr11, ptn3);
        D32SAR(xr12, xr12, xr14, xr14, 15);
        S32SFL(xr0, xr12, xr14, xr12, ptn3);
        D32SAR(xr2, xr2, xr4, xr4, 15);
        S32SFL(xr0, xr2, xr4, xr2, ptn3);
        D16MUL_HW(xr4, xr1, xr8, xr15);      // xr4: s1*c7, xr15: s1*c5
        D16MAC_SS_LW(xr4, xr1, xr10, xr15);  // xr4: s1*c7-s3*c5, xr15: s1*c5-s3*c1
        D16MAC_AA_HW(xr4, xr3, xr9, xr15);   // xr4: s1*c7-s3*c5+s5*c3, xr15: s1*c5-s3*c1+s5*c7
        D16MAC_SA_LW(xr4, xr3, xr7, xr15);   // xr4: s1*c7-s3*c5+s5*c3-s7*c1
                                             // xr15: s1*c5-s3*c1+s5*c7+s7*c3
        Q16ADD_AS_WW(xr11, xr11, xr12, xr12);
        // xr11: rnd(s0*c4+s4*c4)>>15 + rnd(s2*c2+s6*c6)>>15
        //       rnd(s0*c4-s4*c4)>>15 + rnd(s2*c6-s6*c2)>>15
        // xr12: rnd(s0*c4+s4*c4)>>15 - rnd(s2*c2+s6*c6)>>15
        //       rnd(s0*c4-s4*c4)>>15 - rnd(s2*c6-s6*c2)>>15
        D32SAR(xr15, xr15, xr4, xr4, 15);
        S32SFL(xr0, xr15, xr4, xr15, ptn3);
        Q16ADD_AS_WW(xr11, xr11, xr2, xr2);
        // xr11: rnd(s0*c4+s4*c4)>>15 + rnd(s2*c2+s6*c6)>>15 + rnd(s1*c1+s3*c3+s5*c5+s7*c7)>>15
        //       rnd(s0*c4-s4*c4)>>15 + rnd(s2*c6-s6*c2)>>15 + rnd(s1*c3-s3*c7-s5*c1-s7*c5)>>15
        // xr2:  same two sums with the odd part subtracted
        Q16ADD_AS_XW(xr12, xr12, xr15, xr15);
        // xr12: rnd(s0*c4+s4*c4)>>15 - rnd(s2*c2+s6*c6)>>15 + rnd(s1*c5-s3*c1+s5*c7+s7*c3)>>15
        //       rnd(s0*c4-s4*c4)>>15 - rnd(s2*c6-s6*c2)>>15 + rnd(s1*c7-s3*c5+s5*c3-s7*c1)>>15
        // xr15: same two sums with the odd part subtracted
        S32SFL(xr11, xr11, xr12, xr12, ptn3);
        // xr11: even+odd of column 0 paired with column 1
        // xr12: even+odd of the complementary pair
        S32SFL(xr12, xr12, xr11, xr11, ptn3);
        S32STD(xr12, blk, 0x0);
        S32STD(xr11, blk, 0x4);
        S32STD(xr15, blk, 0x8);
        S32STD(xr2, blk, 0xc);
    }

    blk = input - 2;
    /* idct columns, two at a time (s and ss) */
    for (i = 0; i < 4; i++) {
        S32I2M(xr5, wxr5);   // xr5: c4, c2
        S32I2M(xr6, wxr6);   // xr6: column-pass constants (xr6[15:0]: c6)
        S32LDI(xr1, blk, 0x4);    // xr1: ss0, s0
        S32LDD(xr3, blk, 0x20);   // xr3: ss2, s2
        S32LDD(xr11, blk, 0x40);  // xr11: ss4, s4
        S32LDD(xr13, blk, 0x60);  // xr13: ss6, s6
        D16MUL_HW(xr15, xr5, xr1, xr2);      // xr15: ss0*c4, xr2: s0*c4
        D16MAC_AA_HW(xr15, xr5, xr11, xr2);  // xr15: ss0*c4+ss4*c4, xr2: s0*c4+s4*c4
        D16MUL_LW(xr10, xr5, xr3, xr9);      // xr10: ss2*c2, xr9: s2*c2
        D16MAC_AA_LW(xr10, xr6, xr13, xr9);  // xr10: ss2*c2+ss6*c6, xr9: s2*c2+s6*c6
        D32SAR(xr15, xr15, xr2, xr2, 15);
        S32SFL(xr0, xr15, xr2, xr15, ptn3);  // xr15: (ss0*c4+ss4*c4)>>15
        D32SAR(xr10, xr10, xr9, xr9, 15);
        S32SFL(xr0, xr10, xr9, xr10, ptn3);  // xr10: (ss2*c2+ss6*c6)>>15
        S32LDD(xr2, blk, 0x10);   // xr2: ss1, s1
        S32LDD(xr4, blk, 0x30);   // xr4: ss3, s3
        Q16ADD_AS_WW(xr15, xr15, xr10, xr9);
        // xr15: rnd(ss0*c4+ss4*c4)>>15 + rnd(ss2*c2+ss6*c6)>>15
        //     : rnd(s0*c4+s4*c4)>>15   + rnd(s2*c2+s6*c6)>>15
        // xr9:  rnd(ss0*c4+ss4*c4)>>15 - rnd(ss2*c2+ss6*c6)>>15
        //     : rnd(s0*c4+s4*c4)>>15   - rnd(s2*c2+s6*c6)>>15
        D16MUL_HW(xr10, xr5, xr1, xr1);      // xr10: ss0*c4, xr1: s0*c4
        D16MAC_SS_HW(xr10, xr5, xr11, xr1);  // xr10: ss0*c4-ss4*c4, xr1: s0*c4-s4*c4
        D16MUL_LW(xr11, xr6, xr3, xr12);     // xr11: ss2*c6, xr12: s2*c6
        D16MAC_SS_LW(xr11, xr5, xr13, xr12); // xr11: ss2*c6-ss6*c2, xr12: s2*c6-s6*c2
        D32SAR(xr10, xr10, xr1, xr1, 15);
        S32SFL(xr0, xr10, xr1, xr10, ptn3);  // xr10: (ss0*c4-ss4*c4)>>15
                                             //     : (s0*c4-s4*c4)>>15
        D32SAR(xr11, xr11, xr12, xr12, 15);
        S32SFL(xr0, xr11, xr12, xr11, ptn3); // xr11: (ss2*c6-ss6*c2)>>15
                                             //     : (s2*c6-s6*c2)>>15
        S32LDD(xr12, blk, 0x50);  // xr12: ss5, s5
        S32LDD(xr14, blk, 0x70);  // xr14: ss7, s7
        Q16ADD_AS_WW(xr10, xr10, xr11, xr1);
        // xr10: rnd(ss0*c4-ss4*c4)>>15 + rnd(ss2*c6-ss6*c2)>>15
        //     : rnd(s0*c4-s4*c4)>>15   + rnd(s2*c6-s6*c2)>>15
        // xr1:  rnd(ss0*c4-ss4*c4)>>15 - rnd(ss2*c6-ss6*c2)>>15
        //     : rnd(s0*c4-s4*c4)>>15   - rnd(s2*c6-s6*c2)>>15
        D16MUL_HW(xr11, xr7, xr2, xr13);     // xr11: ss1*c1, xr13: s1*c1
        D16MAC_AA_LW(xr11, xr7, xr4, xr13);  // xr11: ss1*c1+ss3*c3, xr13: s1*c1+s3*c3
        D16MAC_AA_LW(xr11, xr8, xr12, xr13); // xr11: ss1*c1+ss3*c3+ss5*c5
                                             // xr13: s1*c1+s3*c3+s5*c5
        D16MAC_AA_HW(xr11, xr8, xr14, xr13); // xr11: ss1*c1+ss3*c3+ss5*c5+ss7*c7
                                             // xr13: s1*c1+s3*c3+s5*c5+s7*c7
        D16MUL_LW(xr3, xr7, xr2, xr5);       // xr3: ss1*c3, xr5: s1*c3
        D16MAC_SS_HW(xr3, xr8, xr4, xr5);    // xr3: ss1*c3-ss3*c7, xr5: s1*c3-s3*c7
        D16MAC_SS_HW(xr3, xr7, xr12, xr5);   // xr3: ss1*c3-ss3*c7-ss5*c1
                                             // xr5: s1*c3-s3*c7-s5*c1
        D16MAC_SS_LW(xr3, xr8, xr14, xr5);   // xr3: ss1*c3-ss3*c7-ss5*c1-ss7*c5
                                             // xr5: s1*c3-s3*c7-s5*c1-s7*c5
        D32SAR(xr11, xr11, xr13, xr13, 15);
        S32SFL(xr0, xr11, xr13, xr11, ptn3); // xr11: (ss1*c1+ss3*c3+ss5*c5+ss7*c7)>>15
                                             //     : (s1*c1+s3*c3+s5*c5+s7*c7)>>15
        D32SAR(xr3, xr3, xr5, xr5, 15);
        S32SFL(xr0, xr3, xr5, xr3, ptn3);    // xr3: (ss1*c3-ss3*c7-ss5*c1-ss7*c5)>>15
                                             //    : (s1*c3-s3*c7-s5*c1-s7*c5)>>15
        D16MUL_LW(xr5, xr8, xr2, xr13);      // xr5: ss1*c5, xr13: s1*c5
        D16MAC_SS_HW(xr5, xr7, xr4, xr13);   // xr5: ss1*c5-ss3*c1, xr13: s1*c5-s3*c1
        D16MAC_AA_HW(xr5, xr8, xr12, xr13);  // xr5: ss1*c5-ss3*c1+ss5*c7
                                             //    : s1*c5-s3*c1+s5*c7
        D16MAC_AA_LW(xr5, xr7, xr14, xr13);  // xr5: ss1*c5-ss3*c1+ss5*c7+ss7*c1
                                             //    : s1*c5-s3*c1+s5*c7+s7*c1
        D16MUL_HW(xr2, xr8, xr2, xr6);       // xr2: ss1*c7, xr6: s1*c7
        D16MAC_SS_LW(xr2, xr8, xr4, xr6);    // xr2: ss1*c7-ss3*c5, xr6: s1*c7-s3*c5
        D16MAC_AA_LW(xr2, xr7, xr12, xr6);   // xr2: ss1*c7-ss3*c5+ss5*c1
                                             // xr6: s1*c7-s3*c5+s5*c1
        D16MAC_SS_HW(xr2, xr7, xr14, xr6);   // xr2: ss1*c7-ss3*c5+ss5*c1-ss7*c3
                                             // xr6: s1*c7-s3*c5+s5*c1-s7*c3
        D32SAR(xr5, xr5, xr13, xr13, 15);
        S32SFL(xr0, xr5, xr13, xr5, ptn3);   // xr5: (ss1*c5-ss3*c1+ss5*c7+ss7*c1)>>15
                                             //    : (s1*c5-s3*c1+s5*c7+s7*c1)>>15
        D32SAR(xr2, xr2, xr6, xr6, 15);
        S32SFL(xr0, xr2, xr6, xr2, ptn3);    // xr2: (ss1*c7-ss3*c5+ss5*c1-ss7*c3)>>15
                                             //    : (s1*c7-s3*c5+s5*c1-s7*c3)>>15
        S32I2M(xr4, 0x00080008);  // round value 8
        Q16ADD_AS_WW(xr15, xr15, xr11, xr11);
        // xr15: rnd(ss0*c4+ss4*c4)>>15 + rnd(ss2*c2+ss6*c6)>>15
        //         + rnd(ss1*c1+ss3*c3+ss5*c5+ss7*c7)>>15 (and the s column likewise)
        // xr11: same sums with the odd part subtracted
        Q16ADD_AS_WW(xr10, xr10, xr3, xr3);
        // xr10: rnd(ss0*c4-ss4*c4)>>15 + rnd(ss2*c6-ss6*c2)>>15
        //         + rnd(ss1*c3-ss3*c7-ss5*c1-ss7*c5)>>15 (and the s column likewise)
        // xr3:  same sums with the odd part subtracted
        Q16ADD_AS_WW(xr1, xr1, xr5, xr5);
        // xr1:  rnd(ss0*c4-ss4*c4)>>15 - rnd(ss2*c6-ss6*c2)>>15
        //         + rnd(ss1*c5-ss3*c1+ss5*c7+ss7*c1)>>15 (and the s column likewise)
        // xr5:  same sums with the odd part subtracted
        Q16ADD_AS_WW(xr9, xr9, xr2, xr2);
        // xr9:  rnd(ss0*c4+ss4*c4)>>15 - rnd(ss2*c2+ss6*c6)>>15
        //         + rnd(ss1*c7-ss3*c5+ss5*c1-ss7*c3)>>15 (and the s column likewise)
        // xr2:  same sums with the odd part subtracted
        Q16ACCM_AA(xr15, xr4, xr4, xr10);
        Q16ACCM_AA(xr11, xr4, xr4, xr1);
        Q16ACCM_AA(xr9, xr4, xr4, xr2);
        Q16ACCM_AA(xr5, xr4, xr4, xr3);
        Q16SAR(xr15, xr15, xr10, xr10, 4);
        Q16SAR(xr11, xr11, xr1, xr1, 4);
        Q16SAR(xr9, xr9, xr2, xr2, 4);
        Q16SAR(xr5, xr5, xr3, xr3, 4);
        S32STD(xr15, blk, 0x00);
        S32STD(xr10, blk, 0x10);
        S32STD(xr1, blk, 0x20);
        S32STD(xr9, blk, 0x30);
        S32STD(xr2, blk, 0x40);
        S32STD(xr5, blk, 0x50);
        S32STD(xr3, blk, 0x60);
        S32STD(xr11, blk, 0x70);
    }

    /* add the residual to the prediction, with unsigned saturation */
    blk = input - 8;
    src -= stride;
    for (i = 0; i < 8; i++) {
        S32LDIV(xr1, src, stride, 0x0);
        S32LDI(xr3, blk, 0x10);
        S32LDD(xr4, blk, 0x4);
        Q8ACCE_AA(xr4, xr1, xr0, xr3);
        S32LDD(xr2, src, 0x4);
        S32LDD(xr5, blk, 0x8);
        S32LDD(xr6, blk, 0xc);
        Q8ACCE_AA(xr6, xr2, xr0, xr5);
        Q16SAT(xr1, xr4, xr3);
        S32STD(xr1, src, 0x0);
        Q16SAT(xr2, xr6, xr5);
        S32STD(xr2, src, 0x4);
    }
}
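/*
 * The final loop above adds the reconstructed residual to the prediction
 * with unsigned saturation: Q8ACCE_AA widens four prediction bytes to 16
 * bits and accumulates them onto the residual halfwords, and Q16SAT packs
 * the results back to bytes with clamping. One row of that, in scalar form:
 */
static void idct_add_row_ref(uint8_t *src, const DCTELEM *blk)
{
    int i;
    for (i = 0; i < 8; i++) {
        int v = src[i] + blk[i];
        src[i] = v < 0 ? 0 : v > 255 ? 255 : v;
    }
}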
void add_acdc(MACROBLOCK *pMB, uint32_t block, int16_t dct_codes[64],
              uint32_t iDcScaler, int16_t predictors[8], const int bsversion)
{
    uint8_t acpred_direction = pMB->acpred_directions[block];
    int16_t *pCurrent = (int16_t *)pMB->pred_values[block];
    uint32_t i;

    DPRINTF(XVID_DEBUG_COEFF, "predictor[0] %i\n", predictors[0]);
    dct_codes[0] += predictors[0];    /* dc prediction */
#if 0
    pCurrent[0] = dct_codes[0] * iDcScaler;
    if (!bsversion || bsversion > BS_VERSION_BUGGY_DC_CLIPPING) {
        pCurrent[0] = CLIP(pCurrent[0], -2048, 2047);
    }
#endif

    if (acpred_direction == 1) {
        for (i = 1; i < 8; i++) {
            int level = dct_codes[i] + predictors[i];
            DPRINTF(XVID_DEBUG_COEFF, "predictor[%i] %i\n", i, predictors[i]);
            dct_codes[i] = level;
            // pCurrent[i] = level;
            // pCurrent[i + 7] = dct_codes[i * 8];
        }
    } else if (acpred_direction == 2) {
        for (i = 1; i < 8; i++) {
            int level = dct_codes[i * 8] + predictors[i];
            DPRINTF(XVID_DEBUG_COEFF, "predictor[%i] %i\n", i * 8, predictors[i]);
            dct_codes[i * 8] = level;
            // pCurrent[i + 7] = level;
            // pCurrent[i] = dct_codes[i];
        }
    }
    // else {
    //     for (i = 1; i < 8; i++) {
    //         pCurrent[i] = dct_codes[i];
    //         pCurrent[i + 7] = dct_codes[i * 8];
    //     }
    // }

    {
        S32LDD(xr1, dct_codes, 0);
        S32LDD(xr2, dct_codes, 4);
        S32LDD(xr3, dct_codes, 8);
        S32LDD(xr4, dct_codes, 12);
        S16LDD(xr5, dct_codes, 16, 0);
        S16LDD(xr6, dct_codes, 48, 0);
        S16LDD(xr7, dct_codes, 80, 0);
        S16LDD(xr8, dct_codes, 112, 0);
        S16LDD(xr5, dct_codes, 32, 1);
        S16LDD(xr6, dct_codes, 64, 1);
        S16LDD(xr7, dct_codes, 96, 1);
        S32STD(xr1, pCurrent, 0);
        S32STD(xr2, pCurrent, 4);
        S32STD(xr3, pCurrent, 8);
        S32STD(xr4, pCurrent, 12);
        S32STD(xr5, pCurrent, 16);
        S32STD(xr6, pCurrent, 20);
        S32STD(xr7, pCurrent, 24);
        S32STD(xr8, pCurrent, 28);
    }

#if 1
    pCurrent[0] = dct_codes[0] * iDcScaler;
    if (!bsversion || bsversion > BS_VERSION_BUGGY_DC_CLIPPING) {
        pCurrent[0] = CLIP(pCurrent[0], -2048, 2047);
    }
#endif
}
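/*
 * Scalar equivalent of the MXU block in add_acdc() above, matching the
 * commented-out loops it replaces: copy the first row dct_codes[0..7] into
 * pCurrent[0..7] and gather the first column dct_codes[8,16,...,56] into
 * pCurrent[8..14] (each S16LDD picks one halfword into one lane). The
 * sixteenth halfword stored from xr8 comes from an unloaded lane and appears
 * to be don't-care here.
 */
static void store_pred_values_ref(int16_t *pCurrent, const int16_t *dct_codes)
{
    int i;
    pCurrent[0] = dct_codes[0];   /* later overwritten by the scaled DC */
    for (i = 1; i < 8; i++) {
        pCurrent[i]     = dct_codes[i];       /* first row    */
        pCurrent[i + 7] = dct_codes[i * 8];   /* first column */
    }
}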
static void rv40_dequant4x4(DCTELEM *block, uint32_t *dst, int n)
{
    uint32_t src   = (uint32_t)(block - 4);
    uint32_t dst_t = (uint32_t)(dst - 4);
#if 0
    int i;
    for (i = 0; i < n; i++) {
        S32LDI(xr1, src, 0x8);
        S32LDD(xr2, src, 0x4);
        S32LDI(xr7, src, 0x8);
        S32LDD(xr8, src, 0x4);
        D16MUL_LW(xr4, xr12, xr1, xr3);
        D16MUL_LW(xr10, xr12, xr7, xr9);
        D16MUL_LW(xr14, xr12, xr8, xr15);
        D32ASUM_AA(xr3, xr13, xr13, xr4);
        D16MUL_LW(xr6, xr12, xr2, xr5);
        D32SLR(xr3, xr3, xr4, xr4, 4);
        D32ASUM_AA(xr5, xr13, xr13, xr6);
        D32ASUM_AA(xr9, xr13, xr13, xr10);
        D32SLR(xr5, xr5, xr6, xr6, 4);
        D32SLR(xr9, xr9, xr10, xr10, 4);
        D32ASUM_AA(xr15, xr13, xr13, xr14);
        S32SDI(xr3, dst_t, 0x10);
        S32STD(xr4, dst_t, 0x4);
        S32STD(xr5, dst_t, 0x8);
        S32STD(xr6, dst_t, 0xc);
        D32SLR(xr15, xr15, xr14, xr14, 4);
        S32SDI(xr9, dst_t, 0x10);
        S32STD(xr10, dst_t, 0x4);
        S32STD(xr15, dst_t, 0x8);
        S32STD(xr14, dst_t, 0xc);
    }
#else
    if (n == 1) {
        S32LDI(xr1, src, 0x8);
        S32LDI(xr2, src, 0x8);
        S32LDI(xr7, src, 0x8);
        S32LDI(xr8, src, 0x8);
        D16MUL_XW(xr4, xr12, xr1, xr3);
        D16MUL_LW(xr10, xr12, xr7, xr9);
        D16MUL_LW(xr14, xr12, xr8, xr15);
        D32ASUM_AA(xr3, xr13, xr13, xr4);
        D16MUL_LW(xr6, xr12, xr2, xr5);
        D32SLR(xr3, xr3, xr4, xr4, 4);
        D32ASUM_AA(xr5, xr13, xr13, xr6);
        D32ASUM_AA(xr9, xr13, xr13, xr10);
        D32SLR(xr5, xr5, xr6, xr6, 4);
        D32SLR(xr9, xr9, xr10, xr10, 4);
        D32ASUM_AA(xr15, xr13, xr13, xr14);
        S32SDI(xr3, dst_t, 0x10);
        S32STD(xr4, dst_t, 0x4);
        S32STD(xr0, dst_t, 0x8);
        S32STD(xr0, dst_t, 0xc);
        S32SDI(xr5, dst_t, 0x10);
        S32STD(xr6, dst_t, 0x4);
        S32STD(xr0, dst_t, 0x8);
        S32STD(xr0, dst_t, 0xc);
        D32SLR(xr15, xr15, xr14, xr14, 4);
        S32SDI(xr9, dst_t, 0x10);
        S32STD(xr10, dst_t, 0x4);
        S32STD(xr0, dst_t, 0x8);
        S32STD(xr0, dst_t, 0xc);
        S32SDI(xr15, dst_t, 0x10);
        S32STD(xr14, dst_t, 0x4);
        S32STD(xr0, dst_t, 0x8);
        S32STD(xr0, dst_t, 0xc);
    } else if (n == 2) {
        S32LDI(xr1, src, 0x8);
        S32LDD(xr2, src, 0x4);
        S32LDI(xr7, src, 0x8);
        S32LDD(xr8, src, 0x4);
        D16MUL_XW(xr4, xr12, xr1, xr3);
        D16MUL_LW(xr10, xr12, xr7, xr9);
        D16MUL_LW(xr14, xr12, xr8, xr15);
        D32ASUM_AA(xr3, xr13, xr13, xr4);
        D16MUL_LW(xr6, xr12, xr2, xr5);
        D32SLR(xr3, xr3, xr4, xr4, 4);
        D32ASUM_AA(xr5, xr13, xr13, xr6);
        D32ASUM_AA(xr9, xr13, xr13, xr10);
        D32SLR(xr5, xr5, xr6, xr6, 4);
        D32SLR(xr9, xr9, xr10, xr10, 4);
        D32ASUM_AA(xr15, xr13, xr13, xr14);
        S32SDI(xr3, dst_t, 0x10);
        S32STD(xr4, dst_t, 0x4);
        S32STD(xr5, dst_t, 0x8);
        S32STD(xr6, dst_t, 0xc);
        D32SLR(xr15, xr15, xr14, xr14, 4);
        S32SDI(xr9, dst_t, 0x10);
        S32STD(xr10, dst_t, 0x4);
        S32STD(xr15, dst_t, 0x8);
        S32STD(xr14, dst_t, 0xc);
        S32SDI(xr0, dst_t, 0x10);
        S32STD(xr0, dst_t, 0x4);
        S32STD(xr0, dst_t, 0x8);
        S32STD(xr0, dst_t, 0xc);
        S32SDI(xr0, dst_t, 0x10);
        S32STD(xr0, dst_t, 0x4);
        S32STD(xr0, dst_t, 0x8);
        S32STD(xr0, dst_t, 0xc);
    } else {
        S32LDI(xr1, src, 0x8);
        S32LDD(xr2, src, 0x4);
        S32LDI(xr7, src, 0x8);
        S32LDD(xr8, src, 0x4);
        D16MUL_XW(xr4, xr12, xr1, xr3);
        D16MUL_LW(xr10, xr12, xr7, xr9);
        D16MUL_LW(xr14, xr12, xr8, xr15);
        D32ASUM_AA(xr3, xr13, xr13, xr4);
        D16MUL_LW(xr6, xr12, xr2, xr5);
        D32SLR(xr3, xr3, xr4, xr4, 4);
        D32ASUM_AA(xr5, xr13, xr13, xr6);
        D32ASUM_AA(xr9, xr13, xr13, xr10);
        D32SLR(xr5, xr5, xr6, xr6, 4);
        D32SLR(xr9, xr9, xr10, xr10, 4);
        D32ASUM_AA(xr15, xr13, xr13, xr14);
        S32SDI(xr3, dst_t, 0x10);
        S32STD(xr4, dst_t, 0x4);
        S32STD(xr5, dst_t, 0x8);
        S32STD(xr6, dst_t, 0xc);
        D32SLR(xr15, xr15, xr14, xr14, 4);
        S32SDI(xr9, dst_t, 0x10);
        S32STD(xr10, dst_t, 0x4);
        S32STD(xr15, dst_t, 0x8);
        S32STD(xr14, dst_t, 0xc);
        S32LDI(xr1, src, 0x8);
        S32LDD(xr2, src, 0x4);
        S32LDI(xr7, src, 0x8);
        S32LDD(xr8, src, 0x4);
        D16MUL_LW(xr4, xr12, xr1, xr3);
        D16MUL_LW(xr10, xr12, xr7, xr9);
        D16MUL_LW(xr14, xr12, xr8, xr15);
        D32ASUM_AA(xr3, xr13, xr13, xr4);
        D16MUL_LW(xr6, xr12, xr2, xr5);
        D32SLR(xr3, xr3, xr4, xr4, 4);
        D32ASUM_AA(xr5, xr13, xr13, xr6);
        D32ASUM_AA(xr9, xr13, xr13, xr10);
        D32SLR(xr5, xr5, xr6, xr6, 4);
        D32SLR(xr9, xr9, xr10, xr10, 4);
        D32ASUM_AA(xr15, xr13, xr13, xr14);
        S32SDI(xr3, dst_t, 0x10);
        S32STD(xr4, dst_t, 0x4);
        S32STD(xr5, dst_t, 0x8);
        S32STD(xr6, dst_t, 0xc);
        D32SLR(xr15, xr15, xr14, xr14, 4);
        S32SDI(xr9, dst_t, 0x10);
        S32STD(xr10, dst_t, 0x4);
        S32STD(xr15, dst_t, 0x8);
        S32STD(xr14, dst_t, 0xc);
    }
#endif
}
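/*
 * Per-coefficient operation behind rv40_dequant4x4(): xr12 (quantizer
 * scale) and xr13 (rounding bias) are preloaded by the caller before this
 * function runs. Each D16MUL produces two 32-bit products, D32ASUM_AA adds
 * the bias register onto each, and D32SLR shifts right by 4. Hedged scalar
 * sketch, with q and bias standing in for the assumed preloaded constants:
 */
static inline int32_t rv40_dequant_coef_ref(int16_t level, int32_t q, int32_t bias)
{
    return ((int32_t)level * q + bias) >> 4;
}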