// MODE 2 static void pred8x8_dc_mxu(uint8_t *dst, uint8_t *src, uint8_t *top){ uint8_t *src_left; // left address unsigned int i; src_left = src - 0x4; // load top S32LDD(xr11, top, 0x0); S32LDD(xr12, top, 0x4); // load left (4 x 7 = 28 instructions) S32LDD(xr1, src_left, 0x0); // xr1[31:24] <- src_left[3] (l0) ; S32LDIV(xr2, src_left, MB_CHROM_EDGED_WIDTH, 0x0); // xr2[31:24] <- src_left[stride+3] (l1) ; S32LDIV(xr3, src_left, MB_CHROM_EDGED_WIDTH, 0x0); // xr3[31:24] <- src_left[2*stride+3] (l2) ; S32LDIV(xr4, src_left, MB_CHROM_EDGED_WIDTH, 0x0); // xr9[31:24] <- src_left[3*stride+3] (l3) ; S32SFL(xr5, xr2, xr1, xr0, ptn2); // xr5[31:16] <- l1, l0 ; S32SFL(xr6, xr4, xr3, xr0, ptn2); // xr4[31:16] <- l3, l2 ; S32SFL(xr7, xr6, xr5, xr0, ptn3); // xr7[31: 0] <- l3, l2, l1, l0 ; S32LDIV(xr1, src_left, MB_CHROM_EDGED_WIDTH, 0x0); S32LDIV(xr2, src_left, MB_CHROM_EDGED_WIDTH, 0x0); S32LDIV(xr3, src_left, MB_CHROM_EDGED_WIDTH, 0x0); S32LDIV(xr4, src_left, MB_CHROM_EDGED_WIDTH, 0x0); S32SFL(xr5, xr2, xr1, xr0, ptn2); S32SFL(xr6, xr4, xr3, xr0, ptn2); S32SFL(xr8, xr6, xr5, xr0, ptn3); // xr8[31:0] <- l7, l6, l5, l4 ; // AVG D8SUMC(xr1, xr11, xr7); Q16ADD_AA_XW(xr2, xr1, xr1, xr0); D32SLR(xr3, xr2, xr0, xr0, 0x3); S32SFL(xr4, xr3, xr3, xr0, ptn0); S32SFL(xr0, xr4, xr4, xr5, ptn3); D8SUMC(xr1, xr12, xr8); Q16ADD_AA_XW(xr2, xr1, xr1, xr0); D32SLR(xr3, xr2, xr0, xr0, 0x3); S32SFL(xr4, xr3, xr3, xr0, ptn0); S32SFL(xr0, xr4, xr4, xr6, ptn3); D32SLR(xr2, xr1, xr0, xr0, 0x2); S32SFL(xr3, xr2, xr2, xr4, ptn0); S32SFL(xr0, xr3, xr3, xr8, ptn3); S32SFL(xr0, xr4, xr4, xr9, ptn3); // store S32STD(xr5, dst, 0x0); S32STD(xr8, dst, 0x4); S32SDIV(xr5, dst, MB_CHROM_EDGED_WIDTH, 0x0); S32STD(xr8, dst, 0x4); S32SDIV(xr5, dst, MB_CHROM_EDGED_WIDTH, 0x0); S32STD(xr8, dst, 0x4); S32SDIV(xr5, dst, MB_CHROM_EDGED_WIDTH, 0x0); S32STD(xr8, dst, 0x4); S32SDIV(xr9, dst, MB_CHROM_EDGED_WIDTH, 0x0); S32STD(xr6, dst, 0x4); S32SDIV(xr9, dst, MB_CHROM_EDGED_WIDTH, 0x0); S32STD(xr6, dst, 0x4); 
S32SDIV(xr9, dst, MB_CHROM_EDGED_WIDTH, 0x0); S32STD(xr6, dst, 0x4); S32SDIV(xr9, dst, MB_CHROM_EDGED_WIDTH, 0x0); S32STD(xr6, dst, 0x4); }
/*
 * 8x8 top-only DC intra prediction using MXU SIMD intrinsics.
 * Computes two DC values from the 8 top neighbours (left half -> xr5,
 * right half -> xr6 — per the D8SUMC split, confirm against MXU manual)
 * and writes them to all 8 rows of the block at `dst`.
 * `src` is unused but kept for a uniform prediction-function signature.
 * Fix vs original: removed unused local `unsigned int i;`.
 */
static void pred8x8_top_dc_mxu(uint8_t *dst, uint8_t *src, uint8_t *top){
    uint8_t *src_top;                   // top-neighbour address
    src_top = top;

    // load top row: xr7 = t3..t0, xr8 = t7..t4
    S32LDD(xr7, src_top, 0x0);
    S32LDD(xr8, src_top, 0x4);

    // AVG: sum each 4-byte half, >> 2, splat into xr5 / xr6
    D8SUMC(xr1, xr7, xr8);
    D32SLR(xr2, xr1, xr0, xr0, 0x2);
    S32SFL(xr3, xr2, xr2, xr4, ptn0);
    S32SFL(xr0, xr3, xr3, xr5, ptn3);
    S32SFL(xr0, xr4, xr4, xr6, ptn3);

    // store row 0, then 7 more rows advancing by the chroma stride
    S32STD(xr5, dst, 0x0);
    S32STD(xr6, dst, 0x4);
    S32SDIV(xr5, dst, MB_CHROM_EDGED_WIDTH, 0x0);
    S32STD(xr6, dst, 0x4);
    S32SDIV(xr5, dst, MB_CHROM_EDGED_WIDTH, 0x0);
    S32STD(xr6, dst, 0x4);
    S32SDIV(xr5, dst, MB_CHROM_EDGED_WIDTH, 0x0);
    S32STD(xr6, dst, 0x4);
    S32SDIV(xr5, dst, MB_CHROM_EDGED_WIDTH, 0x0);
    S32STD(xr6, dst, 0x4);
    S32SDIV(xr5, dst, MB_CHROM_EDGED_WIDTH, 0x0);
    S32STD(xr6, dst, 0x4);
    S32SDIV(xr5, dst, MB_CHROM_EDGED_WIDTH, 0x0);
    S32STD(xr6, dst, 0x4);
    S32SDIV(xr5, dst, MB_CHROM_EDGED_WIDTH, 0x0);
    S32STD(xr6, dst, 0x4);
}
/*
 * 16x16 top-only DC intra prediction using MXU SIMD intrinsics.
 * Sums all 16 top neighbours, rounds/shifts by 4 (>> 0x4 plus the
 * D8SUMC carry — confirm rounding against the MXU manual), splats the
 * DC byte across xr1 and writes it to all 16 rows at `dst`.
 * `src` is unused but kept for a uniform prediction-function signature.
 */
static void pred16x16_top_dc_mxu(uint8_t *dst, uint8_t *src, uint8_t *top){
    uint8_t *src_top;                   // top-neighbour address
    unsigned int i;
    src_top = top;

    // load 16 top pixels into xr1..xr4
    S32LDD(xr1, src_top, 0x0);
    S32LDD(xr2, src_top, 0x4);
    S32LDD(xr3, src_top, 0x8);
    S32LDD(xr4, src_top, 0xc);

    // AVG: byte-sum pairs, add halves, >> 4, splat into xr1
    D8SUMC(xr1, xr1, xr2);
    D8SUMC(xr2, xr3, xr4);
    Q16ADD_AA_WW(xr5, xr1, xr2, xr0);
    Q16ADD_AA_XW(xr7, xr5, xr5, xr0);
    D32SLR(xr8, xr7, xr0, xr0, 0x4);
    S32SFL(xr9, xr8, xr8, xr0, ptn0);
    S32SFL(xr0, xr9, xr9, xr1, ptn3);

    // store: back up one stride so S32SDIV lands on row 0 first
    dst -= MB_LUMA_EDGED_WIDTH;
    for(i=0; i<16; i++){
        S32SDIV(xr1, dst, MB_LUMA_EDGED_WIDTH, 0x0);
        S32STD(xr1, dst, 0x4);
        S32STD(xr1, dst, 0x8);
        S32STD(xr1, dst, 0xc);
    }
}
// MODE 2
/*
 * 16x16 DC intra prediction using MXU SIMD intrinsics.
 * Loads 16 top neighbours (xr11..xr14) and 16 left neighbours (packed
 * into xr7..xr10, four rows per 7-instruction group), sums all 32 and
 * shifts by 5 to form the DC, then splats it over the whole 16x16 block.
 */
static void pred16x16_dc_mxu(uint8_t *dst, uint8_t *src, uint8_t *top){
    uint8_t *src_top;                   // top-neighbour address
    uint8_t *src_left;                  // left-neighbour address
    unsigned int i;
    src_top = top;
    src_left = src - 0x4;

    // load top: xr11..xr14 = t0..t15
    S32LDD(xr11, src_top, 0x0);
    S32LDD(xr12, src_top, 0x4);
    S32LDD(xr13, src_top, 0x8);
    S32LDD(xr14, src_top, 0xc);

    // load left (4 x 7 = 28 instructions)
    S32LDD(xr1, src_left, 0x0);                         // xr1[31:24] <- l0
    S32LDIV(xr2, src_left, MB_LUMA_EDGED_WIDTH, 0x0);   // xr2[31:24] <- l1
    S32LDIV(xr3, src_left, MB_LUMA_EDGED_WIDTH, 0x0);   // xr3[31:24] <- l2
    S32LDIV(xr4, src_left, MB_LUMA_EDGED_WIDTH, 0x0);   // xr4[31:24] <- l3
    S32SFL(xr5, xr2, xr1, xr0, ptn2);                   // xr5[31:16] <- l1, l0
    S32SFL(xr6, xr4, xr3, xr0, ptn2);                   // xr6[31:16] <- l3, l2
    S32SFL(xr7, xr6, xr5, xr0, ptn3);                   // xr7  <- l3, l2, l1, l0
    S32LDIV(xr1, src_left, MB_LUMA_EDGED_WIDTH, 0x0);
    S32LDIV(xr2, src_left, MB_LUMA_EDGED_WIDTH, 0x0);
    S32LDIV(xr3, src_left, MB_LUMA_EDGED_WIDTH, 0x0);
    S32LDIV(xr4, src_left, MB_LUMA_EDGED_WIDTH, 0x0);
    S32SFL(xr5, xr2, xr1, xr0, ptn2);
    S32SFL(xr6, xr4, xr3, xr0, ptn2);
    S32SFL(xr8, xr6, xr5, xr0, ptn3);                   // xr8  <- l7, l6, l5, l4
    S32LDIV(xr1, src_left, MB_LUMA_EDGED_WIDTH, 0x0);
    S32LDIV(xr2, src_left, MB_LUMA_EDGED_WIDTH, 0x0);
    S32LDIV(xr3, src_left, MB_LUMA_EDGED_WIDTH, 0x0);
    S32LDIV(xr4, src_left, MB_LUMA_EDGED_WIDTH, 0x0);
    S32SFL(xr5, xr2, xr1, xr0, ptn2);
    S32SFL(xr6, xr4, xr3, xr0, ptn2);
    S32SFL(xr9, xr6, xr5, xr0, ptn3);                   // xr9  <- l11, l10, l9, l8
    S32LDIV(xr1, src_left, MB_LUMA_EDGED_WIDTH, 0x0);
    S32LDIV(xr2, src_left, MB_LUMA_EDGED_WIDTH, 0x0);
    S32LDIV(xr3, src_left, MB_LUMA_EDGED_WIDTH, 0x0);
    S32LDIV(xr4, src_left, MB_LUMA_EDGED_WIDTH, 0x0);
    S32SFL(xr5, xr2, xr1, xr0, ptn2);
    S32SFL(xr6, xr4, xr3, xr0, ptn2);
    S32SFL(xr10, xr6, xr5, xr0, ptn3);                  // xr10 <- l15, l14, l13, l12

    // AVG: sum 16 top + 16 left, >> 5, splat DC byte into xr1
    D8SUMC(xr1, xr11, xr12);
    D8SUMC(xr2, xr13, xr14);
    D8SUMC(xr3, xr7, xr8);
    D8SUMC(xr4, xr9, xr10);
    Q16ADD_AA_WW(xr5, xr1, xr2, xr0);
    Q16ACC_AA(xr5, xr3, xr4, xr0);
    Q16ADD_AA_XW(xr7, xr5, xr5, xr0);
    D32SLR(xr8, xr7, xr0, xr0, 0x5);
    S32SFL(xr9, xr8, xr8, xr0, ptn0);
    S32SFL(xr0, xr9, xr9, xr1, ptn3);

    // store: back up one stride so S32SDIV lands on row 0 first
    dst -= MB_LUMA_EDGED_WIDTH;
    for(i=0; i<16; i++){
        S32SDIV(xr1, dst, MB_LUMA_EDGED_WIDTH, 0x0);
        S32STD(xr1, dst, 0x4);
        S32STD(xr1, dst, 0x8);
        S32STD(xr1, dst, 0xc);
    }
}
// MODE 2 static void pred4x4_dc_mxu(uint8_t *dst, uint8_t *src, uint8_t *topright, uint8_t *top, uint8_t *topleft){ uint8_t *src_left; // left address src_left = src - 0x4; //load S32LDD(xr8, top, 0x0); //xr8 <- src_top[0] ; // xr8: t3, t2, t1, t0 ; high -> low, [31->0]; S32LDD(xr1, src_left, 0x0); // xr1[31:24] <- src_left[3] (l0) ; S32LDIV(xr2, src_left, MB_LUMA_EDGED_WIDTH, 0x0); // xr2[31:24] <- src_left[stride+3] (l1) ; S32LDIV(xr3, src_left, MB_LUMA_EDGED_WIDTH, 0x0); // xr3[31:24] <- src_left[2*stride+3] (l2) ; S32LDIV(xr4, src_left, MB_LUMA_EDGED_WIDTH, 0x0); // xr4[31:24] <- src_left[3*stride+3] (l3) ; S32SFL(xr5, xr2, xr1, xr0, ptn2); // xr5[31:16] <- l1, l0 ; S32SFL(xr6, xr4, xr3, xr0, ptn2); // xr6[31:16] <- l3, l2 ; S32SFL(xr1, xr6, xr5, xr0, ptn3); // xr1[31: 0] <- l3, l2, l1, l0 ; //avg D8SUMC(xr2, xr1, xr8); Q16ADD_AA_XW(xr3, xr2, xr2, xr0); D32SLR(xr4, xr3, xr0, xr0, 0x3); S32SFL(xr6, xr4, xr4, xr0, ptn0); S32SFL(xr0, xr6, xr6, xr7, ptn3); //store S32STD(xr7, dst, 0x0); S32SDIV(xr7, dst, MB_LUMA_EDGED_WIDTH, 0x0); S32SDIV(xr7, dst, MB_LUMA_EDGED_WIDTH, 0x0); S32SDIV(xr7, dst, MB_LUMA_EDGED_WIDTH, 0x0); }
/*
 * MPEG intra-block dequantization (64 coefficients) using MXU intrinsics.
 * data[0] (DC) is scaled by `dcscalar` and clamped to [-2048, 2047];
 * data[1..63] are scaled as abs(level) * (intra_matrix[i] * quant) >> 3
 * with the sign restored (S32CPS) and the same clamp, two coefficients
 * per loop iteration. The DC result is held in xr6 and written back
 * last via S16STD at offset -62*2 (i.e. back to data[0], since the loop
 * leaves `data` pointing at the last pair). Always returns 0.
 * NOTE(review): no mismatch-control/oddification step is visible here —
 * presumably handled by the caller; confirm.
 */
uint32_t dequant_mpeg_intra_mxu(int16_t * data, // const int16_t * coeff,
                                const uint32_t quant,
                                const uint32_t dcscalar,
                                const uint16_t * mpeg_quant_matrices)
{
    const uint16_t *intra_matrix = mpeg_quant_matrices;
    int32_t i = 0;

    /* deal with data[0] then save to xr6 */
    S32I2M(xr3,-2048);                  // clamp lower bound
    S32I2M(xr4,2047);                   // clamp upper bound
    S32I2M(xr5,quant);
    S32MUL(xr0,xr6,(int32_t)data[0],dcscalar);
    S32LUI(xr9,1,0);
    D16MUL_WW(xr0,xr6,xr9,xr6);
    S32MIN(xr6,xr6,xr4);
    S32MAX(xr6,xr6,xr3);

    // pre-decrement so S32LDI's +4 lands on data[0]/matrix[0] first
    data-=2;
    intra_matrix-=2;
    for (i = 0; i < 32; i++) {          // 2 coefficients per iteration
        S32LDI(xr1,data,4);
        S32LDI(xr2,intra_matrix,4);
        D16MUL_LW(xr13,xr9,xr1,xr14);   // resave values of data[i] and data[i+1]
        D16CPS(xr1,xr1,xr1);            // abs()
        /* abs(level) *( intra_matrix[i]*quant) >> 3 */
        D16MUL_LW(xr7,xr5,xr2,xr8);
        S32SFL(xr15,xr7,xr8,xr2,3);
        D16MUL_WW(xr7,xr1,xr2,xr8);
        D32SLR(xr7,xr7,xr8,xr8,3);
        /* -2048 < data[i+1] < 2047 */
        S32CPS(xr7,xr7,xr13);           // restore original sign
        S32MAX(xr10,xr7,xr3);
        S32MIN(xr10,xr10,xr4);
        /* -2048 < data[i] < 2047 */
        S32CPS(xr8,xr8,xr14);
        S32MAX(xr11,xr8,xr3);
        S32MIN(xr11,xr11,xr4);
        S32SFL(xr0,xr10,xr11,xr12,3);
        S32STD(xr12,data,0);
    }
    S16STD(xr6,data,-62*2,0);//xr6 to data[0]
    return(0);
}
/*
 * 4x4 top-only DC intra prediction using MXU SIMD intrinsics.
 * Averages the 4 top neighbours ((sum + 2) >> 2 per D8SUMC/D32SLR —
 * rounding behaviour from the MXU ISA), splats the DC byte into xr7
 * and writes it to the 4 rows at `dst`.
 * `src`, `topright` and `topleft` are unused; kept for the common
 * 4x4 prediction signature.
 */
static void pred4x4_top_dc_mxu(uint8_t *dst, uint8_t *src, uint8_t *topright, uint8_t *top, uint8_t *topleft){
    //load: xr1 = t3, t2, t1, t0
    S32LDD(xr1, top, 0x0);

    //avg: sum 4 bytes, >> 2, splat into xr7
    D8SUMC(xr2, xr0, xr1);
    D32SLR(xr3, xr2, xr0, xr0, 0x2);
    S32SFL(xr0, xr3, xr3, xr4, ptn0);
    S32SFL(xr0, xr4, xr4, xr7, ptn3);

    //store: row 0, then 3 more rows via stride-advancing stores
    S32STD(xr7, dst, 0x0);
    S32SDIV(xr7, dst, MB_LUMA_EDGED_WIDTH, 0x0);
    S32SDIV(xr7, dst, MB_LUMA_EDGED_WIDTH, 0x0);
    S32SDIV(xr7, dst, MB_LUMA_EDGED_WIDTH, 0x0);
}
// MODE 8 static void pred4x4_horizontal_up_mxu(uint8_t *dst, uint8_t *src, uint8_t *topright, uint8_t *top, uint8_t *topleft){ uint8_t *src_left; // left address src_left = src - 0x4; //load S32LDD(xr1, src_left, 0x0); // xr1[31:24] <- src_left[3] (l0) ; S32LDIV(xr2, src_left, MB_LUMA_EDGED_WIDTH, 0x0); // xr2[31:24] <- src_left[stride+3] (l1) ; S32LDIV(xr3, src_left, MB_LUMA_EDGED_WIDTH, 0x0); // xr3[31:24] <- src_left[2*stride+3] (l2) ; S32LDIV(xr4, src_left, MB_LUMA_EDGED_WIDTH, 0x0); // xr4[31:24] <- src_left[3*stride+3] (l3) ; S32SFL(xr5, xr2, xr1, xr0, ptn2); // xr5[31:16] <- l1, l0 ; S32SFL(xr6, xr4, xr3, xr0, ptn2); // xr6[31:16] <- l3, l2 ; S32SFL(xr1, xr6, xr5, xr0, ptn3); // xr1[31: 0] <- l3, l2, l1, l0 ; D32SLL(xr2, xr1, xr0, xr0, 0x8); // xr2: l2, l1, l0, 0 ; S32SFL(xr3, xr1, xr1, xr0, ptn0); // xr3: l3, l3, l2, l2; Q8AVGR(xr4, xr1, xr2); // xr4: src[2,1]/src[0,2], src[2,0]/src[0,1], src[0,0], ~ ; Q8AVG(xr5, xr2, xr3); Q8AVGR(xr6, xr5, xr1); // xr6: src[3,1]/src[1,2], src[3,0]/src[1,1], src[1,0], ~ ; S32SFL(xr7, xr6, xr4, xr0, ptn0); // xr7: src[3,1]/src[1,2], src[2,1]/src[0,2], // src[3,0]/src[1,1], src[2,0]/src[0,1]; D32SLR(xr8, xr4, xr6, xr9, 0x8); // xr8: 0, src[2,1]/src[0,2], src[2,0]/src[0,1], src[0,0] ; // xr9: 0, src[3,1]/src[1,2], src[3,0]/src[1,1], src[1,0] ; S32SFL(xr0, xr9, xr8, xr10, ptn0); // xr10: src[3,0], src[2,0], src[1,0], src[0,0] ; S32SFL(xr11, xr3, xr7, xr0, ptn3); // xr11: l3, l3, src[3,1]/src[1,2], src[2,1]/src[0,2] ; S32SFL(xr12, xr3, xr3, xr0, ptn3); // xr12: l3, l3, l3, l3 ; //store S32STD(xr10, dst, 0x0); S32SDIV(xr7, dst, MB_LUMA_EDGED_WIDTH, 0x0); S32SDIV(xr11, dst, MB_LUMA_EDGED_WIDTH, 0x0); S32SDIV(xr12, dst, MB_LUMA_EDGED_WIDTH, 0x0); }
//---------- other DC modes ------------ static void pred4x4_left_dc_mxu(uint8_t *dst, uint8_t *src, uint8_t *topright, uint8_t *top, uint8_t *topleft){ uint8_t *src_left; // left address src_left = src - 0x4; //load S32LDD(xr1, src_left, 0x0); // xr1[31:24] <- src_left[3] (l0) ; S32LDIV(xr2, src_left, MB_LUMA_EDGED_WIDTH, 0x0); // xr2[31:24] <- src_left[stride+3] (l1) ; S32LDIV(xr3, src_left, MB_LUMA_EDGED_WIDTH, 0x0); // xr3[31:24] <- src_left[2*stride+3] (l2) ; S32LDIV(xr4, src_left, MB_LUMA_EDGED_WIDTH, 0x0); // xr4[31:24] <- src_left[3*stride+3] (l3) ; S32SFL(xr5, xr2, xr1, xr0, ptn2); // xr5[31:16] <- l1, l0 ; S32SFL(xr6, xr4, xr3, xr0, ptn2); // xr6[31:16] <- l3, l2 ; S32SFL(xr7, xr6, xr5, xr0, ptn3); // xr7[31: 0] <- l3, l2, l1, l0 ; //avg D8SUMC(xr2, xr0, xr7); D32SLR(xr8, xr2, xr0, xr0, 0x2); S32SFL(xr0, xr8, xr8, xr9, ptn0); S32SFL(xr0, xr9, xr9, xr1, ptn3); //store S32STD(xr1, dst, 0x0); S32SDIV(xr1, dst, MB_LUMA_EDGED_WIDTH, 0x0); S32SDIV(xr1, dst, MB_LUMA_EDGED_WIDTH, 0x0); S32SDIV(xr1, dst, MB_LUMA_EDGED_WIDTH, 0x0); }
// MODE 3 static void pred16x16_plane_mxu(uint8_t *dst, uint8_t *src, uint8_t *top){ int i, j, k, a; uint8_t *src_top; // top address uint8_t *src_topleft, *src_left; // left address src_top = top; src_topleft = src_top - 0x14; src_left = src - 0x4; //----- H, LOAD ----- S32LDD(xr1, src_top, -0x14); // xr1 <- src_top[-4]; xr1: lt, 0, 0, 0 ; S32LDD(xr5, src_top, 0x0); // xr5 <- src_top[0] ; xr5: t3, t2, t1, t0 ; S32LDD(xr2, src_top, 0x4); // xr2 <- src_top[4] ; xr2: t7, t6, t5, t4 ; S32LDDR(xr3, src_top, 0x8); // xr3 <- src_top[8] ; xr3: t8, t9, t10, t11 ; S32LDDR(xr4, src_top, 0xc); // xr4 <- src_top[12]; xr4: t12, t13, t14, t15 ; S32ALNI(xr1, xr5, xr1, ptn1); // xr1: t2, t1, t0, lt ; S32ALNI(xr2, xr2, xr5, ptn1); // xr2: t6, t5, t4, t3 ; ---xr5 is free to use ; S32I2M(xr9, MUL_12); // xr9 : 0x00010002 ; S32I2M(xr10, MUL_34); // xr10: 0x00030004 ; //----- H, SUM ----- Q8ADDE_SS(xr5, xr3, xr2, xr6); // xr5[31:16] <- t8-t6 ; xr5[15:0] <- t9-t5 ; // xr6[31:16] <- t10-t4; xr6[15:0] <- t11-t3; S32I2M(xr11, MUL_56); // xr11: 0x00050006 ; D16MUL_WW(xr13, xr9, xr5, xr14); // xr13 <- 1*(t8-t6) ; xr14 <- 2*(t9-t5) ; D16MAC_AA_WW(xr13, xr10, xr6, xr14); // xr13 <- 1*(t8-t6)+3*(t10-t4) ; xr14 <- 2*(t9-t5)+4*(t11-t3) ; Q8ADDE_SS(xr5, xr4, xr1, xr6); // xr5[31:16] <- t12-t2; xr5[15:0] <- t13-t1; // xr6[31:16] <- t14-t0; xr6[15:0] <- t15-lt; S32I2M(xr12, MUL_78); // xr12: 0x00070008 ; D16MAC_AA_WW(xr13, xr11, xr5, xr14); // xr13 <- 1*(t8-t6)+3*(t10-t4)+5*(t12-t2) ; // xr14 <- 2*(t9-t5)+4*(t11-t3)+6*(t13-t1) ; D16MAC_AA_WW(xr13, xr12, xr6, xr14); // xr13 <- 1*(t8-t6)+3*(t10-t4)+5*(t12-t2)+7*(t14-t0) ; // xr14 <- 2*(t9-t5)+4*(t11-t3)+6*(t13-t1)+8*(t15-lt) ; S32LDD(xr1, src_topleft, 0x0); // xr1[31:24] <- src_topleft[3] (lt) ; S32LDD(xr2, src_left, 0x0); // xr2[31:24] <- src_topleft[stride+3] (l0) ; D32ADD_AA(xr15, xr13, xr14, xr0); // xr15 <- 1*(t8-t6)+3*(t10-t4)+5*(t12-t2)+7*(t14-t0) // + 2*(t9-t5)+4*(t11-t3)+6*(t13-t1)+8*(t15-lt) ; //----- V, LOAD ----- // S32LDD(xr1, 
src_topleft, 0x0); // xr1[31:24] <- src_topleft[3] (lt) ; // S32LDIV(xr2, src_topleft, stride, 0x0); // xr2[31:24] <- src_topleft[stride+3] (l0) ; S32LDIV(xr3, src_left, MB_LUMA_EDGED_WIDTH, 0x0); // xr3[31:24] <- src_topleft[2*stride+3] (l1) ; S32LDIV(xr8, src_left, MB_LUMA_EDGED_WIDTH, 0x0); // xr9[31:24] <- src_topleft[3*stride+3] (l2) ; S32SFL(xr5, xr2, xr1, xr0, ptn2); // xr5[31:16] <- l0, lt ; S32SFL(xr6, xr8, xr3, xr0, ptn2); // xr8[31:16] <- l2, l1 ; S32SFL(xr7, xr6, xr5, xr0, ptn3); // xr7[31: 0] <- l2, l1, l0, lt ; S32LDIV(xr1, src_left, MB_LUMA_EDGED_WIDTH, 0x0); S32LDIV(xr2, src_left, MB_LUMA_EDGED_WIDTH, 0x0); S32LDIV(xr3, src_left, MB_LUMA_EDGED_WIDTH, 0x0); S32LDIV(xr8, src_left, MB_LUMA_EDGED_WIDTH, 0x0); S32SFL(xr5, xr2, xr1, xr0, ptn2); S32SFL(xr6, xr8, xr3, xr0, ptn2); S32SFL(xr13, xr6, xr5, xr0, ptn3); // xr13[31:0] <- l6, l5, l4, l3 ; src_left += MB_LUMA_EDGED_WIDTH; S32LDIV(xr8, src_left, MB_LUMA_EDGED_WIDTH, 0x0); S32LDIV(xr3, src_left, MB_LUMA_EDGED_WIDTH, 0x0); S32LDIV(xr2, src_left, MB_LUMA_EDGED_WIDTH, 0x0); S32LDIV(xr1, src_left, MB_LUMA_EDGED_WIDTH, 0x0); S32SFL(xr6, xr8, xr3, xr0, ptn2); S32SFL(xr5, xr2, xr1, xr0, ptn2); S32SFL(xr14, xr6, xr5, xr0, ptn3); // xr14[31:0] <- l8, l9, l10, l11 ; S32LDIV(xr8, src_left, MB_LUMA_EDGED_WIDTH, 0x0); S32LDIV(xr3, src_left, MB_LUMA_EDGED_WIDTH, 0x0); S32LDIV(xr2, src_left, MB_LUMA_EDGED_WIDTH, 0x0); S32LDIV(xr1, src_left, MB_LUMA_EDGED_WIDTH, 0x0); S32SFL(xr6, xr8, xr3, xr0, ptn2); S32SFL(xr5, xr2, xr1, xr0, ptn2); S32SFL(xr1, xr6, xr5, xr0, ptn3); // xr1[31: 0] <- l12, l13, l14, l15 ; //----- V, SUM ----- Q8ADDE_SS(xr5, xr14, xr13, xr6); Q8ADDE_SS(xr2, xr1, xr7, xr3); D16MUL_WW(xr13, xr9, xr5, xr14); D16MAC_AA_WW(xr13, xr10, xr6, xr14); D16MAC_AA_WW(xr13, xr11, xr2, xr14); D16MAC_AA_WW(xr13, xr12, xr3, xr14); D32SLR(xr2, xr11, xr12, xr3, 0x8); // xr2: 0x00000500 ; xr3: 0x00000700 ; D32SLR(xr11, xr2, xr3, xr12, 0x8); //xr11: 0x00000005 ; xr12: 0x00000007 ; D32ADD_AA(xr14, xr13, xr14, xr0); // xr14 
<- 1*(l8-l6)+3*(l10-l4)+5*(l12-l2)+7*(l14-l0) // + 2*(l9-l5)+4*(l11-l3)+6*(l13-l1)+8*(l15-lt) ; //----- P, CAL ----- // D32SLR(xr2, xr11, xr12, xr3, 0x8); // xr2: 0x00000500 ; xr3: 0x00000700 ; // D32SLR(xr11, xr2, xr3, xr12, 0x8); //xr11: 0x00000005 ; xr12: 0x00000007 ; D16MUL_WW(xr0, xr15, xr11, xr2); // xr2: 5*H ; D16MUL_WW(xr0, xr14, xr11, xr3); // xr3: 5*V ; D32SLR(xr8, xr11, xr0, xr0, 0x2); // xr8: 0x00000001 ; D32SLL(xr13, xr8, xr0, xr0, 0x5); //xr13: 0x00000020 ; Q8ACCE_AA(xr0, xr1, xr4, xr8); // xr8[15:0]: src1[0] + src2[16] + 1 D32ADD_AA(xr5, xr2, xr13, xr0); // xr5: 5*H+32 ; D32ADD_AA(xr6, xr3, xr13, xr0); // xr6: 5*V+32 ; D32SLR(xr2, xr5, xr6, xr3, 0x6); // xr2: ( 5*H+32 ) >> 6 ; xr3: ( 5*V+32 ) >> 6 ; // Q8ACCE_AA(xr0, xr1, xr4, xr8); // xr8[15:0]: src1[0] + src2[16] + 1 D32SLL(xr5, xr8, xr0, xr0, 0x4); // xr5[15:0]: 16*(src1[0] + src2[16] + 1) Q16ADD_AA_WW(xr7, xr2, xr3, xr0); // xr7: V+H // S32NOR(xr0, xr0, xr0); // idle S32I2M(xr4, MUX_H16); // xr4: 0x0000ffff ; D16MUL_WW(xr0, xr7, xr12, xr8); // xr8: 7*(V+H) S32SFL(xr0, xr3, xr3, xr14, ptn3); // xr14[31:16]: V ; xr14[15:0]: V ; D32SLL(xr7, xr2, xr0, xr0, 0x1); Q16ADD_SS_WW(xr9, xr5, xr8, xr0); // xr9: 16*(src1[0] + src2[16] + 1) - 7*(V+H) S32SFL(xr0, xr9, xr9, xr5, ptn3); // xr5[31:16]: a ; xr5[15:0]: a ; // S32SFL(xr0, xr3, xr3, xr14, ptn3); // xr14[31:16]: V ; xr14[15:0]: V ; // D32SLL(xr7, xr2, xr0, xr0, 0x1); S32SFL(xr0, xr7, xr7, xr8, ptn3); // xr8[31:16]: 2H ; xr8[15:0]: 2H ; S32AND(xr2, xr4, xr2); Q16ADD_AA_WW(xr15, xr5, xr2, xr0); // xr15[31:16]: a ; xr15[15:0]: a + H ; dst -= MB_LUMA_EDGED_WIDTH; //----- SRC, STORE ----- for (i=0; i<16; i++) { Q16ADD_AA_WW(xr1, xr15, xr8, xr0); Q16ADD_AA_WW(xr2, xr1, xr8, xr0); Q16SAR(xr9, xr15, xr1, xr1, 0x5); Q16ADD_AA_WW(xr3, xr2, xr8, xr0); Q16SAT(xr10, xr9, xr1); Q16ADD_AA_WW(xr4, xr3, xr8, xr0); Q16SAR(xr2, xr2, xr3, xr3, 0x5); Q16ADD_AA_WW(xr5, xr4, xr8, xr0); Q16SAT(xr11, xr2, xr3); Q16ADD_AA_WW(xr6, xr5, xr8, xr0); Q16SAR(xr4, xr4, xr5, xr5, 0x5); 
Q16ADD_AA_WW(xr7, xr6, xr8, xr0); Q16SAR(xr6, xr6, xr7, xr7, 0x5); Q16SAT(xr12, xr4, xr5); Q16SAT(xr13, xr6, xr7); S32SDIVR(xr10, dst, MB_LUMA_EDGED_WIDTH, 0x0); S32STDR(xr11, dst, 0x4); S32STDR(xr12, dst, 0x8); // S32STDR(xr13, dst, 0xc); Q16ADD_AA_WW(xr15, xr15, xr14, xr0); S32STDR(xr13, dst, 0xc); } }
// MODE 3
/*
 * 8x8 plane intra prediction (chroma) using MXU SIMD intrinsics.
 * Computes the plane-mode gradients H (from the top row around t3/t4)
 * and V (from the left column around l3 — note the `src_left += stride`
 * skip of the unused middle row), then a = 16*(t7 + l7 + 1) - 3*(V+H),
 * b = (17*H + 16) >> 5, c = (17*V + 16) >> 5, and rasterizes
 * clip((a + b*x + c*y + 16) >> 5) row by row with Q16 arithmetic and
 * Q16SAT saturation. Register-content comments are from the original
 * author.
 */
static void pred8x8_plane_mxu(uint8_t *dst, uint8_t *src, uint8_t *top){
    unsigned int i;
    uint8_t *src_top;                   // top-neighbour address
    uint8_t *src_topleft, *src_left;    // top-left / left-neighbour addresses
    src_top = top;
    src_topleft = src_top - 0x1c;
    src_left = src - 0x4;

    //----- H, LOAD -----
    S32LDD(xr1, src_top, -0x1c);        // xr1: lt, 0, 0, 0
    S32LDD(xr3, src_top, 0x0);          // xr3: t3, t2, t1, t0
    S32LDDR(xr2, src_top, 0x4);         // xr2: t4, t5, t6, t7 (byte-reversed load)
    S32ALNI(xr1, xr3, xr1, ptn1);       // xr1: t2, t1, t0, lt
    S32I2M(xr8, MUL_12);                // xr8: 0x00010002
    S32I2M(xr9, MUL_34);                // xr9: 0x00030004

    //----- H, SUM: H = sum i=1..4 of i*(t[3+i]-t[3-i]) -----
    Q8ADDE_SS(xr3, xr2, xr1, xr4);      // xr3: t4-t2 | t5-t1 ; xr4: t6-t0 | t7-lt
    S32LDD(xr1, src_topleft, 0x0);      // xr1[31:24] <- lt
    D16MUL_WW(xr5, xr8, xr3, xr6);
    D16MAC_AA_WW(xr5, xr9, xr4, xr6);
    S32LDD(xr12, src_left, 0x0);                        // xr12[31:24] <- l0
    S32LDIV(xr3, src_left, MB_CHROM_EDGED_WIDTH, 0x0);  // xr3[31:24]  <- l1
    D32ADD_AA(xr7, xr5, xr6, xr0);      // xr7 <- H

    //----- V, LOAD: lt, l0..l2 then (skipping one row) l4..l7 -----
    S32LDIV(xr4, src_left, MB_CHROM_EDGED_WIDTH, 0x0);  // xr4[31:24] <- l2
    S32SFL(xr5, xr12, xr1, xr0, ptn2);                  // l0, lt
    S32SFL(xr6, xr4, xr3, xr0, ptn2);                   // l2, l1
    S32SFL(xr10, xr6, xr5, xr0, ptn3);                  // xr10: l2, l1, l0, lt
    src_left += MB_CHROM_EDGED_WIDTH;                   // skip l3 (unused by plane mode)
    S32LDIV(xr4, src_left, MB_CHROM_EDGED_WIDTH, 0x0);
    S32LDIV(xr3, src_left, MB_CHROM_EDGED_WIDTH, 0x0);
    S32LDIV(xr12, src_left, MB_CHROM_EDGED_WIDTH, 0x0);
    S32LDIV(xr1, src_left, MB_CHROM_EDGED_WIDTH, 0x0);
    S32SFL(xr6, xr4, xr3, xr0, ptn2);
    S32SFL(xr5, xr12, xr1, xr0, ptn2);
    S32SFL(xr11, xr6, xr5, xr0, ptn3);                  // xr11: l4, l5, l6, l7

    //----- V, SUM: V = sum i=1..4 of i*(l[3+i]-l[3-i]) -----
    Q8ADDE_SS(xr3, xr11, xr10, xr4);
    S32LUI(xr1, 0x1, ptn0);             // xr1: 0x00000001
    D16MUL_WW(xr5, xr8, xr3, xr6);
    D16MAC_AA_WW(xr5, xr9, xr4, xr6);
    D32ADD_AA(xr13, xr5, xr6, xr0);     // xr13 <- V

    //----- P, CAL ----- useful XRs: xr13 (V), xr7 (H), xr2, xr11
    D32SLL(xr5, xr1, xr1, xr6, 0x4);    // xr5: 0x00000010 ; xr6: 0x00000010
    D32SLL(xr3, xr13, xr7, xr4, 0x4);
    D32ACC_AA(xr5, xr13, xr3, xr0);     // xr5: 17*V+16
    D32ACC_AA(xr6, xr7, xr4, xr0);      // xr6: 17*H+16
    Q8ACCE_AA(xr0, xr2, xr11, xr1);     // xr1[15:0]: t7 + l7 + 1
    D32SLR(xr8, xr5, xr6, xr9, 0x5);    // xr8: (17*V+16)>>5 ; xr9: (17*H+16)>>5
    D32SLL(xr2, xr1, xr0, xr0, 0x4);    // xr2[15:0]: 16*(t7 + l7 + 1)
    Q16ADD_AA_WW(xr7, xr8, xr9, xr0);   // xr7: V+H
    S32I2M(xr4, MUX_H16);               // xr4: 0x0000ffff
    D32SLL(xr12, xr7, xr0, xr0, 0x1);
    D32ADD_AA(xr5, xr12, xr7, xr0);     // xr5: 3*(V+H)
    Q16ADD_SS_WW(xr6, xr2, xr5, xr0);   // xr6: 16*(t7 + l7 + 1) - 3*(V+H)
    S32SFL(xr0, xr8, xr8, xr14, ptn3);  // xr14: V | V  (per-row increment)
    S32SFL(xr0, xr6, xr6, xr5, ptn3);   // xr5: a | a
    D32SLL(xr7, xr9, xr0, xr0, 0x1);
    S32SFL(xr0, xr7, xr7, xr8, ptn3);   // xr8: 2H | 2H
    S32AND(xr9, xr4, xr9);
    Q16ADD_AA_WW(xr15, xr5, xr9, xr0);  // xr15: a | a+H
    dst -= MB_CHROM_EDGED_WIDTH;

    //----- SRC, STORE: per row, 4 pairs of columns, >>5 and saturate -----
    for (i=0; i<8; i++) {
        Q16ADD_AA_WW(xr1, xr15, xr8, xr0);
        Q16ADD_AA_WW(xr2, xr1, xr8, xr0);
        Q16SAR(xr9, xr15, xr1, xr1, 0x5);
        Q16ADD_AA_WW(xr3, xr2, xr8, xr0);
        Q16SAT(xr10, xr9, xr1);
        Q16SAR(xr2, xr2, xr3, xr3, 0x5);
        Q16SAT(xr11, xr2, xr3);
        S32SDIVR(xr10, dst, MB_CHROM_EDGED_WIDTH, 0x0);
        Q16ADD_AA_WW(xr15, xr15, xr14, xr0);    // advance to next row: += V
        S32STDR(xr11, dst, 0x4);
    }
}
/*
 * RV40 4x4 block dequantization using MXU SIMD intrinsics.
 * Reads 4x4 int16 coefficients from `block` and writes dequantized
 * 32-bit values into a 4x4 grid of uint32_t rows at `dst` (row stride
 * 0x10 bytes). `n` selects how much of the block carries data:
 *   n == 1 : only the first column is non-zero (DC path, D16MUL_XW
 *            on the first pair); remaining columns are zeroed.
 *   n == 2 : top-left 2x2-ish region; bottom two rows are zeroed.
 *   else   : full 4x4 dequantized.
 * NOTE(review): assumes xr12 (quant factors) and xr13 (rounding bias)
 * were preloaded by the caller before this helper runs — confirm.
 * Fixes vs original:
 *  - `uint32_t src = block-4;` implicitly converted a pointer to a
 *    32-bit integer (a C constraint violation, and truncation on LP64);
 *    now uses uintptr_t with explicit casts. The macros advance these
 *    as byte addresses (S32LDI/S32SDI pre-increment), so an integer
 *    type is kept rather than a typed pointer.
 *  - removed the dead `#if 0` reference loop and its unused `i`.
 */
static void rv40_dequant4x4(DCTELEM *block, uint32_t *dst, int n)
{
    uintptr_t src = (uintptr_t)(block - 4);   // pre-decremented: first S32LDI(+8) lands on block[0]
    uintptr_t dst_t = (uintptr_t)(dst - 4);   // pre-decremented: first S32SDI(+0x10) lands on dst[0]

    if (n == 1) {
        // only the first column holds coefficients: load 4 pairs down the column
        S32LDI(xr1, src, 0x8);
        S32LDI(xr2, src, 0x8);
        S32LDI(xr7, src, 0x8);
        S32LDI(xr8, src, 0x8);
        D16MUL_XW(xr4, xr12, xr1, xr3);     // XW variant on row 0 (DC handled specially)
        D16MUL_LW(xr10, xr12, xr7, xr9);
        D16MUL_LW(xr14, xr12, xr8, xr15);
        D32ASUM_AA(xr3, xr13, xr13, xr4);   // add rounding bias
        D16MUL_LW(xr6, xr12, xr2, xr5);
        D32SLR(xr3, xr3, xr4, xr4, 4);      // >> 4
        D32ASUM_AA(xr5, xr13, xr13, xr6);
        D32ASUM_AA(xr9, xr13, xr13, xr10);
        D32SLR(xr5, xr5, xr6, xr6, 4);
        D32SLR(xr9, xr9, xr10, xr10, 4);
        D32ASUM_AA(xr15, xr13, xr13, xr14);
        // store first two lanes per row, zero the rest
        S32SDI(xr3, dst_t, 0x10);
        S32STD(xr4, dst_t, 0x4);
        S32STD(xr0, dst_t, 0x8);
        S32STD(xr0, dst_t, 0xc);
        S32SDI(xr5, dst_t, 0x10);
        S32STD(xr6, dst_t, 0x4);
        S32STD(xr0, dst_t, 0x8);
        S32STD(xr0, dst_t, 0xc);
        D32SLR(xr15, xr15, xr14, xr14, 4);
        S32SDI(xr9, dst_t, 0x10);
        S32STD(xr10, dst_t, 0x4);
        S32STD(xr0, dst_t, 0x8);
        S32STD(xr0, dst_t, 0xc);
        S32SDI(xr15, dst_t, 0x10);
        S32STD(xr14, dst_t, 0x4);
        S32STD(xr0, dst_t, 0x8);
        S32STD(xr0, dst_t, 0xc);
    } else if (n == 2) {
        // coefficients only in the top two rows: dequantize them, zero rows 2-3
        S32LDI(xr1, src, 0x8);
        S32LDD(xr2, src, 0x4);
        S32LDI(xr7, src, 0x8);
        S32LDD(xr8, src, 0x4);
        D16MUL_XW(xr4, xr12, xr1, xr3);
        D16MUL_LW(xr10, xr12, xr7, xr9);
        D16MUL_LW(xr14, xr12, xr8, xr15);
        D32ASUM_AA(xr3, xr13, xr13, xr4);
        D16MUL_LW(xr6, xr12, xr2, xr5);
        D32SLR(xr3, xr3, xr4, xr4, 4);
        D32ASUM_AA(xr5, xr13, xr13, xr6);
        D32ASUM_AA(xr9, xr13, xr13, xr10);
        D32SLR(xr5, xr5, xr6, xr6, 4);
        D32SLR(xr9, xr9, xr10, xr10, 4);
        D32ASUM_AA(xr15, xr13, xr13, xr14);
        S32SDI(xr3, dst_t, 0x10);
        S32STD(xr4, dst_t, 0x4);
        S32STD(xr5, dst_t, 0x8);
        S32STD(xr6, dst_t, 0xc);
        D32SLR(xr15, xr15, xr14, xr14, 4);
        S32SDI(xr9, dst_t, 0x10);
        S32STD(xr10, dst_t, 0x4);
        S32STD(xr15, dst_t, 0x8);
        S32STD(xr14, dst_t, 0xc);
        S32SDI(xr0, dst_t, 0x10);
        S32STD(xr0, dst_t, 0x4);
        S32STD(xr0, dst_t, 0x8);
        S32STD(xr0, dst_t, 0xc);
        S32SDI(xr0, dst_t, 0x10);
        S32STD(xr0, dst_t, 0x4);
        S32STD(xr0, dst_t, 0x8);
        S32STD(xr0, dst_t, 0xc);
    } else {
        // full block: two passes of two rows each
        S32LDI(xr1, src, 0x8);
        S32LDD(xr2, src, 0x4);
        S32LDI(xr7, src, 0x8);
        S32LDD(xr8, src, 0x4);
        D16MUL_XW(xr4, xr12, xr1, xr3);     // XW on the pair containing the DC
        D16MUL_LW(xr10, xr12, xr7, xr9);
        D16MUL_LW(xr14, xr12, xr8, xr15);
        D32ASUM_AA(xr3, xr13, xr13, xr4);
        D16MUL_LW(xr6, xr12, xr2, xr5);
        D32SLR(xr3, xr3, xr4, xr4, 4);
        D32ASUM_AA(xr5, xr13, xr13, xr6);
        D32ASUM_AA(xr9, xr13, xr13, xr10);
        D32SLR(xr5, xr5, xr6, xr6, 4);
        D32SLR(xr9, xr9, xr10, xr10, 4);
        D32ASUM_AA(xr15, xr13, xr13, xr14);
        S32SDI(xr3, dst_t, 0x10);
        S32STD(xr4, dst_t, 0x4);
        S32STD(xr5, dst_t, 0x8);
        S32STD(xr6, dst_t, 0xc);
        D32SLR(xr15, xr15, xr14, xr14, 4);
        S32SDI(xr9, dst_t, 0x10);
        S32STD(xr10, dst_t, 0x4);
        S32STD(xr15, dst_t, 0x8);
        S32STD(xr14, dst_t, 0xc);
        S32LDI(xr1, src, 0x8);
        S32LDD(xr2, src, 0x4);
        S32LDI(xr7, src, 0x8);
        S32LDD(xr8, src, 0x4);
        D16MUL_LW(xr4, xr12, xr1, xr3);     // plain LW for rows 2-3
        D16MUL_LW(xr10, xr12, xr7, xr9);
        D16MUL_LW(xr14, xr12, xr8, xr15);
        D32ASUM_AA(xr3, xr13, xr13, xr4);
        D16MUL_LW(xr6, xr12, xr2, xr5);
        D32SLR(xr3, xr3, xr4, xr4, 4);
        D32ASUM_AA(xr5, xr13, xr13, xr6);
        D32ASUM_AA(xr9, xr13, xr13, xr10);
        D32SLR(xr5, xr5, xr6, xr6, 4);
        D32SLR(xr9, xr9, xr10, xr10, 4);
        D32ASUM_AA(xr15, xr13, xr13, xr14);
        S32SDI(xr3, dst_t, 0x10);
        S32STD(xr4, dst_t, 0x4);
        S32STD(xr5, dst_t, 0x8);
        S32STD(xr6, dst_t, 0xc);
        D32SLR(xr15, xr15, xr14, xr14, 4);
        S32SDI(xr9, dst_t, 0x10);
        S32STD(xr10, dst_t, 0x4);
        S32STD(xr15, dst_t, 0x8);
        S32STD(xr14, dst_t, 0xc);
    }
}