/* Per-byte rounded average of two 8-byte rows: *dest = (*src1 + *src2 + 1) / 2.
 * Assumes mm0 has been zeroed by the caller and that `round1` is a file-scope
 * constant holding 0x0001 in each 16-bit lane — NOTE(review): neither is
 * visible in this chunk; confirm. */
static inline void mmx_average_2_U8 (uint8_t * dest, uint8_t * src1, uint8_t * src2)
{
    /* *dest = (*src1 + *src2 + 1)/ 2; */
    movq_m2r (*src1, mm1);        // load 8 src1 bytes
    movq_r2r (mm1, mm2);          // copy 8 src1 bytes
    movq_m2r (*src2, mm3);        // load 8 src2 bytes
    movq_r2r (mm3, mm4);          // copy 8 src2 bytes
    punpcklbw_r2r (mm0, mm1);     // unpack low src1 bytes (bytes -> words)
    punpckhbw_r2r (mm0, mm2);     // unpack high src1 bytes
    punpcklbw_r2r (mm0, mm3);     // unpack low src2 bytes
    punpckhbw_r2r (mm0, mm4);     // unpack high src2 bytes
    paddw_r2r (mm3, mm1);         // add lows to mm1
    paddw_m2r (round1, mm1);      // +1 so the shift rounds to nearest
    psraw_i2r (1, mm1);           // /2
    paddw_r2r (mm4, mm2);         // add highs to mm2
    paddw_m2r (round1, mm2);      // +1 rounding for the high half too
    psraw_i2r (1, mm2);           // /2
    packuswb_r2r (mm2, mm1);      // pack (w/ saturation)
    movq_r2m (mm1, *dest);        // store result in dest
}
/* Exhaustive self-test: verifies that the MMX sequence "widen 8 unsigned
 * reference bytes, add 8 signed 16-bit correction words with saturation,
 * clamp to 0..255, repack" matches the scalar clip() reference for every
 * (byte, word-low-byte) combination.
 * NOTE(review): m_() and clip() are defined elsewhere in this file/project;
 * m_() presumably reinterprets a pointer as an MMX memory operand — confirm. */
int main(int ac, char **av)
{
    int i, j, k, n;
    unsigned char dat0[8] = { 0x01, 0xf2, 0x03, 0x04, 0x05, 0x06, 0xf7, 0x08 };
    long long *datp = (long long *)&dat0;   /* unused — presumably kept for debugger inspection */
    int16_t dat1[8] = { 0x10, 0x20, -0x130, -0x140, 0x50, -0x160, -0x170, 0x80 };
    volatile uint8_t *rfp = dat0;           /* volatile: keep loads/stores ordered around the asm */
    volatile int16_t *bp = dat1;
    unsigned char ans1[8], ans2[8];
    n = 0;
    /* i sweeps every int16 value (truncated to a byte when stored in dat0);
     * j sweeps 0..255 in groups of 8 for the signed correction words. */
    for( i=-32768; i<32768; ++i ) {
        j = 0;
        while( j < 256 ) {
            for( k=0; k<8; ++k ) {
                dat0[k] = i;
                dat1[k] = j++;
            }
            movq_m2r(m_(&rfp[0]),mm1);      /* rfp[0..7] */
            pxor_r2r(mm3,mm3);              /* mm3 = 0 (zero for unpack, later sign mask) */
            pxor_r2r(mm4,mm4);
            movq_m2r(m_(&bp[0]),mm5);       /* bp[0..3] */
            movq_r2r(mm1,mm2);
            movq_m2r(m_(&bp[4]),mm6);       /* bp[4..7] */
            punpcklbw_r2r(mm3,mm1);         /* rfp[0,2,4,6] */
            punpckhbw_r2r(mm3,mm2);         /* rfp[1,3,5,7] */
            paddsw_r2r(mm5,mm1);            /* bp[0..3] */
            paddsw_r2r(mm6,mm2);            /* bp[4..7] */
            pcmpgtw_r2r(mm1,mm3);           /* mm3 = (0 > sum) ? 0xffff : 0 per word */
            pcmpgtw_r2r(mm2,mm4);
            pandn_r2r(mm1,mm3);             /* zero out negative words (clamp low end) */
            pandn_r2r(mm2,mm4);
            packuswb_r2r(mm4,mm3);          /* unsigned-saturating pack clamps the high end */
            movq_r2m(mm3,m_(&ans1[0]));
            emms();
            /* scalar reference for the same 8 lanes */
            ans2[0] = clip(bp[0] + rfp[0]);
            ans2[1] = clip(bp[1] + rfp[1]);
            ans2[2] = clip(bp[2] + rfp[2]);
            ans2[3] = clip(bp[3] + rfp[3]);
            ans2[4] = clip(bp[4] + rfp[4]);
            ans2[5] = clip(bp[5] + rfp[5]);
            ans2[6] = clip(bp[6] + rfp[6]);
            ans2[7] = clip(bp[7] + rfp[7]);
            /* compare all 8 result bytes at once */
            if( *(uint64_t *)&ans1[0] != *(uint64_t *)&ans2[0] ) {
                printf(" i=%5d %02x %02x %02x %02x %02x %02x %02x %02x\n", i,
                       ans1[0], ans1[1], ans1[2], ans1[3],
                       ans1[4], ans1[5], ans1[6], ans1[7]);
                /* note: j has already advanced past this group of 8 */
                printf(" j=%5d %02x %02x %02x %02x %02x %02x %02x %02x\n", j,
                       ans2[0], ans2[1], ans2[2], ans2[3],
                       ans2[4], ans2[5], ans2[6], ans2[7]);
                // exit(0);
            }
            n += 8;                          /* count lanes verified */
        }
    }
    printf("n=%d\n",n);
    return 0;
}
/* Final stage of a filter whose earlier stages (a companion "mmx_start",
 * not visible here) leave partial sums in mm0/mm1 and row data in
 * mm2..mm7 — NOTE(review): the exact preloaded register contract and the
 * constants mm_cpool / mm_lthr / mm_hthr are defined elsewhere; confirm.
 * Subtracts the src5 row, scales the accumulators by 1/8, then blends the
 * filtered value with src3 based on a low/high-threshold comparison mask,
 * writing 8 output bytes to dst[X]. */
static inline void mmx_end(uint8_t *src3, uint8_t *src5, uint8_t *dst, int X)
{
    punpcklbw_m2r (mm_cpool[0], mm4);   /* widen caller-provided bytes, low half */
    punpckhbw_m2r (mm_cpool[0], mm5);   /* widen, high half */
    psubusw_r2r (mm2, mm0);             /* subtract (saturating at 0) */
    psubusw_r2r (mm3, mm1);
    movq_m2r (src5[X], mm2);            /* load src5 row */
    movq_m2r (src5[X], mm3);            /* same 8 bytes again for the high unpack */
    punpcklbw_m2r (mm_cpool[0], mm2);
    punpckhbw_m2r (mm_cpool[0], mm3);
    psubusw_r2r (mm2, mm0);             /* subtract src5 contribution */
    psubusw_r2r (mm3, mm1);
    psrlw_i2r (3, mm0);                 /* /8 */
    psrlw_i2r (3, mm1);
    psubw_r2r (mm6, mm4);               /* signed difference used for thresholding */
    psubw_r2r (mm7, mm5);
    packuswb_r2r (mm1,mm0);             /* filtered result, packed to bytes */
    movq_r2r (mm4, mm6);
    movq_r2r (mm5, mm7);
    pcmpgtw_m2r (mm_lthr, mm4);         /* diff > low threshold ? */
    pcmpgtw_m2r (mm_lthr, mm5);
    pcmpgtw_m2r (mm_hthr, mm6);         /* diff > high threshold ? */
    pcmpgtw_m2r (mm_hthr, mm7);
    packsswb_r2r (mm5, mm4);            /* byte masks */
    packsswb_r2r (mm7, mm6);
    pxor_r2r (mm6, mm4);                /* mask = between low and high threshold */
    movq_r2r (mm4, mm5);
    pandn_r2r (mm0, mm4);               /* filtered value where mask is clear */
    pand_m2r (src3[X], mm5);            /* original src3 where mask is set */
    por_r2r (mm4, mm5);                 /* merge */
    movq_r2m (mm5, dst[X]);             /* store 8 output bytes */
}
/* Horizontal bilinear scaling of a 4-channel (e.g. RGBA) uint8 scanline.
 * For each destination pixel: out = s2 + factor * (s1 - s2), computed in
 * 14-bit fixed point on 4 channels at once.
 * NOTE(review): factor_mask and the LOAD_FACTOR_1_4_NOCLIP macro are defined
 * elsewhere; the macro presumably fills mm7 with the per-pixel factor from
 * `factors` — confirm.  Caller must run emms (need_emms is set). */
static void scale_uint8_x_4_x_bilinear_mmx(gavl_video_scale_context_t * ctx, int scanline, uint8_t * dest_start)
{
    int i;
    uint8_t * src, * dst, *src_start;
    int32_t * factors;
    //  mmx_t tmp_mm;
    /*
     *  mm0: Input1
     *  mm1: Factor mask
     *  mm2:
     *  mm3: Output
     *  mm4:
     *  mm5: Input2
     *  mm6: 0
     *  mm7: Factor
     */
    //  fprintf(stderr, "scale_uint8_x_4_x_bilinear_mmx\n");
    src_start = ctx->src + scanline * ctx->src_stride;
    pxor_r2r(mm6, mm6);                 /* mm6 = 0, used for byte->word unpack */
    movq_m2r(factor_mask, mm1);
    dst = dest_start;
    for(i = 0; i < ctx->dst_size; i++)
    {
        src = src_start + 4*ctx->table_h.pixels[i].index;   /* 4 bytes per pixel */
        factors = ctx->table_h.pixels[i].factor_i;
        /* Load pixels */
        movd_m2r(*(src), mm0);
        punpcklbw_r2r(mm6, mm0);
        psllw_i2r(6, mm0);              /* 14 bit */
        /* Load pixels */
        movd_m2r(*(src+4), mm5);        /* next horizontal pixel */
        punpcklbw_r2r(mm6, mm5);
        psllw_i2r(6, mm5);              /* 14 bit */
        /* Load factors */
        LOAD_FACTOR_1_4_NOCLIP;         /* 14 bit */
        /* Subtract */
        psubsw_r2r(mm5, mm0);           /* s1(mm0) - s2(mm5) -> mm0 (14 bit) */
        pmulhw_r2r(mm7, mm0);           /* factor * (s2 - s1) -> mm0 (12 bit) */
        psllw_i2r(2, mm0);              /* (14 bit) */
        paddsw_r2r(mm5, mm0);           /* (15 bit) */
        psraw_i2r(6, mm0);              /* (8 bit) */
        packuswb_r2r(mm6, mm0);         /* clamp and repack to 4 bytes */
        movd_r2m(mm0, *dst);
        dst+=4;
    }
    ctx->need_emms = 1;                 /* defer emms to the caller */
}
/* Horizontal scaling of a 4-channel uint8 scanline with an arbitrary
 * number of filter taps per destination pixel (generic polyphase filter).
 * Accumulates factor-weighted source pixels in 16-bit fixed point.
 * NOTE(review): factor_mask and LOAD_FACTOR_1_4 are defined elsewhere;
 * LOAD_FACTOR_1_4 presumably loads the current tap's factor into mm7 —
 * confirm.  Caller must run emms (need_emms is set). */
static void scale_uint8_x_4_x_generic_mmx(gavl_video_scale_context_t * ctx, int scanline, uint8_t * dest_start)
{
    int i, j;
    uint8_t * src, * dst, *src_start;
    int32_t * factors;
    //  mmx_t tmp_mm;
    /*
     *  mm0: Input
     *  mm1: factor_mask
     *  mm2: Factor
     *  mm3: Output
     *  mm4:
     *  mm5:
     *  mm6: 0
     *  mm7: scratch
     */
    src_start = ctx->src + scanline * ctx->src_stride;
    pxor_r2r(mm6, mm6);                 /* zero for unpacking */
    movq_m2r(factor_mask, mm1);
    dst = dest_start;
    for(i = 0; i < ctx->dst_size; i++)
    {
        src = src_start + 4*ctx->table_h.pixels[i].index;
        factors = ctx->table_h.pixels[i].factor_i;
        pxor_r2r(mm3, mm3);             /* clear the accumulator */
        for(j = 0; j < ctx->table_h.factors_per_pixel; j++)
        {
            /* Load pixels */
            movd_m2r(*(src), mm0);
            punpcklbw_r2r(mm6, mm0);
            psllw_i2r(7, mm0);          /* 15-bit fixed point */
            /* Load factors */
            LOAD_FACTOR_1_4;
            /* Multiply */
            pmulhw_r2r(mm7, mm0);       /* (pixel * factor) >> 16 */
            paddw_r2r(mm0, mm3);        /* accumulate tap */
            //  DUMP_MM("mm3_2", mm3);
            src += 4;
            factors++;
        }
        psraw_i2r(5, mm3);              /* back to 8-bit range */
        packuswb_r2r(mm6, mm3);         /* clamp + pack */
        movd_r2m(mm3, *dst);
        dst+=4;
    }
    ctx->need_emms = 1;                 /* defer emms to the caller */
}
/* Per-byte rounded average of four 8-byte rows:
 * *dest = (*src1 + *src2 + *src3 + *src4 + 2) / 4.
 * Assumes mm0 is zero and `round4` holds 0x0002 per 16-bit lane —
 * NOTE(review): both are set up elsewhere; confirm. */
static inline void mmx_average_4_U8 (uint8_t * dest, const uint8_t * src1, const uint8_t * src2, const uint8_t * src3, const uint8_t * src4)
{
    /* *dest = (*src1 + *src2 + *src3 + *src4 + 2)/ 4; */
    movq_m2r (*src1, mm1);      /* load 8 src1 bytes */
    movq_r2r (mm1, mm2);        /* copy 8 src1 bytes */
    punpcklbw_r2r (mm0, mm1);   /* unpack low src1 bytes */
    punpckhbw_r2r (mm0, mm2);   /* unpack high src1 bytes */
    movq_m2r (*src2, mm3);      /* load 8 src2 bytes */
    movq_r2r (mm3, mm4);        /* copy 8 src2 bytes */
    punpcklbw_r2r (mm0, mm3);   /* unpack low src2 bytes */
    punpckhbw_r2r (mm0, mm4);   /* unpack high src2 bytes */
    paddw_r2r (mm3, mm1);       /* add lows */
    paddw_r2r (mm4, mm2);       /* add highs */
    /* now have partials in mm1 and mm2 */
    movq_m2r (*src3, mm3);      /* load 8 src3 bytes */
    movq_r2r (mm3, mm4);        /* copy 8 src3 bytes */
    punpcklbw_r2r (mm0, mm3);   /* unpack low src3 bytes */
    punpckhbw_r2r (mm0, mm4);   /* unpack high src3 bytes */
    paddw_r2r (mm3, mm1);       /* add lows */
    paddw_r2r (mm4, mm2);       /* add highs */
    movq_m2r (*src4, mm5);      /* load 8 src4 bytes */
    movq_r2r (mm5, mm6);        /* copy 8 src4 bytes */
    punpcklbw_r2r (mm0, mm5);   /* unpack low src4 bytes */
    punpckhbw_r2r (mm0, mm6);   /* unpack high src4 bytes */
    paddw_r2r (mm5, mm1);       /* add lows */
    paddw_r2r (mm6, mm2);       /* add highs */
    /* now have subtotal in mm1 and mm2 */
    paddw_m2r (round4, mm1);    /* +2 so the shift rounds to nearest */
    psraw_i2r (2, mm1);         /* /4 */
    paddw_m2r (round4, mm2);
    psraw_i2r (2, mm2);         /* /4 */
    packuswb_r2r (mm2, mm1);    /* pack (w/ saturation) */
    movq_r2m (mm1, *dest);      /* store result in dest */
}
/* Per-byte rounded average of four 8-byte rows:
 * *dest = (*src1 + *src2 + *src3 + *src4 + 2) / 4.
 * Assumes mm0 is zero and `round4` holds 0x0002 per 16-bit lane (set up
 * elsewhere — confirm).
 * NOTE(review): this duplicates the const-qualified mmx_average_4_U8 above;
 * the two likely come from different source files — verify only one of them
 * ends up in any single translation unit. */
static inline void mmx_average_4_U8 (uint8_t * dest, uint8_t * src1, uint8_t * src2, uint8_t * src3, uint8_t * src4)
{
    /* *dest = (*src1 + *src2 + *src3 + *src4 + 2)/ 4; */
    movq_m2r (*src1, mm1);      // load 8 src1 bytes
    movq_r2r (mm1, mm2);        // copy 8 src1 bytes
    punpcklbw_r2r (mm0, mm1);   // unpack low src1 bytes
    punpckhbw_r2r (mm0, mm2);   // unpack high src1 bytes
    movq_m2r (*src2, mm3);      // load 8 src2 bytes
    movq_r2r (mm3, mm4);        // copy 8 src2 bytes
    punpcklbw_r2r (mm0, mm3);   // unpack low src2 bytes
    punpckhbw_r2r (mm0, mm4);   // unpack high src2 bytes
    paddw_r2r (mm3, mm1);       // add lows
    paddw_r2r (mm4, mm2);       // add highs
    /* now have partials in mm1 and mm2 */
    movq_m2r (*src3, mm3);      // load 8 src3 bytes
    movq_r2r (mm3, mm4);        // copy 8 src3 bytes
    punpcklbw_r2r (mm0, mm3);   // unpack low src3 bytes
    punpckhbw_r2r (mm0, mm4);   // unpack high src3 bytes
    paddw_r2r (mm3, mm1);       // add lows
    paddw_r2r (mm4, mm2);       // add highs
    movq_m2r (*src4, mm5);      // load 8 src4 bytes
    movq_r2r (mm5, mm6);        // copy 8 src4 bytes
    punpcklbw_r2r (mm0, mm5);   // unpack low src4 bytes
    punpckhbw_r2r (mm0, mm6);   // unpack high src4 bytes
    paddw_r2r (mm5, mm1);       // add lows
    paddw_r2r (mm6, mm2);       // add highs
    /* now have subtotal in mm1 and mm2 */
    paddw_m2r (round4, mm1);    // +2 so the shift rounds to nearest
    psraw_i2r (2, mm1);         // /4
    paddw_m2r (round4, mm2);
    psraw_i2r (2, mm2);         // /4
    packuswb_r2r (mm2, mm1);    // pack (w/ saturation)
    movq_r2m (mm1, *dest);      // store result in dest
}
/* Deinterlace one luma line with the 5-tap vertical filter
 * (-1, 4, 2, 4, -1) / 8, rounded: 4 pixels per MMX iteration, scalar
 * fallback (deinterlace_line_c, defined elsewhere) for the 1..3 leftover
 * pixels.  psubusw saturates at 0 and packuswb at 255, so the output is
 * clamped to 0..255. */
static void deinterlace_line_mmx (uint8_t * dst, uint8_t * lum_m4, uint8_t * lum_m3, uint8_t * lum_m2, uint8_t * lum_m1, uint8_t * lum, int size)
{
    mmx_t rounder;

    rounder.uw[0] = 4;          /* +4 before >>3 rounds to nearest */
    rounder.uw[1] = 4;
    rounder.uw[2] = 4;
    rounder.uw[3] = 4;
    pxor_r2r (mm7, mm7);        /* mm7 = 0 for unpacking */
    movq_m2r (rounder, mm6);

    for (; size > 3; size -= 4) {
        movd_m2r (*lum_m4, mm0);
        movd_m2r (*lum_m3, mm1);
        movd_m2r (*lum_m2, mm2);
        movd_m2r (*lum_m1, mm3);
        movd_m2r (*lum, mm4);
        punpcklbw_r2r (mm7, mm0);
        punpcklbw_r2r (mm7, mm1);
        punpcklbw_r2r (mm7, mm2);
        punpcklbw_r2r (mm7, mm3);
        punpcklbw_r2r (mm7, mm4);
        paddw_r2r (mm3, mm1);       /* m3 + m1 */
        psllw_i2r (1, mm2);         /* 2*m2 */
        paddw_r2r (mm4, mm0);       /* m4 + m0 (the negative taps) */
        psllw_i2r (2, mm1);         // 2   -> 4*(m3 + m1)
        paddw_r2r (mm6, mm2);       /* + rounding constant */
        paddw_r2r (mm2, mm1);       /* 4*(m3+m1) + 2*m2 + 4 */
        psubusw_r2r (mm0, mm1);     /* subtract negative taps, clamp at 0 */
        psrlw_i2r (3, mm1);         // 3   -> /8
        packuswb_r2r (mm7, mm1);    /* clamp at 255 and pack */
        movd_r2m (mm1, *dst);
        lum_m4 += 4;
        lum_m3 += 4;
        lum_m2 += 4;
        lum_m1 += 4;
        lum += 4;
        dst += 4;
    }
    emms ();

    /* Handle odd widths */
    if (size > 0)
        deinterlace_line_c (dst, lum_m4, lum_m3, lum_m2, lum_m1, lum, size);
}
/* Merge two fields into an 8x8 progressive block.  Even output rows are
 * copied from src1; odd output rows are the weighted blend
 * (src1_top + 6*src2 + src1_bottom + 4) >> 3, computed 4 pixels at a time
 * with saturation on the final pack. */
static inline void XDeint8x8MergeMMXEXT( uint8_t *dst, int i_dst, uint8_t *src1, int i_src1, uint8_t *src2, int i_src2 )
{
    static const uint64_t m_4 = INT64_C(0x0004000400040004);    /* rounding: +4 before >>3 */
    int y, x;

    /* Progressive */
    pxor_r2r( mm7, mm7 );                   /* zero for unpacking */
    for( y = 0; y < 8; y += 2 )
    {
        for( x = 0; x < 8; x +=4 )
        {
            movd_m2r( src1[x], mm0 );
            movd_r2m( mm0, dst[x] );        /* even row: straight copy of src1 */

            movd_m2r( src2[x], mm1 );
            movd_m2r( src1[i_src1+x], mm2 );    /* src1, next line */

            punpcklbw_r2r( mm7, mm0 );      /* bytes -> words */
            punpcklbw_r2r( mm7, mm1 );
            punpcklbw_r2r( mm7, mm2 );
            paddw_r2r( mm1, mm1 );          /* mm1 = 2*src2 */
            movq_r2r( mm1, mm3 );
            paddw_r2r( mm3, mm3 );          /* mm3 = 4*src2 */

            paddw_r2r( mm2, mm0 );          /* top + bottom */
            paddw_r2r( mm3, mm1 );          /* mm1 = 6*src2 */
            paddw_m2r( m_4, mm1 );          /* + rounding */
            paddw_r2r( mm1, mm0 );          /* top + 6*mid + bottom + 4 */
            psraw_i2r( 3, mm0 );            /* /8 */
            packuswb_r2r( mm7, mm0 );       /* clamp to 0..255 */
            movd_r2m( mm0, dst[i_dst+x] );  /* odd row: blended */
        }
        dst += 2*i_dst;
        src1 += i_src1;
        src2 += i_src2;
    }
}
/* Convert 8 Y samples plus 4 U and 4 V samples (4:2:2-style horizontal
 * subsampling) to 8 RGB samples, leaving the packed results in registers:
 * mm0 = 8 blue bytes, mm1 = 8 red bytes, mm2 = 8 green bytes.  The caller
 * interleaves/stores them.  Coefficients are signed 16-bit fixed-point
 * constants for the BT.601-style conversion (Y range 16..235). */
static inline void mmx_yuv2rgb (uint8_t * py, uint8_t * pu, uint8_t * pv)
{
    static mmx_t mmx_80w = {0x0080008000800080LL};      /* 128 per word: chroma bias */
    static mmx_t mmx_U_green = {0xf37df37df37df37dLL};
    static mmx_t mmx_U_blue = {0x4093409340934093LL};
    static mmx_t mmx_V_red = {0x3312331233123312LL};
    static mmx_t mmx_V_green = {0xe5fce5fce5fce5fcLL};
    static mmx_t mmx_10w = {0x1010101010101010LL};      /* 16 per byte: luma offset */
    static mmx_t mmx_00ffw = {0x00ff00ff00ff00ffLL};    /* even-byte mask */
    static mmx_t mmx_Y_coeff = {0x253f253f253f253fLL};

    movd_m2r (*pu, mm0);        /* mm0 = 00 00 00 00 u3 u2 u1 u0 */
    movd_m2r (*pv, mm1);        /* mm1 = 00 00 00 00 v3 v2 v1 v0 */
    movq_m2r (*py, mm6);        /* mm6 = Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */
    pxor_r2r (mm4, mm4);        /* mm4 = 0 */
    /* XXX might do cache preload for image here */

    /*
     * Do the multiply part of the conversion for even and odd pixels
     * register usage:
     * mm0 -> Cblue, mm1 -> Cred, mm2 -> Cgreen even pixels
     * mm3 -> Cblue, mm4 -> Cred, mm5 -> Cgreen odd  pixels
     * mm6 -> Y even, mm7 -> Y odd
     */

    punpcklbw_r2r (mm4, mm0);           /* mm0 = u3 u2 u1 u0 */
    punpcklbw_r2r (mm4, mm1);           /* mm1 = v3 v2 v1 v0 */
    psubsw_m2r (mmx_80w, mm0);          /* u -= 128 */
    psubsw_m2r (mmx_80w, mm1);          /* v -= 128 */
    psllw_i2r (3, mm0);                 /* promote precision */
    psllw_i2r (3, mm1);                 /* promote precision */
    movq_r2r (mm0, mm2);                /* mm2 = u3 u2 u1 u0 */
    movq_r2r (mm1, mm3);                /* mm3 = v3 v2 v1 v0 */
    pmulhw_m2r (mmx_U_green, mm2);      /* mm2 = u * u_green */
    pmulhw_m2r (mmx_V_green, mm3);      /* mm3 = v * v_green */
    pmulhw_m2r (mmx_U_blue, mm0);       /* mm0 = chroma_b */
    pmulhw_m2r (mmx_V_red, mm1);        /* mm1 = chroma_r */
    paddsw_r2r (mm3, mm2);              /* mm2 = chroma_g */
    psubusb_m2r (mmx_10w, mm6);         /* Y -= 16 (saturating at 0) */
    movq_r2r (mm6, mm7);                /* mm7 = Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */
    pand_m2r (mmx_00ffw, mm6);          /* mm6 = Y6 Y4 Y2 Y0 */
    psrlw_i2r (8, mm7);                 /* mm7 = Y7 Y5 Y3 Y1 */
    psllw_i2r (3, mm6);                 /* promote precision */
    psllw_i2r (3, mm7);                 /* promote precision */
    pmulhw_m2r (mmx_Y_coeff, mm6);      /* mm6 = luma_rgb even */
    pmulhw_m2r (mmx_Y_coeff, mm7);      /* mm7 = luma_rgb odd */

    /*
     * Do the addition part of the conversion for even and odd pixels
     * register usage:
     * mm0 -> Cblue, mm1 -> Cred, mm2 -> Cgreen even pixels
     * mm3 -> Cblue, mm4 -> Cred, mm5 -> Cgreen odd  pixels
     * mm6 -> Y even, mm7 -> Y odd
     */

    movq_r2r (mm0, mm3);        /* mm3 = chroma_b */
    movq_r2r (mm1, mm4);        /* mm4 = chroma_r */
    movq_r2r (mm2, mm5);        /* mm5 = chroma_g */
    paddsw_r2r (mm6, mm0);      /* mm0 = B6 B4 B2 B0 */
    paddsw_r2r (mm7, mm3);      /* mm3 = B7 B5 B3 B1 */
    paddsw_r2r (mm6, mm1);      /* mm1 = R6 R4 R2 R0 */
    paddsw_r2r (mm7, mm4);      /* mm4 = R7 R5 R3 R1 */
    paddsw_r2r (mm6, mm2);      /* mm2 = G6 G4 G2 G0 */
    paddsw_r2r (mm7, mm5);      /* mm5 = G7 G5 G3 G1 */
    packuswb_r2r (mm0, mm0);    /* saturate to 0-255 */
    packuswb_r2r (mm1, mm1);    /* saturate to 0-255 */
    packuswb_r2r (mm2, mm2);    /* saturate to 0-255 */
    packuswb_r2r (mm3, mm3);    /* saturate to 0-255 */
    packuswb_r2r (mm4, mm4);    /* saturate to 0-255 */
    packuswb_r2r (mm5, mm5);    /* saturate to 0-255 */
    punpcklbw_r2r (mm3, mm0);   /* mm0 = B7 B6 B5 B4 B3 B2 B1 B0 */
    punpcklbw_r2r (mm4, mm1);   /* mm1 = R7 R6 R5 R4 R3 R2 R1 R0 */
    punpcklbw_r2r (mm5, mm2);   /* mm2 = G7 G6 G5 G4 G3 G2 G1 G0 */
}
static void deinterlace_line( uint8_t *dst, uint8_t *lum_m4, uint8_t *lum_m3, uint8_t *lum_m2, uint8_t *lum_m1, uint8_t *lum, int size ) { #if defined(__i386__) || defined(__x86_64__) mmx_t rounder; rounder.uw[0]=4; rounder.uw[1]=4; rounder.uw[2]=4; rounder.uw[3]=4; pxor_r2r(mm7,mm7); movq_m2r(rounder,mm6); for (;size > 3; size-=4) { movd_m2r(lum_m4[0],mm0); movd_m2r(lum_m3[0],mm1); movd_m2r(lum_m2[0],mm2); movd_m2r(lum_m1[0],mm3); movd_m2r(lum[0],mm4); punpcklbw_r2r(mm7,mm0); punpcklbw_r2r(mm7,mm1); punpcklbw_r2r(mm7,mm2); punpcklbw_r2r(mm7,mm3); punpcklbw_r2r(mm7,mm4); paddw_r2r(mm3,mm1); psllw_i2r(1,mm2); paddw_r2r(mm4,mm0); psllw_i2r(2,mm1);// 2 paddw_r2r(mm6,mm2); paddw_r2r(mm2,mm1); psubusw_r2r(mm0,mm1); psrlw_i2r(3,mm1); // 3 packuswb_r2r(mm7,mm1); movd_r2m(mm1,dst[0]); lum_m4+=4; lum_m3+=4; lum_m2+=4; lum_m1+=4; lum+=4; dst+=4; } emms(); #else /** * C implementation. */ int sum; for(;size > 0;size--) { sum = -lum_m4[0]; sum += lum_m3[0] << 2; sum += lum_m2[0] << 1; sum += lum_m1[0] << 2; sum += -lum[0]; dst[0] = (sum + 4) >> 3; // This needs to be clipped at 0 and 255: cm[(sum + 4) >> 3]; lum_m4++; lum_m3++; lum_m2++; lum_m1++; lum++; dst++; } #endif }
/* *dest = (*dest + (*src1 + *src2 + *src3 + *src4 + 2)/4 + 1)/2 per byte.
 * Averages the four sources with rounding, then averages that with the
 * existing destination using the byte-wise identity
 *   avg_ceil(a,b) = (a | b) - ((a ^ b) >> 1),
 * which needs no 16-bit widening.  Assumes mm0 is zero, `round4` holds
 * 0x0002 per word, and `mask1` is presumably 0xfe repeated per byte so the
 * quadword shift cannot leak bits across byte lanes — NOTE(review): all
 * three constants are defined elsewhere; confirm. */
static inline void mmx_interp_average_4_U8 (uint8_t * dest, const uint8_t * src1, const uint8_t * src2, const uint8_t * src3, const uint8_t * src4)
{
    /* *dest = (*dest + (*src1 + *src2 + *src3 + *src4 + 2)/ 4 + 1)/ 2; */
    movq_m2r (*src1, mm1);      /* load 8 src1 bytes */
    movq_r2r (mm1, mm2);        /* copy 8 src1 bytes */
    punpcklbw_r2r (mm0, mm1);   /* unpack low src1 bytes */
    punpckhbw_r2r (mm0, mm2);   /* unpack high src1 bytes */
    movq_m2r (*src2, mm3);      /* load 8 src2 bytes */
    movq_r2r (mm3, mm4);        /* copy 8 src2 bytes */
    punpcklbw_r2r (mm0, mm3);   /* unpack low src2 bytes */
    punpckhbw_r2r (mm0, mm4);   /* unpack high src2 bytes */
    paddw_r2r (mm3, mm1);       /* add lows */
    paddw_r2r (mm4, mm2);       /* add highs */
    /* now have partials in mm1 and mm2 */
    movq_m2r (*src3, mm3);      /* load 8 src3 bytes */
    movq_r2r (mm3, mm4);        /* copy 8 src3 bytes */
    punpcklbw_r2r (mm0, mm3);   /* unpack low src3 bytes */
    punpckhbw_r2r (mm0, mm4);   /* unpack high src3 bytes */
    paddw_r2r (mm3, mm1);       /* add lows */
    paddw_r2r (mm4, mm2);       /* add highs */
    movq_m2r (*src4, mm5);      /* load 8 src4 bytes */
    movq_r2r (mm5, mm6);        /* copy 8 src4 bytes */
    punpcklbw_r2r (mm0, mm5);   /* unpack low src4 bytes */
    punpckhbw_r2r (mm0, mm6);   /* unpack high src4 bytes */
    paddw_r2r (mm5, mm1);       /* add lows */
    paddw_r2r (mm6, mm2);       /* add highs */
    paddw_m2r (round4, mm1);    /* +2 rounding */
    psraw_i2r (2, mm1);         /* /4 */
    paddw_m2r (round4, mm2);
    psraw_i2r (2, mm2);         /* /4 */
    /* now have subtotal/4 in mm1 and mm2 */
    movq_m2r (*dest, mm3);      /* load 8 dest bytes */
    movq_r2r (mm3, mm4);        /* copy 8 dest bytes */
    packuswb_r2r (mm2, mm1);    /* pack (w/ saturation) */
    movq_r2r (mm1,mm2);         /* copy subresult */
    pxor_r2r (mm1, mm3);        /* xor srcavg and dest */
    pand_m2r (mask1, mm3);      /* mask lower bits (prevent cross-byte leak) */
    psrlq_i2r (1, mm3);         /* /2 */
    por_r2r (mm2, mm4);         /* or srcavg and dest */
    psubb_r2r (mm3, mm4);       /* (a|b) - ((a^b)>>1) = rounded average */
    movq_r2m (mm4, *dest);      /* store result in dest */
}
/* Horizontal bilinear scaling of a single-channel uint8 scanline.
 * Processes 4 destination pixels per MMX iteration (two pmaddwd dot
 * products of {pixel, next-pixel} x {factor, 1-factor} per register),
 * then finishes the dst_size % 4 remainder with scalar 14-bit fixed-point
 * arithmetic.  Caller must run emms (need_emms is set) — safe here because
 * the scalar tail is integer-only. */
static void scale_uint8_x_1_x_bilinear_mmx(gavl_video_scale_context_t * ctx, int scanline, uint8_t * dest_start)
{
    int i, imax, index;
    uint8_t * src, * dst, *src_start;
    mmx_t tmp_mm;
    /*
     *  mm0: Input1 Input2
     *  mm1: Factor
     *  mm2:
     *  mm3:
     *  mm4:
     *  mm5:
     *  mm6: 0
     *  mm7: scratch
     */
    src_start = ctx->src + scanline * ctx->src_stride;
    pxor_r2r(mm6, mm6);             /* zero for the final pack */
    dst = dest_start;
    imax = ctx->dst_size / 4;       /* 4 output pixels per iteration */
    //  imax = 0;
    index = 0;
    for(i = 0; i < imax; i++)
    {
        /* Load pixels (pairs of adjacent source samples as words) */
        src = src_start + ctx->table_h.pixels[index].index;
        tmp_mm.uw[0] = *src;
        tmp_mm.uw[1] = *(src+1);
        src = src_start + ctx->table_h.pixels[index+1].index;
        tmp_mm.uw[2] = *src;
        tmp_mm.uw[3] = *(src+1);
        movd_m2r(tmp_mm, mm0);
        /* Load factors (two 32-bit factor pairs, narrowed to words) */
        movq_m2r(ctx->table_h.pixels[index].factor_i[0], mm1);
        movq_m2r(ctx->table_h.pixels[index+1].factor_i[0], mm7);
        packssdw_r2r(mm7, mm1);
        pmaddwd_r2r(mm0, mm1);      /* p0*f0 + p1*f1 per dword lane */
        index += 2;
        /* Load pixels (second pair of output pixels) */
        src = src_start + ctx->table_h.pixels[index].index;
        tmp_mm.uw[0] = *src;
        tmp_mm.uw[1] = *(src+1);
        src = src_start + ctx->table_h.pixels[index+1].index;
        tmp_mm.uw[2] = *src;
        tmp_mm.uw[3] = *(src+1);
        movd_m2r(tmp_mm, mm0);
        /* Load factors */
        movq_m2r(ctx->table_h.pixels[index].factor_i[0], mm3);
        movq_m2r(ctx->table_h.pixels[index+1].factor_i[0], mm7);
        packssdw_r2r(mm7, mm3);
        pmaddwd_r2r(mm0, mm3);
        psrld_i2r(7, mm3);          /* drop 7 of the 14 fraction bits */
        psrld_i2r(7, mm1);
        packssdw_r2r(mm3, mm1);     /* 4 results as words */
        psrlw_i2r(7, mm1);          /* drop the remaining 7 bits */
        index += 2;
        packuswb_r2r(mm6, mm1);     /* clamp + pack to 4 bytes */
        movd_r2m(mm1, *dst);
        //  *dst = tmp_mm.ub[0];
        //  *(dst+1) = tmp_mm.ub[4];
        dst+=4;
    }
    ctx->need_emms = 1;             /* caller runs emms */
#if 1
    /* scalar tail for dst_size not divisible by 4 (14-bit fixed point) */
    imax = ctx->dst_size % 4;
    for(i = 0; i < imax; i++)
    {
        src = (src_start + ctx->table_h.pixels[index].index);
        *dst = (ctx->table_h.pixels[index].factor_i[0] * *src + ctx->table_h.pixels[index].factor_i[1] * *(src+1)) >> 14;
        dst++;
        index++;
    }
#endif
}
void yuv411planar_to_rgb_mmx (const unsigned char *yuv, unsigned char *rgb, unsigned int w, unsigned int h) { unsigned int xx, yy; register const unsigned char *yp1, *up, *vp; unsigned char *dp1; /* plane pointers */ yp1 = yuv; up = yuv + (w * h); vp = up + (w * (h / 4)); /* destination pointers */ dp1 = rgb; yp1 = yuv; up = yuv + (w * h); vp = up + ((w / 2) * (h / 2)); dp1 = rgb; for (yy = 0; yy < h; yy++) { for (xx = 0; xx < w; xx += 8) { movq_m2r(*yp1, mm0); movq_r2r(mm0, mm1); psrlw_i2r(8, mm0); psllw_i2r(8, mm1); psrlw_i2r(8, mm1); pxor_r2r(mm7, mm7); movd_m2r(*up, mm3); movd_m2r(*vp, mm2); punpcklbw_r2r(mm7, mm2); punpcklbw_r2r(mm7, mm3); movq_m2r(CONST_16, mm4); psubsw_r2r(mm4, mm0); psubsw_r2r(mm4, mm1); movq_m2r(CONST_128, mm5); psubsw_r2r(mm5, mm2); psubsw_r2r(mm5, mm3); movq_m2r(CONST_YMUL, mm4); pmullw_r2r(mm4, mm0); pmullw_r2r(mm4, mm1); movq_m2r(CONST_CRVCRV, mm7); pmullw_r2r(mm3, mm7); movq_m2r(CONST_CBUCBU, mm6); pmullw_r2r(mm2, mm6); movq_m2r(CONST_CGUCGU, mm5); pmullw_r2r(mm2, mm5); movq_m2r(CONST_CGVCGV, mm4); pmullw_r2r(mm3, mm4); movq_r2r(mm0, mm2); paddsw_r2r(mm7, mm2); paddsw_r2r(mm1, mm7); psraw_i2r(RES, mm2); psraw_i2r(RES, mm7); packuswb_r2r(mm7, mm2); pxor_r2r(mm7, mm7); movq_r2r(mm2, mm3); punpckhbw_r2r(mm7, mm2); punpcklbw_r2r(mm3, mm7); por_r2r(mm7, mm2); movq_r2r(mm0, mm3); psubsw_r2r(mm5, mm3); psubsw_r2r(mm4, mm3); paddsw_m2r(CONST_32, mm3); movq_r2r(mm1, mm7); psubsw_r2r(mm5, mm7); psubsw_r2r(mm4, mm7); paddsw_m2r(CONST_32, mm7); psraw_i2r(RES, mm3); psraw_i2r(RES, mm7); packuswb_r2r(mm7, mm3); pxor_r2r(mm7, mm7); movq_r2r(mm3, mm4); punpckhbw_r2r(mm7, mm3); punpcklbw_r2r(mm4, mm7); por_r2r(mm7, mm3); movq_m2r(CONST_32, mm4); paddsw_r2r(mm6, mm0); paddsw_r2r(mm6, mm1); paddsw_r2r(mm4, mm0); paddsw_r2r(mm4, mm1); psraw_i2r(RES, mm0); psraw_i2r(RES, mm1); packuswb_r2r(mm1, mm0); pxor_r2r(mm7, mm7); movq_r2r(mm0, mm5); punpckhbw_r2r(mm7, mm0); punpcklbw_r2r(mm5, mm7); por_r2r(mm7, mm0); pxor_r2r(mm1, mm1); movq_r2r(mm0, mm5); 
movq_r2r(mm3, mm6); movq_r2r(mm2, mm7); punpckhbw_r2r(mm3, mm2); punpcklbw_r2r(mm6, mm7); punpckhbw_r2r(mm1, mm0); punpcklbw_r2r(mm1, mm5); movq_r2r(mm7, mm1); punpckhwd_r2r(mm5, mm7); punpcklwd_r2r(mm5, mm1); movq_r2r(mm2, mm4); punpckhwd_r2r(mm0, mm2); punpcklwd_r2r(mm0, mm4); movntq_r2m(mm1, *(dp1)); movntq_r2m(mm7, *(dp1 + 8)); movntq_r2m(mm4, *(dp1 + 16)); movntq_r2m(mm2, *(dp1 + 24)); yp1 += 8; up += 4; vp += 4; dp1 += 8 * 4; } if (yy & 0x1) { up -= w / 2; vp -= w / 2; } } emms(); }
/* Convert YV12 (rows indexed through the yuv[] row-pointer array: Y rows
 * first, then U, then V) to 32-bit ARGB with MMX/SSE, 8 pixels per
 * iteration, plus a scalar LUT-based tail for widths not divisible by 8.
 * Constants CONST_* / RES / RZ / LUT_CLIP / RGB_JOIN and the _crv/_cgu/...
 * coefficients are defined elsewhere.
 * NOTE(review): the matching #endif for BUILD_MMX (and any emms()) is not
 * visible in this chunk — presumably it follows; confirm against the full
 * file. */
static void _evas_yv12torgb_sse(unsigned char **yuv, unsigned char *rgb, int w, int h)
{
#ifdef BUILD_MMX
    int xx, yy;
    register unsigned char *yp1, *up, *vp;
    unsigned char *dp1;

    /* destination pointers */
    dp1 = rgb;

    for (yy = 0; yy < h; yy++)
    {
        /* plane pointers: Y row yy; U/V rows advance at half rate */
        yp1 = yuv[yy];
        up = yuv[h + (yy / 2)];
        vp = yuv[h + (h / 2) + (yy / 2)];
        for (xx = 0; xx < (w - 7); xx += 8)
        {
            /* widen 4 U / 4 V bytes, split 8 Y bytes into odd/even lanes */
            movd_m2r(*up, mm3);
            movd_m2r(*vp, mm2);
            movq_m2r(*yp1, mm0);
            pxor_r2r(mm7, mm7);
            punpcklbw_r2r(mm7, mm2);
            punpcklbw_r2r(mm7, mm3);
            movq_r2r(mm0, mm1);
            psrlw_i2r(8, mm0);
            psllw_i2r(8, mm1);
            psrlw_i2r(8, mm1);
            /* remove offsets: Y -= 16, U/V -= 128 */
            movq_m2r(CONST_16, mm4);
            psubsw_r2r(mm4, mm0);
            psubsw_r2r(mm4, mm1);
            movq_m2r(CONST_128, mm5);
            psubsw_r2r(mm5, mm2);
            psubsw_r2r(mm5, mm3);
            /* fixed-point coefficient products */
            movq_m2r(CONST_YMUL, mm4);
            pmullw_r2r(mm4, mm0);
            pmullw_r2r(mm4, mm1);
            movq_m2r(CONST_CRVCRV, mm7);
            pmullw_r2r(mm3, mm7);
            movq_m2r(CONST_CBUCBU, mm6);
            pmullw_r2r(mm2, mm6);
            movq_m2r(CONST_CGUCGU, mm5);
            pmullw_r2r(mm2, mm5);
            movq_m2r(CONST_CGVCGV, mm4);
            pmullw_r2r(mm3, mm4);
            /* red channel: Y + CRV*V, descale, pack, re-interleave */
            movq_r2r(mm0, mm2);
            paddsw_r2r(mm7, mm2);
            paddsw_r2r(mm1, mm7);
            psraw_i2r(RES, mm2);
            psraw_i2r(RES, mm7);
            packuswb_r2r(mm7, mm2);
            pxor_r2r(mm7, mm7);
            movq_r2r(mm2, mm3);
            punpckhbw_r2r(mm7, mm2);
            punpcklbw_r2r(mm3, mm7);
            por_r2r(mm7, mm2);
            /* green channel: Y - CGU*U - CGV*V + offset */
            movq_r2r(mm0, mm3);
            psubsw_r2r(mm5, mm3);
            psubsw_r2r(mm4, mm3);
            paddsw_m2r(CONST_32, mm3);
            movq_r2r(mm1, mm7);
            psubsw_r2r(mm5, mm7);
            psubsw_r2r(mm4, mm7);
            paddsw_m2r(CONST_32, mm7);
            psraw_i2r(RES, mm3);
            psraw_i2r(RES, mm7);
            packuswb_r2r(mm7, mm3);
            pxor_r2r(mm7, mm7);
            movq_r2r(mm3, mm4);
            punpckhbw_r2r(mm7, mm3);
            punpcklbw_r2r(mm4, mm7);
            por_r2r(mm7, mm3);
            /* blue channel: Y + CBU*U + offset */
            movq_m2r(CONST_32, mm4);
            paddsw_r2r(mm6, mm0);
            paddsw_r2r(mm6, mm1);
            paddsw_r2r(mm4, mm0);
            paddsw_r2r(mm4, mm1);
            psraw_i2r(RES, mm0);
            psraw_i2r(RES, mm1);
            packuswb_r2r(mm1, mm0);
            pxor_r2r(mm7, mm7);
            movq_r2r(mm0, mm5);
            punpckhbw_r2r(mm7, mm0);
            punpcklbw_r2r(mm5, mm7);
            por_r2r(mm7, mm0);
            /* interleave with 0xff alpha and stream out 4 quads */
            movq_m2r(CONST_FF, mm1);
            movq_r2r(mm0, mm5);
            movq_r2r(mm3, mm6);
            movq_r2r(mm2, mm7);
            punpckhbw_r2r(mm3, mm2);
            punpcklbw_r2r(mm6, mm7);
            punpckhbw_r2r(mm1, mm0);
            punpcklbw_r2r(mm1, mm5);
            movq_r2r(mm7, mm1);
            punpckhwd_r2r(mm5, mm7);
            punpcklwd_r2r(mm5, mm1);
            movq_r2r(mm2, mm4);
            punpckhwd_r2r(mm0, mm2);
            punpcklwd_r2r(mm0, mm4);
            movntq_r2m(mm1, *(dp1));        /* non-temporal stores */
            movntq_r2m(mm7, *(dp1 + 8));
            movntq_r2m(mm4, *(dp1 + 16));
            movntq_r2m(mm2, *(dp1 + 24));
            yp1 += 8;
            up += 4;
            vp += 4;
            dp1 += 8 * 4;
        }
        /* cleanup pixles that arent a multiple of 8 pixels wide */
        if (xx < w)
        {
            int y, u, v, r, g, b;

            for (; xx < w; xx += 2)
            {
                u = (*up++) - 128;
                v = (*vp++) - 128;
                /* two luma pixels share one u/v pair */
                y = RZ(YMUL) * ((*yp1++) - 16);
                r = LUT_CLIP((y + (_crv * v)) >> RES);
                g = LUT_CLIP((y - (_cgu * u) - (_cgv * v) + RZ(OFF)) >> RES);
                b = LUT_CLIP((y + (_cbu * u) + RZ(OFF)) >> RES);
                *((DATA32 *) dp1) = 0xff000000 + RGB_JOIN(r,g,b);
                dp1 += 4;
                y = RZ(YMUL) * ((*yp1++) - 16);
                r = LUT_CLIP((y + (_crv * v)) >> RES);
                g = LUT_CLIP((y - (_cgu * u) - (_cgv * v) + RZ(OFF)) >> RES);
                b = LUT_CLIP((y + (_cbu * u) + RZ(OFF)) >> RES);
                *((DATA32 *) dp1) = 0xff000000 + RGB_JOIN(r,g,b);
                dp1 += 4;
            }
        }
    }
}
/* Bidirectional SAD for a 16-wide block over h rows: forms the rounded
 * half-pel average of the forward prediction (pf and its hxf/hyf
 * neighbours, (sum+2)/4) and of the backward prediction (pb likewise),
 * averages the two with rounding ((f+b+1)/2), and sums the absolute
 * differences against p2.  Processes 8 bytes at a time (two passes per
 * row: offsets 0 and 8).  lx is the line stride.
 * NOTE(review): BSAD_LOAD / BSAD_LOAD_ACC are macros defined elsewhere —
 * presumably "load 8 bytes widened to words in two regs" and "load and
 * accumulate"; confirm.
 * Register invariants inside the loop: mm7 = 0 (unpack zero),
 * mm6 = 2 per word (built via pcmpeqw/psubw/psllw), temporarily shifted
 * down to 1 for the final /2 rounding and restored. */
int bsad_mmx(uint8_t *pf, uint8_t *pb, uint8_t *p2, int lx, int hxf, int hyf, int hxb, int hyb, int h)
{
    uint8_t *pfa,*pfb,*pfc,*pba,*pbb,*pbc;
    int s, s1, s2;

    /* half-pel neighbour pointers: right, below, below-right */
    pfa = pf + hxf;
    pfb = pf + lx * hyf;
    pfc = pfb + hxf;
    pba = pb + hxb;
    pbb = pb + lx * hyb;
    pbc = pbb + hxb;

    s = 0;      /* the accumulator */
    if (h > 0)
    {
        pxor_r2r(mm7, mm7);         /* mm7 = 0 */
        pxor_r2r(mm6, mm6);
        pcmpeqw_r2r(mm5, mm5);      /* mm5 = -1 per word */
        psubw_r2r(mm5, mm6);        /* mm6 = 1 per word */
        psllw_i2r(1, mm6);          /* mm6 = 2 per word (rounding for /4) */
        do {
            /* ---- bytes 0..7 ---- */
            BSAD_LOAD(pf[0],mm0,mm1);
            BSAD_LOAD_ACC(pfa[0],mm2,mm3,mm0,mm1);
            BSAD_LOAD_ACC(pfb[0],mm2,mm3,mm0,mm1);
            BSAD_LOAD_ACC(pfc[0],mm2,mm3,mm0,mm1);
            paddw_r2r(mm6, mm0);    /* forward: (sum+2)/4 */
            paddw_r2r(mm6, mm1);
            psrlw_i2r(2, mm0);
            psrlw_i2r(2, mm1);

            BSAD_LOAD(pb[0],mm2,mm3);
            BSAD_LOAD_ACC(pba[0],mm4,mm5,mm2,mm3);
            BSAD_LOAD_ACC(pbb[0],mm4,mm5,mm2,mm3);
            BSAD_LOAD_ACC(pbc[0],mm4,mm5,mm2,mm3);
            paddw_r2r(mm6, mm2);    /* backward: (sum+2)/4 */
            paddw_r2r(mm6, mm3);
            psrlw_i2r(2, mm2);
            psrlw_i2r(2, mm3);

            paddw_r2r(mm2, mm0);    /* f + b */
            paddw_r2r(mm3, mm1);
            psrlw_i2r(1, mm6);      /* mm6 = 1 for the /2 rounding */
            paddw_r2r(mm6, mm0);
            paddw_r2r(mm6, mm1);
            psllw_i2r(1, mm6);      /* restore mm6 = 2 */
            psrlw_i2r(1, mm0);      /* (f+b+1)/2 */
            psrlw_i2r(1, mm1);

            packuswb_r2r(mm1, mm0); /* prediction as 8 bytes */

            movq_m2r(p2[0], mm1);
            movq_r2r(mm0, mm2);
            psubusb_r2r(mm1, mm0);  /* |pred - p2| via two saturating subs */
            psubusb_r2r(mm2, mm1);
            por_r2r(mm1, mm0);

            /* horizontal sum of the 8 absolute differences */
            movq_r2r(mm0, mm1);
            punpcklbw_r2r(mm7, mm0);
            punpckhbw_r2r(mm7, mm1);
            paddw_r2r(mm1, mm0);
            movq_r2r(mm0, mm1);
            punpcklwd_r2r(mm7, mm0);
            punpckhwd_r2r(mm7, mm1);
            paddd_r2r(mm1, mm0);
            movd_r2g(mm0, s1);
            psrlq_i2r(32, mm0);
            movd_r2g(mm0, s2);
            s += s1 + s2;

            /* ---- bytes 8..15 (same computation) ---- */
            BSAD_LOAD(pf[8],mm0,mm1);
            BSAD_LOAD_ACC(pfa[8],mm2,mm3,mm0,mm1);
            BSAD_LOAD_ACC(pfb[8],mm2,mm3,mm0,mm1);
            BSAD_LOAD_ACC(pfc[8],mm2,mm3,mm0,mm1);
            paddw_r2r(mm6, mm0);
            paddw_r2r(mm6, mm1);
            psrlw_i2r(2, mm0);
            psrlw_i2r(2, mm1);

            BSAD_LOAD(pb[8],mm2,mm3);
            BSAD_LOAD_ACC(pba[8],mm4,mm5,mm2,mm3);
            BSAD_LOAD_ACC(pbb[8],mm4,mm5,mm2,mm3);
            BSAD_LOAD_ACC(pbc[8],mm4,mm5,mm2,mm3);
            paddw_r2r(mm6, mm2);
            paddw_r2r(mm6, mm3);
            psrlw_i2r(2, mm2);
            psrlw_i2r(2, mm3);

            paddw_r2r(mm2, mm0);
            paddw_r2r(mm3, mm1);
            psrlw_i2r(1, mm6);
            paddw_r2r(mm6, mm0);
            paddw_r2r(mm6, mm1);
            psllw_i2r(1, mm6);
            psrlw_i2r(1, mm0);
            psrlw_i2r(1, mm1);

            packuswb_r2r(mm1, mm0);

            movq_m2r(p2[8], mm1);
            movq_r2r(mm0, mm2);
            psubusb_r2r(mm1, mm0);
            psubusb_r2r(mm2, mm1);
            por_r2r(mm1, mm0);

            movq_r2r(mm0, mm1);
            punpcklbw_r2r(mm7, mm0);
            punpckhbw_r2r(mm7, mm1);
            paddw_r2r(mm1, mm0);
            movq_r2r(mm0, mm1);
            punpcklwd_r2r(mm7, mm0);
            punpckhwd_r2r(mm7, mm1);
            paddd_r2r(mm1, mm0);
            movd_r2g(mm0, s1);
            psrlq_i2r(32, mm0);
            movd_r2g(mm0, s2);
            s += s1 + s2;

            /* advance all pointers one line */
            p2 += lx;
            pf += lx;
            pfa += lx;
            pfb += lx;
            pfc += lx;
            pb += lx;
            pba += lx;
            pbb += lx;
            pbc += lx;
            h--;
        } while (h > 0);
    }
    emms();
    return s;
}
/* MMX-extension (pavgb/psadbw) variant of the bidirectional SAD for the
 * case where the forward prediction is a full quad interpolation
 * ((pf + pf+1 + pf+lx + pf+lx+1 + 2)/4) and the backward prediction is
 * the pavgb of pb and pb2.  16 bytes per row, h rows, stride lx.
 * NOTE(review): BSAD_LOAD / BSAD_LOAD_ACC are macros defined elsewhere;
 * confirm their widen/accumulate semantics. */
static int bsad_1quad_mmxe(uint8_t *pf, uint8_t *pb, uint8_t *pb2, uint8_t *p2, int lx, int h)
{
    int s;

    s = 0;      /* the accumulator */
    if (h > 0)
    {
        /* build mm6 = 2 per word: all-ones >> 15 gives 1, doubled */
        pcmpeqw_r2r(mm6, mm6);
        psrlw_i2r(15, mm6);
        paddw_r2r(mm6, mm6);
        pxor_r2r(mm7, mm7);         /* mm7 = 0 */
        pxor_r2r(mm5, mm5);         /* mm5 = running SAD */
        do {
            /* ---- bytes 0..7 ---- */
            BSAD_LOAD(pf[0],mm0,mm1);
            BSAD_LOAD_ACC(pf[1],mm2,mm3,mm0,mm1);
            BSAD_LOAD_ACC(pf[lx],mm2,mm3,mm0,mm1);
            BSAD_LOAD_ACC(pf[lx+1],mm2,mm3,mm0,mm1);
            paddw_r2r(mm6, mm0);    /* (sum+2)/4 */
            paddw_r2r(mm6, mm1);
            psrlw_i2r(2, mm0);
            psrlw_i2r(2, mm1);
            packuswb_r2r(mm1, mm0); /* forward prediction, 8 bytes */

            movq_m2r(pb2[0],mm1);
            pavgb_m2r(pb[0],mm1);   /* backward = avg(pb, pb2) */
            pavgb_r2r(mm1, mm0);    /* final = avg(forward, backward) */
            psadbw_m2r(p2[0],mm0);  /* SAD vs reference */
            paddd_r2r(mm0,mm5);     /* accumulate */

            /* ---- bytes 8..15 (same computation) ---- */
            BSAD_LOAD(pf[8],mm0,mm1);
            BSAD_LOAD_ACC(pf[9],mm2,mm3,mm0,mm1);
            BSAD_LOAD_ACC(pf[lx+8],mm2,mm3,mm0,mm1);
            BSAD_LOAD_ACC(pf[lx+9],mm2,mm3,mm0,mm1);
            paddw_r2r(mm6, mm0);
            paddw_r2r(mm6, mm1);
            psrlw_i2r(2, mm0);
            psrlw_i2r(2, mm1);
            packuswb_r2r(mm1, mm0);

            movq_m2r(pb2[8],mm1);
            pavgb_m2r(pb[8],mm1);
            pavgb_r2r(mm1, mm0);
            psadbw_m2r(p2[8],mm0);
            paddd_r2r(mm0,mm5);

            /* advance one line */
            p2 += lx;
            pf += lx;
            pb += lx;
            pb2 += lx;
            h--;
        } while (h > 0);
    }
    movd_r2g(mm5,s);    /* psadbw result fits in the low dword */
    emms();
    return s;
}