static int32_t scalarproduct_int16_altivec(int16_t *v1, int16_t *v2, int order, const int shift)
{
    int i;
    LOAD_ZERO;
    register vec_s16_t vec1, *pv;
    register vec_s32_t res = vec_splat_s32(0), t;
    register vec_u32_t shifts;
    DECLARE_ALIGNED_16(int32_t, ires);

    /* Build the splatted shift count bit by bit: vec_splat_u32() only
     * accepts small compile-time immediates, so a runtime shift in 0..31
     * is assembled from its individual bits (the 0x10 bit as 8 << 1). */
    shifts = zero_u32v;
    if(shift & 0x10) shifts = vec_add(shifts, vec_sl(vec_splat_u32(0x08), vec_splat_u32(0x1)));
    if(shift & 0x08) shifts = vec_add(shifts, vec_splat_u32(0x08));
    if(shift & 0x04) shifts = vec_add(shifts, vec_splat_u32(0x04));
    if(shift & 0x02) shifts = vec_add(shifts, vec_splat_u32(0x02));
    if(shift & 0x01) shifts = vec_add(shifts, vec_splat_u32(0x01));

    for(i = 0; i < order; i += 8){
        /* v1 may be unaligned: load two vectors and realign with vec_perm */
        pv = (vec_s16_t*)v1;
        vec1 = vec_perm(pv[0], pv[1], vec_lvsl(0, v1));
        /* multiply-sum eight int16 products into four int32 lanes,
         * shift the partial sums, then accumulate across lanes */
        t = vec_msum(vec1, vec_ld(0, v2), zero_s32v);
        t = vec_sr(t, shifts);
        res = vec_sums(t, res);
        v1 += 8;
        v2 += 8;
    }
    /* vec_sums leaves the total in element 3; broadcast and store it */
    res = vec_splat(res, 3);
    vec_ste(res, 0, &ires);
    return ires;
}
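/* For orientation, a scalar sketch of what the AltiVec routine above
 * computes. This is illustrative only, not the project's reference
 * implementation. Like the vector code (vec_msum followed by vec_sr),
 * it shifts each sum of two products rather than each product
 * individually, and it ignores the saturation that vec_sums applies;
 * order is assumed to be a multiple of 8, matching the vector loop. */
static int32_t scalarproduct_int16_sketch(const int16_t *v1, const int16_t *v2,
                                          int order, int shift)
{
    int32_t res = 0;
    int i;
    for (i = 0; i < order; i += 2)
        res += (v1[i] * v2[i] + v1[i + 1] * v2[i + 1]) >> shift;
    return res;
}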
void x264_me_search_ref( x264_t *h, x264_me_t *m, int16_t (*mvc)[2], int i_mvc, int *p_halfpel_thresh )
{
    const int bw = x264_pixel_size[m->i_pixel].w;
    const int bh = x264_pixel_size[m->i_pixel].h;
    const int i_pixel = m->i_pixel;
    int i_me_range = h->param.analyse.i_me_range;
    int bmx, bmy, bcost;
    int bpred_mx = 0, bpred_my = 0, bpred_cost = COST_MAX;
    int omx, omy, pmx, pmy;
    uint8_t *p_fref = m->p_fref[0];
    DECLARE_ALIGNED_16( uint8_t pix[16*16] );

    int i, j;
    int dir;
    int costs[6];

    int mv_x_min = h->mb.mv_min_fpel[0];
    int mv_y_min = h->mb.mv_min_fpel[1];
    int mv_x_max = h->mb.mv_max_fpel[0];
    int mv_y_max = h->mb.mv_max_fpel[1];

#define CHECK_MVRANGE(mx,my) ( mx >= mv_x_min && mx <= mv_x_max && my >= mv_y_min && my <= mv_y_max )

    const int16_t *p_cost_mvx = m->p_cost_mv - m->mvp[0];
    const int16_t *p_cost_mvy = m->p_cost_mv - m->mvp[1];

    bmx = x264_clip3( m->mvp[0], mv_x_min*4, mv_x_max*4 );
    bmy = x264_clip3( m->mvp[1], mv_y_min*4, mv_y_max*4 );
    pmx = ( bmx + 2 ) >> 2;
    pmy = ( bmy + 2 ) >> 2;
    bcost = COST_MAX;

    /* try extra predictors if provided */
    if( h->mb.i_subpel_refine >= 3 )
    {
        uint32_t bmv = pack16to32_mask(bmx,bmy);
        COST_MV_HPEL( bmx, bmy );
        for( i = 0; i < i_mvc; i++ )
        {
            if( *(uint32_t*)mvc[i] && (bmv - *(uint32_t*)mvc[i]) )
            {
                int mx = x264_clip3( mvc[i][0], mv_x_min*4, mv_x_max*4 );
                int my = x264_clip3( mvc[i][1], mv_y_min*4, mv_y_max*4 );
                COST_MV_HPEL( mx, my );
            }
        }
        bmx = ( bpred_mx + 2 ) >> 2;
        bmy = ( bpred_my + 2 ) >> 2;
        COST_MV( bmx, bmy );
    }
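    /* Aside: the predictor loop above treats each MV as a single 32-bit
     * word so that one subtraction tests "nonzero and different from the
     * current best". A sketch of that packing idiom (illustrative; the
     * real pack16to32_mask is defined elsewhere in x264): */
    /*
    static inline uint32_t pack16to32_mask_sketch( int mx, int my )
    {
        // low halfword = x component, high halfword = y component
        return ((uint32_t)mx & 0xffff) | ((uint32_t)my << 16);
    }
    */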
/* Compare the C and optimized (de)quant implementations over every qp,
 * for several CQM presets: flat, JVT, random custom, and all-ones custom. */
static int check_quant( int cpu_ref, int cpu_new )
{
    x264_quant_function_t qf_c;
    x264_quant_function_t qf_ref;
    x264_quant_function_t qf_a;
    DECLARE_ALIGNED_16( int16_t dct1[64] );
    DECLARE_ALIGNED_16( int16_t dct2[64] );
    DECLARE_ALIGNED_16( uint8_t cqm_buf[64] );
    int ret = 0, ok, used_asm;
    int oks[2] = {1,1}, used_asms[2] = {0,0};
    int i, i_cqm, qp;
    x264_t h_buf;
    x264_t *h = &h_buf;
    memset( h, 0, sizeof(*h) );
    h->pps = h->pps_array;
    x264_param_default( &h->param );
    h->param.rc.i_qp_min = 26;
    h->param.analyse.b_transform_8x8 = 1;

    for( i_cqm = 0; i_cqm < 4; i_cqm++ )
    {
        if( i_cqm == 0 )
        {
            for( i = 0; i < 6; i++ )
                h->pps->scaling_list[i] = x264_cqm_flat16;
            h->param.i_cqm_preset = h->pps->i_cqm_preset = X264_CQM_FLAT;
        }
        else if( i_cqm == 1 )
        {
            for( i = 0; i < 6; i++ )
                h->pps->scaling_list[i] = x264_cqm_jvt[i];
            h->param.i_cqm_preset = h->pps->i_cqm_preset = X264_CQM_JVT;
        }
        else
        {
            if( i_cqm == 2 )
                for( i = 0; i < 64; i++ )
                    cqm_buf[i] = 10 + rand() % 246;
            else
                for( i = 0; i < 64; i++ )
                    cqm_buf[i] = 1;
            for( i = 0; i < 6; i++ )
                h->pps->scaling_list[i] = cqm_buf;
            h->param.i_cqm_preset = h->pps->i_cqm_preset = X264_CQM_CUSTOM;
        }

        x264_cqm_init( h );
        x264_quant_init( h, 0, &qf_c );
        x264_quant_init( h, cpu_ref, &qf_ref );
        x264_quant_init( h, cpu_new, &qf_a );

#define INIT_QUANT8() \
        { \
            static const int scale1d[8] = {32,31,24,31,32,31,24,31}; \
            int x, y; \
            for( y = 0; y < 8; y++ ) \
                for( x = 0; x < 8; x++ ) \
                { \
                    unsigned int scale = (255*scale1d[y]*scale1d[x])/16; \
                    dct1[y*8+x] = dct2[y*8+x] = (rand()%(2*scale+1))-scale; \
                } \
        }

#define INIT_QUANT4() \
        { \
            static const int scale1d[4] = {4,6,4,6}; \
            int x, y; \
            for( y = 0; y < 4; y++ ) \
                for( x = 0; x < 4; x++ ) \
                { \
                    unsigned int scale = 255*scale1d[y]*scale1d[x]; \
                    dct1[y*4+x] = dct2[y*4+x] = (rand()%(2*scale+1))-scale; \
                } \
        }

#define TEST_QUANT_DC( name, cqm ) \
        if( qf_a.name != qf_ref.name ) \
        { \
            set_func_name( #name ); \
            used_asms[0] = 1; \
            for( qp = 51; qp > 0; qp-- ) \
            { \
                for( i = 0; i < 16; i++ ) \
                    dct1[i] = dct2[i] = (rand() & 0x1fff) - 0xfff; \
                call_c1( qf_c.name, (void*)dct1, h->quant4_mf[CQM_4IY][qp][0], h->quant4_bias[CQM_4IY][qp][0] ); \
                call_a1( qf_a.name, (void*)dct2, h->quant4_mf[CQM_4IY][qp][0], h->quant4_bias[CQM_4IY][qp][0] ); \
                if( memcmp( dct1, dct2, 16*2 ) ) \
                { \
                    oks[0] = 0; \
                    fprintf( stderr, #name "(cqm=%d): [FAILED]\n", i_cqm ); \
                    break; \
                } \
                call_c2( qf_c.name, (void*)dct1, h->quant4_mf[CQM_4IY][qp][0], h->quant4_bias[CQM_4IY][qp][0] ); \
                call_a2( qf_a.name, (void*)dct2, h->quant4_mf[CQM_4IY][qp][0], h->quant4_bias[CQM_4IY][qp][0] ); \
            } \
        }

#define TEST_QUANT( qname, block, w ) \
        if( qf_a.qname != qf_ref.qname ) \
        { \
            set_func_name( #qname ); \
            used_asms[0] = 1; \
            for( qp = 51; qp > 0; qp-- ) \
            { \
                INIT_QUANT##w() \
                call_c1( qf_c.qname, (void*)dct1, h->quant##w##_mf[block][qp], h->quant##w##_bias[block][qp] ); \
                call_a1( qf_a.qname, (void*)dct2, h->quant##w##_mf[block][qp], h->quant##w##_bias[block][qp] ); \
                if( memcmp( dct1, dct2, w*w*2 ) ) \
                { \
                    oks[0] = 0; \
                    fprintf( stderr, #qname "(qp=%d, cqm=%d, block="#block"): [FAILED]\n", qp, i_cqm ); \
                    break; \
                } \
                call_c2( qf_c.qname, (void*)dct1, h->quant##w##_mf[block][qp], h->quant##w##_bias[block][qp] ); \
                call_a2( qf_a.qname, (void*)dct2, h->quant##w##_mf[block][qp], h->quant##w##_bias[block][qp] ); \
            } \
        }

        TEST_QUANT( quant_8x8, CQM_8IY, 8 );
        TEST_QUANT( quant_8x8, CQM_8PY, 8 );
        TEST_QUANT( quant_4x4, CQM_4IY, 4 );
        TEST_QUANT( quant_4x4, CQM_4PY, 4 );
        TEST_QUANT_DC( quant_4x4_dc, **h->quant4_mf[CQM_4IY] );
        TEST_QUANT_DC( quant_2x2_dc, **h->quant4_mf[CQM_4IC] );

#define TEST_DEQUANT( qname, dqname, block, w ) \
        if( qf_a.dqname != qf_ref.dqname ) \
        { \
            set_func_name( "%s_%s", #dqname, i_cqm?"cqm":"flat" ); \
            used_asms[1] = 1; \
            for( qp = 51; qp > 0; qp-- ) \
            { \
                INIT_QUANT##w() \
                /* quantize with C first so both dequant inputs match */ \
                call_c( qf_c.qname, (void*)dct1, h->quant##w##_mf[block][qp], h->quant##w##_bias[block][qp] ); \
                memcpy( dct2, dct1, w*w*2 ); \
                call_c1( qf_c.dqname, (void*)dct1, h->dequant##w##_mf[block], qp ); \
                call_a1( qf_a.dqname, (void*)dct2, h->dequant##w##_mf[block], qp ); \
                if( memcmp( dct1, dct2, w*w*2 ) ) \
                { \
                    oks[1] = 0; \
                    fprintf( stderr, #dqname "(qp=%d, cqm=%d, block="#block"): [FAILED]\n", qp, i_cqm ); \
                    break; \
                } \
                call_c2( qf_c.dqname, (void*)dct1, h->dequant##w##_mf[block], qp ); \
                call_a2( qf_a.dqname, (void*)dct2, h->dequant##w##_mf[block], qp ); \
            } \
        }

        TEST_DEQUANT( quant_8x8, dequant_8x8, CQM_8IY, 8 );
        TEST_DEQUANT( quant_8x8, dequant_8x8, CQM_8PY, 8 );
        TEST_DEQUANT( quant_4x4, dequant_4x4, CQM_4IY, 4 );
        TEST_DEQUANT( quant_4x4, dequant_4x4, CQM_4PY, 4 );

        x264_cqm_delete( h );
    }

    ok = oks[0]; used_asm = used_asms[0];
    report( "quant :" );

    ok = oks[1]; used_asm = used_asms[1];
    report( "dequant :" );

    if( qf_a.denoise_dct != qf_ref.denoise_dct )
    {
        int size;
        for( size = 16; size <= 64; size += 48 )
        {
            set_func_name( "denoise_dct" );
            used_asm = 1;
            memcpy( dct1, buf1, size*2 );
            memcpy( dct2, buf1, size*2 );
            memcpy( buf3+256, buf3, 256 );
            call_c1( qf_c.denoise_dct, dct1, (uint32_t*)buf3, (uint16_t*)buf2, size );
            call_a1( qf_a.denoise_dct, dct2, (uint32_t*)(buf3+256), (uint16_t*)buf2, size );
            if( memcmp( dct1, dct2, size*2 ) || memcmp( buf3+4, buf3+256+4, (size-1)*sizeof(uint32_t) ) )
                ok = 0;
            call_c2( qf_c.denoise_dct, dct1, (uint32_t*)buf3, (uint16_t*)buf2, size );
            call_a2( qf_a.denoise_dct, dct2, (uint32_t*)(buf3+256), (uint16_t*)buf2, size );
        }
    }
    report( "denoise dct :" );

    return ret;
}
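/* For orientation: the scalar operation TEST_QUANT validates is, in
 * essence, the sketch below -- scale by the per-coefficient multiplier
 * mf, add the deadzone bias, shift down 16 bits, and restore the sign.
 * Names and types here are illustrative, not x264's exact prototypes. */
static void quant_4x4_sketch( int16_t dct[16], const uint16_t mf[16],
                              const uint16_t bias[16] )
{
    int i;
    for( i = 0; i < 16; i++ )
    {
        int coef = dct[i];
        int a = coef < 0 ? -coef : coef;        /* magnitude */
        int level = ( a * mf[i] + bias[i] ) >> 16;
        dct[i] = coef < 0 ? -level : level;     /* reapply sign */
    }
}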
static int check_dct( int cpu_ref, int cpu_new )
{
    x264_dct_function_t dct_c;
    x264_dct_function_t dct_ref;
    x264_dct_function_t dct_asm;
    x264_quant_function_t qf;
    int ret = 0, ok, used_asm, i, interlace;
    DECLARE_ALIGNED_16( int16_t dct1[16][4][4] );
    DECLARE_ALIGNED_16( int16_t dct2[16][4][4] );
    DECLARE_ALIGNED_16( int16_t dct4[16][4][4] );
    DECLARE_ALIGNED_16( int16_t dct8[4][8][8] );
    x264_t h_buf;
    x264_t *h = &h_buf;

    x264_dct_init( 0, &dct_c );
    x264_dct_init( cpu_ref, &dct_ref );
    x264_dct_init( cpu_new, &dct_asm );

    memset( h, 0, sizeof(*h) );
    h->pps = h->pps_array;
    x264_param_default( &h->param );
    h->param.analyse.i_luma_deadzone[0] = 0;
    h->param.analyse.i_luma_deadzone[1] = 0;
    h->param.analyse.b_transform_8x8 = 1;
    for( i = 0; i < 6; i++ )
        h->pps->scaling_list[i] = x264_cqm_flat16;
    x264_cqm_init( h );
    x264_quant_init( h, 0, &qf );

#define TEST_DCT( name, t1, t2, size ) \
    if( dct_asm.name != dct_ref.name ) \
    { \
        set_func_name( #name ); \
        used_asm = 1; \
        call_c( dct_c.name, t1, buf1, buf2 ); \
        call_a( dct_asm.name, t2, buf1, buf2 ); \
        if( memcmp( t1, t2, size ) ) \
        { \
            ok = 0; \
            fprintf( stderr, #name " [FAILED]\n" ); \
        } \
    }

    ok = 1; used_asm = 0;
    TEST_DCT( sub4x4_dct, dct1[0], dct2[0], 16*2 );
    TEST_DCT( sub8x8_dct, dct1, dct2, 16*2*4 );
    TEST_DCT( sub16x16_dct, dct1, dct2, 16*2*16 );
    report( "sub_dct4 :" );

    ok = 1; used_asm = 0;
    TEST_DCT( sub8x8_dct8, (void*)dct1[0], (void*)dct2[0], 64*2 );
    TEST_DCT( sub16x16_dct8, (void*)dct1, (void*)dct2, 64*2*4 );
    report( "sub_dct8 :" );
#undef TEST_DCT

    // fdct and idct are denormalized by different factors, so quant/dequant
    // is needed to force the coefs into the right range.
    dct_c.sub16x16_dct( dct4, buf1, buf2 );
    dct_c.sub16x16_dct8( dct8, buf1, buf2 );
    for( i = 0; i < 16; i++ )
    {
        qf.quant_4x4( dct4[i], h->quant4_mf[CQM_4IY][20], h->quant4_bias[CQM_4IY][20] );
        qf.dequant_4x4( dct4[i], h->dequant4_mf[CQM_4IY], 20 );
    }
    for( i = 0; i < 4; i++ )
    {
        qf.quant_8x8( dct8[i], h->quant8_mf[CQM_8IY][20], h->quant8_bias[CQM_8IY][20] );
        qf.dequant_8x8( dct8[i], h->dequant8_mf[CQM_8IY], 20 );
    }

#define TEST_IDCT( name, src ) \
    if( dct_asm.name != dct_ref.name ) \
    { \
        set_func_name( #name ); \
        used_asm = 1; \
        memcpy( buf3, buf1, 32*32 ); \
        memcpy( buf4, buf1, 32*32 ); \
        memcpy( dct1, src, 512 ); \
        memcpy( dct2, src, 512 ); \
        call_c1( dct_c.name, buf3, (void*)dct1 ); \
        call_a1( dct_asm.name, buf4, (void*)dct2 ); \
        if( memcmp( buf3, buf4, 32*32 ) ) \
        { \
            ok = 0; \
            fprintf( stderr, #name " [FAILED]\n" ); \
        } \
        call_c2( dct_c.name, buf3, (void*)dct1 ); \
        call_a2( dct_asm.name, buf4, (void*)dct2 ); \
    }

    ok = 1; used_asm = 0;
    TEST_IDCT( add4x4_idct, dct4 );
    TEST_IDCT( add8x8_idct, dct4 );
    TEST_IDCT( add16x16_idct, dct4 );
    report( "add_idct4 :" );

    ok = 1; used_asm = 0;
    TEST_IDCT( add8x8_idct8, dct8 );
    TEST_IDCT( add16x16_idct8, dct8 );
    report( "add_idct8 :" );
#undef TEST_IDCT

    ok = 1; used_asm = 0;
    if( dct_asm.dct4x4dc != dct_ref.dct4x4dc )
    {
        DECLARE_ALIGNED_16( int16_t dct1[4][4] ) = {{-12, 42, 23, 67},{2, 90, 89, 56},{67, 43, -76, 91},{56, -78, -54, 1}};
        DECLARE_ALIGNED_16( int16_t dct2[4][4] ) = {{-12, 42, 23, 67},{2, 90, 89, 56},{67, 43, -76, 91},{56, -78, -54, 1}};
        set_func_name( "dct4x4dc" );
        used_asm = 1;
        call_c1( dct_c.dct4x4dc, dct1 );
        call_a1( dct_asm.dct4x4dc, dct2 );
        if( memcmp( dct1, dct2, 32 ) )
        {
            ok = 0;
            fprintf( stderr, " - dct4x4dc : [FAILED]\n" );
        }
        call_c2( dct_c.dct4x4dc, dct1 );
        call_a2( dct_asm.dct4x4dc, dct2 );
    }
    if( dct_asm.idct4x4dc != dct_ref.idct4x4dc )
    {
        DECLARE_ALIGNED_16( int16_t dct1[4][4] ) = {{-12, 42, 23, 67},{2, 90, 89, 56},{67, 43, -76, 91},{56, -78, -54, 1}};
        DECLARE_ALIGNED_16( int16_t dct2[4][4] ) = {{-12, 42, 23, 67},{2, 90, 89, 56},{67, 43, -76, 91},{56, -78, -54, 1}};
        set_func_name( "idct4x4dc" );
        used_asm = 1;
        call_c1( dct_c.idct4x4dc, dct1 );
        call_a1( dct_asm.idct4x4dc, dct2 );
        if( memcmp( dct1, dct2, 32 ) )
        {
            ok = 0;
            fprintf( stderr, " - idct4x4dc : [FAILED]\n" );
        }
        call_c2( dct_c.idct4x4dc, dct1 );
        call_a2( dct_asm.idct4x4dc, dct2 );
    }
    report( "(i)dct4x4dc :" );

    ok = 1; used_asm = 0;
    if( dct_asm.dct2x2dc != dct_ref.dct2x2dc )
    {
        DECLARE_ALIGNED_16( int16_t dct1[2][2] ) = {{-12, 42},{2, 90}};
        DECLARE_ALIGNED_16( int16_t dct2[2][2] ) = {{-12, 42},{2, 90}};
        set_func_name( "dct2x2dc" );
        used_asm = 1;
        call_c( dct_c.dct2x2dc, dct1 );
        call_a( dct_asm.dct2x2dc, dct2 );
        if( memcmp( dct1, dct2, 4*2 ) )
        {
            ok = 0;
            fprintf( stderr, " - dct2x2dc : [FAILED]\n" );
        }
    }
    if( dct_asm.idct2x2dc != dct_ref.idct2x2dc )
    {
        DECLARE_ALIGNED_16( int16_t dct1[2][2] ) = {{-12, 42},{2, 90}};
        DECLARE_ALIGNED_16( int16_t dct2[2][2] ) = {{-12, 42},{2, 90}};
        set_func_name( "idct2x2dc" );
        used_asm = 1;
        call_c( dct_c.idct2x2dc, dct1 );
        call_a( dct_asm.idct2x2dc, dct2 );
        if( memcmp( dct1, dct2, 4*2 ) )
        {
            ok = 0;
            fprintf( stderr, " - idct2x2dc : [FAILED]\n" );
        }
    }
    report( "(i)dct2x2dc :" );

    x264_zigzag_function_t zigzag_c;
    x264_zigzag_function_t zigzag_ref;
    x264_zigzag_function_t zigzag_asm;
    DECLARE_ALIGNED_16( int16_t level1[64] );
    DECLARE_ALIGNED_16( int16_t level2[64] );

#define TEST_ZIGZAG_SCAN( name, t1, t2, dct, size ) \
    if( zigzag_asm.name != zigzag_ref.name ) \
    { \
        set_func_name( "zigzag_"#name"_%s", interlace?"field":"frame" ); \
        used_asm = 1; \
        call_c( zigzag_c.name, t1, dct ); \
        call_a( zigzag_asm.name, t2, dct ); \
        if( memcmp( t1, t2, size*sizeof(int16_t) ) ) \
        { \
            ok = 0; \
            fprintf( stderr, #name " [FAILED]\n" ); \
        } \
    }

#define TEST_ZIGZAG_SUB( name, t1, t2, size ) \
    if( zigzag_asm.name != zigzag_ref.name ) \
    { \
        set_func_name( "zigzag_"#name"_%s", interlace?"field":"frame" ); \
        used_asm = 1; \
        memcpy( buf3, buf1, 16*FDEC_STRIDE ); \
        memcpy( buf4, buf1, 16*FDEC_STRIDE ); \
        call_c1( zigzag_c.name, t1, buf2, buf3 ); \
        call_a1( zigzag_asm.name, t2, buf2, buf4 ); \
        if( memcmp( t1, t2, size*sizeof(int16_t) ) || memcmp( buf3, buf4, 16*FDEC_STRIDE ) ) \
        { \
            ok = 0; \
            fprintf( stderr, #name " [FAILED]\n" ); \
        } \
        call_c2( zigzag_c.name, t1, buf2, buf3 ); \
        call_a2( zigzag_asm.name, t2, buf2, buf4 ); \
    }

    interlace = 0;
    x264_zigzag_init( 0, &zigzag_c, 0 );
    x264_zigzag_init( cpu_ref, &zigzag_ref, 0 );
    x264_zigzag_init( cpu_new, &zigzag_asm, 0 );

    ok = 1; used_asm = 0;
    TEST_ZIGZAG_SCAN( scan_8x8, level1, level2, (void*)dct1, 64 );
    TEST_ZIGZAG_SCAN( scan_4x4, level1, level2, dct1[0], 16 );
    TEST_ZIGZAG_SUB( sub_4x4, level1, level2, 16 );
    report( "zigzag_frame :" );

    interlace = 1;
    x264_zigzag_init( 0, &zigzag_c, 1 );
    x264_zigzag_init( cpu_ref, &zigzag_ref, 1 );
    x264_zigzag_init( cpu_new, &zigzag_asm, 1 );

    ok = 1; used_asm = 0;
    TEST_ZIGZAG_SCAN( scan_8x8, level1, level2, (void*)dct1, 64 );
    TEST_ZIGZAG_SCAN( scan_4x4, level1, level2, dct1[0], 16 );
    TEST_ZIGZAG_SUB( sub_4x4, level1, level2, 16 );
    report( "zigzag_field :" );
#undef TEST_ZIGZAG_SCAN
#undef TEST_ZIGZAG_SUB

    return ret;
}
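/* All of these tests share the same accounting: "ok" accumulates the
 * pass/fail state of one group, "used_asm" records whether any
 * cpu-specific implementation actually differed from the reference and
 * was exercised, and report() prints the verdict and folds failures into
 * the return value. A simplified sketch of that pattern (the real
 * checkasm macro also handles benchmark bookkeeping; ok, used_asm and
 * ret come from the enclosing test function): */
#define report_sketch( name ) \
    do { \
        if( used_asm ) \
            fprintf( stderr, " - %-21s [%s]\n", name, ok ? "OK" : "FAILED" ); \
        if( !ok ) \
            ret = -1; \
    } while(0)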
static int check_pixel( int cpu_ref, int cpu_new )
{
    x264_pixel_function_t pixel_c;
    x264_pixel_function_t pixel_ref;
    x264_pixel_function_t pixel_asm;
    x264_predict_t predict_16x16[4+3];
    x264_predict_t predict_8x8c[4+3];
    x264_predict_t predict_4x4[9+3];
    x264_predict8x8_t predict_8x8[9+3];
    DECLARE_ALIGNED_16( uint8_t edge[33] );
    uint16_t cost_mv[32];
    int ret = 0, ok, used_asm;
    int i, j;

    x264_pixel_init( 0, &pixel_c );
    x264_pixel_init( cpu_ref, &pixel_ref );
    x264_pixel_init( cpu_new, &pixel_asm );
    x264_predict_16x16_init( 0, predict_16x16 );
    x264_predict_8x8c_init( 0, predict_8x8c );
    x264_predict_8x8_init( 0, predict_8x8 );
    x264_predict_4x4_init( 0, predict_4x4 );
    x264_predict_8x8_filter( buf2+40, edge, ALL_NEIGHBORS, ALL_NEIGHBORS );

#define TEST_PIXEL( name, align ) \
    for( i = 0, ok = 1, used_asm = 0; i < 7; i++ ) \
    { \
        int res_c, res_asm; \
        if( pixel_asm.name[i] != pixel_ref.name[i] ) \
        { \
            set_func_name( "%s_%s", #name, pixel_names[i] ); \
            for( j = 0; j < 64; j++ ) \
            { \
                used_asm = 1; \
                res_c = call_c( pixel_c.name[i], buf1, 16, buf2+j*!align, 64 ); \
                res_asm = call_a( pixel_asm.name[i], buf1, 16, buf2+j*!align, 64 ); \
                if( res_c != res_asm ) \
                { \
                    ok = 0; \
                    fprintf( stderr, #name "[%d]: %d != %d [FAILED]\n", i, res_c, res_asm ); \
                    break; \
                } \
            } \
        } \
    } \
    report( "pixel " #name " :" );

    TEST_PIXEL( sad, 0 );
    TEST_PIXEL( ssd, 1 );
    TEST_PIXEL( satd, 0 );
    TEST_PIXEL( sa8d, 0 );

#define TEST_PIXEL_X( N ) \
    for( i = 0, ok = 1, used_asm = 0; i < 7; i++ ) \
    { \
        int res_c[4] = {0}, res_asm[4] = {0}; \
        if( pixel_asm.sad_x##N[i] && pixel_asm.sad_x##N[i] != pixel_ref.sad_x##N[i] ) \
        { \
            set_func_name( "sad_x%d_%s", N, pixel_names[i] ); \
            for( j = 0; j < 64; j++ ) \
            { \
                uint8_t *pix2 = buf2+j; \
                used_asm = 1; \
                res_c[0] = pixel_c.sad[i]( buf1, 16, pix2, 64 ); \
                res_c[1] = pixel_c.sad[i]( buf1, 16, pix2+6, 64 ); \
                res_c[2] = pixel_c.sad[i]( buf1, 16, pix2+1, 64 ); \
                if( N == 4 ) \
                { \
                    res_c[3] = pixel_c.sad[i]( buf1, 16, pix2+10, 64 ); \
                    call_a( pixel_asm.sad_x4[i], buf1, pix2, pix2+6, pix2+1, pix2+10, 64, res_asm ); \
                } \
                else \
                    call_a( pixel_asm.sad_x3[i], buf1, pix2, pix2+6, pix2+1, 64, res_asm ); \
                if( memcmp( res_c, res_asm, sizeof(res_c) ) ) \
                { \
                    ok = 0; \
                    fprintf( stderr, "sad_x"#N"[%d]: %d,%d,%d,%d != %d,%d,%d,%d [FAILED]\n", \
                             i, res_c[0], res_c[1], res_c[2], res_c[3], \
                             res_asm[0], res_asm[1], res_asm[2], res_asm[3] ); \
                } \
                if( N == 4 ) \
                    call_c2( pixel_c.sad_x4[i], buf1, pix2, pix2+6, pix2+1, pix2+10, 64, res_asm ); \
                else \
                    call_c2( pixel_c.sad_x3[i], buf1, pix2, pix2+6, pix2+1, 64, res_asm ); \
            } \
        } \
    } \
    report( "pixel sad_x"#N" :" );

    TEST_PIXEL_X(3);
    TEST_PIXEL_X(4);

#define TEST_PIXEL_VAR( i ) \
    if( pixel_asm.var[i] != pixel_ref.var[i] ) \
    { \
        uint32_t res_c, res_asm; \
        uint32_t sad_c, sad_asm; \
        set_func_name( "%s_%s", "var", pixel_names[i] ); \
        used_asm = 1; \
        res_c = call_c( pixel_c.var[i], buf1, 16, &sad_c ); \
        res_asm = call_a( pixel_asm.var[i], buf1, 16, &sad_asm ); \
        if( (res_c != res_asm) || (sad_c != sad_asm) ) \
        { \
            ok = 0; \
            fprintf( stderr, "var[%d]: %d,%d != %d,%d [FAILED]\n", i, res_c, sad_c, res_asm, sad_asm ); \
        } \
    }

    ok = 1; used_asm = 0;
    TEST_PIXEL_VAR( PIXEL_16x16 );
    TEST_PIXEL_VAR( PIXEL_8x8 );
    report( "pixel var :" );

#define TEST_INTRA_SATD( name, pred, satd, i8x8, ... ) \
    if( pixel_asm.name && pixel_asm.name != pixel_ref.name ) \
    { \
        int res_c[3], res_asm[3]; \
        set_func_name( #name ); \
        used_asm = 1; \
        memcpy( buf3, buf2, 1024 ); \
        for( i = 0; i < 3; i++ ) \
        { \
            pred[i]( buf3+40, ##__VA_ARGS__ ); \
            res_c[i] = pixel_c.satd( buf1+40, 16, buf3+40, 32 ); \
        } \
        call_a( pixel_asm.name, buf1+40, i8x8 ? edge : buf3+40, res_asm ); \
        if( memcmp( res_c, res_asm, sizeof(res_c) ) ) \
        { \
            ok = 0; \
            fprintf( stderr, #name": %d,%d,%d != %d,%d,%d [FAILED]\n", \
                     res_c[0], res_c[1], res_c[2], \
                     res_asm[0], res_asm[1], res_asm[2] ); \
        } \
    }

    ok = 1; used_asm = 0;
    TEST_INTRA_SATD( intra_satd_x3_16x16, predict_16x16, satd[PIXEL_16x16], 0 );
    TEST_INTRA_SATD( intra_satd_x3_8x8c, predict_8x8c, satd[PIXEL_8x8], 0 );
    TEST_INTRA_SATD( intra_satd_x3_4x4, predict_4x4, satd[PIXEL_4x4], 0 );
    TEST_INTRA_SATD( intra_sa8d_x3_8x8, predict_8x8, sa8d[PIXEL_8x8], 1, edge );
    report( "intra satd_x3 :" );

    if( pixel_asm.ssim_4x4x2_core != pixel_ref.ssim_4x4x2_core ||
        pixel_asm.ssim_end4 != pixel_ref.ssim_end4 )
    {
        float res_c, res_a;
        int sums[5][4] = {{0}};
        used_asm = ok = 1;
        x264_emms();
        res_c = x264_pixel_ssim_wxh( &pixel_c, buf1+2, 32, buf2+2, 32, 32, 28 );
        res_a = x264_pixel_ssim_wxh( &pixel_asm, buf1+2, 32, buf2+2, 32, 32, 28 );
        if( fabs(res_c - res_a) > 1e-6 )
        {
            ok = 0;
            fprintf( stderr, "ssim: %.7f != %.7f [FAILED]\n", res_c, res_a );
        }
        set_func_name( "ssim_core" );
        call_c2( pixel_c.ssim_4x4x2_core, buf1+2, 32, buf2+2, 32, sums );
        call_a2( pixel_asm.ssim_4x4x2_core, buf1+2, 32, buf2+2, 32, sums );
        set_func_name( "ssim_end" );
        call_c2( pixel_c.ssim_end4, sums, sums, 4 );
        call_a2( pixel_asm.ssim_end4, sums, sums, 4 );
        report( "ssim :" );
    }

    ok = 1; used_asm = 0;
    for( i = 0; i < 32; i++ )
        cost_mv[i] = i*10;
    for( i = 0; i < 100 && ok; i++ )
        if( pixel_asm.ads[i&3] != pixel_ref.ads[i&3] )
        {
            DECLARE_ALIGNED_16( uint16_t sums[72] );
            DECLARE_ALIGNED_16( int dc[4] );
            int16_t mvs_a[32], mvs_c[32];
            int mvn_a, mvn_c;
            int thresh = rand() & 0x3fff;
            set_func_name( "esa_ads" );
            for( j = 0; j < 72; j++ )
                sums[j] = rand() & 0x3fff;
            for( j = 0; j < 4; j++ )
                dc[j] = rand() & 0x3fff;
            used_asm = 1;
            mvn_c = call_c( pixel_c.ads[i&3], dc, sums, 32, cost_mv, mvs_c, 28, thresh );
            mvn_a = call_a( pixel_asm.ads[i&3], dc, sums, 32, cost_mv, mvs_a, 28, thresh );
            if( mvn_c != mvn_a || memcmp( mvs_c, mvs_a, mvn_c*sizeof(*mvs_c) ) )
            {
                ok = 0;
                printf( "c%d: ", i&3 );
                for( j = 0; j < mvn_c; j++ )
                    printf( "%d ", mvs_c[j] );
                printf( "\na%d: ", i&3 );
                for( j = 0; j < mvn_a; j++ )
                    printf( "%d ", mvs_a[j] );
                printf( "\n\n" );
            }
        }
    report( "esa ads:" );

    return ret;
}
static int check_intra( int cpu_ref, int cpu_new )
{
    int ret = 0, ok = 1, used_asm = 0;
    int i;
    DECLARE_ALIGNED_16( uint8_t edge[33] );
    struct
    {
        x264_predict_t    predict_16x16[4+3];
        x264_predict_t    predict_8x8c[4+3];
        x264_predict8x8_t predict_8x8[9+3];
        x264_predict_t    predict_4x4[9+3];
    } ip_c, ip_ref, ip_a;

    x264_predict_16x16_init( 0, ip_c.predict_16x16 );
    x264_predict_8x8c_init( 0, ip_c.predict_8x8c );
    x264_predict_8x8_init( 0, ip_c.predict_8x8 );
    x264_predict_4x4_init( 0, ip_c.predict_4x4 );

    x264_predict_16x16_init( cpu_ref, ip_ref.predict_16x16 );
    x264_predict_8x8c_init( cpu_ref, ip_ref.predict_8x8c );
    x264_predict_8x8_init( cpu_ref, ip_ref.predict_8x8 );
    x264_predict_4x4_init( cpu_ref, ip_ref.predict_4x4 );

    x264_predict_16x16_init( cpu_new, ip_a.predict_16x16 );
    x264_predict_8x8c_init( cpu_new, ip_a.predict_8x8c );
    x264_predict_8x8_init( cpu_new, ip_a.predict_8x8 );
    x264_predict_4x4_init( cpu_new, ip_a.predict_4x4 );

    x264_predict_8x8_filter( buf1+48, edge, ALL_NEIGHBORS, ALL_NEIGHBORS );

#define INTRA_TEST( name, dir, w, ... ) \
    if( ip_a.name[dir] != ip_ref.name[dir] ) \
    { \
        set_func_name( "intra_%s_%s", #name, intra_##name##_names[dir] ); \
        used_asm = 1; \
        memcpy( buf3, buf1, 32*20 ); \
        memcpy( buf4, buf1, 32*20 ); \
        call_c( ip_c.name[dir], buf3+48, ##__VA_ARGS__ ); \
        call_a( ip_a.name[dir], buf4+48, ##__VA_ARGS__ ); \
        if( memcmp( buf3, buf4, 32*20 ) ) \
        { \
            fprintf( stderr, #name "[%d] : [FAILED]\n", dir ); \
            ok = 0; \
            int j, k; \
            for( k = -1; k < 16; k++ ) \
                printf( "%2x ", edge[16+k] ); \
            printf( "\n" ); \
            for( j = 0; j < w; j++ ) \
            { \
                printf( "%2x ", edge[14-j] ); \
                for( k = 0; k < w; k++ ) \
                    printf( "%2x ", buf4[48+k+j*32] ); \
                printf( "\n" ); \
            } \
            printf( "\n" ); \
            for( j = 0; j < w; j++ ) \
            { \
                printf( "   " ); \
                for( k = 0; k < w; k++ ) \
                    printf( "%2x ", buf3[48+k+j*32] ); \
                printf( "\n" ); \
            } \
        } \
    }

    for( i = 0; i < 12; i++ )
        INTRA_TEST( predict_4x4, i, 4 );
    for( i = 0; i < 7; i++ )
        INTRA_TEST( predict_8x8c, i, 8 );
    for( i = 0; i < 7; i++ )
        INTRA_TEST( predict_16x16, i, 16 );
    for( i = 0; i < 12; i++ )
        INTRA_TEST( predict_8x8, i, 8, edge );

    report( "intra pred :" );
    return ret;
}