/* Predict the motion vector for a 16x16 partition as the median of the
 * A (left), B (top) and C (top-right) neighbour MVs (H.264 8.4.1.3).
 *
 * i_list selects the reference list (0 = L0, 1 = L1); i_ref is the
 * reference index used by the current block; the predictor is written to
 * mvp[0..1].
 *
 * FIX(review): restored the missing i_list parameter — both call sites in
 * this file invoke this function with four arguments
 * ( h, 0, i_ref, m.mvp ), so the previous 3-parameter signature could not
 * compile against them.  The ref/mv caches are indexed per-list
 * accordingly.  A neighbour ref of -2 means "outside the picture /
 * unavailable". */
void x264_mb_predict_mv_16x16( x264_t *h, int i_list, int i_ref, int16_t mvp[2] )
{
    int     i_refa = h->mb.cache.ref[i_list][X264_SCAN8_0 - 1];
    int16_t *mv_a  = h->mb.cache.mv[i_list][X264_SCAN8_0 - 1];
    int     i_refb = h->mb.cache.ref[i_list][X264_SCAN8_0 - 8];
    int16_t *mv_b  = h->mb.cache.mv[i_list][X264_SCAN8_0 - 8];
    int     i_refc = h->mb.cache.ref[i_list][X264_SCAN8_0 - 8 + 4];
    int16_t *mv_c  = h->mb.cache.mv[i_list][X264_SCAN8_0 - 8 + 4];

    /* C (top-right) unavailable: fall back to D (top-left), per the spec. */
    if( i_refc == -2 )
    {
        i_refc = h->mb.cache.ref[i_list][X264_SCAN8_0 - 8 - 1];
        mv_c   = h->mb.cache.mv[i_list][X264_SCAN8_0 - 8 - 1];
    }

    int i_count = (i_refa == i_ref) + (i_refb == i_ref) + (i_refc == i_ref);

    if( i_count > 1 )
    {
median:
        x264_median_mv( mvp, mv_a, mv_b, mv_c );
    }
    else if( i_count == 1 )
    {
        /* Exactly one neighbour shares our reference: copy its MV directly. */
        if( i_refa == i_ref )
            CP32( mvp, mv_a );
        else if( i_refb == i_ref )
            CP32( mvp, mv_b );
        else
            CP32( mvp, mv_c );
    }
    else if( i_refb == -2 && i_refc == -2 && i_refa != -2 )
        /* Only the left neighbour exists: use it (spec special case). */
        CP32( mvp, mv_a );
    else
        goto median;
}
//{ mb_analyse_inter_p8x8 //{ mb_analyse_inter_p8x8 //{ mb_analyse_inter_p8x8 //{ mb_analyse_inter_p8x8 static void dull_mb_analyse_inter_p8x8_2( x264_t *h, x264_mb_analysis_t *a ) { /* Duplicate refs are rarely useful in p8x8 due to the high cost of the * reference frame flags. Thus, if we're not doing mixedrefs, just * don't bother analysing the dupes. */ const int i_ref = h->mb.ref_blind_dupe == a->l0.me16x16.i_ref ? 0 : a->l0.me16x16.i_ref; const int i_ref_cost = h->param.b_cabac || i_ref ? REF_COST( 0, i_ref ) : 0; pixel **p_fenc = h->mb.pic.p_fenc; int i_mvc; int16_t (*mvc)[2] = a->l0.mvc[i_ref]; int i; /* XXX Needed for x264_mb_predict_mv */ h->mb.i_partition = D_8x8; i_mvc = 1; CP32( mvc[0], a->l0.me16x16.mv ); for( i = 0; i < 4; i++ ) { x264_me_t *m = &a->l0.me8x8[i]; int x8 = i&1; int y8 = i>>1; m->i_pixel = PIXEL_8x8; m->i_ref_cost = i_ref_cost; LOAD_FENC( m, p_fenc, 8*x8, 8*y8 ); LOAD_HPELS( m, h->mb.pic.p_fref[0][i_ref], 0, i_ref, 8*x8, 8*y8 ); LOAD_WPELS( m, h->mb.pic.p_fref_w[i_ref], 0, i_ref, 8*x8, 8*y8 ); x264_mb_predict_mv( h, 0, 4*i, 2, m->mvp ); x264_me_search( h, m, mvc, i_mvc ); x264_macroblock_cache_mv_ptr( h, 2*x8, 2*y8, 2, 2, 0, m->mv ); CP32( mvc[i_mvc], m->mv ); i_mvc++; a->i_satd8x8[0][i] = m->cost - m->cost_mv; /* mb type cost */ m->cost += i_ref_cost; if( !h->param.b_cabac || (h->param.analyse.inter & X264_ANALYSE_PSUB8x8) ) m->cost += a->i_lambda * i_sub_mb_p_cost_table[D_L0_8x8]; } a->l0.i_cost8x8 = a->l0.me8x8[0].cost + a->l0.me8x8[1].cost + a->l0.me8x8[2].cost + a->l0.me8x8[3].cost; /* theoretically this should include 4*ref_cost, * but 3 seems a better approximation of cabac. */ if( h->param.b_cabac ) a->l0.i_cost8x8 -= i_ref_cost; h->mb.i_sub_partition[0] = h->mb.i_sub_partition[1] = h->mb.i_sub_partition[2] = h->mb.i_sub_partition[3] = D_L0_8x8; }
//{ mb_analyse_inter_p16x16 //{ mb_analyse_inter_p16x16 //{ mb_analyse_inter_p16x16 //{ mb_analyse_inter_p16x16 static void dull_mb_analyse_inter_p16x16_1( x264_t *h, x264_mb_analysis_t *a ) { int i_ref, i_mvc; ALIGNED_4( int16_t mvc[8][2] ); x264_me_t m; m.i_pixel = PIXEL_16x16; LOAD_FENC( &m, h->mb.pic.p_fenc, 0, 0 ); a->l0.me16x16.cost = INT_MAX; for( i_ref = 0; i_ref < h->mb.pic.i_fref[0]; i_ref++ ) { m.i_ref_cost = REF_COST( 0, i_ref ); /* search with ref */ LOAD_HPELS( &m, h->mb.pic.p_fref[0][i_ref], 0, i_ref, 0, 0 ); LOAD_WPELS( &m, h->mb.pic.p_fref_w[i_ref], 0, i_ref, 0, 0 ); x264_mb_predict_mv_16x16( h, 0, i_ref, m.mvp ); { x264_mb_predict_mv_ref16x16( h, 0, i_ref, mvc, &i_mvc ); dull_me_search_ref_1( h, &m, mvc, i_mvc, NULL ); } /* save mv for predicting neighbors */ CP32( h->mb.mvr[0][i_ref][h->mb.i_mb_xy], m.mv ); CP32( a->l0.mvc[i_ref][0], m.mv ); /* early termination * SSD threshold would probably be better than SATD */ if( i_ref == 0 && abs(m.mv[0]-h->mb.cache.pskip_mv[0]) + abs(m.mv[1]-h->mb.cache.pskip_mv[1]) <= 1 && x264_macroblock_probe_pskip( h ) ) { h->mb.i_type = P_SKIP; x264_analyse_update_cache( h, a ); assert( h->mb.cache.pskip_mv[1] <= h->mb.mv_max_spel[1] || h->i_thread_frames == 1 ); return; } h->mc.memcpy_aligned( &a->l0.me16x16, &m, sizeof(x264_me_t) ); } x264_macroblock_cache_ref( h, 0, 0, 4, 4, 0, a->l0.me16x16.i_ref ); assert( a->l0.me16x16.mv[1] <= h->mb.mv_max_spel[1] || h->i_thread_frames == 1 ); h->mb.i_type = P_L0; }
/* Predict the motion vector for sub-16x16 partition `idx` (scan-8 order)
 * of width `i_width` (in 4x4 units), writing the predictor to mvp[0..1].
 * Uses the median of neighbours A/B/C with the H.264 directional special
 * cases for 16x8 and 8x16 partitions (8.4.1.3).
 *
 * FIX(review): restored the missing i_list parameter — the caller in this
 * file invokes x264_mb_predict_mv( h, 0, 4*i, 2, m->mvp ) with five
 * arguments, so the previous 4-parameter signature could not compile
 * against it.  Caches are indexed per reference list accordingly.
 * A neighbour ref of -2 means "unavailable". */
void x264_mb_predict_mv( x264_t *h, int i_list, int idx, int i_width, int16_t mvp[2] )
{
    const int i8 = x264_scan8[idx];
    const int i_ref = h->mb.cache.ref[i_list][i8];
    int     i_refa = h->mb.cache.ref[i_list][i8 - 1];
    int16_t *mv_a  = h->mb.cache.mv[i_list][i8 - 1];
    int     i_refb = h->mb.cache.ref[i_list][i8 - 8];
    int16_t *mv_b  = h->mb.cache.mv[i_list][i8 - 8];
    int     i_refc = h->mb.cache.ref[i_list][i8 - 8 + i_width];
    int16_t *mv_c  = h->mb.cache.mv[i_list][i8 - 8 + i_width];

    // Partitions not yet reached in scan order are unavailable.
    if( (idx&3) >= 2 + (i_width&1) || i_refc == -2 )
    {
        /* Fall back to the top-left (D) neighbour. */
        i_refc = h->mb.cache.ref[i_list][i8 - 8 - 1];
        mv_c   = h->mb.cache.mv[i_list][i8 - 8 - 1];
    }

    if( h->mb.i_partition == D_16x8 )
    {
        /* Top half prefers B, bottom half prefers A, when refs match. */
        if( idx == 0 )
        {
            if( i_refb == i_ref )
            {
                CP32( mvp, mv_b );
                return;
            }
        }
        else
        {
            if( i_refa == i_ref )
            {
                CP32( mvp, mv_a );
                return;
            }
        }
    }
    else if( h->mb.i_partition == D_8x16 )
    {
        /* Left half prefers A, right half prefers C, when refs match. */
        if( idx == 0 )
        {
            if( i_refa == i_ref )
            {
                CP32( mvp, mv_a );
                return;
            }
        }
        else
        {
            if( i_refc == i_ref )
            {
                CP32( mvp, mv_c );
                return;
            }
        }
    }

    int i_count = (i_refa == i_ref) + (i_refb == i_ref) + (i_refc == i_ref);

    if( i_count > 1 )
    {
median:
        x264_median_mv( mvp, mv_a, mv_b, mv_c );
    }
    else if( i_count == 1 )
    {
        /* Exactly one neighbour shares our reference: copy its MV. */
        if( i_refa == i_ref )
            CP32( mvp, mv_a );
        else if( i_refb == i_ref )
            CP32( mvp, mv_b );
        else
            CP32( mvp, mv_c );
    }
    else if( i_refb == -2 && i_refc == -2 && i_refa != -2 )
        /* Only the left neighbour exists: use it (spec special case). */
        CP32( mvp, mv_a );
    else
        goto median;
}
/* mb_analyse_inter_p16x16: motion-estimate the full 16x16 partition against
 * every L0 reference frame, keeping the cheapest result (MV cost + SATD +
 * ref cost) in a->l0.me16x16.  May early-out to P_SKIP when ref 0's MV is
 * within one unit of the predicted skip MV and the skip probe succeeds.
 * With mbrd enabled, additionally RD-checks whether a skip-equivalent MV
 * should demote the macroblock type to P_SKIP. */
static void dull_mb_analyse_inter_p16x16_2( x264_t *h, x264_mb_analysis_t *a )
{
    int i_ref, i_mvc;
    ALIGNED_4( int16_t mvc[8][2] );
    /* NOTE(review): the halfpel threshold pointer is NULL here, so the
     * threshold accumulator feeds nothing downstream — presumably kept so
     * the loop body mirrors the thresholded variant; confirm before
     * removing. */
    int i_halfpel_thresh = INT_MAX;
    int *p_halfpel_thresh = NULL;
    x264_me_t m;
    m.i_pixel = PIXEL_16x16;
    LOAD_FENC( &m, h->mb.pic.p_fenc, 0, 0 );

    a->l0.me16x16.cost = INT_MAX;
    for( i_ref = 0; i_ref < h->mb.pic.i_fref[0]; i_ref++ )
    {
        m.i_ref_cost = REF_COST( 0, i_ref );
        i_halfpel_thresh -= m.i_ref_cost;

        /* search with ref */
        LOAD_HPELS( &m, h->mb.pic.p_fref[0][i_ref], 0, i_ref, 0, 0 );
        LOAD_WPELS( &m, h->mb.pic.p_fref_w[i_ref], 0, i_ref, 0, 0 );

        x264_mb_predict_mv_16x16( h, 0, i_ref, m.mvp );

        if( h->mb.ref_blind_dupe == i_ref )
        {
            /* Blind duplicate of ref 0: reuse ref 0's MV and only refine
             * at qpel precision instead of running a full search. */
            CP32( m.mv, a->l0.mvc[0][0] );
            x264_me_refine_qpel_refdupe( h, &m, p_halfpel_thresh );
        }
        else
        {
            x264_mb_predict_mv_ref16x16( h, 0, i_ref, mvc, &i_mvc );
            x264_me_search_ref( h, &m, mvc, i_mvc, p_halfpel_thresh );
        }

        /* save mv for predicting neighbors */
        CP32( h->mb.mvr[0][i_ref][h->mb.i_mb_xy], m.mv );
        CP32( a->l0.mvc[i_ref][0], m.mv );

        /* early termination
         * SSD threshold would probably be better than SATD */
        if( i_ref == 0
            && a->b_try_skip
            && m.cost-m.cost_mv < 300*a->i_lambda
            && abs(m.mv[0]-h->mb.cache.pskip_mv[0]) + abs(m.mv[1]-h->mb.cache.pskip_mv[1]) <= 1
            && x264_macroblock_probe_pskip( h ) )
        {
            h->mb.i_type = P_SKIP;
            x264_analyse_update_cache( h, a );
            assert( h->mb.cache.pskip_mv[1] <= h->mb.mv_max_spel[1] || h->i_thread_frames == 1 );
            return;
        }

        /* Charge the reference-index cost, then keep only the best ref. */
        m.cost += m.i_ref_cost;
        i_halfpel_thresh += m.i_ref_cost;

        if( m.cost < a->l0.me16x16.cost )
            h->mc.memcpy_aligned( &a->l0.me16x16, &m, sizeof(x264_me_t) );
    }

    x264_macroblock_cache_ref( h, 0, 0, 4, 4, 0, a->l0.me16x16.i_ref );
    assert( a->l0.me16x16.mv[1] <= h->mb.mv_max_spel[1] || h->i_thread_frames == 1 );

    h->mb.i_type = P_L0;
    if( a->i_mbrd )
    {
        x264_mb_init_fenc_cache( h, a->i_mbrd >= 2 || h->param.analyse.inter & X264_ANALYSE_PSUB8x8 );
        /* If the best 16x16 result equals the skip prediction on ref 0,
         * RD-check whether coding it as P_SKIP is cheaper. */
        if( a->l0.me16x16.i_ref == 0 && M32( a->l0.me16x16.mv ) == M32( h->mb.cache.pskip_mv ) && !a->b_force_intra )
        {
            h->mb.i_partition = D_16x16;
            x264_macroblock_cache_mv_ptr( h, 0, 0, 4, 4, 0, a->l0.me16x16.mv );
            a->l0.i_rd16x16 = x264_rd_cost_mb( h, a->i_lambda2 );
            if( !(h->mb.i_cbp_luma|h->mb.i_cbp_chroma) )
                h->mb.i_type = P_SKIP;
        }
    }
}