void SubpelRefine::MatchPic(const PicArray& pic_data , const PicArray& refup_data , MEData& me_data ,
                            int ref_id)
{
    // Match a picture against a single reference. Loop over all the blocks
    // doing the matching

    // Initialisation //
    ////////////////////

    // Provide aliases for the appropriate motion vector data components
    MvArray& mv_array = me_data.Vectors( ref_id );
    TwoDArray<MvCostData>& pred_costs = me_data.PredCosts( ref_id );

    // Provide a block matching object to do the work
    BlockMatcher my_bmatch( pic_data , refup_data , m_predparams->LumaBParams(2) ,
                            m_predparams->MVPrecision() , mv_array , pred_costs );

    // Do the work //
    /////////////////

    // Loop over all the blocks, doing the work
    for (int yblock=0 ; yblock<m_predparams->YNumBlocks() ; ++yblock)
    {
        for (int xblock=0 ; xblock<m_predparams->XNumBlocks() ; ++xblock)
        {
            DoBlock(xblock , yblock , my_bmatch , me_data , ref_id );
        }// xblock
    }// yblock
}
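MatchPic handles a single reference. The driver that DoME invokes in Step 2 below, SubpelRefine::DoSubpel, is not part of this listing; the sketch that follows shows roughly how such a driver might iterate over the frame's references and call MatchPic. It assumes the FrameBuffer accessors used elsewhere in this listing (GetFparams().Refs(), GetComponent, GetUpComponent) and that MEData indexes its references from 1, as the Vectors(1)/Vectors(2) calls in DoME suggest.

// Hypothetical sketch only -- the real DoSubpel is not shown in this listing.
void SubpelRefine::DoSubpel( const FrameBuffer& my_buffer , int frame_num , MEData& me_data )
{
    // Refine the vectors for each reference of this frame in turn
    const std::vector<int>& refs = my_buffer.GetFrame(frame_num).GetFparams().Refs();
    const int num_refs = refs.size();

    for (int r=0 ; r<num_refs ; ++r)
    {
        MatchPic( my_buffer.GetComponent( frame_num , Y_COMP ) ,
                  my_buffer.GetUpComponent( refs[r] , Y_COMP ) ,
                  me_data ,
                  r+1 );
    }
}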
void SubpelRefine::DoBlock(const int xblock , const int yblock ,
                           BlockMatcher& my_bmatch , MEData& me_data , const int ref_id )
{
    // For each block, home in on the sub-pixel vector

    // Provide aliases for the appropriate motion vector data components
    MvArray& mv_array = me_data.Vectors( ref_id );

    const MVector mv_pred = GetPred( xblock , yblock , mv_array );
    const float loc_lambda = me_data.LambdaMap()[yblock][xblock];

    my_bmatch.RefineMatchSubp( xblock , yblock , mv_pred , loc_lambda );
}
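DoBlock seeds RefineMatchSubp with a spatial prediction from GetPred, which is not shown in this listing. A common choice for such a predictor, and a plausible reading of what GetPred does here, is the component-wise median of neighbouring vectors that have already been refined. The sketch below uses a hypothetical name and neighbourhood, and assumes MVector exposes x/y members and MvArray is indexed [y][x] as in DoME.

#include <algorithm>
#include <vector>

// Hypothetical median-of-neighbours predictor, illustrating what GetPred
// plausibly computes.  Uses the left, top and top-right neighbours where
// available and falls back to the zero vector at picture edges.
static MVector MedianOfNeighbours( const int xblock , const int yblock , const MvArray& mv_array )
{
    std::vector<int> xvals , yvals;

    if ( xblock > 0 )                                   // left neighbour
    {
        xvals.push_back( mv_array[yblock][xblock-1].x );
        yvals.push_back( mv_array[yblock][xblock-1].y );
    }
    if ( yblock > 0 )                                   // top neighbour
    {
        xvals.push_back( mv_array[yblock-1][xblock].x );
        yvals.push_back( mv_array[yblock-1][xblock].y );
    }
    if ( yblock > 0 && xblock < mv_array.LengthX()-1 )  // top-right neighbour
    {
        xvals.push_back( mv_array[yblock-1][xblock+1].x );
        yvals.push_back( mv_array[yblock-1][xblock+1].y );
    }

    MVector pred( 0 , 0 );
    if ( !xvals.empty() )
    {
        std::sort( xvals.begin() , xvals.end() );
        std::sort( yvals.begin() , yvals.end() );
        pred.x = xvals[ xvals.size()/2 ];
        pred.y = yvals[ yvals.size()/2 ];
    }
    return pred;
}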
void MotionEstimator::DoME(const FrameBuffer& my_buffer, int frame_num, MEData& me_data)
{
    const FrameParams& fparams = my_buffer.GetFrame(frame_num).GetFparams();

    // Step 1.
    // Initial search gives vectors for each reference accurate to 1 pixel
    PixelMatcher pix_match( m_encparams );
    pix_match.DoSearch( my_buffer , frame_num , me_data);

    float lambda;

    // Get the references
    const std::vector<int>& refs = my_buffer.GetFrame(frame_num).GetFparams().Refs();
    const int num_refs = refs.size();

    if ( fparams.IsBFrame())
        lambda = m_encparams.L2MELambda();
    else
        lambda = m_encparams.L1MELambda();

    // Set up the lambda to be used
    me_data.SetLambdaMap( num_refs , lambda );

    MVPrecisionType orig_prec = m_encparams.MVPrecision();

    // Step 2.
    // Pixel-accurate vectors are then refined to sub-pixel accuracy
    if (orig_prec != MV_PRECISION_PIXEL)
    {
        SubpelRefine pelrefine( m_encparams );
        pelrefine.DoSubpel( my_buffer , frame_num , me_data );
    }
    else
    {
        // FIXME: HACK HACK
        // Multiplying the motion vectors by 2 and setting MV precision to
        // HALF_PIXEL to implement pixel-accurate motion estimation
        MvArray &mv_arr1 = me_data.Vectors(1);
        for (int j = 0; j < mv_arr1.LengthY(); ++j)
        {
            for (int i = 0; i < mv_arr1.LengthX(); ++i)
                mv_arr1[j][i] = mv_arr1[j][i] << 1;
        }
        if (num_refs > 1)
        {
            MvArray &mv_arr2 = me_data.Vectors(2);
            for (int j = 0; j < mv_arr2.LengthY(); ++j)
            {
                for (int i = 0; i < mv_arr2.LengthX(); ++i)
                    mv_arr2[j][i] = mv_arr2[j][i] << 1;
            }
        }
        m_encparams.SetMVPrecision(MV_PRECISION_HALF_PIXEL);
    }

    // Step 3.
    // We now have to decide how each macroblock should be split
    // and which references should be used, and so on.
    ModeDecider my_mode_dec( m_encparams );
    my_mode_dec.DoModeDecn( my_buffer , frame_num , me_data );

    if (orig_prec == MV_PRECISION_PIXEL)
    {
        // FIXME: HACK HACK
        // Divide the motion vectors by 2 to convert back to pixel-accurate
        // motion vectors and reset MV precision to PIXEL accuracy
        MvArray &mv_arr1 = me_data.Vectors(1);
        for (int j = 0; j < mv_arr1.LengthY(); ++j)
        {
            for (int i = 0; i < mv_arr1.LengthX(); ++i)
                mv_arr1[j][i] = mv_arr1[j][i] >> 1;
        }
        if (num_refs > 1)
        {
            MvArray &mv_arr2 = me_data.Vectors(2);
            for (int j = 0; j < mv_arr2.LengthY(); ++j)
            {
                for (int i = 0; i < mv_arr2.LengthX(); ++i)
                    mv_arr2[j][i] = mv_arr2[j][i] >> 1;
            }
        }
        m_encparams.SetMVPrecision(MV_PRECISION_PIXEL);
    }
}
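Motion vectors in MEData are stored in units of the configured MV precision, which is why the pixel-accuracy hack above simply doubles each vector before running the half-pixel pipeline and halves it again after mode decision. The duplicated loops could be factored into a small helper like the one sketched below; this is a suggested refactor, not code from the encoder, and it relies only on the MvArray accessors and MVector shift operators already used in DoME.

// Hypothetical helper (not in the source): scale every vector for one
// reference by a power of two.  A positive shift moves to finer units
// (pixel -> half-pixel); the matching negative shift undoes it afterwards.
static void ScaleVectors( MvArray& mv_arr , const int shift )
{
    for (int j = 0; j < mv_arr.LengthY(); ++j)
    {
        for (int i = 0; i < mv_arr.LengthX(); ++i)
        {
            mv_arr[j][i] = ( shift >= 0 ) ? ( mv_arr[j][i] << shift )
                                          : ( mv_arr[j][i] >> -shift );
        }
    }
}

With such a helper, the pixel-precision branch would reduce to ScaleVectors( me_data.Vectors(1) , 1 ) (and the same for Vectors(2) when there is a second reference), with a shift of -1 applied after DoModeDecn.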
void ModeDecider::DoModeDecn(const FrameBuffer& my_buffer, int frame_num, MEData& me_data)
{
    // We've got 'raw' block motion vectors for up to two reference frames. Now we want
    // to make a decision as to mode. In this initial implementation, this is bottom-up,
    // i.e. find MVs for MBs and sub-MBs and see whether it's worthwhile merging.

    int ref1,ref2;

    // Initialise //
    ////////////////

    fsort = my_buffer.GetFrame(frame_num).GetFparams().FSort();

    if (fsort.IsInter())
    {
        // Extract the references
        const vector<int>& refs = my_buffer.GetFrame(frame_num).GetFparams().Refs();
        num_refs = refs.size();
        ref1 = refs[0];

        // The picture we're doing estimation from
        m_pic_data = &(my_buffer.GetComponent( frame_num , Y_COMP));

        // Set up the hierarchy of motion vector data objects
        m_me_data_set[0] = new MEData( m_encparams.XNumMB() , m_encparams.YNumMB() ,
                                       m_encparams.XNumBlocks()/4 , m_encparams.YNumBlocks()/4 , num_refs );
        m_me_data_set[1] = new MEData( m_encparams.XNumMB() , m_encparams.YNumMB() ,
                                       m_encparams.XNumBlocks()/2 , m_encparams.YNumBlocks()/2 , num_refs );
        m_me_data_set[2] = &me_data;

        // Set up the lambdas to use per block
        m_me_data_set[0]->SetLambdaMap( 0 , me_data.LambdaMap() , 1.0/m_level_factor[0] );
        m_me_data_set[1]->SetLambdaMap( 1 , me_data.LambdaMap() , 1.0/m_level_factor[1] );

        // Set up the reference pictures
        m_ref1_updata = &(my_buffer.GetUpComponent( ref1 , Y_COMP));

        if (num_refs>1)
        {
            ref2 = refs[1];
            m_ref2_updata = &(my_buffer.GetUpComponent( ref2 , Y_COMP));

            // Create an object for computing bi-directional prediction calculations
            if ( m_encparams.MVPrecision()==MV_PRECISION_EIGHTH_PIXEL )
                m_bicheckdiff = new BiBlockEighthPel( *m_ref1_updata , *m_ref2_updata , *m_pic_data );
            else if ( m_encparams.MVPrecision()==MV_PRECISION_QUARTER_PIXEL )
                m_bicheckdiff = new BiBlockQuarterPel( *m_ref1_updata , *m_ref2_updata , *m_pic_data );
            else
                m_bicheckdiff = new BiBlockHalfPel( *m_ref1_updata , *m_ref2_updata , *m_pic_data );
        }
        else
        {
            ref2 = ref1;
        }

        // Create an object for doing intra calculations
        m_intradiff = new IntraBlockDiff( *m_pic_data );

        // Loop over all the macroblocks, doing the work //
        ///////////////////////////////////////////////////

        for (m_ymb_loc=0 ; m_ymb_loc<m_encparams.YNumMB() ; ++m_ymb_loc )
        {
            for (m_xmb_loc=0 ; m_xmb_loc<m_encparams.XNumMB() ; ++m_xmb_loc )
            {
                DoMBDecn();
            }//m_xmb_loc
        }//m_ymb_loc

        delete m_intradiff;
        if (num_refs>1)
            delete m_bicheckdiff;
    }
}
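DoMBDecn, which does the per-macroblock work, is not shown here. The bottom-up decision it implements is described in the opening comment: vectors are found for sub-blocks and progressively larger groupings, and a grouping is kept only if coding one vector for it is cheaper than coding its children separately under the per-level lambdas set up above. The self-contained sketch below illustrates that comparison with hypothetical names (BlockCost, WorthMerging); it is not the encoder's actual cost structure.

// Illustrative Lagrangian merge test with hypothetical types and names.
struct BlockCost
{
    float sad;      // block difference for the candidate vector
    float mvcost;   // cost proxy for coding the vector itself

    float Total( const float lambda ) const { return sad + lambda * mvcost; }
};

// Merge four child blocks into one parent block if a single parent vector
// is no more expensive than coding the four child vectors separately.
static bool WorthMerging( const BlockCost children[4] , const BlockCost& parent , const float lambda )
{
    float child_total = 0.0f;
    for (int k = 0; k < 4; ++k)
        child_total += children[k].Total( lambda );

    return parent.Total( lambda ) <= child_total;
}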