void PictureCompressor::ModeDecisionME( EncQueue& my_buffer, int pnum )
{
    // Runs block prediction-mode decision for picture pnum, then undoes the
    // pixel-accuracy hack applied by SubPixelME (which doubled the vectors
    // and set HALF_PIXEL precision to fake pixel-accurate estimation).
    MEData& me_data = my_buffer.GetPicture(pnum).GetMEData();
    PictureParams& pparams = my_buffer.GetPicture(pnum).GetPparams();
    PicturePredParams& predparams = me_data.GetPicPredParams();

    ModeDecider my_mode_dec( m_encparams );
    my_mode_dec.DoModeDecn( my_buffer , pnum );

    const int num_refs = pparams.NumRefs();

    if (m_orig_prec == MV_PRECISION_PIXEL)
    {
        // FIXME: HACK HACK
        // Divide the motion vectors by 2 to convert back to pixel
        // accurate motion vectors and reset MV precision to
        // PIXEL accuracy
        MvArray &mv_arr1 = me_data.Vectors(1);
        for (int j = 0; j < mv_arr1.LengthY(); ++j)
        {
            for (int i = 0; i < mv_arr1.LengthX(); ++i)
                mv_arr1[j][i] = mv_arr1[j][i] >> 1;
        }

        if (num_refs > 1)
        {
            MvArray &mv_arr2 = me_data.Vectors(2);
            for (int j = 0; j < mv_arr2.LengthY(); ++j)
            {
                for (int i = 0; i < mv_arr2.LengthX(); ++i)
                    mv_arr2[j][i] = mv_arr2[j][i] >> 1;
            }
        }

        // BUGFIX(review): the original text was truncated and never used
        // 'predparams', despite the comment above promising to reset the
        // precision. Restore PIXEL precision to match the halved vectors.
        predparams.SetMVPrecision(MV_PRECISION_PIXEL);
    }
}
void PictureCompressor::NormaliseComplexity( EncQueue& my_buffer, int pnum )
{
    // Normalises the complexity of picture pnum against the mean complexity
    // of nearby inter pictures in the queue (those whose complexity has been
    // computed and whose picture number lies within +/-10 of pnum).
    EncPicture& my_picture = my_buffer.GetPicture( pnum );

    if ( (my_picture.GetStatus()&DONE_PIC_COMPLEXITY) != 0 )
    {
        std::vector<int> queue_members = my_buffer.Members();

        double mean_complexity = 0.0;
        int count = 0;

        for (size_t i=0; i<queue_members.size(); ++i)
        {
            int n = queue_members[i];
            EncPicture& enc_pic = my_buffer.GetPicture( n );
            if ( (enc_pic.GetStatus()&DONE_PIC_COMPLEXITY) != 0 &&
                 enc_pic.GetPparams().PicSort().IsInter() &&
                 n >= pnum - 10 && n <= pnum + 10 )
            {
                mean_complexity += enc_pic.GetComplexity();
                count++;
            }
        }

        // BUGFIX: the original divided by count unconditionally; if no
        // qualifying picture was found (count==0) the normalised complexity
        // became NaN/inf. Fall back to a neutral factor of 1.0 instead.
        if ( count > 0 )
            mean_complexity /= count;

        if ( mean_complexity > 0.0 )
            my_picture.SetNormComplexity( my_picture.GetComplexity() / mean_complexity );
        else
            my_picture.SetNormComplexity( 1.0 );
    }
}
void PictureCompressor::SubPixelME( EncQueue& my_buffer , int pnum )
{
    // Refines pixel-accurate motion vectors to sub-pixel accuracy, or — when
    // the configured precision is PIXEL — fakes it by doubling the vectors
    // and temporarily switching to HALF_PIXEL precision.
    const std::vector<int>& refs = my_buffer.GetPicture(pnum).GetPparams().Refs();
    const int num_refs = refs.size();

    PictureParams& pparams = my_buffer.GetPicture(pnum).GetPparams();
    MEData& me_data = my_buffer.GetPicture(pnum).GetMEData();
    PicturePredParams& predparams = me_data.GetPicPredParams();

    // Choose the ME lambda by picture type (L2 for B pictures, L1 otherwise)
    const float me_lambda = pparams.IsBPicture() ? m_encparams.L2MELambda()
                                                 : m_encparams.L1MELambda();
    //me_lambda *= my_buffer.GetPicture(pnum).GetNormComplexity();

    // Set up the lambda to be used
    me_data.SetLambdaMap( num_refs , me_lambda );

    m_orig_prec = predparams.MVPrecision();

    // Step 2.
    // Pixel accurate vectors are then refined to sub-pixel accuracy
    if (m_orig_prec != MV_PRECISION_PIXEL)
    {
        SubpelRefine pelrefine( m_encparams );
        pelrefine.DoSubpel( my_buffer , pnum );
        return;
    }

    // FIXME: HACK HACK
    // Multiplying the motion vectors by 2 and setting MV precision to
    // HALF_PIXEL to implement pixel accurate motion estimation.
    const int last_ref = (num_refs > 1) ? 2 : 1;
    for (int ref = 1; ref <= last_ref; ++ref)
    {
        MvArray& mv_arr = me_data.Vectors(ref);
        for (int y = 0; y < mv_arr.LengthY(); ++y)
        {
            for (int x = 0; x < mv_arr.LengthX(); ++x)
                mv_arr[y][x] = mv_arr[y][x] << 1;
        }
    }

    predparams.SetMVPrecision(MV_PRECISION_HALF_PIXEL);
}
void PictureCompressor::CalcComplexity2( EncQueue& my_buffer, int pnum )
{
    // To be used after doing motion compensation: sets the picture's
    // complexity to the variance of the luma component, E[X^2] - (E[X])^2.
    EncPicture& my_picture = my_buffer.GetPicture( pnum );
    const PicArray& pic_data = my_picture.Data( Y_COMP );

    if ( (my_picture.GetStatus()&DONE_MC) != 0 )
    {
        double sum = 0.0;
        double sum_sq = 0.0;

        for (int y=0; y<pic_data.LengthY(); ++y)
        {
            for (int x=0; x<pic_data.LengthX(); ++x)
            {
                const float val = float( pic_data[y][x] );
                sum += val;
                sum_sq += val*val;
            }
        }

        const int area = pic_data.LengthX()*pic_data.LengthY();
        const double mean = sum / area;
        const double mean_sq = sum_sq / area;

        my_picture.SetComplexity( mean_sq - mean*mean );
    }
}
void SubpelRefine::DoSubpel( EncQueue& my_buffer,int pic_num )
{
    // Main entry point for sub-pixel motion-vector refinement of one picture.
    m_predparams = &(my_buffer.GetPicture(pic_num).GetMEData().GetPicPredParams() );

    const PictureSort psort = my_buffer.GetPicture(pic_num).GetPparams().PicSort();

    // Only inter pictures have motion vectors to refine
    if ( !psort.IsInter() )
        return;

    // Get the references; a single-reference picture reuses ref1 as ref2
    const vector<int>& refs = my_buffer.GetPicture(pic_num).GetPparams().Refs();
    const int ref1 = refs[0];
    const int ref2 = ( refs.size() > 1 ) ? refs[1] : ref1;

    const PicArray& pic_data =
        my_buffer.GetPicture(pic_num).DataForME(m_encparams.CombinedME());
    const PicArray& refup1_data =
        my_buffer.GetPicture(ref1).UpDataForME(m_encparams.CombinedME());
    const PicArray& refup2_data =
        my_buffer.GetPicture(ref2).UpDataForME(m_encparams.CombinedME());

    MEData& me_data = my_buffer.GetPicture(pic_num).GetMEData();

    // Now match the pictures (skip the second match if both refs coincide)
    MatchPic( pic_data , refup1_data , me_data , 1 );
    if ( ref1 != ref2 )
        MatchPic( pic_data , refup2_data , me_data , 2 );
}
void ModeDecider::DoModeDecn( EncQueue& my_buffer, int pic_num )
{
    // We've got 'raw' block motion vectors for up to two reference pictures.
    // Now we want to make a decision as to mode. In this initial
    // implementation, this is bottom-up i.e. find mvs for MBs and sub-MBs
    // and see whether it's worthwhile merging.

    int ref1,ref2;

    // Initialise //
    ////////////////

    m_psort = my_buffer.GetPicture(pic_num).GetPparams().PicSort();

    if (m_psort.IsInter())
    {
        // Extract the references
        const vector<int>& refs = my_buffer.GetPicture(pic_num).GetPparams().Refs();
        num_refs = refs.size();
        ref1 = refs[0];

        // The picture we're doing estimation from
        m_pic_data = &(my_buffer.GetPicture( pic_num ).OrigData(Y_COMP));

        // Set up the hierarchy of motion vector data objects:
        // level 0 = quarter resolution, level 1 = half, level 2 = full
        PicturePredParams predparams0 = m_predparams;
        predparams0.SetXNumBlocks( m_predparams.XNumBlocks()/4 );
        predparams0.SetYNumBlocks( m_predparams.YNumBlocks()/4 );

        PicturePredParams predparams1 = m_predparams;
        predparams1.SetXNumBlocks( m_predparams.XNumBlocks()/2 );
        predparams1.SetYNumBlocks( m_predparams.YNumBlocks()/2 );

        m_me_data_set[0] = new MEData( predparams0, num_refs );
        m_me_data_set[1] = new MEData( predparams1, num_refs );
        m_me_data_set[2] = &my_buffer.GetPicture(pic_num).GetMEData();

        // Set up the lambdas to use per block
        m_me_data_set[0]->SetLambdaMap( 0 , m_me_data_set[2]->LambdaMap() , 1.0/m_level_factor[0] );
        m_me_data_set[1]->SetLambdaMap( 1 , m_me_data_set[2]->LambdaMap() , 1.0/m_level_factor[1] );

        // Set up the reference pictures
        m_ref1_updata = &(my_buffer.GetPicture( ref1 ).UpOrigData(Y_COMP));

        if (num_refs>1)
        {
            ref2 = refs[1];
            m_ref2_updata = &(my_buffer.GetPicture( ref2 ).UpOrigData(Y_COMP));

            // Create an object for computing bi-directional prediction
            // calculations, at the configured MV precision
            if ( m_predparams.MVPrecision()==MV_PRECISION_EIGHTH_PIXEL )
                m_bicheckdiff = new BiBlockEighthPel( *m_ref1_updata ,
                                                      *m_ref2_updata ,
                                                      *m_pic_data );
            else if ( m_predparams.MVPrecision()==MV_PRECISION_QUARTER_PIXEL )
                m_bicheckdiff = new BiBlockQuarterPel( *m_ref1_updata ,
                                                       *m_ref2_updata ,
                                                       *m_pic_data );
            else
                m_bicheckdiff = new BiBlockHalfPel( *m_ref1_updata ,
                                                    *m_ref2_updata ,
                                                    *m_pic_data );
        }
        else
        {
            ref2 = ref1;
        }

        // Create an object for doing intra calculations
        m_intradiff = new IntraBlockDiff( *m_pic_data );

        // Loop over all the macroblocks, doing the work //
        ///////////////////////////////////////////////////

        for (m_ymb_loc=0 ; m_ymb_loc<m_predparams.YNumMB() ; ++m_ymb_loc )
        {
            for (m_xmb_loc=0 ; m_xmb_loc<m_predparams.XNumMB(); ++m_xmb_loc )
            {
                DoMBDecn();
            }//m_xmb_loc
        }//m_ymb_loc

        // BUGFIX: the coarse-level ME data allocated above was never freed,
        // leaking two MEData objects per call. m_me_data_set[2] points into
        // the picture buffer and is NOT owned here, so it must not be freed.
        delete m_me_data_set[0];
        delete m_me_data_set[1];

        delete m_intradiff;
        if (num_refs>1)
            delete m_bicheckdiff;
    }

    // Finally, although not strictly part of motion estimation,
    // we have to assign DC values for chroma components for
    // blocks we're decided are intra.
    SetChromaDC( my_buffer , pic_num );
}
void ModeDecider::DoModeDecn( EncQueue& my_buffer, int pic_num )
{
    m_predparams = &(my_buffer.GetPicture(pic_num).GetMEData().GetPicPredParams() );

    // The following factors normalise costs for sub-SBs and SBs to those of
    // blocks, so that the overlap is taken into account (e.g. a sub-SB has
    // length XBLEN+XBSEP and YBLEN+YBSEP). The SB costs for a 1x1
    // decomposition are not directly comparable to those for other
    // decompositions because of the block overlaps. These factors remove
    // these effects, so that all SAD costs are normalised to the area
    // corresponding to non-overlapping 16 blocks of size XBLEN*YBLEN.
    m_level_factor[0] = float( 16 * m_predparams->LumaBParams(2).Xblen() *
                                    m_predparams->LumaBParams(2).Yblen() )/
                        float( m_predparams->LumaBParams(0).Xblen() *
                               m_predparams->LumaBParams(0).Yblen() );
    m_level_factor[1] = float( 4 * m_predparams->LumaBParams(2).Xblen() *
                                   m_predparams->LumaBParams(2).Yblen() )/
                        float( m_predparams->LumaBParams(1).Xblen() *
                               m_predparams->LumaBParams(1).Yblen() );
    m_level_factor[2] = 1.0f;

    for (int i=0 ; i<=2 ; ++i)
        m_mode_factor[i] = 80.0*std::pow(0.8 , 2-i);

    // We've got 'raw' block motion vectors for up to two reference pictures.
    // Now we want to make a decision as to mode. In this initial
    // implementation, this is bottom-up i.e. find mvs for SBs and sub-SBs
    // and see whether it's worthwhile merging.

    int ref1,ref2;

    // Initialise //
    ////////////////

    m_psort = my_buffer.GetPicture(pic_num).GetPparams().PicSort();

    if (m_psort.IsInter())
    {
        // Extract the references
        const vector<int>& refs = my_buffer.GetPicture(pic_num).GetPparams().Refs();
        num_refs = refs.size();
        ref1 = refs[0];

        // The picture we're doing estimation from
        m_pic_data = &(my_buffer.GetPicture( pic_num ).DataForME(m_encparams.CombinedME()) );

        // Set up the hierarchy of motion vector data objects:
        // level 0 = quarter resolution, level 1 = half, level 2 = full
        PicturePredParams predparams0 = *m_predparams;
        predparams0.SetXNumBlocks( m_predparams->XNumBlocks()/4 );
        predparams0.SetYNumBlocks( m_predparams->YNumBlocks()/4 );

        PicturePredParams predparams1 = *m_predparams;
        predparams1.SetXNumBlocks( m_predparams->XNumBlocks()/2 );
        predparams1.SetYNumBlocks( m_predparams->YNumBlocks()/2 );

        m_me_data_set[0] = new MEData( predparams0, num_refs );
        m_me_data_set[1] = new MEData( predparams1, num_refs );
        m_me_data_set[2] = &my_buffer.GetPicture(pic_num).GetMEData();

        // Set up the lambdas to use per block
        m_me_data_set[0]->SetLambdaMap( 0 , m_me_data_set[2]->LambdaMap() , 1.0/m_level_factor[0] );
        m_me_data_set[1]->SetLambdaMap( 1 , m_me_data_set[2]->LambdaMap() , 1.0/m_level_factor[1] );

        // Set up the reference pictures
        m_ref1_updata = &(my_buffer.GetPicture( ref1 ).UpDataForME(m_encparams.CombinedME()) );

        if (num_refs>1)
        {
            ref2 = refs[1];
            m_ref2_updata = &(my_buffer.GetPicture( ref2 ).UpDataForME(m_encparams.CombinedME()) );

            // Create an object for computing bi-directional prediction
            // calculations, at the configured MV precision
            if ( m_predparams->MVPrecision()==MV_PRECISION_EIGHTH_PIXEL )
                m_bicheckdiff = new BiBlockEighthPel( *m_ref1_updata ,
                                                      *m_ref2_updata ,
                                                      *m_pic_data );
            else if ( m_predparams->MVPrecision()==MV_PRECISION_QUARTER_PIXEL )
                m_bicheckdiff = new BiBlockQuarterPel( *m_ref1_updata ,
                                                       *m_ref2_updata ,
                                                       *m_pic_data );
            else
                m_bicheckdiff = new BiBlockHalfPel( *m_ref1_updata ,
                                                    *m_ref2_updata ,
                                                    *m_pic_data );
        }
        else
        {
            ref2 = ref1;
        }

        // Create an object for doing intra calculations
        m_intradiff = new IntraBlockDiff( *m_pic_data );

        // Loop over all the superblocks, doing the work //
        ///////////////////////////////////////////////////

        for (m_ysb_loc=0 ; m_ysb_loc<m_predparams->YNumSB() ; ++m_ysb_loc )
        {
            for (m_xsb_loc=0 ; m_xsb_loc<m_predparams->XNumSB(); ++m_xsb_loc )
            {
                DoSBDecn();
            }//m_xsb_loc
        }//m_ysb_loc

        // BUGFIX: the coarse-level ME data allocated above was never freed,
        // leaking two MEData objects per call. m_me_data_set[2] points into
        // the picture buffer and is NOT owned here, so it must not be freed.
        delete m_me_data_set[0];
        delete m_me_data_set[1];

        delete m_intradiff;
        if (num_refs>1)
            delete m_bicheckdiff;
    }

    // Finally, although not strictly part of motion estimation,
    // we have to assign DC values for
    // blocks we're decided are intra.
    SetDC( my_buffer , pic_num );
}
void PictureCompressor::CalcComplexity( EncQueue& my_buffer, int pnum , const OLBParams& olbparams )
{
    // Computes a complexity measure for picture pnum from its pixel-accurate
    // ME prediction costs, and a prediction bias (fraction of blocks that
    // prefer reference 1) for bi-predicted pictures.
    EncPicture& my_picture = my_buffer.GetPicture( pnum );
    PictureParams& pparams = my_picture.GetPparams();

    if ( (my_picture.GetStatus()&DONE_PEL_ME) != 0 )
    {
        MEData& me_data = my_picture.GetMEData();

        TwoDArray<MvCostData>* pcosts1;
        TwoDArray<MvCostData>* pcosts2;

        pcosts1 = &me_data.PredCosts(1);
        // With a single reference, reuse the ref-1 costs for both pointers
        if (pparams.NumRefs()>1)
            pcosts2 = &me_data.PredCosts(2);
        else
            pcosts2 = pcosts1;

        float cost1, cost2, cost;
        double total_cost1 = 0.0;
        double total_cost2 = 0.0;
        double total_cost = 0.0;
        int count1=0;
        int count=0;

        float cost_threshold = float(olbparams.Xblen()*olbparams.Yblen()*10);

        // NOTE(review): the 4-block margin assumes the cost arrays are at
        // least 8 blocks in each dimension — TODO confirm for tiny pictures.
        for (int j=4; j<pcosts1->LengthY()-4; ++j)
        {
            for (int i=4; i<pcosts1->LengthX()-4; ++i)
            {
                cost1 = (*pcosts1)[j][i].SAD;
                cost2 = (*pcosts2)[j][i].SAD;
                cost = std::min(cost1, cost2);

                total_cost1 += cost1;
                total_cost2 += cost2;
                total_cost += cost;

                // Count only blocks with a plausible match in either ref
                if (pparams.NumRefs()>1 && cost<=cost_threshold)
                {
                    ++count;
                    if (cost1<=cost2)
                        ++count1;
                }
            }
        }

        // Normalise per-reference totals to non-overlapping block area.
        // NOTE(review): total_cost1/total_cost2 are computed but currently
        // unused below — presumably kept for diagnostics; verify.
        total_cost1 *= olbparams.Xbsep()*olbparams.Ybsep();
        total_cost1 /= olbparams.Xblen()*olbparams.Yblen();
        total_cost2 *= olbparams.Xbsep()*olbparams.Ybsep();
        total_cost2 /= olbparams.Xblen()*olbparams.Yblen();

        // BUGFIX: guard count>0 — if no block cost fell under the threshold,
        // the original computed 0/0 and stored NaN as the prediction bias.
        if (pparams.NumRefs()>1 && count>0)
            my_picture.SetPredBias(float(count1)/float(count));
        else
            my_picture.SetPredBias(0.5);

        total_cost *= olbparams.Xbsep()*olbparams.Ybsep();
        total_cost /= olbparams.Xblen()*olbparams.Yblen();

        // my_picture.SetComplexity( total_cost );
        my_picture.SetComplexity( total_cost*total_cost );
    }
}