void FrameEncoder::setLambda(int qp, int row) { TComSlice* slice = m_pic->getSlice(); TComPicYuv *fenc = slice->getPic()->getPicYuvOrg(); double lambda = 0; if (m_pic->getSlice()->getSliceType() == I_SLICE) { lambda = X265_MAX(1, x265_lambda2_tab_I[qp]); } else { lambda = X265_MAX(1, x265_lambda2_non_I[qp]); } // for RDO // in RdCost there is only one lambda because the luma and chroma bits are not separated, // instead we weight the distortion of chroma. int chromaQPOffset = slice->getPPS()->getChromaCbQpOffset() + slice->getSliceQpDeltaCb(); int qpc = Clip3(0, 70, qp + chromaQPOffset); double cbWeight = pow(2.0, (qp - g_chromaScale[qpc])); // takes into account of the chroma qp mapping and chroma qp Offset chromaQPOffset = slice->getPPS()->getChromaCrQpOffset() + slice->getSliceQpDeltaCr(); qpc = Clip3(0, 70, qp + chromaQPOffset); double crWeight = pow(2.0, (qp - g_chromaScale[qpc])); // takes into account of the chroma qp mapping and chroma qp Offset double chromaLambda = lambda / crWeight; m_rows[row].m_search.setQPLambda(qp, lambda, chromaLambda); m_rows[row].m_search.m_me.setSourcePlane(fenc->getLumaAddr(), fenc->getStride()); m_rows[row].m_rdCost.setLambda(lambda); m_rows[row].m_rdCost.setCbDistortionWeight(cbWeight); m_rows[row].m_rdCost.setCrDistortionWeight(crWeight); }
/// Record the reconstructed samples of one CTU into the per-component pixel map.
/// For every component, the CTU rectangle (clipped to the picture) is walked and
/// each covered Pixel entry is flagged as reconstructed with its sample value.
Void updatePixel(TComDataCU* pCtu, Pixel** ppPixel)
{
  const UInt uiCtuWidth  = pCtu->getSlice()->getSPS()->getMaxCUWidth();  // max cu width
  const UInt uiCtuHeight = pCtu->getSlice()->getSPS()->getMaxCUHeight(); // max cu height

  TComPic*    pcPic     = pCtu->getPic();
  TComPicYuv* pcRecoYuv = pcPic->getPicYuvRec();

  const UInt uiNumComp = pcPic->getNumberValidComponents();
  for (UInt uiComp = 0; uiComp < uiNumComp; uiComp++)
  {
    const ComponentID cId = ComponentID(uiComp);

    // Component-scaled picture geometry.
    const UInt uiStride    = pcRecoYuv->getStride(cId);
    const UInt uiPicWidth  = pcRecoYuv->getWidth(cId);
    const UInt uiPicHeight = pcRecoYuv->getHeight(cId);
    const UInt uiScaleX    = pcRecoYuv->getComponentScaleX(cId);
    const UInt uiScaleY    = pcRecoYuv->getComponentScaleY(cId);

    // Upper-left corner and size of this CTU's code block in component units.
    const UInt uiCUPelX = pCtu->getCUPelX() >> uiScaleX;
    const UInt uiCUPelY = pCtu->getCUPelY() >> uiScaleY;

    // Clip the code-block rectangle to the picture boundary.
    const UInt uiLeft   = Clip3((UInt)0, uiPicWidth,  uiCUPelX);
    const UInt uiTop    = Clip3((UInt)0, uiPicHeight, uiCUPelY);
    const UInt uiRight  = Clip3((UInt)0, uiPicWidth,  uiCUPelX + (uiCtuWidth  >> uiScaleX));
    const UInt uiBottom = Clip3((UInt)0, uiPicHeight, uiCUPelY + (uiCtuHeight >> uiScaleY));

    Pel* pBuffer = pcRecoYuv->getAddr(cId);
    for (UInt uiY = uiTop; uiY < uiBottom; uiY++)
    {
      for (UInt uiX = uiLeft; uiX < uiRight; uiX++)
      {
        // Map resampled coordinates back to original-image coordinates,
        // then locate the pixel record via its serial index.
        const UInt uiOrgX = g_auiRsmpldToOrg[cId][0][uiX];
        const UInt uiOrgY = g_auiRsmpldToOrg[cId][1][uiY];

        Pixel* pPixel = ppPixel[cId] + getSerialIndex(uiOrgX, uiOrgY, uiPicWidth);
        pPixel->m_bIsRec = true;
        pPixel->m_uiReco = pBuffer[uiY * uiStride + uiX];
      }
    }
  }
}
//! calculate AC and DC values for current original image Void WeightPredAnalysis::xCalcACDCParamSlice(TComSlice *const slice) { //===== calculate AC/DC value ===== TComPicYuv* pPic = slice->getPic()->getPicYuvOrg(); WPACDCParam weightACDCParam[MAX_NUM_COMPONENT]; for(Int componentIndex = 0; componentIndex < pPic->getNumberValidComponents(); componentIndex++) { const ComponentID compID = ComponentID(componentIndex); // calculate DC/AC value for channel const Int iStride = pPic->getStride(compID); const Int iWidth = pPic->getWidth(compID); const Int iHeight = pPic->getHeight(compID); const Int iSample = iWidth*iHeight; Int64 iOrgDC = 0; { const Pel *pPel = pPic->getAddr(compID); for(Int y = 0; y < iHeight; y++, pPel+=iStride ) { for(Int x = 0; x < iWidth; x++ ) { iOrgDC += (Int)( pPel[x] ); } } } const Int64 iOrgNormDC = ((iOrgDC+(iSample>>1)) / iSample); Int64 iOrgAC = 0; { const Pel *pPel = pPic->getAddr(compID); for(Int y = 0; y < iHeight; y++, pPel += iStride ) { for(Int x = 0; x < iWidth; x++ ) { iOrgAC += abs( (Int)pPel[x] - (Int)iOrgNormDC ); } } } const Int fixedBitShift = (slice->getSPS()->getSpsRangeExtension().getHighPrecisionOffsetsEnabledFlag())?RExt__PREDICTION_WEIGHTING_ANALYSIS_DC_PRECISION:0; weightACDCParam[compID].iDC = (((iOrgDC<<fixedBitShift)+(iSample>>1)) / iSample); weightACDCParam[compID].iAC = iOrgAC; } slice->setWpAcDcParam(weightACDCParam); }
/**
 * Calculate the MD5sum of pic, storing the result in digest.
 * MD5 calculation is performed on Y' then Cb, then Cr; each in raster order.
 * Pel data is inserted into the MD5 function in little-endian byte order,
 * using sufficient bytes to represent the picture bitdepth. Eg, 10bit data
 * uses little-endian two byte words; 8bit data uses single byte words.
 */
void calcMD5(TComPicYuv& pic, unsigned char digest[16])
{
  // Effective bit depth of the stored samples (base depth plus increment).
  unsigned bitdepth = g_uiBitDepth + g_uiBitIncrement;

  /* choose an md5_plane packing function based on the system bitdepth */
  // <=8 bits: one byte per sample; otherwise two little-endian bytes.
  // The casts force both template instantiations onto a common
  // function-pointer type taking const Pel*.
  typedef void (*MD5PlaneFunc)(MD5&, const Pel*, unsigned, unsigned, unsigned);
  MD5PlaneFunc md5_plane_func;
  md5_plane_func = bitdepth <= 8 ? (MD5PlaneFunc)md5_plane<1> : (MD5PlaneFunc)md5_plane<2>;

  MD5 md5;
  unsigned width = pic.getWidth();
  unsigned height = pic.getHeight();
  unsigned stride = pic.getStride();

  // Luma plane first.
  md5_plane_func(md5, pic.getLumaAddr(), width, height, stride);

  // Chroma planes: dimensions and stride halved.
  // NOTE(review): assumes 4:2:0 sampling and that the chroma stride is exactly
  // half the luma stride — confirm against TComPicYuv's allocation.
  width >>= 1;
  height >>= 1;
  stride >>= 1;
  md5_plane_func(md5, pic.getCbAddr(), width, height, stride);
  md5_plane_func(md5, pic.getCrAddr(), width, height, stride);

  md5.finalize(digest);
}
/// Render a "used pels" map for every non-first input view: reads video+depth
/// for all base views frame by frame, and for each other view writes a map of
/// which samples of base view 0 are used when shifting towards that view.
Void TAppRendererTop::renderUsedPelsMap( )
{
  xCreateLib();
  xInitLib();

  // Create Buffers Input Views;
  std::vector<TComPicYuv*> apcPicYuvBaseVideo;
  std::vector<TComPicYuv*> apcPicYuvBaseDepth;

  // TemporalImprovement Filter
  std::vector<TComPicYuv*> apcPicYuvLastBaseVideo;
  std::vector<TComPicYuv*> apcPicYuvLastBaseDepth;

  Int aiPad[2] = { 0, 0 };

  // One video and one depth buffer per input view (plus previous-frame
  // buffers when the temporal depth filter is enabled).
  for ( UInt uiBaseView = 0; uiBaseView < m_iNumberOfInputViews; uiBaseView++ )
  {
    TComPicYuv* pcNewVideoPic = new TComPicYuv;
    TComPicYuv* pcNewDepthPic = new TComPicYuv;

    pcNewVideoPic->create( m_iSourceWidth, m_iSourceHeight, 1, 1, 1 );
    apcPicYuvBaseVideo.push_back(pcNewVideoPic);

    pcNewDepthPic->create( m_iSourceWidth, m_iSourceHeight, 1, 1, 1 );
    apcPicYuvBaseDepth.push_back(pcNewDepthPic);

    //Temporal improvement Filter
    if ( m_bTempDepthFilter )
    {
      pcNewVideoPic = new TComPicYuv;
      pcNewDepthPic = new TComPicYuv;

      pcNewVideoPic->create( m_iSourceWidth, m_iSourceHeight, 1, 1, 1 );
      apcPicYuvLastBaseVideo.push_back(pcNewVideoPic);

      pcNewDepthPic->create( m_iSourceWidth, m_iSourceHeight, 1, 1, 1 );
      apcPicYuvLastBaseDepth.push_back(pcNewDepthPic);
    }
  }

  // Create Buffer for synthesized View
  TComPicYuv* pcPicYuvSynthOut = new TComPicYuv;
  pcPicYuvSynthOut->create( m_iSourceWidth, m_iSourceHeight, 1, 1, 1 );

  Bool bAnyEOS = false;
  Int iNumOfRenderedFrames = 0;
  Int iFrame = 0;

  // Main loop: run until the requested frame count is reached
  // (m_iFramesToBeRendered == 0 means "until EOF") or any input hits EOF.
  while ( ( ( iNumOfRenderedFrames < m_iFramesToBeRendered ) || ( m_iFramesToBeRendered == 0 ) ) && !bAnyEOS )
  {
    if ( iFrame >= m_iFrameSkip )
    {
      // read in depth and video
      for(Int iBaseViewIdx=0; iBaseViewIdx < m_iNumberOfInputViews; iBaseViewIdx++ )
      {
        m_apcTVideoIOYuvVideoInput[iBaseViewIdx]->read( apcPicYuvBaseVideo[iBaseViewIdx], aiPad ) ;
        apcPicYuvBaseVideo[iBaseViewIdx]->extendPicBorder();
        bAnyEOS |= m_apcTVideoIOYuvVideoInput[iBaseViewIdx]->isEof();

        m_apcTVideoIOYuvDepthInput[iBaseViewIdx]->read( apcPicYuvBaseDepth[iBaseViewIdx], aiPad ) ;
        apcPicYuvBaseDepth[iBaseViewIdx]->extendPicBorder();
        bAnyEOS |= m_apcTVideoIOYuvDepthInput[iBaseViewIdx]->isEof();

        if ( m_bTempDepthFilter && (iFrame >= m_iFrameSkip) )
        {
          // Smooth depth over time; last argument flags the first processed frame.
          m_pcRenTop->temporalFilterVSRS( apcPicYuvBaseVideo[iBaseViewIdx], apcPicYuvBaseDepth[iBaseViewIdx], apcPicYuvLastBaseVideo[iBaseViewIdx], apcPicYuvLastBaseDepth[iBaseViewIdx], ( iFrame == m_iFrameSkip) );
        }
      }
    }
    else
    {
      // Still inside the skip range: consume nothing, just advance the counter.
      std::cout << "Skipping Frame " << iFrame << std::endl;
      iFrame++;
      continue;
    }

    m_cCameraData.update( (UInt) ( iFrame - m_iFrameSkip ) );

    // One used-pels map per non-first input view.
    for(Int iViewIdx=1; iViewIdx < m_iNumberOfInputViews; iViewIdx++ )
    {
      std::cout << "Rendering UsedPelsMap for Frame " << iFrame << " of View " << (Double) m_cCameraData.getBaseViewNumbers()[iViewIdx] << std::endl;

      Int iViewSIdx      = m_cCameraData.getBaseId2SortedId()[iViewIdx];
      Int iFirstViewSIdx = m_cCameraData.getBaseId2SortedId()[0];

      AOT( iViewSIdx == iFirstViewSIdx );

      Bool bFirstIsLeft = (iFirstViewSIdx < iViewSIdx);

      // Same LUT set is passed for both directions; disparity sign decides usage.
      m_pcRenTop->setShiftLUTs(
        m_cCameraData.getBaseViewShiftLUTD()[0][iViewIdx],
        m_cCameraData.getBaseViewShiftLUTI()[0][iViewIdx],
        m_cCameraData.getBaseViewShiftLUTI()[0][iViewIdx],
        m_cCameraData.getBaseViewShiftLUTD()[0][iViewIdx],
        m_cCameraData.getBaseViewShiftLUTI()[0][iViewIdx],
        m_cCameraData.getBaseViewShiftLUTI()[0][iViewIdx],
        -1 );

      // NOTE(review): always uses base view 0's depth as source — presumably
      // intentional (maps view 0's samples); confirm against RenTop docs.
      m_pcRenTop->getUsedSamplesMap( apcPicYuvBaseDepth[0], pcPicYuvSynthOut, bFirstIsLeft );

      // Write Output
      m_apcTVideoIOYuvSynthOutput[iViewIdx-1]->write( pcPicYuvSynthOut, 0, 0, 0 );
    }
    iFrame++;
    iNumOfRenderedFrames++;
  }

  // Delete Buffers
  for ( UInt uiBaseView = 0; uiBaseView < m_iNumberOfInputViews; uiBaseView++ )
  {
    apcPicYuvBaseVideo[uiBaseView]->destroy();
    delete apcPicYuvBaseVideo[uiBaseView];

    apcPicYuvBaseDepth[uiBaseView]->destroy();
    delete apcPicYuvBaseDepth[uiBaseView];

    // Temporal Filter
    if ( m_bTempDepthFilter )
    {
      apcPicYuvLastBaseVideo[uiBaseView]->destroy();
      delete apcPicYuvLastBaseVideo[uiBaseView];

      apcPicYuvLastBaseDepth[uiBaseView]->destroy();
      delete apcPicYuvLastBaseDepth[uiBaseView];
    }
  }
  pcPicYuvSynthOut->destroy();
  delete pcPicYuvSynthOut;

  xDestroyLib();
}
/// Render output views using the TRenModel, with the set of models derived
/// from the configured view numbers (one model per output view). Supports
/// interpolation (direction 0) and extrapolation from left (1) or right (2).
Void TAppRendererTop::xRenderModelFromNums()
{
  xCreateLib();
  xInitLib();

  // Create Buffers Input Views;
  std::vector<TComPicYuv*> apcPicYuvBaseVideo;
  std::vector<TComPicYuv*> apcPicYuvBaseDepth;

  Int aiPad[2] = { 0, 0 };

  // Init Model
  TRenModel cCurModel;

  AOT( m_iLog2SamplingFactor != 0 );
  // Single horizontal partition spanning the whole picture height.
  cCurModel.setupPart( 0, m_iSourceHeight );

#if H_3D_VSO_EARLY_SKIP
  cCurModel.create( m_iNumberOfInputViews, m_iNumberOfOutputViews, m_iSourceWidth, m_iSourceHeight, m_iShiftPrecision, m_iBlendHoleMargin, false );
#else
  cCurModel.create( m_iNumberOfInputViews, m_iNumberOfOutputViews, m_iSourceWidth, m_iSourceHeight, m_iShiftPrecision, m_iBlendHoleMargin );
#endif

  // One video and one depth buffer per input view.
  for ( UInt uiBaseView = 0; uiBaseView < m_iNumberOfInputViews; uiBaseView++ )
  {
    TComPicYuv* pcNewVideoPic = new TComPicYuv;
    TComPicYuv* pcNewDepthPic = new TComPicYuv;

    pcNewVideoPic->create( m_iSourceWidth, m_iSourceHeight, 1, 1, 1 );
    apcPicYuvBaseVideo.push_back(pcNewVideoPic);

    pcNewDepthPic->create( m_iSourceWidth, m_iSourceHeight, 1, 1, 1 );
    apcPicYuvBaseDepth.push_back(pcNewDepthPic);
  }

  // Create one synthesis model per output view, with reference base views
  // restricted according to the render direction.
  for(Int iSynthViewIdx=0; iSynthViewIdx < m_iNumberOfOutputViews; iSynthViewIdx++ )
  {
    Int  iLeftBaseViewIdx  = -1;
    Int  iRightBaseViewIdx = -1;
    Bool bIsBaseView = false;

    Int iRelDistToLeft;
    m_cCameraData.getLeftRightBaseView( iSynthViewIdx, iLeftBaseViewIdx, iRightBaseViewIdx, iRelDistToLeft, bIsBaseView );

    if (m_iRenderDirection == 1 )
    {
      // Extrapolate-from-left: drop the right reference.
      iRightBaseViewIdx = -1;
      AOT( iLeftBaseViewIdx == -1);
    }

    if (m_iRenderDirection == 2 )
    {
      // Extrapolate-from-right: drop the left reference.
      iLeftBaseViewIdx = -1;
      AOT( iRightBaseViewIdx == -1);
    }

    Int iLeftBaseViewSIdx  = -1;
    Int iRightBaseViewSIdx = -1;

    if (iLeftBaseViewIdx != -1 )
    {
      iLeftBaseViewSIdx = m_cCameraData.getBaseId2SortedId()[iLeftBaseViewIdx];
    }

    if (iRightBaseViewIdx != -1 )
    {
      iRightBaseViewSIdx = m_cCameraData.getBaseId2SortedId()[iRightBaseViewIdx];
    }
    cCurModel.createSingleModel(-1, -1, iSynthViewIdx, iLeftBaseViewSIdx, iRightBaseViewSIdx, false, m_iBlendMode );
  }

  // Create Buffer for synthesized View
  TComPicYuv* pcPicYuvSynthOut = new TComPicYuv;
  pcPicYuvSynthOut->create( m_iSourceWidth, m_iSourceHeight, 1, 1, 1 );

  Bool bAnyEOS = false;
  Int iNumOfRenderedFrames = 0;
  Int iFrame = 0;

  // Main loop: until requested frame count reached (0 = until EOF) or EOF.
  while ( ( ( iNumOfRenderedFrames < m_iFramesToBeRendered ) || ( m_iFramesToBeRendered == 0 ) ) && !bAnyEOS )
  {
    if ( iFrame >= m_iFrameSkip )
    {
      // read in depth and video
      for(Int iBaseViewIdx=0; iBaseViewIdx < m_iNumberOfInputViews; iBaseViewIdx++ )
      {
        m_apcTVideoIOYuvVideoInput[iBaseViewIdx]->read( apcPicYuvBaseVideo[iBaseViewIdx], aiPad ) ;
        bAnyEOS |= m_apcTVideoIOYuvVideoInput[iBaseViewIdx]->isEof();

        m_apcTVideoIOYuvDepthInput[iBaseViewIdx]->read( apcPicYuvBaseDepth[iBaseViewIdx], aiPad ) ;
        bAnyEOS |= m_apcTVideoIOYuvDepthInput[iBaseViewIdx]->isEof();

        if ( iFrame >= m_iFrameSkip )
        {
          // Register the freshly read pictures with the model.
          Int iBaseViewSIdx = m_cCameraData.getBaseId2SortedId()[iBaseViewIdx];
          cCurModel.setBaseView( iBaseViewSIdx, apcPicYuvBaseVideo[iBaseViewIdx], apcPicYuvBaseDepth[iBaseViewIdx], NULL, NULL );
        }
      }
    }
    else
    {
      iFrame++;
      continue;
    }
    m_cCameraData.update( (UInt) (iFrame - m_iFrameSkip ));
    for(Int iSynthViewIdx=0; iSynthViewIdx < m_iNumberOfOutputViews; iSynthViewIdx++ )
    {
      Int  iLeftBaseViewIdx  = -1;
      Int  iRightBaseViewIdx = -1;

      Bool bIsBaseView = false;

      Int iRelDistToLeft;
      Bool bHasLRView = m_cCameraData.getLeftRightBaseView( iSynthViewIdx, iLeftBaseViewIdx, iRightBaseViewIdx, iRelDistToLeft, bIsBaseView );
      Bool bHasLView = ( iLeftBaseViewIdx != -1 );
      Bool bHasRView = ( iRightBaseViewIdx != -1 );

      switch( m_iRenderDirection )
      {
      /// INTERPOLATION
      case 0:
        assert( bHasLRView || bIsBaseView );

        if ( !bHasLRView && bIsBaseView ) // View to render is BaseView
        {
          std::cout << "Copied Frame " << iFrame << " of BaseView " << (Double) m_cCameraData.getSynthViewNumbers()[iSynthViewIdx] / VIEW_NUM_PREC << std::endl;
          apcPicYuvBaseVideo[iLeftBaseViewIdx]->copyToPic( pcPicYuvSynthOut ); // Copy Original
        }
        else // Render
        {
          std::cout << "Rendering Frame " << iFrame << " of View " << (Double) m_cCameraData.getSynthViewNumbers()[iSynthViewIdx] / VIEW_NUM_PREC << std::endl;
          cCurModel.setSingleModel( iSynthViewIdx, m_cCameraData.getSynthViewShiftLUTI()[iLeftBaseViewIdx ][iSynthViewIdx] , m_cCameraData.getBaseViewShiftLUTI ()[iLeftBaseViewIdx ][iRightBaseViewIdx], m_cCameraData.getSynthViewShiftLUTI()[iRightBaseViewIdx][iSynthViewIdx] , m_cCameraData.getBaseViewShiftLUTI ()[iRightBaseViewIdx][iLeftBaseViewIdx] , iRelDistToLeft, NULL );
          cCurModel.getSynthVideo ( iSynthViewIdx, VIEWPOS_MERGED, pcPicYuvSynthOut );
        }
        break;
      /// EXTRAPOLATION FROM LEFT
      case 1:
        if ( !bHasLView ) // View to render is BaseView
        {
          std::cout << "Copied Frame " << iFrame << " of BaseView " << (Double) m_cCameraData.getSynthViewNumbers()[iSynthViewIdx] / VIEW_NUM_PREC << std::endl;
          apcPicYuvBaseVideo[iLeftBaseViewIdx]->copyToPic( pcPicYuvSynthOut ); // Copy Original
        }
        else // Render
        {
          std::cout << "Rendering Frame " << iFrame << " of View " << (Double) m_cCameraData.getSynthViewNumbers()[iSynthViewIdx] / VIEW_NUM_PREC << std::endl;
          cCurModel.setSingleModel( iSynthViewIdx, m_cCameraData.getSynthViewShiftLUTI()[iLeftBaseViewIdx ][iSynthViewIdx], NULL, NULL, NULL, -1, NULL);
          cCurModel.getSynthVideo ( iSynthViewIdx, VIEWPOS_LEFT, pcPicYuvSynthOut );
        }
        break;
      /// EXTRAPOLATION FROM RIGHT
      case 2: // extrapolation from right
        if ( !bHasRView ) // View to render is BaseView
        {
          std::cout << "Copied Frame " << iFrame << " of BaseView " << (Double) m_cCameraData.getSynthViewNumbers()[iSynthViewIdx] / VIEW_NUM_PREC << std::endl;
          apcPicYuvBaseVideo[iRightBaseViewIdx]->copyToPic( pcPicYuvSynthOut ); // Copy Original
        }
        else // Render
        {
          std::cout << "Rendering Frame " << iFrame << " of View " << (Double) m_cCameraData.getSynthViewNumbers()[iSynthViewIdx] / VIEW_NUM_PREC << std::endl;
          cCurModel.setSingleModel( iSynthViewIdx, NULL , NULL, m_cCameraData.getSynthViewShiftLUTI()[iRightBaseViewIdx ][iSynthViewIdx], NULL, -1, NULL);
          cCurModel.getSynthVideo ( iSynthViewIdx, VIEWPOS_RIGHT, pcPicYuvSynthOut );
        }
        break;
      }

      // Write Output
      m_apcTVideoIOYuvSynthOutput[m_bSweep ? 0 : iSynthViewIdx]->write( pcPicYuvSynthOut, 0, 0, 0, 0 );
    }
    iFrame++;
    iNumOfRenderedFrames++;
  }

  // Delete Buffers
  for ( UInt uiBaseView = 0; uiBaseView < m_iNumberOfInputViews; uiBaseView++ )
  {
    apcPicYuvBaseVideo[uiBaseView]->destroy();
    delete apcPicYuvBaseVideo[uiBaseView];

    apcPicYuvBaseDepth[uiBaseView]->destroy();
    delete apcPicYuvBaseDepth[uiBaseView];
  }
  pcPicYuvSynthOut->destroy();
  delete pcPicYuvSynthOut;

  xDestroyLib();
}
/// Render synthesized views using the TRenModel, with the set of models taken
/// from the render-model string parser (m_cRenModStrParser) instead of the
/// plain output-view numbers.
Void TAppRendererTop::xRenderModelFromString()
{
  xCreateLib();
  xInitLib();

  // Create Buffers Input Views;
  std::vector<TComPicYuv*> apcPicYuvBaseVideo;
  std::vector<TComPicYuv*> apcPicYuvBaseDepth;

  // One video and one depth buffer per input view.
  for ( UInt uiBaseView = 0; uiBaseView < m_iNumberOfInputViews; uiBaseView++ )
  {
    TComPicYuv* pcNewVideoPic = new TComPicYuv;
    TComPicYuv* pcNewDepthPic = new TComPicYuv;

    pcNewVideoPic->create( m_iSourceWidth, m_iSourceHeight, 1, 1, 1 );
    apcPicYuvBaseVideo.push_back(pcNewVideoPic);

    pcNewDepthPic->create( m_iSourceWidth, m_iSourceHeight, 1, 1, 1 );
    apcPicYuvBaseDepth.push_back(pcNewDepthPic);
  }

  Int aiPad[2] = { 0, 0 };

  // Init Model
  TRenModel cCurModel;

  AOT( m_iLog2SamplingFactor != 0 );
#if H_3D_VSO_EARLY_SKIP
  cCurModel.create( m_cRenModStrParser.getNumOfBaseViews(), m_cRenModStrParser.getNumOfModels(), m_iSourceWidth, m_iSourceHeight, m_iShiftPrecision, m_iBlendHoleMargin, false );
#else
  cCurModel.create( m_cRenModStrParser.getNumOfBaseViews(), m_cRenModStrParser.getNumOfModels(), m_iSourceWidth, m_iSourceHeight, m_iShiftPrecision, m_iBlendHoleMargin );
#endif
  // Single horizontal partition spanning the whole picture height.
  cCurModel.setupPart( 0, m_iSourceHeight );

  // Instantiate every model declared by the parser for every input view.
  for ( Int iViewIdx = 0; iViewIdx < m_iNumberOfInputViews; iViewIdx++ )
  {
    Int iNumOfModels   = m_cRenModStrParser.getNumOfModelsForView(iViewIdx, 1);

    for (Int iCurModel = 0; iCurModel < iNumOfModels; iCurModel++ )
    {
      Int iModelNum; Int iLeftViewNum; Int iRightViewNum; Int iDump; Int iOrgRefNum; Int iBlendMode;
      m_cRenModStrParser.getSingleModelData  ( iViewIdx, 1, iCurModel, iModelNum, iBlendMode, iLeftViewNum, iRightViewNum, iOrgRefNum, iDump ) ;
      cCurModel         .createSingleModel   ( iViewIdx, 1, iModelNum, iLeftViewNum, iRightViewNum, false, iBlendMode );
    }
  }

  // Create Buffer for synthesized View
  TComPicYuv* pcPicYuvSynthOut = new TComPicYuv;
  pcPicYuvSynthOut->create( m_iSourceWidth, m_iSourceHeight, 1, 1, 1 );

  Bool bAnyEOS = false;
  Int iNumOfRenderedFrames = 0;
  Int iFrame = 0;

  // Main loop: until requested frame count reached (0 = until EOF) or EOF.
  while ( ( ( iNumOfRenderedFrames < m_iFramesToBeRendered ) || ( m_iFramesToBeRendered == 0 ) ) && !bAnyEOS )
  {
    if ( iFrame >= m_iFrameSkip )
    {
      // read in depth and video
      for(Int iBaseViewIdx=0; iBaseViewIdx < m_iNumberOfInputViews; iBaseViewIdx++ )
      {
        m_apcTVideoIOYuvVideoInput[iBaseViewIdx]->read( apcPicYuvBaseVideo[iBaseViewIdx], aiPad ) ;
        bAnyEOS |= m_apcTVideoIOYuvVideoInput[iBaseViewIdx]->isEof();

        m_apcTVideoIOYuvDepthInput[iBaseViewIdx]->read( apcPicYuvBaseDepth[iBaseViewIdx], aiPad ) ;
        bAnyEOS |= m_apcTVideoIOYuvDepthInput[iBaseViewIdx]->isEof();
      }
    }
    else
    {
      iFrame++;
      continue;
    }

    // Hand the freshly read pictures to the model.
    for(Int iBaseViewIdx=0; iBaseViewIdx < m_iNumberOfInputViews; iBaseViewIdx++ )
    {
      TComPicYuv* pcPicYuvVideo = apcPicYuvBaseVideo[iBaseViewIdx];
      TComPicYuv* pcPicYuvDepth = apcPicYuvBaseDepth[iBaseViewIdx];
      Int iBaseViewSIdx = m_cCameraData.getBaseId2SortedId()[iBaseViewIdx ];
      cCurModel.setBaseView( iBaseViewSIdx, pcPicYuvVideo, pcPicYuvDepth, NULL, NULL );
    }

    m_cCameraData.update( (UInt) ( iFrame - m_iFrameSkip ));

    for(Int iBaseViewIdx=0; iBaseViewIdx < m_iNumberOfInputViews; iBaseViewIdx++ )
    {
      // setup virtual views
      Int iBaseViewSIdx = m_cCameraData.getBaseId2SortedId()[iBaseViewIdx];

      cCurModel.setErrorMode( iBaseViewSIdx, 1, 0 );
      Int iNumOfSV  = m_cRenModStrParser.getNumOfModelsForView( iBaseViewSIdx, 1);

      for (Int iCurView = 0; iCurView < iNumOfSV; iCurView++ )
      {
        Int iOrgRefBaseViewSIdx; Int iLeftBaseViewSIdx; Int iRightBaseViewSIdx; Int iSynthViewRelNum; Int iModelNum; Int iBlendMode;
        m_cRenModStrParser.getSingleModelData(iBaseViewSIdx, 1, iCurView, iModelNum, iBlendMode, iLeftBaseViewSIdx, iRightBaseViewSIdx, iOrgRefBaseViewSIdx, iSynthViewRelNum );

        Int iLeftBaseViewIdx  = -1;
        Int iRightBaseViewIdx = -1;

        TComPicYuv* pcPicYuvOrgRef  = NULL;
        Int** ppiShiftLUTLeft  = NULL;
        Int** ppiShiftLUTRight = NULL;
        Int** ppiBaseShiftLUTLeft  = NULL;
        Int** ppiBaseShiftLUTRight = NULL;

        Int iDistToLeft = -1;

        Int iSynthViewIdx = m_cCameraData.synthRelNum2Idx( iSynthViewRelNum );

        // Resolve shift LUTs for whichever reference views exist.
        if ( iLeftBaseViewSIdx != -1 )
        {
          iLeftBaseViewIdx = m_cCameraData.getBaseSortedId2Id()   [ iLeftBaseViewSIdx ];
          ppiShiftLUTLeft  = m_cCameraData.getSynthViewShiftLUTI()[ iLeftBaseViewIdx ][ iSynthViewIdx ];
        }

        if ( iRightBaseViewSIdx != -1 )
        {
          iRightBaseViewIdx = m_cCameraData.getBaseSortedId2Id()   [iRightBaseViewSIdx ];
          ppiShiftLUTRight  = m_cCameraData.getSynthViewShiftLUTI()[ iRightBaseViewIdx ][ iSynthViewIdx ];
        }

        if ( iRightBaseViewSIdx != -1 && iLeftBaseViewSIdx != -1 )
        {
          // Both references present: also needs base-to-base LUTs and distance.
          ppiBaseShiftLUTLeft  = m_cCameraData.getBaseViewShiftLUTI() [ iLeftBaseViewIdx  ][ iRightBaseViewIdx ];
          ppiBaseShiftLUTRight = m_cCameraData.getBaseViewShiftLUTI() [ iRightBaseViewIdx ][ iLeftBaseViewIdx  ];
          iDistToLeft = m_cCameraData.getRelDistLeft( iSynthViewIdx , iLeftBaseViewIdx, iRightBaseViewIdx);
        }

        std::cout << "Rendering Frame " << iFrame << " of View " << (Double) m_cCameraData.getSynthViewNumbers()[iSynthViewIdx] / VIEW_NUM_PREC << std::endl;

        cCurModel.setSingleModel( iModelNum, ppiShiftLUTLeft, ppiBaseShiftLUTLeft, ppiShiftLUTRight, ppiBaseShiftLUTRight, iDistToLeft, pcPicYuvOrgRef );

        // Pick synthesis position from which references are available.
        Int iViewPos;
        if (iLeftBaseViewSIdx != -1 && iRightBaseViewSIdx != -1)
        {
          iViewPos = VIEWPOS_MERGED;
        }
        else if ( iLeftBaseViewSIdx != -1 )
        {
          iViewPos = VIEWPOS_LEFT;
        }
        else if ( iRightBaseViewSIdx != -1 )
        {
          iViewPos = VIEWPOS_RIGHT;
        }
        else
        {
          AOT(true); // Neither reference exists: invalid model configuration.
        }

        cCurModel.getSynthVideo ( iModelNum, iViewPos, pcPicYuvSynthOut );

        // Write Output
        m_apcTVideoIOYuvSynthOutput[m_bSweep ? 0 : iModelNum]->write( pcPicYuvSynthOut, 0 ,0 ,0, 0 );
      }
    }
    iFrame++;
    iNumOfRenderedFrames++;
  }

  // Delete Buffers
  for ( UInt uiBaseView = 0; uiBaseView < m_iNumberOfInputViews; uiBaseView++ )
  {
    apcPicYuvBaseVideo[uiBaseView]->destroy();
    delete apcPicYuvBaseVideo[uiBaseView];

    apcPicYuvBaseDepth[uiBaseView]->destroy();
    delete apcPicYuvBaseDepth[uiBaseView];
  }
  pcPicYuvSynthOut->destroy();
  delete pcPicYuvSynthOut;

  xDestroyLib();
}
/// Main renderer entry point: reads video+depth for all base views and
/// synthesizes each output view by interpolation (direction 0) or
/// extrapolation from left/right (directions 1/2), honoring the configured
/// blend mode, similarity enhancement, and optional temporal depth filter.
Void TAppRendererTop::render()
{
  xCreateLib();
  xInitLib();

  // Create Buffers Input Views;
  std::vector<TComPicYuv*> apcPicYuvBaseVideo;
  std::vector<TComPicYuv*> apcPicYuvBaseDepth;

  // TemporalImprovement Filter
  std::vector<TComPicYuv*> apcPicYuvLastBaseVideo;
  std::vector<TComPicYuv*> apcPicYuvLastBaseDepth;

  Int aiPad[2] = { 0, 0 };

  // One video and one depth buffer per input view (plus previous-frame
  // buffers when the temporal depth filter is enabled).
  for ( UInt uiBaseView = 0; uiBaseView < m_iNumberOfInputViews; uiBaseView++ )
  {
    TComPicYuv* pcNewVideoPic = new TComPicYuv;
    TComPicYuv* pcNewDepthPic = new TComPicYuv;

    pcNewVideoPic->create( m_iSourceWidth, m_iSourceHeight, 1, 1, 1 );
    apcPicYuvBaseVideo.push_back(pcNewVideoPic);

    pcNewDepthPic->create( m_iSourceWidth, m_iSourceHeight, 1, 1, 1 );
    apcPicYuvBaseDepth.push_back(pcNewDepthPic);

    //Temporal improvement Filter
    if ( m_bTempDepthFilter )
    {
      pcNewVideoPic = new TComPicYuv;
      pcNewDepthPic = new TComPicYuv;

      pcNewVideoPic->create( m_iSourceWidth, m_iSourceHeight, 1, 1, 1 );
      apcPicYuvLastBaseVideo.push_back(pcNewVideoPic);

      pcNewDepthPic->create( m_iSourceWidth, m_iSourceHeight, 1, 1, 1 );
      apcPicYuvLastBaseDepth.push_back(pcNewDepthPic);
    }
  }

  // Create Buffer for synthesized View
  TComPicYuv* pcPicYuvSynthOut = new TComPicYuv;
  pcPicYuvSynthOut->create( m_iSourceWidth, m_iSourceHeight, 1, 1, 1 );

  Bool bAnyEOS = false;
  Int iNumOfRenderedFrames = 0;
  Int iFrame = 0;

  // Main loop: until requested frame count reached (0 = until EOF) or EOF.
  while ( ( ( iNumOfRenderedFrames < m_iFramesToBeRendered ) || ( m_iFramesToBeRendered == 0 ) ) && !bAnyEOS )
  {
    if ( iFrame >= m_iFrameSkip )
    {
      // read in depth and video
      for(Int iBaseViewIdx=0; iBaseViewIdx < m_iNumberOfInputViews; iBaseViewIdx++ )
      {
        m_apcTVideoIOYuvVideoInput[iBaseViewIdx]->read( apcPicYuvBaseVideo[iBaseViewIdx], aiPad ) ;
        apcPicYuvBaseVideo[iBaseViewIdx]->extendPicBorder();
        bAnyEOS |= m_apcTVideoIOYuvVideoInput[iBaseViewIdx]->isEof();

        m_apcTVideoIOYuvDepthInput[iBaseViewIdx]->read( apcPicYuvBaseDepth[iBaseViewIdx], aiPad ) ;
        apcPicYuvBaseDepth[iBaseViewIdx]->extendPicBorder();
        bAnyEOS |= m_apcTVideoIOYuvDepthInput[iBaseViewIdx]->isEof();

        if ( m_bTempDepthFilter && (iFrame >= m_iFrameSkip) )
        {
          // Smooth depth over time; last argument flags the first processed frame.
          m_pcRenTop->temporalFilterVSRS( apcPicYuvBaseVideo[iBaseViewIdx], apcPicYuvBaseDepth[iBaseViewIdx], apcPicYuvLastBaseVideo[iBaseViewIdx], apcPicYuvLastBaseDepth[iBaseViewIdx], ( iFrame == m_iFrameSkip) );
        }
      }
    }
    else
    {
      std::cout << "Skipping Frame " << iFrame << std::endl;
      iFrame++;
      continue;
    }

    // NOTE(review): sibling functions cast the whole difference,
    // (UInt)(iFrame - m_iFrameSkip); result is the same bit pattern here
    // since iFrame >= m_iFrameSkip on this path.
    m_cCameraData.update( (UInt)iFrame - m_iFrameSkip );

    for(Int iSynthViewIdx=0; iSynthViewIdx < m_iNumberOfOutputViews; iSynthViewIdx++ )
    {
      Int  iLeftBaseViewIdx  = -1;
      Int  iRightBaseViewIdx = -1;

      Bool bIsBaseView = false;

      Int iRelDistToLeft;
      Bool bHasLRView = m_cCameraData.getLeftRightBaseView( iSynthViewIdx, iLeftBaseViewIdx, iRightBaseViewIdx, iRelDistToLeft, bIsBaseView );
      Bool bHasLView = ( iLeftBaseViewIdx != -1 );
      Bool bHasRView = ( iRightBaseViewIdx != -1 );
      Bool bRender   = true;

      Int  iBlendMode = m_iBlendMode;
      Int  iSimEnhBaseView = 0;

      switch( m_iRenderDirection )
      {
      /// INTERPOLATION
      case 0:
        AOF( bHasLRView || bIsBaseView );

        if ( !bHasLRView && bIsBaseView && m_iBlendMode == 0 )
        {
          // Plain base view with no blending: copy instead of rendering.
          bRender = false;
        }
        else
        {
          if ( bIsBaseView )
          {
            AOF( iLeftBaseViewIdx == iRightBaseViewIdx );
            Int iSortedBaseViewIdx = m_cCameraData.getBaseId2SortedId() [iLeftBaseViewIdx];

            // Blend modes 1/2 substitute a neighboring base view as the
            // missing left/right reference; without one, fall back to copy.
            if ( m_iBlendMode == 1 )
            {
              if ( iSortedBaseViewIdx - 1 >= 0 )
              {
                iLeftBaseViewIdx = m_cCameraData.getBaseSortedId2Id()[ iSortedBaseViewIdx - 1];
                bRender = true;
              }
              else
              {
                bRender = false;
              }
            }
            else if ( m_iBlendMode == 2 )
            {
              if ( iSortedBaseViewIdx + 1 < m_iNumberOfInputViews )
              {
                iRightBaseViewIdx = m_cCameraData.getBaseSortedId2Id()[ iSortedBaseViewIdx + 1];
                bRender = true;
              }
              else
              {
                bRender = false;
              }
            }
          }

          if ( m_iBlendMode == 3 )
          {
            // Mode 3: always blend against base view 0, choosing the farther
            // of the two references as the fill view and deriving the
            // effective blend direction from the sorted order.
            if ( bIsBaseView && (iLeftBaseViewIdx == 0) )
            {
              bRender = false;
            }
            else
            {
              Int iDistLeft  = abs( m_cCameraData.getBaseId2SortedId()[0] - m_cCameraData.getBaseId2SortedId() [iLeftBaseViewIdx ] );
              Int iDistRight = abs( m_cCameraData.getBaseId2SortedId()[0] - m_cCameraData.getBaseId2SortedId() [iRightBaseViewIdx] );

              Int iFillViewIdx = iDistLeft > iDistRight ? iLeftBaseViewIdx : iRightBaseViewIdx;

              if( m_cCameraData.getBaseId2SortedId()[0] < m_cCameraData.getBaseId2SortedId() [iFillViewIdx] )
              {
                iBlendMode        = 1;
                iLeftBaseViewIdx  = 0;
                iRightBaseViewIdx = iFillViewIdx;
              }
              else
              {
                iBlendMode        = 2;
                iLeftBaseViewIdx  = iFillViewIdx;
                iRightBaseViewIdx = 0;
              }
            }
          }
          else
          {
            iBlendMode = m_iBlendMode;
          }
        }

        if ( m_bSimEnhance )
        {
          // Choose which base view drives similarity enhancement.
          if ( m_iNumberOfInputViews == 3 && m_cCameraData.getRelSynthViewNumbers()[ iSynthViewIdx ] < VIEW_NUM_PREC )
          {
            iSimEnhBaseView = 2; // Take middle view
          }
          else
          {
            iSimEnhBaseView = 1; // Take left view
          }
        }

        if ( bRender )
        {
          std::cout << "Rendering Frame " << iFrame << " of View " << (Double) m_cCameraData.getSynthViewNumbers()[iSynthViewIdx ] / VIEW_NUM_PREC << " Left BaseView: " << (Double) m_cCameraData.getBaseViewNumbers() [iLeftBaseViewIdx ] / VIEW_NUM_PREC << " Right BaseView: " << (Double) m_cCameraData.getBaseViewNumbers() [iRightBaseViewIdx] / VIEW_NUM_PREC << " BlendMode: " << iBlendMode << std::endl;

          m_pcRenTop->setShiftLUTs( m_cCameraData.getSynthViewShiftLUTD()[iLeftBaseViewIdx ][iSynthViewIdx], m_cCameraData.getSynthViewShiftLUTI()[iLeftBaseViewIdx ][iSynthViewIdx], m_cCameraData.getBaseViewShiftLUTI ()[iLeftBaseViewIdx ][iRightBaseViewIdx], m_cCameraData.getSynthViewShiftLUTD()[iRightBaseViewIdx][iSynthViewIdx], m_cCameraData.getSynthViewShiftLUTI()[iRightBaseViewIdx][iSynthViewIdx], m_cCameraData.getBaseViewShiftLUTI ()[iRightBaseViewIdx][iLeftBaseViewIdx ], iRelDistToLeft );

          m_pcRenTop->interpolateView( apcPicYuvBaseVideo[iLeftBaseViewIdx ], apcPicYuvBaseDepth[iLeftBaseViewIdx ], apcPicYuvBaseVideo[iRightBaseViewIdx], apcPicYuvBaseDepth[iRightBaseViewIdx], pcPicYuvSynthOut, iBlendMode, iSimEnhBaseView );
        }
        else
        {
          AOT(iLeftBaseViewIdx != iRightBaseViewIdx );
          apcPicYuvBaseVideo[iLeftBaseViewIdx]->copyToPic( pcPicYuvSynthOut );
          std::cout << "Copied Frame " << iFrame << " of View " << (Double) m_cCameraData.getSynthViewNumbers()[iSynthViewIdx] / VIEW_NUM_PREC << " (BaseView) " << std::endl;
        }
        break;
      /// EXTRAPOLATION FROM LEFT
      case 1:
        if ( !bHasLView ) // View to render is BaseView
        {
          bRender = false;
        }

        if ( bIsBaseView )
        {
          AOF( iLeftBaseViewIdx == iRightBaseViewIdx );
          Int iSortedBaseViewIdx = m_cCameraData.getBaseId2SortedId() [iLeftBaseViewIdx];
          if ( iSortedBaseViewIdx - 1 >= 0 )
          {
            // Use the next base view to the left as the extrapolation source.
            iLeftBaseViewIdx = m_cCameraData.getBaseSortedId2Id()[ iSortedBaseViewIdx - 1];
          }
          else
          {
            // No view further left: just copy the original.
            std::cout << "Copied Frame " << iFrame << " of BaseView " << (Double) m_cCameraData.getSynthViewNumbers()[iSynthViewIdx] / VIEW_NUM_PREC << std::endl;
            apcPicYuvBaseVideo[iLeftBaseViewIdx]->copyToPic( pcPicYuvSynthOut ); // Copy Original
            bRender = false;
          }
        }

        if (bRender)
        {
          std::cout << "Rendering Frame " << iFrame << " of View " << (Double) m_cCameraData.getSynthViewNumbers()[iSynthViewIdx] / VIEW_NUM_PREC << std::endl;
          m_pcRenTop->setShiftLUTs( m_cCameraData.getSynthViewShiftLUTD()[iLeftBaseViewIdx ][iSynthViewIdx], m_cCameraData.getSynthViewShiftLUTI()[iLeftBaseViewIdx ][iSynthViewIdx], NULL, NULL, NULL, NULL, -1 );
          m_pcRenTop->extrapolateView( apcPicYuvBaseVideo[iLeftBaseViewIdx ], apcPicYuvBaseDepth[iLeftBaseViewIdx ], pcPicYuvSynthOut, true );
        }
        break;
      /// EXTRAPOLATION FROM RIGHT
      case 2: // extrapolation from right
        if ( !bHasRView ) // View to render is BaseView
        {
          bRender = false;
        }

        if ( bIsBaseView )
        {
          AOF( iLeftBaseViewIdx == iRightBaseViewIdx );
          Int iSortedBaseViewIdx = m_cCameraData.getBaseId2SortedId() [iLeftBaseViewIdx];
          if ( iSortedBaseViewIdx + 1 < m_iNumberOfInputViews )
          {
            // Use the next base view to the right as the extrapolation source.
            iRightBaseViewIdx = m_cCameraData.getBaseSortedId2Id()[ iSortedBaseViewIdx + 1];
          }
          else
          {
            // No view further right: just copy the original.
            std::cout << "Copied Frame " << iFrame << " of BaseView " << (Double) m_cCameraData.getSynthViewNumbers()[iSynthViewIdx] / VIEW_NUM_PREC << std::endl;
            apcPicYuvBaseVideo[iLeftBaseViewIdx]->copyToPic( pcPicYuvSynthOut ); // Copy Original
            bRender = false;
          }
        }

        if ( bRender )
        {
          std::cout << "Rendering Frame " << iFrame << " of View " << (Double) m_cCameraData.getSynthViewNumbers()[iSynthViewIdx] / VIEW_NUM_PREC << std::endl;
          m_pcRenTop->setShiftLUTs( NULL, NULL,NULL, m_cCameraData.getSynthViewShiftLUTD()[iRightBaseViewIdx ][iSynthViewIdx], m_cCameraData.getSynthViewShiftLUTI()[iRightBaseViewIdx ][iSynthViewIdx],NULL, iRelDistToLeft);
          m_pcRenTop->extrapolateView( apcPicYuvBaseVideo[iRightBaseViewIdx ], apcPicYuvBaseDepth[iRightBaseViewIdx ], pcPicYuvSynthOut, false);
        }
        break;
      }

      // Write Output
      m_apcTVideoIOYuvSynthOutput[m_bSweep ? 0 : iSynthViewIdx]->write( pcPicYuvSynthOut, 0, 0, 0, 0 );
    }
    iFrame++;
    iNumOfRenderedFrames++;
  }

  // Delete Buffers
  for ( UInt uiBaseView = 0; uiBaseView < m_iNumberOfInputViews; uiBaseView++ )
  {
    apcPicYuvBaseVideo[uiBaseView]->destroy();
    delete apcPicYuvBaseVideo[uiBaseView];

    apcPicYuvBaseDepth[uiBaseView]->destroy();
    delete apcPicYuvBaseDepth[uiBaseView];

    // Temporal Filter
    if ( m_bTempDepthFilter )
    {
      apcPicYuvLastBaseVideo[uiBaseView]->destroy();
      delete apcPicYuvLastBaseVideo[uiBaseView];

      apcPicYuvLastBaseDepth[uiBaseView]->destroy();
      delete apcPicYuvLastBaseDepth[uiBaseView];
    }
  }
  pcPicYuvSynthOut->destroy();
  delete pcPicYuvSynthOut;

  xDestroyLib();
}
int main(int argc, const char** argv) { bool do_help; string filename_in, filename_out; unsigned int width, height; unsigned int bitdepth_in, bitdepth_out; unsigned int num_frames; unsigned int num_frames_skip; po::Options opts; opts.addOptions() ("help", do_help, false, "this help text") ("InputFile,i", filename_in, string(""), "input file to convert") ("OutputFile,o", filename_out, string(""), "output file") ("SourceWidth", width, 0u, "source picture width") ("SourceHeight", height, 0u, "source picture height") ("InputBitDepth", bitdepth_in, 8u, "bit-depth of input file") ("OutputBitDepth", bitdepth_out, 8u, "bit-depth of output file") ("NumFrames", num_frames, 0xffffffffu, "number of frames to process") ("FrameSkip,-fs", num_frames_skip, 0u, "Number of frames to skip at start of input YUV") ; po::setDefaults(opts); po::scanArgv(opts, argc, argv); if (argc == 1 || do_help) { /* argc == 1: no options have been specified */ po::doHelp(cout, opts); return EXIT_FAILURE; } TVideoIOYuv input; TVideoIOYuv output; input.open((char*)filename_in.c_str(), false, bitdepth_in, bitdepth_in, bitdepth_out, bitdepth_out); output.open((char*)filename_out.c_str(), true, bitdepth_out, bitdepth_out, bitdepth_out, bitdepth_out); input.skipFrames(num_frames_skip, width, height); TComPicYuv frame; frame.create( width, height, 1, 1, 0 ); int pad[2] = {0, 0}; unsigned int num_frames_processed = 0; while (!input.isEof()) { if (! input.read(&frame, pad)) { break; } #if 0 Pel* img = frame.getLumaAddr(); for (int y = 0; y < height; y++) { for (int x = 0; x < height; x++) img[x] = 0; img += frame.getStride(); } img = frame.getLumaAddr(); img[0] = 1; #endif output.write(&frame); num_frames_processed++; if (num_frames_processed == num_frames) break; } input.close(); output.close(); return EXIT_SUCCESS; }
/**
 * Per-frame encoder setup and compression driver (x265).
 *
 * Derives the RDO lambda for the slice QP, weights chroma distortion per the
 * chroma QP mapping, propagates lambda/weights to every row coder and to SAO,
 * then prepares per-substream bitstreams and (optionally) adaptive search ranges.
 *
 * NOTE(review): the function body appears truncated at the end of this chunk —
 * bitstreamRedirect/outStreams are allocated here but their use and release are
 * not visible.
 */
void FrameEncoder::compressFrame()
{
  PPAScopeEvent(FrameEncoder_compressFrame);
  int64_t startCompressTime = x265_mdate();
  TEncEntropy* entropyCoder = getEntropyCoder(0);
  TComSlice* slice = m_pic->getSlice();
  m_nalCount = 0;

  int qp = slice->getSliceQp();
  // Lambda from the slice-type-specific table; X265_MAX guards against a zero entry.
  double lambda = 0;
  if (slice->getSliceType() == I_SLICE)
  {
    lambda = X265_MAX(1, x265_lambda2_tab_I[qp]);
  }
  else
  {
    lambda = X265_MAX(1, x265_lambda2_non_I[qp]);
  }

  // for RDO
  // in RdCost there is only one lambda because the luma and chroma bits are not separated,
  // instead we weight the distortion of chroma.
  // NOTE(review): the Clip3 upper bound here is 57, but setLambda() elsewhere in
  // this file clips to 70 for the same computation — confirm which bound matches
  // the size of g_chromaScale.
  int qpc;
  int chromaQPOffset = slice->getPPS()->getChromaCbQpOffset() + slice->getSliceQpDeltaCb();
  qpc = Clip3(0, 57, qp + chromaQPOffset);
  double cbWeight = pow(2.0, (qp - g_chromaScale[qpc])); // takes into account of the chroma qp mapping and chroma qp Offset
  chromaQPOffset = slice->getPPS()->getChromaCrQpOffset() + slice->getSliceQpDeltaCr();
  qpc = Clip3(0, 57, qp + chromaQPOffset);
  double crWeight = pow(2.0, (qp - g_chromaScale[qpc])); // takes into account of the chroma qp mapping and chroma qp Offset
  double chromaLambda = lambda / crWeight;

  // Propagate lambda / chroma weights / source plane to every row coder.
  TComPicYuv *fenc = slice->getPic()->getPicYuvOrg();
  for (int i = 0; i < m_numRows; i++)
  {
    m_rows[i].m_search.setQPLambda(qp, lambda, chromaLambda);
    m_rows[i].m_search.m_me.setSourcePlane(fenc->getLumaAddr(), fenc->getStride());
    m_rows[i].m_rdCost.setLambda(lambda);
    m_rows[i].m_rdCost.setCbDistortionWeight(cbWeight);
    m_rows[i].m_rdCost.setCrDistortionWeight(crWeight);
  }

  // SAO uses the same lambdas; its search depth depends on slice type/reference status.
  m_frameFilter.m_sao.lumaLambda = lambda;
  m_frameFilter.m_sao.chromaLambda = chromaLambda;
  switch (slice->getSliceType())
  {
  case I_SLICE:
    m_frameFilter.m_sao.depth = 0;
    break;
  case P_SLICE:
    m_frameFilter.m_sao.depth = 1;
    break;
  case B_SLICE:
    m_frameFilter.m_sao.depth = 2 + !slice->isReferenced();
    break;
  }

  slice->setSliceQpDelta(0);
  slice->setSliceQpDeltaCb(0);
  slice->setSliceQpDeltaCr(0);

  // One substream per CTU row under WPP, otherwise a single stream.
  int numSubstreams = m_cfg->param.bEnableWavefront ? m_pic->getPicSym()->getFrameHeightInCU() : 1;
  // TODO: these two items can likely be FrameEncoder member variables to avoid re-allocs
  TComOutputBitstream* bitstreamRedirect = new TComOutputBitstream;
  TComOutputBitstream* outStreams = new TComOutputBitstream[numSubstreams];

  // Adaptive search range: scale the motion search range by POC distance to each reference.
  if (m_cfg->getUseASR() && !slice->isIntra())
  {
    int pocCurr = slice->getPOC();
    int maxSR = m_cfg->param.searchRange;
    int numPredDir = slice->isInterP() ? 1 : 2;
    // NOTE(review): loop runs dir <= numPredDir (0..2 for B slices) — confirm
    // getNumRefIdx(2) is valid (HM-era combined list) rather than an off-by-one.
    for (int dir = 0; dir <= numPredDir; dir++)
    {
      for (int refIdx = 0; refIdx < slice->getNumRefIdx(dir); refIdx++)
      {
        int refPOC = slice->getRefPic(dir, refIdx)->getPOC();
        int newSR = Clip3(8, maxSR, (maxSR * ADAPT_SR_SCALE * abs(pocCurr - refPOC) + 4) >> 3);
        for (int i = 0; i < m_numRows; i++)
        {
          m_rows[i].m_search.setAdaptiveSearchRange(dir, refIdx, newSR);
        }
      }
    }
  }
//index== 0 above 1 left 2 above and left 3 above and right
/**
 * Estimate/emit the Exp-Golomb bit cost of coding this TU's residual by
 * predicting each non-zero residual sample from the nearest non-zero residual
 * sample of a single neighbouring CTU (identified by uiCUHandleAddr).
 *
 * Algorithm (as visible in this chunk):
 *  1. Copy the neighbour CTU's residual block into a temporary TComYuv.
 *  2. Greedily walk its non-zero samples in nearest-first order (cost metric =
 *     Exp-Golomb bits of the coordinate deltas), building srclist[] and zeroing
 *     each visited sample.
 *  3. Walk this TU's non-zero residual samples the same way; for each, pick the
 *     cheapest source entry and record (dx, dy, value?, ?, source index) in
 *     indexlist[] (columns 2/3 are read below but not written in this chunk —
 *     presumably filled via the qsort'd layout; verify against full file).
 *  4. Cost the list three ways (run-length vs plain VLC) for positions,
 *     residuals and source indices, choosing the cheaper flavour per group
 *     (vlcf[]), then actually encode with ExpGolombEncode into pcCoeff.
 *
 * \param pcResiYuv       residual buffer of the current TU
 * \param rTu             transform-unit descriptor
 * \param compID          component being processed
 * \param uiCUHandleAddr  raster address of the neighbour CTU used as source
 * \param uiAIndex        neighbour-selection index (see comment above)
 * \param pcCoeff         destination coefficient/bitstream buffer
 * \return -1 if no residual picture or no usable source samples; 0 if this TU
 *         has no non-zero residual; otherwise the number of bits written (truebits).
 *
 * NOTE(review): srclist (48 KB) and indexlist (80 KB) live on the stack — risk
 * of stack overflow on constrained threads. assert(true) in the no-source-found
 * branch is a no-op (assert(false) was probably intended). Several loops compare
 * UInt counters against int srcindex/dstindex (signed/unsigned mix).
 */
Int GolombCode_Predict_SingleNeighbor(TComYuv *pcResiYuv, TComTU& rTu, const ComponentID compID, UInt uiCUHandleAddr, UInt uiAIndex, TCoeff* pcCoeff)
{
  const Bool bIsLuma = isLuma(compID);
  const TComRectangle &rect = rTu.getRect(compID);
  TComDataCU *pcCU = rTu.getCU();
  UInt uiCUAddr = pcCU->getCtuRsAddr();
  //if ((int)uiCUHandleAddr < 0) return -1;
  TComPicYuv *pcPicYuvResi = pcCU->getPic()->getPicYuvResi();
  if (pcPicYuvResi == NULL) return -1;
  const UInt uiAbsPartIdx = rTu.GetAbsPartIdxTU();
  // const UInt uiZOrder = pcCU->getZorderIdxInCU() +uiAbsPartIdx;
  const UInt uiTrDepth = rTu.GetTransformDepthRelAdj(compID);
  const UInt uiFullDepth = rTu.GetTransformDepthTotal();
  const UInt uiLog2TrSize = rTu.GetLog2LumaTrSize();
  const ChromaFormat chFmt = pcCU->getPic()->getChromaFormat();
  const UInt uiWidth = rect.width;
  const UInt uiHeight = rect.height;
  const UInt uiStride = pcResiYuv->getStride(compID);
  UInt uiAddr = pcCU->getCtuRsAddr();
  TComYuv *pcTemp;
  pcTemp = new TComYuv;
  UInt uiSrc1Stride = pcPicYuvResi->getStride(compID);
  // Pixel coordinates of the neighbour CTU / current partition (computed but
  // superseded by the getAddr() call below).
  UInt CUPelX, CUPelY;
  CUPelX = (uiCUHandleAddr % pcCU->getPic()->getFrameWidthInCtus()) * g_uiMaxCUWidth;
  CUPelY = (uiCUHandleAddr / pcCU->getPic()->getFrameWidthInCtus()) * g_uiMaxCUHeight;
  CUPelX = CUPelX + g_auiRasterToPelX[g_auiZscanToRaster[uiAbsPartIdx]];
  CUPelY = CUPelY + g_auiRasterToPelY[g_auiZscanToRaster[uiAbsPartIdx]];
  //for(int m=0;m<256;m++) cout<<g_auiZscanToRaster[m] <<" ";cout<<endl;
  //for(int m=0;m<256;m++) cout<<g_auiRasterToPelX[m] <<" ";cout<<endl;
  //for(int m=0;m<256;m++) cout<<g_auiRasterToPelY[m] <<" ";cout<<endl;
  //Pel *pSrc1 = pcPicYuvResi->getAddr(compID) +CUPelY * uiSrc1Stride + CUPelX;
  Pel *pSrc1 = pcPicYuvResi->getAddr(compID, uiCUHandleAddr, uiAbsPartIdx + pcCU->getZorderIdxInCtu());
  /* if( compID != COMPONENT_Y) { pSrc1 = pcPicYuvResi->getAddr(COMPONENT_Y, uiCUHandleAddr, uiAbsPartIdx + pcCU->getZorderIdxInCU()); }*/
  pcTemp->create(uiWidth, uiHeight, chFmt);
  // pcTemp->copyFromPicComponent(compID,pcPicYuvResi,uiCUHandleAddr, pcCU->getZorderIdxInCU()+uiAbsPartIdx);
  // Manual copy of the neighbour residual block into the scratch buffer.
  UInt uiTempStride = pcTemp->getStride(compID);
  Pel *pTemp = pcTemp->getAddr(compID);
  for (Int y = 0; y < uiHeight; y++)
  {
    for (Int x = 0; x < uiWidth; x++)
    {
      pTemp[x] = pSrc1[x];
    }
    pTemp += uiTempStride;
    pSrc1 += uiSrc1Stride;
  }

  // --- Pass 1: collect non-zero source samples in greedy nearest-first order ---
  int srclx = 0;
  int srcly = 0;
  int srclv = 0;
  int srchasleft = 1;
  Pel srcpel;
  int srclist[3][64 * 64];      // [0]=x, [1]=y, [2]=value of each source sample
  int srcindex = 0;
  memset(srclist, -1, 3 * 64 * 64 * sizeof(int));
  int cursrclistindex = 0;
  Pel* piSrc = pcTemp->getAddr(compID);
  //Pel* piSrc = pcTemp->getAddr(compID, uiAbsPartIdx);
  Pel* pSrc = piSrc;
  //found the source list
  while (srchasleft)
  {
    int ndis = 1000;
    int nx = -1;
    int ny = -1;
    pSrc = piSrc;
    // Scan for the remaining non-zero sample cheapest to reach from (srclx, srcly).
    for (UInt y = 0; y < uiHeight; y++)
    {
      for (UInt x = 0; x<uiWidth; x++)
      {
        assert(pSrc[x] >-256 && pSrc[x] < 256);
        if (pSrc[x] != 0)
        {
          int dis = 0;
          dis += getG0Bits((x - srclx));
          dis += getG0Bits((y - srcly));
          if (dis < ndis)
          {
            nx = x;
            ny = y;
            ndis = dis;
          }
        }
      }
      pSrc += uiTempStride;
    }
    if (nx != -1 && ny != -1)
    {
      srcpel = *(piSrc + ny*uiTempStride + nx);
      srclx = nx;
      srcly = ny;
      srclv = srcpel;
      srclist[0][srcindex] = srclx;
      srclist[1][srcindex] = srcly;
      srclist[2][srcindex] = srcpel;
      srcindex++;
      // Zero the visited sample so it is not selected again.
      *(piSrc + ny*uiTempStride + nx) = 0;
    }
    else
    {
      srchasleft = 0;
    }
  }
  if (srcindex == 0)
  {
    // No usable source samples in the neighbour — clean up and signal failure.
    pcTemp->destroy();
    delete pcTemp;
    pcTemp = NULL;
    return -1;
  }

  ////
  TComPicYuv *pcPicOrg = pcCU->getPic()->getPicYuvOrg();
  Pel* piOrg = pcPicOrg->getAddr(compID, pcCU->getCtuRsAddr(), pcCU->getZorderIdxInCtu() + uiAbsPartIdx);
  const UInt uiOrgStride = pcPicOrg->getStride(compID);
  ////
  // --- Pass 2: match this TU's non-zero residuals against the source list ---
  Pel* piResi = pcResiYuv->getAddr(compID, uiAbsPartIdx);
  Pel* pResi = piResi;
  int dstindex = 0;
  int indexlist[64 * 64][5];    // per match: [0]=dx, [1]=dy, [2]/[3]=residual data, [4]=source index
  memset(indexlist, 0, 5 * 64 * 64 * sizeof(int));
  int contz = 0;
  int contnz = 0;
  int cs = 0;
  int bits = 0;
  // int temp;
  int lx = 0;
  int ly = 0;
  int lv = 0;
  int hasleft = 1;
  Pel pel;
  while (hasleft)
  {
    //found the least distance point
    int ndis = 1000;
    int nx = -1;
    int ny = -1;
    pResi = piResi;
    for (UInt y = 0; y < uiHeight; y++)
    {
      for (UInt x = 0; x < uiWidth; x++)
      {
        if (pResi[x] != 0)
        {
          int dis = 0;
          dis += getG0Bits((x - lx));
          dis += getG0Bits((y - ly));
          if (dis < ndis)
          {
            nx = x;
            ny = y;
            ndis = dis;
          }
        }
      }
      pResi += uiStride;
    }
    if (nx != -1 && ny != -1)
    {
      pel = *(piResi + ny*uiStride + nx);
      // Find the source entry with the cheapest positional delta.
      int srcdis = 1024 * 4;
      int srccur = -1;
      for (UInt s = 0; s < srcindex; s++)
      {
        int curdis = 0;
        curdis += getG0Bits((nx - srclist[0][s]));
        curdis += getG0Bits((ny - srclist[1][s]));
        // curdis += getG0Bits( (pel-srclist[2][s]));// getG0Bits can handle -512 && 512
        if (curdis < srcdis)
        {
          srccur = s;
          srcdis = curdis;
        }
      }
      if (srccur != -1)
      {
        indexlist[dstindex][0] = nx - srclist[0][srccur];
        indexlist[dstindex][1] = ny - srclist[1][srccur];
        assert(pel != 0);
        indexlist[dstindex][4] = srccur;
        dstindex++;
        cursrclistindex = srccur;
      }
      else
      {
        // NOTE(review): assert(true) is a no-op; assert(false) may have been intended.
        assert(true);
      }
      lx = nx;
      ly = ny;
      lv = pel;
      *(piResi + ny*uiStride + nx) = 0;
    }
    else
    {
      hasleft = 0;
    }
  }
  pcTemp->destroy();
  delete pcTemp;
  pcTemp = NULL;
  if (dstindex == 0)
  {
    // No residual to code — zero bits.
    assert(bits == 0);
    return bits;
  }

  // Sort matches and delta-code the source-index column.
  qsort(indexlist, dstindex, sizeof(int)* 5, compare5);
  for (UInt x = dstindex - 1; x > 0; x--)
  {
    indexlist[x][4] -= indexlist[x - 1][4];
  }

  // --- Cost estimation: choose run-length vs plain VLC per field group ---
  //bits += getG0Bits( (indexlist[0][0]));
  //bits += getG0Bits( (indexlist[0][1]));
  int maxlength = 0;
  UInt truebits = 0;
  bool vlcf[3] = { false, false, false };// dx & dy residual srcindex
  int z01 = 0;
  // Run-length cost of (dx, dy) pairs.
  for (UInt x = 1; x < dstindex; x++)
  {
    if (indexlist[x][0] == indexlist[x - 1][0] && indexlist[x][1] == indexlist[x - 1][1])
    {
      maxlength++;
    }
    else
    {
      bits += getG0Bits((maxlength));
      bits += getG0Bits((indexlist[x - 1][0]));
      bits += getG0Bits((indexlist[x - 1][1]));
      maxlength = 0;
    }
    if (indexlist[x][0] == 0 && indexlist[x][1] == 0) z01++;
  }
  bits += getG0Bits((maxlength));
  bits += getG0Bits((indexlist[dstindex - 1][0]));
  bits += getG0Bits((indexlist[dstindex - 1][1]));
  // Plain per-entry cost of (dx, dy) for comparison.
  UInt sbits = 0;
  for (UInt x = 0; x < dstindex; x++)
  {
    sbits += getG0Bits((indexlist[x][0]));
    sbits += getG0Bits((indexlist[x][1]));
  }
  // printf("gain %6d position before %6d after %6d\n",sbits-bits,sbits,bits);
  if (sbits < bits)
  {
    vlcf[0] = true;
    bits = sbits;
  }

  sbits = bits + 1;
  //bits += getG0Bits( PTable(compID,indexlist[0][2]));
  // Run-length cost of the residual columns [2]/[3].
  {
    maxlength = 0;
    for (UInt x = 1; x < dstindex; x++)
    {
      if (indexlist[x][2] == indexlist[x - 1][2] && indexlist[x][3] == indexlist[x - 1][3])
      {
        maxlength++;
      }
      else
      {
        bits += getG0Bits((maxlength));
        bits += getG0Bits(indexlist[x - 1][3]);
        bits += getG0Bits(indexlist[x - 1][2]);
        maxlength = 0;
      }
    }
    bits += getG0Bits((maxlength));
    bits += getG0Bits(indexlist[dstindex - 1][3]);
    bits += getG0Bits((indexlist[dstindex - 1][2]));
  }
  UInt vbits = 0;
  sbits = bits - sbits;
  for (UInt x = 0; x < dstindex; x++)
  {
    {
      vbits += getG0Bits(indexlist[x][2]);
      vbits += getG0Bits(indexlist[x][3]);
    }
  }
  // printf("gain %6d delta resi before %6d after %6d\n",vbits-sbits,vbits,sbits);
  if (vbits < sbits)
  {
    vlcf[1] = true;
    bits = bits - sbits + vbits;
  }

  sbits = bits + 1;
  //bits += getG0Bits( (indexlist[0][3]));
  // Run-length cost of the (delta-coded) source-index column [4].
  Int srcPosIndex;
  srcPosIndex = 4;
  maxlength = 0;
  for (UInt x = 1; x < dstindex; x++)
  {
    if (indexlist[x][srcPosIndex] == indexlist[x - 1][srcPosIndex])
    {
      maxlength++;
    }
    else
    {
      bits += getG0Bits((maxlength));
      bits += getG0Bits((indexlist[x - 1][srcPosIndex]));
      maxlength = 0;
    }
  }
  bits += getG0Bits((maxlength));
  bits += getG0Bits((indexlist[dstindex - 1][srcPosIndex]));
  sbits = bits - sbits;
  vbits = 0;
  for (UInt x = 0; x < dstindex; x++)
  {
    vbits += getG0Bits((indexlist[x][srcPosIndex]));
  }
  // printf("gain %6d delta index before %6d after %6d\n",vbits-sbits,vbits,sbits);
  if (vbits < sbits)
  {
    vlcf[2] = true;
    bits = bits - sbits + vbits;
  }
  //#if INTRA_PR_DEBUG
  // if( pcCU->getAddr()==INTRA_PR_CUNUM )
  // printf("position distance zero %6d of %6d total bits %6d\n",z01,dstindex,bits+1);
  //#endif

  // --- Actual encoding, mirroring the cheaper scheme chosen for each group ---
  truebits = 0;
  ExpGolombEncode(uiAIndex, pcCoeff, truebits);
  //--------------encode srcindex
  if (vlcf[2])
  {
    ExpGolombEncode(0, pcCoeff, truebits);
    for (UInt x = 0; x < dstindex; x++)
    {
      //cout<<" "<<indexlist[x][3] ;
      ExpGolombEncode((indexlist[x][srcPosIndex]), pcCoeff, truebits);
    }
  }
  else
  {
    ExpGolombEncode(1, pcCoeff, truebits);
    maxlength = 0;
    for (UInt x = 1; x<dstindex; x++)
    {
      if (indexlist[x][srcPosIndex] == indexlist[x - 1][srcPosIndex])
      {
        maxlength++;
      }
      else
      {
        ExpGolombEncode((maxlength), pcCoeff, truebits);
        ExpGolombEncode((indexlist[x - 1][srcPosIndex]), pcCoeff, truebits);
        maxlength = 0;
      }
    }
    assert(maxlength>-1);
    ExpGolombEncode((maxlength), pcCoeff, truebits);
    ExpGolombEncode((indexlist[dstindex - 1][srcPosIndex]), pcCoeff, truebits);
  }
  // Group terminator.
  ExpGolombEncode(-1, pcCoeff, truebits);
  //---------------encode residual
  if (vlcf[1])
  {
    ExpGolombEncode(0, pcCoeff, truebits);
    for (UInt x = 0; x < dstindex; x++)
    {
      {
        ExpGolombEncode(indexlist[x][2], pcCoeff, truebits);
        // if( !bNoResidual)
        // ExpGolombEncode( indexlist[x][3],pcCoeff,truebits);
      }
    }
  }
  else
  {
    ExpGolombEncode(1, pcCoeff, truebits);
    //ExpGolombEncode( (indexlist[0][2]),pcCoeff,truebits);
    maxlength = 0;
    {
      for (UInt x = 1; x < dstindex; x++)
      {
        if (indexlist[x][2] == indexlist[x - 1][2] && indexlist[x][3] == indexlist[x - 1][3])
        {
          maxlength++;
        }
        else
        {
          ExpGolombEncode((maxlength), pcCoeff, truebits);
          //assert( (maxlength)>=0);
          ExpGolombEncode(indexlist[x - 1][2], pcCoeff, truebits);
          // if( !bNoResidual)
          // ExpGolombEncode( indexlist[x-1][3],pcCoeff,truebits);
          maxlength = 0;
        }
      }
      ExpGolombEncode((maxlength), pcCoeff, truebits);
      ExpGolombEncode(indexlist[dstindex - 1][2], pcCoeff, truebits);
      //if( !bNoResidual)
      // ExpGolombEncode( indexlist[dstindex-1][3] , pcCoeff, truebits);
    }
  }
  //--------------encode dx and dy -----------
  if (vlcf[0])
  {
    ExpGolombEncode(0, pcCoeff, truebits);
    for (UInt x = 0; x < dstindex; x++)
    {
      ExpGolombEncode((indexlist[x][0]), pcCoeff, truebits);
      ExpGolombEncode((indexlist[x][1]), pcCoeff, truebits);
    }
  }
  else
  {
    ExpGolombEncode(1, pcCoeff, truebits);
    //ExpGolombEncode( (indexlist[0][0]),pcCoeff,truebits);
    //ExpGolombEncode( (indexlist[0][1]),pcCoeff,truebits);
    maxlength = 0;
    for (UInt x = 1; x < dstindex; x++)
    {
      if (indexlist[x][0] == indexlist[x - 1][0] && indexlist[x][1] == indexlist[x - 1][1])
      {
        maxlength++;
      }
      else
      {
        ExpGolombEncode((maxlength), pcCoeff, truebits);
        ExpGolombEncode((indexlist[x - 1][0]), pcCoeff, truebits);
        ExpGolombEncode((indexlist[x - 1][1]), pcCoeff, truebits);
        maxlength = 0;
      }
    }
    ExpGolombEncode((maxlength), pcCoeff, truebits);
    ExpGolombEncode((indexlist[dstindex - 1][0]), pcCoeff, truebits);
    ExpGolombEncode((indexlist[dstindex - 1][1]), pcCoeff, truebits);
  }
  return truebits;/* bits +1*/;
}
/**
 * Hash-based pixel template matching for one CTU.
 *
 * For every pixel of the CTU (per valid component), computes a template hash,
 * searches the global lookup table g_pLookupTable[cId][hash] for the best
 * matching previously-seen template, and uses the matched pixel's
 * reconstruction as the prediction. Prediction and residual are written into
 * the picture-level pred/resi buffers. New or replacement templates are
 * queued in vInsertList and committed to the lookup table at the end of each
 * component pass.
 *
 * \param rpcTempCU  CTU being processed (read-only here despite the reference)
 * \param ppPixel    per-component arrays of Pixel bookkeeping records
 *
 * NOTE(review): find/fx/fy/diff are declared inside "#if PGR_DEBUG" but
 * assigned unconditionally in the best-match branch — this will not compile
 * with PGR_DEBUG disabled; confirm PGR_DEBUG is always defined non-zero.
 */
Void matchTemplate(TComDataCU*& rpcTempCU, Pixel** ppPixel)
{
  // template matching
  UInt uiCUPelX = rpcTempCU->getCUPelX();           // x of upper left corner of the cu
  UInt uiCUPelY = rpcTempCU->getCUPelY();           // y of upper left corner of the cu
  UInt uiMaxCUWidth = rpcTempCU->getSlice()->getSPS()->getMaxCUWidth();    // max cu width
  UInt uiMaxCUHeight = rpcTempCU->getSlice()->getSPS()->getMaxCUHeight();  // max cu height

  // pic
  TComPic* pcPic = rpcTempCU->getPic();
  TComPicYuv* pcPredYuv = pcPic->getPicYuvPred();
  TComPicYuv* pcResiYuv = pcPic->getPicYuvResi();
  UInt uiNumValidCopmonent = pcPic->getNumberValidComponents();
  vector<PixelTemplate> vInsertList;   // templates to (re)insert after the scan
  for (UInt ch = 0; ch < uiNumValidCopmonent; ch++)
  {
    // Per-component debug statistics (averaged over the CTU below).
    int all = 0;
    int average = 0;
    int afind = 0;
    int maxfind = 0, minfind = INT_MAX;
    int ax = 0, ay = 0;
    int adiff = 0;

    ComponentID cId = ComponentID(ch);
    // picture description
    UInt uiStride = pcPredYuv->getStride(cId);      // stride for a certain component
    UInt uiPicWidth = pcPredYuv->getWidth(cId);     // picture width for a certain component
    UInt uiPicHeight = pcPredYuv->getHeight(cId);   // picture height for a certain component
    UInt uiCBWidth = uiMaxCUWidth >> (pcPredYuv->getComponentScaleX(cId));   // code block width for a certain component
    UInt uiCBHeight = uiMaxCUHeight >> (pcPredYuv->getComponentScaleY(cId)); // code block height for a certain component

    // rectangle of the code block (clipped to the picture)
    UInt uiTopX = Clip3((UInt)0, uiPicWidth, uiCUPelX);
    UInt uiTopY = Clip3((UInt)0, uiPicHeight, uiCUPelY);
    UInt uiBottomX = Clip3((UInt)0, uiPicWidth, uiCUPelX + uiCBWidth);
    UInt uiBottomY = Clip3((UInt)0, uiPicHeight, uiCUPelY + uiCBHeight);

    for (UInt uiY = uiTopY; uiY < uiBottomY; uiY++)
    {
      for (UInt uiX = uiTopX; uiX < uiBottomX; uiX++)
      {
        // Map resampled coordinates back to original-picture coordinates.
        UInt uiOrgX, uiOrgY;
        uiOrgX = g_auiRsmpldToOrg[cId][0][uiX];
        uiOrgY = g_auiRsmpldToOrg[cId][1][uiY];

        // template match
        UInt uiHashValue1, uiHashValue2;
        // get hash values
        getHashValue(uiOrgX, uiOrgY, uiPicWidth, ppPixel[cId],uiHashValue1,uiHashValue2);
        Pixel* pCurPixel = ppPixel[cId] + getSerialIndex(uiOrgX, uiOrgY, uiPicWidth);
        //pCurPixel->m_uiHashValue = uiHashValue1;
        assert(uiHashValue1 >= 0 && uiHashValue1 < MAX_PT_NUM);

        // lookup table
        PixelTemplate* pLookupTable = g_pLookupTable[cId][uiHashValue1];
        // number of available template pixels
        UInt uiNumTemplate = getNumTemplate(uiOrgX, uiOrgY, uiPicWidth, ppPixel[cId]);

        // if uiNumTemplate < 1, predict target with default value and do not insert template
        if (uiNumTemplate < 1)
        {
          UInt uiIdx = uiY * uiStride + uiX;
          pcPredYuv->getAddr(cId)[uiIdx] = pCurPixel->m_uiPred;
          pcResiYuv->getAddr(cId)[uiIdx] = pCurPixel->m_iResi = pCurPixel->m_uiOrg - pCurPixel->m_uiPred;
          continue;
        }
        // if lookuptable is empty, predict target with default value and insert template
        if (pLookupTable == NULL)
        {
          // vInsertList.push_back(PixelTemplate(uiOrgX, uiOrgY, uiHashValue1,uiHashValue2,uiNumTemplate,NEW));
          UInt uiIdx = uiY*uiStride + uiX;
          pcPredYuv->getAddr(cId)[uiIdx] = pCurPixel->m_uiPred;
          pcResiYuv->getAddr(cId)[uiIdx] = pCurPixel->m_iResi = pCurPixel->m_uiOrg - pCurPixel->m_uiPred;
          continue;
        }

        MatchMetric mmBestMetric;
        UInt uiListLength = 0;
        PixelTemplate* pBestMatch = NULL;
        PixelTemplate* pPixelTemplate = pLookupTable;
#if PGR_DEBUG
        int length = 0;
        int a = 0;
        int find = 0;
        int fx = 0, fy = 0;
        int diff = 0;
#endif
        UInt uiRemoved = 0;   // candidates flagged TO_BE_REMOVED in this bucket

        // find best matched template
        while (pPixelTemplate != NULL)
        {
          UInt uiCX = pPixelTemplate->m_PX;
          UInt uiCY = pPixelTemplate->m_PY;
          MatchMetric mmTmp;
#if PGR_DEBUG
          length++;
#endif
          tryMatch(uiOrgX, uiOrgY, uiCX, uiCY, mmTmp, uiPicWidth, ppPixel[cId]);
          // set best matched template (ties broken by more valid points)
          if (mmTmp.m_uiAbsDiff < mmBestMetric.m_uiAbsDiff || (mmTmp.m_uiAbsDiff == mmBestMetric.m_uiAbsDiff)&&(mmTmp.m_uiNumValidPoints > mmBestMetric.m_uiNumValidPoints))
          {
            mmBestMetric = mmTmp;
            pBestMatch = pPixelTemplate;
            find = length;
            fx = abs(int(uiOrgX - uiCX)) / 64;
            fy = abs(int(uiOrgY - uiCY)) / 64;
            diff = mmTmp.m_uiAbsDiff;
          }
          // replace useless template
          if (mmTmp.m_uiAbsDiff < INSERT_LIMIT && (uiNumTemplate > pPixelTemplate->m_uiNumTemplate || uiNumTemplate == 21) && pPixelTemplate->m_uiState != TO_BE_REMOVED)
          {
            pPixelTemplate->m_uiState = TO_BE_REMOVED;
            uiRemoved++;
          }
          pPixelTemplate = pPixelTemplate->m_pptNext;
        }

        // predict target with best matched candidate (only if it is reconstructed)
        if (pBestMatch != NULL)
        {
          UInt uiCX = pBestMatch->m_PX;
          UInt uiCY = pBestMatch->m_PY;
          if ((ppPixel[cId] + getSerialIndex(uiCX, uiCY, uiPicWidth))->m_bIsRec)
          {
            pCurPixel->m_mmMatch = mmBestMetric;
            pBestMatch->m_uiNumUsed++;
            Pixel* pRefPixel = ppPixel[cId] + getSerialIndex(mmBestMetric.m_uiX, mmBestMetric.m_uiY, uiPicWidth);
            pCurPixel->m_uiPred = pRefPixel->m_uiReco;                       // prediction
            pCurPixel->m_iResi = pCurPixel->m_uiOrg - pCurPixel->m_uiPred;   // residue
          }
        }

        // insert new template
        if (mmBestMetric.m_uiAbsDiff > INSERT_LIMIT)
          vInsertList.push_back(PixelTemplate(uiOrgX, uiOrgY, uiHashValue1,uiHashValue2,uiNumTemplate,NEW));
        // replace old template
        if(uiNumTemplate < 21 && uiRemoved>0 || uiNumTemplate == 21 && uiRemoved > 1)
          vInsertList.push_back(PixelTemplate(uiOrgX, uiOrgY, uiHashValue1, uiHashValue2, uiNumTemplate, DISPLACE));

        // Write out prediction and residual for this pixel.
        UInt uiIdx = uiY*uiStride + uiX;
        pcPredYuv->getAddr(cId)[uiIdx] = pCurPixel->m_uiPred;
        pcResiYuv->getAddr(cId)[uiIdx] = pCurPixel->m_iResi = pCurPixel->m_uiOrg - pCurPixel->m_uiPred;
#if PGR_DEBUG
        all += length;
        afind += find;
        ax += fx;
        ay += fy;
        adiff += diff;
        minfind = min(minfind, find);
        maxfind = max(maxfind, find);
#endif
      }// end for x
    }// end for y
    //fc.close();
#if PGR_DEBUG
    cout <<"search:" <<all / 4096 << endl;
    cout << "find:" << afind / 4096 << endl;
    cout << "max:" << maxfind << "\t" << "min:" << minfind << endl;
    cout << "x:" << ax/4096 << "\t" << "y:" << ay/4096 << endl;
    cout << "diff:" << adiff / 4096 << endl;
#endif
    // insert template: commit queued inserts/replacements into the lookup table
    for (vector<PixelTemplate>::iterator it = vInsertList.begin(); it != vInsertList.end(); it++)
    {
      UInt uiHashValue = it->m_uiHashValue1;
      if (it->m_uiState == DISPLACE)
      {
        // Unlink and delete every TO_BE_REMOVED node in this hash bucket.
        PixelTemplate* p = g_pLookupTable[cId][uiHashValue];
        PixelTemplate* pre = NULL;
        while (p != NULL)
        {
          if (p->m_uiState == TO_BE_REMOVED)
          {
            PixelTemplate* q = p;
            if (pre == NULL)
            {
              g_pLookupTable[cId][uiHashValue] = p = p->m_pptNext;
            }
            else
            {
              pre->m_pptNext = p = p->m_pptNext;
            }
            delete q;
            q = NULL;
          }
          else
          {
            pre = p;
            p = p->m_pptNext;
          }
        }
      }
      // Prepend the new template to its bucket.
      it->m_uiState = NEW;
      PixelTemplate* pNewTemplate = new PixelTemplate(*it);
      pNewTemplate->m_pptNext = g_pLookupTable[cId][uiHashValue];
      g_pLookupTable[cId][uiHashValue] = pNewTemplate;
    }
    vInsertList.clear();
  }// end for ch
}
/**
 * Derive the per-CTU palette (up to 4 entries) for PGR coding.
 *
 * Builds a histogram of the original samples inside the CTU for each valid
 * component, sorts values by frequency, and inserts the most frequent values
 * that are not already present in the global palette g_ppPalette into
 * g_ppCTUPalette.
 *
 * \param pcCtu CTU whose palette is derived
 *
 * Fixes applied:
 *  - uiMaxCUHeight was initialized from getMaxCUWidth() (copy-paste bug);
 *    now uses getMaxCUHeight().
 *  - The duplicate check compared the not-yet-written g_ppCTUPalette slot
 *    against the global palette; it now compares the candidate value itself.
 */
Void derivePGRPLT(TComDataCU* pcCtu)
{
  TComPic* pcPic = pcCtu->getPic();
  TComPicYuv* pcOrgYuv = pcPic->getPicYuvOrg();
  UInt uiNumValidComponents = pcOrgYuv->getNumberValidComponents();
  UInt uiMaxCUWidth = pcCtu->getSlice()->getSPS()->getMaxCUWidth();
  UInt uiMaxCUHeight = pcCtu->getSlice()->getSPS()->getMaxCUHeight(); // was getMaxCUWidth()

  for (UInt ch = 0; ch < uiNumValidComponents; ch++)
  {
    ComponentID cId = ComponentID(ch);

    UInt uiPicWidth = pcOrgYuv->getWidth(cId);
    UInt uiPicHeight = pcOrgYuv->getHeight(cId);
    UInt uiStride = pcOrgYuv->getStride(cId);
    UInt uiCUPelX = pcCtu->getCUPelX() >> (pcOrgYuv->getComponentScaleX(cId)); // x of upper left corner of the cu
    UInt uiCUPelY = pcCtu->getCUPelY() >> (pcOrgYuv->getComponentScaleY(cId)); // y of upper left corner of the cu
    UInt uiCBWidth = uiMaxCUWidth >> (pcOrgYuv->getComponentScaleX(cId));
    UInt uiCBHeight = uiMaxCUHeight >> (pcOrgYuv->getComponentScaleY(cId));

    // Clip the code block to the picture boundary.
    uiCBWidth = Clip3((UInt)0, uiPicWidth - uiCUPelX, uiCBWidth);
    uiCBHeight = Clip3((UInt)0, uiPicHeight - uiCUPelY, uiCBHeight);

    // statistics: histogram of the 8-bit sample values inside the CTU
    PelCount* pPixelCount[256];
    for (int i = 0; i < 256; i++)
      pPixelCount[i] = new PelCount(i);
    Pel* pOrg = pcOrgYuv->getAddr(cId, pcCtu->getCtuRsAddr());
    for (UInt uiY = 0; uiY < uiCBHeight; uiY++)
    {
      for (UInt uiX = 0; uiX < uiCBWidth; uiX++)
      {
        pPixelCount[pOrg[uiX]]->m_uiCount++;
      }
      pOrg += uiStride;
    }

    // sort by descending frequency
    sort(pPixelCount, pPixelCount + 256, cmpPelCount);

    g_ppCTUPalette[cId].m_uiSize = 0;
    // insert entry: take the most frequent values that are not already in the
    // global palette. At most 4 candidates can be skipped, so i stays < 8.
    for (int i = 0, k = 0; k < 4; i++)
    {
      bool bDuplicate = false;
      for (int j = 0; j < 4; j++)
      {
        // duplicate of a global palette entry — skip the candidate
        if (pPixelCount[i]->m_uiVal == g_ppPalette[cId].m_pEntry[j])
        {
          bDuplicate = true;
          break;
        }
      }
      if (!bDuplicate)
      {
        g_ppCTUPalette[cId].m_pEntry[k++] = pPixelCount[i]->m_uiVal;
        g_ppCTUPalette[cId].m_uiSize++;
      }
    }

    for (int i = 0; i < 256; i++)
      delete pPixelCount[i];
  }
}
/** Analyze source picture and compute local image characteristics used for QP adaptation
 * \param pcEPic Picture object to be analyzed
 * \return Void
 *
 * For each AQ layer, the luma plane is tiled into AQ partitions. Each
 * partition is split into four quadrants; the activity of the partition is
 * 1 + min(quadrant variance), and the layer stores the average activity over
 * all partitions.
 */
Void TEncPreanalyzer::xPreanalyze( TEncPic* pcEPic )
{
  TComPicYuv* pcPicYuv = pcEPic->getPicYuvOrg();
  const Int iWidth = pcPicYuv->getWidth(COMPONENT_Y);
  const Int iHeight = pcPicYuv->getHeight(COMPONENT_Y);
  const Int iStride = pcPicYuv->getStride(COMPONENT_Y);

  for ( UInt d = 0; d < pcEPic->getMaxAQDepth(); d++ )
  {
    const Pel* pLineY = pcPicYuv->getAddr(COMPONENT_Y);
    TEncPicQPAdaptationLayer* pcAQLayer = pcEPic->getAQLayer(d);
    const UInt uiAQPartWidth = pcAQLayer->getAQPartWidth();
    const UInt uiAQPartHeight = pcAQLayer->getAQPartHeight();
    // pcAQU advances one unit per partition in raster order (see inner loop).
    TEncQPAdaptationUnit* pcAQU = pcAQLayer->getQPAdaptationUnit();

    Double dSumAct = 0.0;
    for ( UInt y = 0; y < iHeight; y += uiAQPartHeight )
    {
      // Partitions at the bottom/right borders may be smaller than the nominal size.
      const UInt uiCurrAQPartHeight = min(uiAQPartHeight, iHeight-y);
      for ( UInt x = 0; x < iWidth; x += uiAQPartWidth, pcAQU++ )
      {
        const UInt uiCurrAQPartWidth = min(uiAQPartWidth, iWidth-x);
        const Pel* pBlkY = &pLineY[x];
        // Sum and sum-of-squares for the four quadrants of this partition:
        // [0]=top-left, [1]=top-right, [2]=bottom-left, [3]=bottom-right.
        UInt64 uiSum[4] = {0, 0, 0, 0};
        UInt64 uiSumSq[4] = {0, 0, 0, 0};
        UInt uiNumPixInAQPart = 0;
        UInt by = 0;
        for ( ; by < uiCurrAQPartHeight>>1; by++ )
        {
          UInt bx = 0;
          for ( ; bx < uiCurrAQPartWidth>>1; bx++, uiNumPixInAQPart++ )
          {
            uiSum [0] += pBlkY[bx];
            uiSumSq[0] += pBlkY[bx] * pBlkY[bx];
          }
          for ( ; bx < uiCurrAQPartWidth; bx++, uiNumPixInAQPart++ )
          {
            uiSum [1] += pBlkY[bx];
            uiSumSq[1] += pBlkY[bx] * pBlkY[bx];
          }
          pBlkY += iStride;
        }
        for ( ; by < uiCurrAQPartHeight; by++ )
        {
          UInt bx = 0;
          for ( ; bx < uiCurrAQPartWidth>>1; bx++, uiNumPixInAQPart++ )
          {
            uiSum [2] += pBlkY[bx];
            uiSumSq[2] += pBlkY[bx] * pBlkY[bx];
          }
          for ( ; bx < uiCurrAQPartWidth; bx++, uiNumPixInAQPart++ )
          {
            uiSum [3] += pBlkY[bx];
            uiSumSq[3] += pBlkY[bx] * pBlkY[bx];
          }
          pBlkY += iStride;
        }
        // NOTE: uiNumPixInAQPart counts ALL pixels of the partition, not per
        // quadrant, so each quadrant's "variance" is scaled by the same total —
        // preserved as-is since downstream tuning depends on it.
        Double dMinVar = DBL_MAX;
        for ( Int i=0; i<4; i++)
        {
          const Double dAverage = Double(uiSum[i]) / uiNumPixInAQPart;
          const Double dVariance = Double(uiSumSq[i]) / uiNumPixInAQPart - dAverage * dAverage;
          dMinVar = min(dMinVar, dVariance);
        }
        const Double dActivity = 1.0 + dMinVar;
        pcAQU->setActivity( dActivity );
        dSumAct += dActivity;
      }
      pLineY += iStride * uiCurrAQPartHeight;
    }
    const Double dAvgAct = dSumAct / (pcAQLayer->getNumAQPartInWidth() * pcAQLayer->getNumAQPartInHeight());
    pcAQLayer->setAvgActivity( dAvgAct );
  }
}
/**
 * Stand-alone YUV bit-depth / chroma-format-aware conversion tool (HM API).
 *
 * Reads a raw YUV file at the given geometry, converts between InputBitDepth
 * and OutputBitDepth via TVideoIOYuv's internal scaling, and writes the
 * result. Processing stops at EOF or after NumFrames frames.
 *
 * \param argc argument count
 * \param argv argument vector
 * \return EXIT_SUCCESS on completion, EXIT_FAILURE for missing/invalid options
 */
Int main(Int argc, const char** argv)
{
  Bool do_help;
  string filename_in, filename_out;
  UInt width, height;
  UInt bitdepth_in, bitdepth_out, chromaFormatRaw;
  UInt num_frames;
  UInt num_frames_skip;

  po::Options opts;
  opts.addOptions()
  ("help", do_help, false, "this help text")
  ("InputFile,i", filename_in, string(""), "input file to convert")
  ("OutputFile,o", filename_out, string(""), "output file")
  ("SourceWidth", width, 0u, "source picture width")
  ("SourceHeight", height, 0u, "source picture height")
  ("InputBitDepth", bitdepth_in, 8u, "bit-depth of input file")
  ("OutputBitDepth", bitdepth_out, 8u, "bit-depth of output file")
  ("ChromaFormat", chromaFormatRaw, 420u, "chroma format. 400, 420, 422 or 444 only")
  ("NumFrames", num_frames, 0xffffffffu, "number of frames to process")
  ("FrameSkip,-fs", num_frames_skip, 0u, "Number of frames to skip at start of input YUV")
  ;
  po::setDefaults(opts);
  po::scanArgv(opts, argc, argv);

  if (argc == 1 || do_help)
  {
    /* argc == 1: no options have been specified */
    po::doHelp(cout, opts);
    return EXIT_FAILURE;
  }

  // Map the numeric command-line value to the ChromaFormat enum.
  ChromaFormat chromaFormatIDC=CHROMA_420;
  switch (chromaFormatRaw)
  {
    case 400: chromaFormatIDC=CHROMA_400; break;
    case 420: chromaFormatIDC=CHROMA_420; break;
    case 422: chromaFormatIDC=CHROMA_422; break;
    case 444: chromaFormatIDC=CHROMA_444; break;
    default:
      fprintf(stderr, "Bad chroma format string\n");
      return EXIT_FAILURE;
  }

  TVideoIOYuv input;
  TVideoIOYuv output;

  // Same bit depth for both channel types (luma/chroma).
  Int inputBitDepths [MAX_NUM_CHANNEL_TYPE];
  Int outputBitDepths[MAX_NUM_CHANNEL_TYPE];
  for (UInt channelTypeIndex = 0; channelTypeIndex < MAX_NUM_CHANNEL_TYPE; channelTypeIndex++)
  {
    inputBitDepths [channelTypeIndex] = bitdepth_in;
    outputBitDepths[channelTypeIndex] = bitdepth_out;
  }

  input.open((char*)filename_in.c_str(), false, inputBitDepths, inputBitDepths, outputBitDepths);
  output.open((char*)filename_out.c_str(), true, outputBitDepths, outputBitDepths, outputBitDepths);

  input.skipFrames(num_frames_skip, width, height, chromaFormatIDC);

  TComPicYuv frame;
  frame.create( width, height, chromaFormatIDC, 1, 1, 0 );
  Int pad[2] = {0, 0};
  TComPicYuv cPicYuvTrueOrg;
  cPicYuvTrueOrg.create( width, height, chromaFormatIDC, 1, 1, 0 );

  UInt num_frames_processed = 0;
  while (!input.isEof())
  {
    if (! input.read(&frame, &cPicYuvTrueOrg, IPCOLOURSPACE_UNCHANGED, pad))
    {
      break;
    }
#if 0
    /* Debug aid: blank the luma plane and set a single marker pixel.
     * Fixed: the inner loop ran "x < height" instead of "x < width", and
     * getStride() was called without the ComponentID this TComPicYuv API
     * requires (it would not compile if this block were enabled). */
    Pel* img = frame.getAddr(COMPONENT_Y);
    for (Int y = 0; y < height; y++)
    {
      for (Int x = 0; x < width; x++)
        img[x] = 0;
      img += frame.getStride(COMPONENT_Y);
    }
    img = frame.getAddr(COMPONENT_Y);
    img[0] = 1;
#endif
    output.write(&frame, IPCOLOURSPACE_UNCHANGED);
    num_frames_processed++;
    if (num_frames_processed == num_frames)
      break;
  }

  input.close();
  output.close();
  cPicYuvTrueOrg.destroy();
  // Release the main frame buffer as well (was previously leaked at exit).
  frame.destroy();

  return EXIT_SUCCESS;
}
/**
 * Field-coding entry point: split each incoming frame into two fields,
 * hand them to the encoder, and compress a GOP when enough pictures arrived.
 *
 * \param pcPicYuvOrg       incoming original frame (NULL when only flushing)
 * \param pcPicYuvTrueOrg   original frame before any pre-encode colour conversion
 * \param snrCSC            colour-space conversion used for PSNR reporting
 * \param rcListPicYuvRecOut list receiving reconstructed output buffers
 * \param accessUnitsOut    list receiving coded access units
 * \param iNumEncoded       out: number of pictures encoded by this call
 * \param isTff             true = top-field-first source
 */
Void TEncTop::encode(Bool flush, TComPicYuv* pcPicYuvOrg, TComPicYuv* pcPicYuvTrueOrg, const InputColourSpaceConversion snrCSC, TComList<TComPicYuv*>& rcListPicYuvRecOut, std::list<AccessUnit>& accessUnitsOut, Int& iNumEncoded, Bool isTff)
{
  iNumEncoded = 0;

  for (Int fieldNum=0; fieldNum<2; fieldNum++)
  {
    if (pcPicYuvOrg)
    {
      /* -- field initialization -- */
      const Bool isTopField=isTff==(fieldNum==0);

      TComPic *pcField;
      xGetNewPicBuffer( pcField );
      pcField->setReconMark (false); // where is this normally?

      if (fieldNum==1) // where is this normally?
      {
        TComPicYuv* rpcPicYuvRec;
        // org. buffer
        if ( rcListPicYuvRecOut.size() >= (UInt)m_iGOPSize+1 ) // need to maintain field 0 in list of RecOuts while processing field 1. Hence +1 on m_iGOPSize.
        {
          rpcPicYuvRec = rcListPicYuvRecOut.popFront();
        }
        else
        {
          rpcPicYuvRec = new TComPicYuv;
          rpcPicYuvRec->create( m_iSourceWidth, m_iSourceHeight, m_chromaFormatIDC, m_maxCUWidth, m_maxCUHeight, m_maxTotalCUDepth, true);
        }
        rcListPicYuvRecOut.pushBack( rpcPicYuvRec );
      }

      pcField->getSlice(0)->setPOC( m_iPOCLast ); // superfluous?
      pcField->getPicYuvRec()->setBorderExtension(false);// where is this normally?

      pcField->setTopField(isTopField); // interlaced requirement

      // De-interleave each component plane of both source pictures into the field buffers.
      for (UInt componentIndex = 0; componentIndex < pcPicYuvOrg->getNumberValidComponents(); componentIndex++)
      {
        const ComponentID component = ComponentID(componentIndex);
        const UInt stride = pcPicYuvOrg->getStride(component);

        separateFields((pcPicYuvOrg->getBuf(component) + pcPicYuvOrg->getMarginX(component) + (pcPicYuvOrg->getMarginY(component) * stride)),
                       pcField->getPicYuvOrg()->getAddr(component),
                       pcPicYuvOrg->getStride(component),
                       pcPicYuvOrg->getWidth(component),
                       pcPicYuvOrg->getHeight(component),
                       isTopField);

        // NOTE(review): this call reuses `stride` (from pcPicYuvOrg) for the
        // TrueOrg margin offset — valid only while both pictures share the same
        // stride; confirm they are always created with identical geometry.
        separateFields((pcPicYuvTrueOrg->getBuf(component) + pcPicYuvTrueOrg->getMarginX(component) + (pcPicYuvTrueOrg->getMarginY(component) * stride)),
                       pcField->getPicYuvTrueOrg()->getAddr(component),
                       pcPicYuvTrueOrg->getStride(component),
                       pcPicYuvTrueOrg->getWidth(component),
                       pcPicYuvTrueOrg->getHeight(component),
                       isTopField);
      }

      // compute image characteristics
      if ( getUseAdaptiveQP() )
      {
        m_cPreanalyzer.xPreanalyze( dynamic_cast<TEncPic*>( pcField ) );
      }
    }

    // Compress a GOP when flushing the second field, at the very first picture,
    // or once a full GOP of pictures has been received.
    if ( m_iNumPicRcvd && ((flush&&fieldNum==1) || (m_iPOCLast/2)==0 || m_iNumPicRcvd==m_iGOPSize ) )
    {
      // compress GOP
      m_cGOPEncoder.compressGOP(m_iPOCLast, m_iNumPicRcvd, m_cListPic, rcListPicYuvRecOut, accessUnitsOut, true, isTff, snrCSC, m_printFrameMSE,&m_cSearch);

      iNumEncoded += m_iNumPicRcvd;
      m_uiNumAllPicCoded += m_iNumPicRcvd;
      m_iNumPicRcvd = 0;
    }
  }
}