/** * @author JIA Pei, YAO Wei * @version 2010-05-20 * @brief Additive ASM ND Profiles Fitting, for dynamic image sequence * @param iImg Input - image to be fitted * @param ioShape Input and output - the shape * @param oImg Output - the fitted image * @param dim Input - profile dimension, 1, 2, 4 or 8 * @param epoch Input - the iteration epoch * @param pyramidlevel Input - pyramid level, 1, 2, 3 or 4 at most * @note Refer to "AAM Revisited, page 34, figure 13", particularly, those steps. */ float VO_FittingASMNDProfiles::VO_ASMNDProfileFitting( const cv::Mat& iImg, VO_Shape& ioShape, cv::Mat& oImg, unsigned int epoch, unsigned int pyramidlevel, unsigned int dim) { this->m_VOFittingShape.clone(ioShape); double t = (double)cv::getTickCount(); this->m_iNbOfPyramidLevels = pyramidlevel; this->SetProcessingImage(iImg, this->m_VOASMNDProfile); this->m_iIteration = 0; // Get m_MatModelAlignedShapeParam and m_fScale, m_vRotateAngles, m_MatCenterOfGravity this->m_VOASMNDProfile->VO_CalcAllParams4AnyShapeWithConstrain( this->m_VOFittingShape, this->m_MatModelAlignedShapeParam, this->m_fScale, this->m_vRotateAngles, this->m_MatCenterOfGravity); this->m_VOFittingShape.ConstrainShapeInImage(this->m_ImageProcessing); // Explained by YAO Wei, 2008-2-9. // Scale this->m_VOFittingShape, so face width is a constant StdFaceWidth. 
//this->m_fScale2 = this->m_VOASMNDProfile->m_VOReferenceShape.GetWidth() / this->m_VOFittingShape.GetWidth(); this->m_fScale2 = this->m_VOASMNDProfile->m_VOReferenceShape.GetCentralizedShapeSize() / this->m_VOFittingShape.GetCentralizedShapeSize(); this->m_VOFittingShape *= this->m_fScale2; int w = (int)(iImg.cols*this->m_fScale2); int h = (int)(iImg.rows*this->m_fScale2); cv::Mat SearchImage = cv::Mat(cv::Size( w, h ), this->m_ImageProcessing.type(), this->m_ImageProcessing.channels() ); float PyrScale = pow(2.0f, (float) (this->m_iNbOfPyramidLevels-1.0f) ); this->m_VOFittingShape /= PyrScale; const int nQualifyingDisplacements = (int)(this->m_VOASMNDProfile->m_iNbOfPoints * VO_Fitting2DSM::pClose); // for each level in the image pyramid for (int iLev = this->m_iNbOfPyramidLevels-1; iLev >= 0; iLev--) { // Set image roi, instead of cvCreateImage a new image to speed up cv::Mat siROI = SearchImage(cv::Rect(0, 0, (int)(w/PyrScale), (int)(h/PyrScale) ) ); cv::resize(this->m_ImageProcessing, siROI, siROI.size()); this->m_VOEstimatedShape = this->m_VOFittingShape; this->PyramidFit( this->m_VOEstimatedShape, SearchImage, iLev, VO_Fitting2DSM::pClose, epoch, dim); this->m_VOFittingShape = this->m_VOEstimatedShape; if (iLev != 0) { PyrScale /= 2.0f; this->m_VOFittingShape *= 2.0f; } } // Explained by YAO Wei, 2008-02-09. // this->m_fScale2 back to original size this->m_VOFittingShape /= this->m_fScale2; ioShape.clone(this->m_VOFittingShape); VO_Fitting2DSM::VO_DrawMesh(ioShape, this->m_VOASMNDProfile, oImg); t = ((double)cv::getTickCount() - t )/ (cv::getTickFrequency()*1000.); printf("MRASM fitting time cost: %.2f millisec\n", t); this->m_fFittingTime = t; return t; }
/**
 * @author      JIA Pei
 * @version     2010-05-20
 * @brief       Basic AAM Fitting, for dynamic image sequence.
 *              Iteratively refines appearance (c) and pose (t) parameters with
 *              damped updates, accepting a step only if the texture error drops.
 * @param       iImg        Input - image to be fitted
 * @param       ioShape     Input and Output - the fitted shape
 * @param       oImg        Output - the fitted image (mesh drawn on it)
 * @param       epoch       Input - the iteration epoch
 * @return      float       fitting time cost, in milliseconds
 */
float VO_FittingAAMBasic::VO_BasicAAMFitting(const Mat& iImg, VO_Shape& ioShape, Mat& oImg, unsigned int epoch)
{
    this->m_VOFittingShape.clone(ioShape);
    double t = (double)cvGetTickCount();

    this->SetProcessingImage(iImg, this->m_VOAAMBasic);
    this->m_iIteration = 0;

    // Get m_MatModelAlignedShapeParam and m_fScale, m_vRotateAngles, m_MatCenterOfGravity
    this->m_VOAAMBasic->VO_CalcAllParams4AnyShapeWithConstrain(
        this->m_VOFittingShape,
        this->m_MatModelAlignedShapeParam,
        this->m_fScale,
        this->m_vRotateAngles,
        this->m_MatCenterOfGravity);
    this->m_VOFittingShape.ConstrainShapeInImage(this->m_ImageProcessing);

    // Get m_MatModelNormalizedTextureParam: sample the image texture under the current shape
    VO_TextureModel::VO_LoadOneTextureFromShape(
        this->m_VOFittingShape,
        this->m_ImageProcessing,
        this->m_vTriangle2D,
        this->m_vPointWarpInfo,
        this->m_VOFittingTexture );
    // estimate the texture model parameters
    this->m_VOAAMBasic->VO_CalcAllParams4AnyTexture(this->m_VOFittingTexture, this->m_MatModelNormalizedTextureParam);

    // Calculate m_MatCurrentC from the shape and texture parameters
    this->m_VOAAMBasic->VO_SParamTParamProjectToCParam(
        this->m_MatModelAlignedShapeParam,
        this->m_MatModelNormalizedTextureParam,
        this->m_MatCurrentC );
    // Set m_MatCurrentT, m_MatDeltaT, m_MatEstimatedT, m_MatDeltaC, m_MatEstimatedC, etc. to zero
    this->m_MatCurrentT   = Mat_<float>::zeros(this->m_MatCurrentT.size());
    this->m_MatDeltaT     = Mat_<float>::zeros(this->m_MatDeltaT.size());
    this->m_MatEstimatedT = Mat_<float>::zeros(this->m_MatEstimatedT.size());
    this->m_MatDeltaC     = Mat_<float>::zeros(this->m_MatDeltaC.size());
    this->m_MatEstimatedC = Mat_<float>::zeros(this->m_MatEstimatedC.size());

    //////////////////////////////////////////////////////////////////////////////////////////////////////
    // Explained by JIA Pei. 2010-05-20
    // For the first round, this->m_VOFittingShape should not change after calling "VO_CParamTParam2FittingShape"
    // But this is not the case. why?
    // Before calling VO_CParamTParam2FittingShape, this->m_VOFittingShape is calculated by
    // a) assigning m_VOTemplateAlignedShape
    // b) align to the real-size face using detected eyes and mouth
    // c) constrain the shape within the image
    // d) constrain the shape parameters and calculate those rigid transform parameters
    // cout << this->m_VOFittingShape << endl;
    //////////////////////////////////////////////////////////////////////////////////////////////////////

    // Estimate m_VOFittingShape and m_VOFittingTexture
    this->VO_CParamTParam2FittingShape(
        this->m_MatCurrentC,
        this->m_MatCurrentT,
        this->m_VOModelNormalizedTexture,
        this->m_VOFittingShape,
        this->m_fScale,
        this->m_vRotateAngles,
        this->m_MatCenterOfGravity );
    // Remember to call ConstrainShapeInImage() whenever you update m_VOFittingShape
    this->m_VOFittingShape.ConstrainShapeInImage(this->m_ImageProcessing);

    //////////////////////////////////////////////////////////////////////////////////////////////////////
    // When calling VO_CParamTParam2FittingShape, this->m_VOFittingShape is calculated by
    // a) c parameters to reconstruct shape parameters
    // b) shape parameters to reconstruct shape
    // c) align to the real-size face by global shape normalization
    // cout << this->m_VOFittingShape << endl;
    //////////////////////////////////////////////////////////////////////////////////////////////////////

    // Initial texture error between the image (under the current shape) and the model texture
    this->m_E_previous = this->m_E = this->VO_CalcErrorImage(
        this->m_ImageProcessing,
        this->m_VOFittingShape,
        this->m_VOModelNormalizedTexture,
        this->m_VOTextureError);

    do
    {
        // Snapshot the rigid-transform parameters for this iteration's trial updates
        float estScale = this->m_fScale;
        vector<float> estRotateAngles = this->m_vRotateAngles;
        Mat_<float> estCOG = this->m_MatCenterOfGravity.clone();
        bool cBetter = false;
        bool poseBetter = false;

        /** First shape parameters, c parameters. Refer to equation (9.3)
         *  Cootes "Statistical Model of Appearance for Computer Vision" */
        cv::gemm(this->m_VOTextureError.GetTheTextureInARow(), this->m_VOAAMBasic->m_MatRc, -1, Mat(), 0.0, this->m_MatDeltaC, GEMM_2_T);

        // damp -- C: try each damping factor in turn, accept the first that improves the error
        for(unsigned int i = 0; i < k_values.size(); i++)
        {
            // make damped c prediction
            cv::scaleAdd(this->m_MatDeltaC, k_values[i], this->m_MatCurrentC, this->m_MatEstimatedC);

            // make sure m_MatEstimatedC are constrained
            this->m_VOAAMBasic->VO_AppearanceParameterConstraint(this->m_MatEstimatedC);
            this->VO_CParamTParam2FittingShape(
                this->m_MatEstimatedC,
                this->m_MatCurrentT,
                this->m_VOModelNormalizedTexture,
                this->m_VOEstimatedShape,
                estScale,
                estRotateAngles,
                estCOG);
            // Skip this damping factor if the predicted shape leaves the image
            if ( !VO_ShapeModel::VO_IsShapeInsideImage(this->m_VOEstimatedShape, this->m_ImageProcessing) )
                continue;
            else
                this->m_E = this->VO_CalcErrorImage(
                    this->m_ImageProcessing,
                    this->m_VOEstimatedShape,
                    this->m_VOModelNormalizedTexture,
                    this->m_VOEstimatedTextureError);

            // Accept the damped c step only if the texture error decreased
            if (this->m_E < this->m_E_previous)
            {
                this->m_MatEstimatedC.copyTo(this->m_MatCurrentC);
                this->m_VOFittingShape.clone(this->m_VOEstimatedShape);
                this->m_VOTextureError.clone(this->m_VOEstimatedTextureError);
                this->m_E_previous = this->m_E;
                cBetter = true;
                this->m_fScale = estScale;
                this->m_vRotateAngles = estRotateAngles;
                this->m_MatCenterOfGravity = estCOG.clone();
                break;
            }
        }

        /** Second pose, t parameters. Refer to equation (9.3)
         *  Cootes "Statistical Model of Appearance for Computer Vision" */
        cv::gemm(this->m_VOTextureError.GetTheTextureInARow(), this->m_VOAAMBasic->m_MatRt, -1, Mat(), 0, this->m_MatDeltaT, GEMM_2_T);

        // damp -- T: same damped line search as for c, but over pose parameters
        for(unsigned int i = 0; i < k_values.size(); i++)
        {
            // make damped c/pose prediction
            cv::scaleAdd(this->m_MatDeltaT, k_values[i], this->m_MatCurrentT, this->m_MatEstimatedT);
            this->VO_CParamTParam2FittingShape(
                this->m_MatCurrentC,
                this->m_MatEstimatedT,
                this->m_VOModelNormalizedTexture,
                this->m_VOEstimatedShape,
                estScale,
                estRotateAngles,
                estCOG);
            if ( !VO_ShapeModel::VO_IsShapeInsideImage(this->m_VOEstimatedShape, this->m_ImageProcessing) )
                continue;
            else
                this->m_E = this->VO_CalcErrorImage(
                    this->m_ImageProcessing,
                    this->m_VOEstimatedShape,
                    this->m_VOModelNormalizedTexture,
                    this->m_VOEstimatedTextureError);

            if (this->m_E < this->m_E_previous)
            {
                // Since m_fScale, m_vRotateAngles and m_MatCenterOfGravity have been updated,
                // m_MatCurrentT should be assigned to 0 now!
                this->m_MatCurrentT = Mat_<float>::zeros(this->m_MatCurrentT.size());
                // this->m_MatEstimatedT.copyTo(this->m_MatCurrentT);
                this->m_VOFittingShape.clone(this->m_VOEstimatedShape);
                this->m_VOTextureError.clone(this->m_VOEstimatedTextureError);
                this->m_E_previous = this->m_E;
                poseBetter = true;
                this->m_fScale = estScale;
                this->m_vRotateAngles = estRotateAngles;
                this->m_MatCenterOfGravity = estCOG.clone();
                break;
            }
        }

        // Publish the improved shape; stop as soon as neither c nor t update helped
        if( cBetter || poseBetter)
        {
            ioShape.clone(this->m_VOFittingShape);
        }
        else
            break;

        ++this->m_iIteration;

    }while( ( fabs(this->m_E) > FLT_EPSILON ) && (this->m_iIteration < epoch)/* && (cv::norm(this->m_MatDeltaC) > FLT_EPSILON) */ );

    // Legacy cvGetTickFrequency() returns ticks per microsecond, so /1000 yields milliseconds
    t = ((double)cvGetTickCount() - t )/ (cvGetTickFrequency()*1000.);
    cout << "Basic fitting time cost: " << t << " millisec" << endl;
    this->m_fFittingTime = t;

    VO_Fitting2DSM::VO_DrawMesh(ioShape, this->m_VOAAMBasic, oImg);

    return t;
}
/**
 * @author      JIA Pei
 * @version     2010-05-20
 * @brief       CMU ICIA AAM Fitting, for dynamic image sequence.
 *              Inverse Compositional Image Alignment: iterates warp-parameter
 *              updates, accepting each step only if the texture error decreases.
 * @param       iImg        Input - image to be fitted
 * @param       ioShape     Input and Output - the fitted shape
 * @param       oImg        Output - the fitted image (mesh drawn on it)
 * @param       epoch       Input - the iteration epoch
 * @return      float       fitting time cost, in milliseconds
 */
float VO_FittingAAMInverseIA::VO_ICIAAAMFitting(const Mat& iImg, VO_Shape& ioShape, Mat& oImg, unsigned int epoch)
{
    this->m_VOFittingShape.clone(ioShape);
    this->m_VOEstimatedShape.clone(this->m_VOFittingShape);
    double t = (double)cvGetTickCount();

    this->SetProcessingImage(iImg, this->m_VOAAMInverseIA);
    this->m_iIteration = 0;

    // Get m_MatCurrentP and m_MatCurrentQ
    this->m_VOAAMInverseIA->VO_CalcAllParams4AnyShapeWithConstrain(
        this->m_VOFittingShape,
        this->m_MatCurrentP,
        this->m_fScale,
        this->m_vRotateAngles,
        this->m_MatCenterOfGravity);
    this->m_VOFittingShape.ConstrainShapeInImage(this->m_ImageProcessing);
    // Zero-initialize all incremental and current warp-parameter matrices
    this->m_MatDeltaP   = Mat_<float>::zeros(this->m_MatDeltaP.size());
    this->m_MatDeltaQ   = Mat_<float>::zeros(this->m_MatDeltaQ.size());
    this->m_MatCurrentQ = Mat_<float>::zeros(this->m_MatCurrentQ.size());
    this->m_MatDeltaPQ  = Mat_<float>::zeros(this->m_MatDeltaPQ.size());

    // Step (1) Warp I with W(x;p) followed by N(x;q) to compute I(N(W(x;p);q))
    this->VO_PParamQParam2FittingShape(
        this->m_MatCurrentP,
        this->m_MatCurrentQ,
        this->m_VOFittingShape,
        this->m_fScale,
        this->m_vRotateAngles,
        this->m_MatCenterOfGravity );
    this->m_VOFittingShape.ConstrainShapeInImage(this->m_ImageProcessing);

    // Step (2) Compute the error image I(N(W(x;p);q))-A0(x)
    this->m_E_previous = this->m_E = this->VO_CalcErrorImage(
        this->m_ImageProcessing,
        this->m_VOFittingShape,
        this->m_VOTemplateNormalizedTexture,
        this->m_VOTextureError);

    do
    {
        ++this->m_iIteration;

        // Step (7) -- a bit modification: one precomputed matrix product
        // yields the stacked parameter update [DeltaQ | DeltaP]
        cv::gemm(this->m_VOTextureError.GetTheTextureInARow(), this->m_VOAAMInverseIA->m_MatICIAPreMatrix, -1, Mat(), 0, this->m_MatDeltaPQ, GEMM_2_T);

        // Step (8) -- a bit modification. Get DeltaP DeltaQ respectively
        // (DeltaQ occupies the leading columns of m_MatDeltaPQ, DeltaP the rest)
        this->m_MatDeltaQ = this->m_MatDeltaPQ(Rect( 0, 0, this->m_MatDeltaQ.cols, 1));
        this->m_MatDeltaP = this->m_MatDeltaPQ(Rect( this->m_MatDeltaQ.cols, 0, this->m_MatDeltaP.cols, 1));

        // Step (9) -- CMU Inverse Compositional: compose the inverted incremental
        // warp with the current shape to produce the estimated shape
        this->VO_CMUInverseCompositional( this->m_MatDeltaP, this->m_MatDeltaQ, this->m_VOFittingShape, this->m_VOEstimatedShape );

        // Ensure Inverse Compositional still satisfies global shape constraints
        this->m_VOAAMInverseIA->VO_CalcAllParams4AnyShapeWithConstrain(
            this->m_VOEstimatedShape,
            this->m_MatEstimatedP,
            this->m_fScale,
            this->m_vRotateAngles,
            this->m_MatCenterOfGravity);
        this->m_VOEstimatedShape.ConstrainShapeInImage(this->m_ImageProcessing);

        // Re-evaluate the texture error for the estimated shape
        this->m_E = this->VO_CalcErrorImage(
            this->m_ImageProcessing,
            this->m_VOEstimatedShape,
            this->m_VOTemplateNormalizedTexture,
            this->m_VOEstimatedTextureError);

        // Accept the step only if the error decreased; otherwise stop iterating
        if (this->m_E < this->m_E_previous)
        {
            // Unlike what's happening in Basic AAM,
            // since m_fScale, m_vRotateAngles and m_MatCenterOfGravity have not been updated in ICIA,
            // m_MatCurrentT should not be assigned to 0 now!
            // this->m_MatCurrentQ = Mat_<float>::zeros(this->m_MatCurrentQ.size());
            this->m_VOFittingShape.clone(this->m_VOEstimatedShape);
            // Refresh m_MatCurrentP (and the rigid transform) from the accepted shape
            this->m_VOAAMInverseIA->VO_CalcAllParams4AnyShapeWithConstrain(
                this->m_VOFittingShape,
                this->m_MatCurrentP,
                this->m_fScale,
                this->m_vRotateAngles,
                this->m_MatCenterOfGravity);
            this->m_VOFittingShape.ConstrainShapeInImage(this->m_ImageProcessing);
            this->m_VOTextureError.clone(this->m_VOEstimatedTextureError);
            this->m_E_previous = this->m_E;
        }
        else
            break;

    }while( ( fabs(this->m_E) > FLT_EPSILON ) && ( this->m_iIteration < epoch ) );

    VO_Fitting2DSM::VO_DrawMesh(this->m_VOFittingShape, this->m_VOAAMInverseIA, oImg);

    // Recalculate all parameters finally, this is also optional.
    this->m_VOAAMInverseIA->VO_CalcAllParams4AnyShapeWithConstrain(
        this->m_VOFittingShape,
        this->m_MatCurrentP,
        this->m_fScale,
        this->m_vRotateAngles,
        this->m_MatCenterOfGravity );

    // Step (10) (Option step), Post-computation. Get m_MatModelNormalizedTextureParam
    VO_TextureModel::VO_LoadOneTextureFromShape(
        this->m_VOFittingShape,
        this->m_ImageProcessing,
        this->m_vTriangle2D,
        this->m_vPointWarpInfo,
        this->m_VOFittingTexture );
    // estimate the texture model parameters
    this->m_VOAAMInverseIA->VO_CalcAllParams4AnyTexture(this->m_VOFittingTexture, this->m_MatModelNormalizedTextureParam);

    ioShape.clone(this->m_VOFittingShape);

    // Legacy cvGetTickFrequency() returns ticks per microsecond, so /1000 yields milliseconds
    t = ((double)cvGetTickCount() - t )/ (cvGetTickFrequency()*1000.);
    cout << "ICIA AAM fitting time cost: " << t << " millisec" << endl;
    this->m_fFittingTime = t;

    return t;
}