/**
 * @author     JIA Pei
 * @version    2010-06-07
 * @brief      Constrain all points respectively, each within its own normalized ellipse
 * @param      ioShape     	Input and Output - the input and output shape
*/
void VO_Point2DDistributionModel::VO_ConstrainAllPoints(VO_Shape& ioShape)
{
    unsigned int NbOfPoints = ioShape.GetNbOfPoints();
    Point2f pt;

    for(unsigned int i = 0; i < NbOfPoints; i++)
    {
        pt = ioShape.GetA2DPoint(i);
        VO_Point2DDistributionModel::VO_ConstrainSinglePoint( pt, this->m_VONormalizedEllipses[i] );
        ioShape.SetA2DPoint(pt, i);
    }
}
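// The library's VO_ConstrainSinglePoint is not shown in this listing. Below is
// a minimal, hypothetical sketch of what a per-point ellipse constraint could
// look like, assuming each normalized ellipse exposes a center, two semi-axes
// (a, b) and an orientation theta -- the EllipseSketch struct and every name
// here are illustrative, not the library's API (assumes <cmath> and the
// OpenCV headers). A point outside the ellipse is pulled back onto the
// boundary along the ray from the ellipse center.
struct EllipseSketch
{
    cv::Point2f center;     // ellipse center
    float a, b;             // semi-axes
    float theta;            // orientation in radians
};

static void ConstrainPointToEllipseSketch(cv::Point2f& pt, const EllipseSketch& e)
{
    // express the point in the ellipse's own coordinate frame
    float dx =  std::cos(e.theta)*(pt.x - e.center.x) + std::sin(e.theta)*(pt.y - e.center.y);
    float dy = -std::sin(e.theta)*(pt.x - e.center.x) + std::cos(e.theta)*(pt.y - e.center.y);
    float k = dx*dx/(e.a*e.a) + dy*dy/(e.b*e.b);    // k <= 1 means the point is inside
    if (k > 1.0f)
    {
        float s = 1.0f / std::sqrt(k);              // shrink onto the ellipse boundary
        dx *= s;
        dy *= s;
        pt.x = e.center.x + std::cos(e.theta)*dx - std::sin(e.theta)*dy;
        pt.y = e.center.y + std::sin(e.theta)*dx + std::cos(e.theta)*dy;
    }
}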
float CRecognitionAlgs::CalcFaceYaw(const vector<float>& iLine,
                                    const VO_Shape& iShape,
                                    const VO_FaceParts& iFaceParts)
{
    float yaw = 0.0f;
    int dim = iShape.GetNbOfDim();

    // Theoretically, using the eye corners would be correct, but it is not stable in practice. Therefore, we accumulate the distances of the left-side and right-side face points instead.
    ///////////////////////////////////////////////////////////////////////////////
    //     float leftDist = 0.0f, rightDist = 0.0f;    
    //     vector<unsigned int> eyeCornerPoints = iFaceParts.GetEyeCornerPoints().GetIndexes();
    //     Point2f leftmostEyeCorner = Point2f(FLT_MAX, 0.0f);
    //     Point2f rightmostEyeCorner = Point2f(0.0f, 0.0f);
    // 
    //     for(unsigned int i = 0; i < eyeCornerPoints.size(); ++i)
    //     {
    //         if(leftmostEyeCorner.x > iShape.GetAShape(dim*eyeCornerPoints[i]) )
    //         {
    //             leftmostEyeCorner.x = iShape.GetAShape(dim*eyeCornerPoints[i]);
    //             leftmostEyeCorner.y = iShape.GetAShape(dim*eyeCornerPoints[i]+1);
    //         }
    //         if(rightmostEyeCorner.x < iShape.GetAShape(dim*eyeCornerPoints[i]) )
    //         {
    //             rightmostEyeCorner.x = iShape.GetAShape(dim*eyeCornerPoints[i]);
    //             rightmostEyeCorner.y = iShape.GetAShape(dim*eyeCornerPoints[i]+1);
    //         }
    //     }
    //     leftDist = cvDistFromAPoint2ALine2D(leftmostEyeCorner,  iLine);
    //     rightDist = cvDistFromAPoint2ALine2D(rightmostEyeCorner,  iLine);
    //     float r = leftDist/rightDist;
    // Refer to my PhD dissertation. Chapter 4
    //     yaw = atan ( ( 0.65*(r-1) ) / ( 0.24 * (r+1) ) ) * 180.0f / CV_PI;
    ///////////////////////////////////////////////////////////////////////////////

    float leftDist = 0.0f, rightDist = 0.0f;
    vector<unsigned int> leftSidePoints = iFaceParts.VO_GetOneFacePart(VO_FacePart::LEFTSIDEPOINTS).GetIndexes();
    vector<unsigned int> rightSidePoints = iFaceParts.VO_GetOneFacePart(VO_FacePart::RIGHTSIDEPOINTS).GetIndexes();
    for(unsigned int i = 0; i < leftSidePoints.size(); ++i)
    {
        leftDist += cvDistFromAPoint2ALine2D(Point2f(iShape.GetAShape(dim*leftSidePoints[i]), iShape.GetAShape(dim*leftSidePoints[i]+1)),  iLine);
    }
    for(unsigned int i = 0; i < rightSidePoints.size(); ++i)
    {
        rightDist += cvDistFromAPoint2ALine2D(Point2f(iShape.GetAShape(dim*rightSidePoints[i]), iShape.GetAShape(dim*rightSidePoints[i]+1)),  iLine);
    }

    float r = leftDist/rightDist;
    // Refer to my PhD dissertation. Chapter 4
    // yaw = atan ( ( 0.65*(r-1) ) / ( 0.24 * (r+1) ) ) * 180.0f / CV_PI;
    yaw = atan( (r - 1.0f) / (r + 1.0f) ) * safeDoubleToFloat(180.0 / CV_PI);

    return yaw;
}
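// CalcFaceYaw above depends on cvDistFromAPoint2ALine2D with the line given in
// implicit form Ax + By + C = 0 (iLine = {A, B, C}). A minimal sketch of such a
// distance function, assuming only that convention (and <cmath>); the real
// helper may differ, e.g. it might return a signed distance:
static float DistFromAPoint2ALine2DSketch(const cv::Point2f& pt, const std::vector<float>& line)
{
    const float A = line[0], B = line[1], C = line[2];
    // standard point-to-line distance: |A*x0 + B*y0 + C| / sqrt(A^2 + B^2)
    return std::fabs(A*pt.x + B*pt.y + C) / std::sqrt(A*A + B*B);
}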
/**
 * @author      YAO Wei, JIA Pei
 * @version     2010-05-20
 * @brief       ASM profile fitting at one pyramid level
 * @param       ioShape     Input and output - the input and output shape
 * @param       iImg        Input - image to be fitted
 * @param       oImages     Output - the recorded intermediate fitting images
 * @param       iLev        Input - current pyramid level
 * @param       PClose      Input - percentage of converged points. Say, 0.9 means that if 90% of the points
 *                                  are judged as converged, the iteration at this pyramid level can stop
 * @param       epoch       Input - the maximum number of iterations
 * @param       profdim     Input - profile dimension used during fitting. For example, the trained data could be 4D, but the user may only use 1D
 * @param       record      Input - whether to record the intermediate fitting result of every iteration
 * @note        Refer to "AAM Revisited", page 34, figure 13, particularly the steps listed there.
*/
void VO_FittingASMNDProfiles::PyramidFit(   VO_Shape& ioShape,
                                            const cv::Mat& iImg,
                                            std::vector<cv::Mat>& oImages,
                                            unsigned int iLev,
                                            float PClose,
                                            unsigned int epoch,
                                            unsigned int profdim,
                                            bool record)
{
    VO_Shape tempShape = ioShape;
    int nGoodLandmarks = 0;
    float PyrScale = pow(2.0f, (float) (iLev) );

    const int nQualifyingDisplacements = (int)(this->m_VOASMNDProfile->m_iNbOfPoints * PClose);

    for(unsigned int iter = 0; iter < epoch; iter++)
    {
        this->m_iIteration++;
        // estimate the best shape by profile matching the landmarks in tempShape
        nGoodLandmarks = VO_FittingASMNDProfiles::UpdateShape(  this->m_VOASMNDProfile,
                                                                iImg,
                                                                tempShape,
                                                                this->m_vShape2DInfo,
                                                                this->m_VOASMNDProfile->m_vvMeanNormalizedProfile[iLev],
                                                                this->m_VOASMNDProfile->m_vvvCVMInverseOfSg[iLev],
                                                                3,
                                                                profdim);

        // conform ioShape to the shape model
        this->m_VOASMNDProfile->VO_CalcAllParams4AnyShapeWithConstrain( tempShape,
                                                                        this->m_MatModelAlignedShapeParam,
                                                                        this->m_fScale,
                                                                        this->m_vRotateAngles,
                                                                        this->m_MatCenterOfGravity );
        tempShape.ConstrainShapeInImage(iImg);

        if(record)
        {
            // record the intermediate fitting result of this iteration
            cv::Mat temp3 = cv::Mat(this->m_ImageInput.size(), this->m_ImageInput.type(), this->m_ImageInput.channels());
            cv::Mat temp3ROI = temp3(cv::Range (0, (int)(this->m_ImageInput.rows/PyrScale) ), cv::Range (0, (int)(this->m_ImageInput.cols/PyrScale) ) );
            cv::resize(this->m_ImageInput, temp3ROI, temp3ROI.size());
            VO_Fitting2DSM::VO_DrawMesh(tempShape / this->m_fScale2, this->m_VOASMNDProfile, temp3);
            oImages.push_back(temp3);
        }

        // the fitting result is good enough to stop the iteration
        if(nGoodLandmarks > nQualifyingDisplacements)
            break;
    }
    ioShape = tempShape;
}
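// Usage sketch for the record path above (the fitter, shape and image names
// are illustrative, the numeric arguments only plausible values, and
// std::to_string assumes C++11): with record == true, every iteration appends
// a visualization to oImages, which can then be dumped to disk for inspection.
static void DumpPyramidFitDebugImages(VO_FittingASMNDProfiles& fitter,
                                      VO_Shape& shape,
                                      const cv::Mat& image)
{
    std::vector<cv::Mat> debugImages;
    fitter.PyramidFit(shape, image, debugImages,
                      0,        // iLev: finest pyramid level
                      0.9f,     // PClose: stop when 90% of points converge
                      30,       // epoch: at most 30 iterations
                      1,        // profdim: use 1D profiles
                      true);    // record intermediate results
    for (std::size_t i = 0; i < debugImages.size(); ++i)
        cv::imwrite("asm_iter_" + std::to_string(i) + ".jpg", debugImages[i]);
}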
/**
 * @param	fd					- input		folder name
 * @param	fnIdx				- input		file name index of the fitting result
 * @param	deviation			- input		the deviation from refShape to fittedShape
 * @param	ptDists				- input		per-point distances from the ground truth
 * @param	ptErrorFreq			- input		for the curve to display frequency -- point distance
 * @param	fittedShape			- input		fitting result
 */
void CRecognitionAlgs::SaveShapeResults(		const string& fd,
												const string& fnIdx,
												float deviation,
												vector<float>& ptDists,
												vector<float>& ptErrorFreq,
												const VO_Shape& fittedShape)
{
    string fn;
    fn = fd + "/" + fnIdx + ".res";
    
    fstream fp;
    fp.open(fn.c_str (), ios::out);

	fp << "Error per point -- Distance from ground truth" << endl;
	for(unsigned int i = 0; i < ptDists.size(); ++i){
		fp << ptDists[i] << endl;
	}
	fp << endl;

	fp << "Total landmark error" << endl;
	float errSum = std::accumulate(ptDists.begin(),ptDists.end(),0.0f);
	fp << errSum << endl;
	fp <<"Average landmark distance" << endl;
	fp << errSum / ptDists.size() << endl;
	fp << endl;

    fp << "Total Deviation" << endl << deviation << endl;				// deviation
    fp << "Point Error -- Frequency" << endl;
    for(unsigned int i = 0; i < ptErrorFreq.size(); i++)
    {
        fp << ptErrorFreq[i] << " ";
    }
	fp << endl;
	fp << endl;
	fp << "Fitted points" << endl;
	//output actual points along with error frequency
	unsigned int NbOfShapeDim   = fittedShape.GetNbOfDim();
	unsigned int NbOfPoints     = fittedShape.GetNbOfPoints();
	for(unsigned int i = 0; i < NbOfPoints; i++)
	{
		for(unsigned int j = 0; j < NbOfShapeDim; j++)
		{
			fp << fittedShape.GetAShape(j*NbOfPoints+i) << " ";
		}
		fp << endl;
	}
    fp << endl;
	
    fp.close();fp.clear();
}
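// For reference, the .res file written above ends up with this layout (all
// numbers below are purely illustrative):
//
//   Error per point -- Distance from ground truth
//   1.37
//   0.92
//   ...
//
//   Total landmark error
//   41.5
//   Average landmark distance
//   0.72
//
//   Total Deviation
//   3.1
//   Point Error -- Frequency
//   0.05 0.31 0.66 0.84 ...
//
//   Fitted points
//   123.4 210.7
//   ...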
/**
 * @author      JIA Pei
 * @version     2010-02-07
 * @brief       Read a file and obtain all annotation data in VO_Shape
 * @param       filename    input parameter     -   which .asf annotation file to read
 * @param       oShape      output parameter    -   save annotation data to AAM shape data structure
*/
void CAnnotationDBIO::ReadASF(  const std::string &filename,
                                VO_Shape& oShape )
{
    oShape.SetAnnotationFileName(filename);

    std::fstream fp;
    fp.open(filename.c_str (), std::ios::in);

    std::stringstream ss;
    std::string temp;
    float tempFloat = 0.0f;

    // Just for the specific .asf
    for(unsigned int i = 0; i < 10; i++)
        //fp >> temp;
        std::getline(fp, temp);

    unsigned int NbOfPoints = atoi(temp.c_str ());
    oShape.Resize(2, NbOfPoints);

    // Just for the specific .asf
    for(unsigned int i = 0; i < 6; i++)
        //fp >> temp;
        std::getline(fp, temp);

    for (unsigned int i = 0; i < NbOfPoints; i++)
    {
        fp >> temp >> temp >> temp;
        // In DTU IMM , x means rows from left to right
        ss << temp;
        ss >> tempFloat;
        ss.clear();
        oShape(0, i) = tempFloat;
        fp >> temp;
        // In DTU IMM , y means cols from top to bottom
        ss << temp;
        ss >> tempFloat;
        ss.clear();
        //fp >> temp;
        std::getline(fp, temp);
        // In sum, topleft is (0,0), right bottom is (640,480)
        oShape(1, i) = tempFloat;
    }

    // Just for the specific .asf
    for(unsigned int i = 0; i < 5; i++)
        fp >> temp;

    fp.close ();fp.clear();
}
/**
 * @author      JIA Pei
 * @version     2010-05-07
 * @brief       draw a line on the image
 * @param       iShape          Input -- the input shape
 * @param       theSubshape     Input -- the subshape the line was fitted to
 * @param       iLine           Input -- the line, in implicit form Ax+By+C=0
 * @param       oImg            Output -- output image
 * @param       dir             Input -- direction, VERTICAL or HORIZONTAL
 * @param       ws              Input -- if true, span the whole shape's extent; otherwise, span only the subshape's extent
 * @param       offset          Input -- add some offset at both ends of the line segment itself
 * @param       ci              Input -- color index
 * @return      void
 */
void VO_Fitting2DSM::VO_DrawAline(  const VO_Shape& iShape,
                                    const VO_Shape& theSubshape,
                                    const std::vector<float>& iLine,
                                    cv::Mat& oImg,
                                    unsigned int dir,
                                    bool ws,
                                    unsigned int offset,
                                    unsigned int ci)
{
    switch(dir)
    {
    case VERTICAL:
    {
        float A = iLine[0];
        float B = iLine[1];
        float C = iLine[2];
        cv::Point2f ptf1, ptf2;
        if(ws)
        {
            ptf1.y = iShape.MinY() - offset;
            ptf2.y = iShape.MaxY() + offset;
        }
        else
        {
            ptf1.y = theSubshape.MinY() - offset;
            ptf2.y = theSubshape.MaxY() + offset;
        }
        ptf1.x = -(C + B*ptf1.y)/A;
        ptf2.x = -(C + B*ptf2.y)/A;
        cv::Point pt1 = cvPointFrom32f( ptf1 );
        cv::Point pt2 = cvPointFrom32f( ptf2 );
        cv::line( oImg, pt1, pt2, colors[ci], 2, 0, 0);
    }
    break;
    case HORIZONTAL:
    default:
    {
        float A = iLine[0];
        float B = iLine[1];
        float C = iLine[2];
        cv::Point2f ptf1, ptf2;
        if(ws)
        {
            ptf1.x = iShape.MinX() - offset;
            ptf2.x = iShape.MaxX() + offset;
        }
        else
        {
            ptf1.x = theSubshape.MinX() - offset;
            ptf2.x = theSubshape.MaxX() + offset;
        }
        ptf1.y = -(C + A*ptf1.x)/B;
        ptf2.y = -(C + A*ptf2.x)/B;
        cv::Point pt1 = cvPointFrom32f( ptf1 );
        cv::Point pt2 = cvPointFrom32f( ptf2 );
        cv::line( oImg, pt1, pt2, colors[ci], 2, 0, 0);
    }
    break;
    }
}
/**
 * @param    trackalg   - input and output    the tracking algorithm,
 *                        which records some information for every frame
 * @param    iImg       - input     input image
 * @param    iShape     - input     the current tracked shape
 * @param    smallSize  - input     passed through to Tracking()
 * @param    bigSize    - input     passed through to Tracking()
 * @return   bool       whether the tracked shape is acceptable
 */
bool CRecognitionAlgs::EvaluateFaceTrackedByProbabilityImage(
    CTrackingAlgs* trackalg,
    const Mat& iImg,
    const VO_Shape& iShape,
    Size smallSize,
    Size bigSize)
{
    double t = (double)cvGetTickCount();

    Rect rect = iShape.GetShapeBoundRect();

    trackalg->SetConfiguration( CTrackingAlgs::CAMSHIFT,
                                CTrackingAlgs::PROBABILITYIMAGE);
    trackalg->Tracking( rect,
                        iImg,
                        smallSize,
                        bigSize );

    bool res = false;
    if( !trackalg->IsObjectTracked() )
        res = false;
    else if ( ((double)rect.height/(double)rect.width <= 0.75)
        || ((double)rect.height/(double)rect.width >= 2.5) )
        res = false;
    else
        res = true;

    t = ((double)cvGetTickCount() -  t )
        / (cvGetTickFrequency()*1000.);
    cout << "Camshift Tracking time cost: " << t << "millisec" << endl;

    return res;
}
/**
 * @author      JIA Pei
 * @version     2016-08-24
 * @brief       Decompose a pair of shape and texture into shape parameters and texture parameters
 * @param       iPairShapeTexture   Input - the pair of shape and texture
 * @param       oShapeParams        Output - shape parameters
 * @param       oTextureParams      Output - texture parameters
 * @return      void
*/
void VO_AXM::SplitShapeTextureParams(const std::pair<VO_Shape, VO_Texture>& iPairShapeTexture,
                                     cv::Mat_<float>& oShapeParams,
                                     cv::Mat_<float>& oTextureParams )
{
    VO_Shape iShape = iPairShapeTexture.first;
    VO_Texture iTexture = iPairShapeTexture.second;

    unsigned int NbOfShapeDim = iShape.GetNbOfDim();
    float tempNorm = 0.0f;
    std::vector<float> tempTheta;
    tempTheta.resize(NbOfShapeDim == 2? 1:3);
    cv::Mat_<float> tempCOG = cv::Mat_<float>::zeros(1, NbOfShapeDim);

    this->VO_CalcAllParams4AnyShapeWithConstrain(iShape, oShapeParams, tempNorm, tempTheta, tempCOG);
    this->VO_CalcAllParams4AnyTexture(iTexture, oTextureParams);
}
/**
 * @author      Yao Wei
 * @brief       CMU inverse compositional shape update
 * @param       matDeltaP       Input -- delta p
 * @param       matDeltaQ       Input -- delta q
 * @param       s               Input -- the shape
 * @param       estShape        Output -- the newly estimated shape by inverse compositional
 */
void VO_FittingAAMInverseIA::VO_CMUInverseCompositional(const Mat_<float>& matDeltaP,
                                                        const Mat_<float>& matDeltaQ,
                                                        const VO_Shape& s,
                                                        VO_Shape& estShape)
{
    VO_Shape S0;
    this->VO_PParamQParam2ModelAlignedShape( matDeltaP, matDeltaQ, S0);
//    cvConvertScale(dpq, __inv_pq, -1);
//    __shape.CalcShape(__inv_pq, __update_s0);    // __update_s0 = N.W(s0, -delta_p, -delta_q)

    //Secondly: Composing the Incremental Warp with the Current Warp Estimate.
    Point2f res, tmp;
    int count = 0;
    vector<unsigned int> vertexIdxes;

    for(unsigned int i = 0; i < this->m_VOAAMInverseIA->m_iNbOfPoints; i++)
    {
        res.x = 0.0;    res.y = 0.0;
        count = 0;
        //The only problem with this approach is which triangle do we use?
        //In general there will be several triangles that share the i-th vertex.
        for(unsigned j = 0; j < this->m_VOAAMInverseIA->m_iNbOfTriangles; j++)    // see Figure (11)
        {
            if ( this->m_vTriangle2D[j].HasNode(i) )
            {
                vertexIdxes = this->m_vTriangle2D[j].GetVertexIndexes();

                VO_WarpingPoint::WarpOnePoint(  S0.GetA2DPoint(i),
                                                this->m_vTriangle2D[j], 
                                                tmp,
                                                s.GetA2DPoint(vertexIdxes[0]),
                                                s.GetA2DPoint(vertexIdxes[1]),
                                                s.GetA2DPoint(vertexIdxes[2]) );
                res.x += tmp.x;
                res.y += tmp.y;
                count++;
            }
        }
        // average the result so as to smooth the warp at each vertex
        if(count == 0)
        {
            // guard against division by zero -- every vertex should belong to at least one triangle
            cerr << "There must be something wrong when CMU Inverse Compositional !" << endl;
        }
        else
        {
            res.x /= count;
            res.y /= count;
        }
        estShape.SetA2DPoint(res, i);
    }
}
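// VO_WarpingPoint::WarpOnePoint is not shown in this listing. Presumably it
// carries a point from the template triangle into the corresponding triangle
// of shape s via a piecewise-affine map, which is what the loop above relies
// on. A standalone sketch of that idea using barycentric coordinates (the
// name and signature are illustrative, not the library's API):
static cv::Point2f WarpOnePointSketch(const cv::Point2f& p,
                                      const cv::Point2f& s1, const cv::Point2f& s2, const cv::Point2f& s3,
                                      const cv::Point2f& d1, const cv::Point2f& d2, const cv::Point2f& d3)
{
    // barycentric coordinates of p with respect to the source triangle (s1, s2, s3)
    float denom = (s2.y - s3.y)*(s1.x - s3.x) + (s3.x - s2.x)*(s1.y - s3.y);
    float alpha = ((s2.y - s3.y)*(p.x - s3.x) + (s3.x - s2.x)*(p.y - s3.y)) / denom;
    float beta  = ((s3.y - s1.y)*(p.x - s3.x) + (s1.x - s3.x)*(p.y - s3.y)) / denom;
    float gamma = 1.0f - alpha - beta;
    // apply the same weights to the destination triangle (d1, d2, d3)
    return cv::Point2f(alpha*d1.x + beta*d2.x + gamma*d3.x,
                       alpha*d1.y + beta*d2.y + gamma*d3.y);
}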
/**
* @brief    Calculate some key lines on the face
* @param    oLine       Output  the output line, in implicit form Ax+By+C=0
* @param    iShape      Input   the known shape
* @param    iFaceParts  Input   the faceparts
* @param    oSubshape   Output  the output subshape, namely, the points the line is fitted to, stored as a VO_Shape
* @param    partIdx     Input   which face part it is
* @return   void
 */
void VO_KeyPoint::CalcFaceKeyline(
    std::vector<float>& oLine,
    const VO_Shape& iShape,
    const VO_FaceParts& iFaceParts,
    VO_Shape& oSubshape,
    unsigned int partIdx)
{
    oLine.resize(3);
    int dim = iShape.GetNbOfDim();

    cv::Vec4f line;
    std::vector<unsigned int> linePoints;

    switch(partIdx)
    {
    case VO_FacePart::NOSTRIL:
        linePoints = iFaceParts.VO_GetOneFacePart(VO_FacePart::NOSTRIL).GetIndexes();
        break;
    case VO_FacePart::MOUTHCORNERPOINTS:
        linePoints = iFaceParts.VO_GetOneFacePart(VO_FacePart::MOUTHCORNERPOINTS).GetIndexes();
        break;
    case VO_FacePart::PITCHAXISLINEPOINTS:
        linePoints = iFaceParts.VO_GetOneFacePart(VO_FacePart::PITCHAXISLINEPOINTS).GetIndexes();
        break;
    case VO_FacePart::EYECORNERPOINTS:
        linePoints = iFaceParts.VO_GetOneFacePart(VO_FacePart::EYECORNERPOINTS).GetIndexes();
        break;
    case VO_FacePart::MIDLINEPOINTS:
        linePoints = iFaceParts.VO_GetOneFacePart(VO_FacePart::MIDLINEPOINTS).GetIndexes();
        break;
    default:
        linePoints = iFaceParts.VO_GetOneFacePart(VO_FacePart::EYECORNERPOINTS).GetIndexes();
        break;
    }
    
    oSubshape = iShape.GetSubShape(linePoints);

    // Explained by JIA Pei: sometimes there are no linePoints, which means the specified parts are not defined in this particular database
    if(linePoints.size() >= 2 )
    {
        cv::fitLine( oSubshape.GetTheShape(), line, CV_DIST_L2, 0, 0.001, 0.001 );

        // Ax+By+C = 0
        oLine[0] = -line[1];
        oLine[1] = line[0];
        oLine[2] = line[1]*line[2]-line[0]*line[3];
    }
}
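// cv::fitLine returns (vx, vy, x0, y0): a unit direction vector plus a point
// on the line. The conversion above to the implicit form Ax + By + C = 0 is
//     A = -vy,   B = vx,   C = vy*x0 - vx*y0,
// because -vy*(x - x0) + vx*(y - y0) = 0 holds for every point on the line.
// A quick sanity check (helper name illustrative) that the fitted point
// satisfies the implicit equation:
static bool CheckKeylineSketch(const cv::Vec4f& line, const std::vector<float>& oLine)
{
    const float x0 = line[2], y0 = line[3];
    const float residual = oLine[0]*x0 + oLine[1]*y0 + oLine[2];
    return std::fabs(residual) < 1e-4f;     // should be ~0 up to rounding
}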
/**
 * @author      JIA Pei
 * @version     2010-02-07
 * @brief       Read a file and obtain all annotation data in VO_Shape
 * @param       filename    input parameter, which .pts annotation file to read
 * @param       oShape      output parameter, save annotation data to AAM shape data structure
*/
void CAnnotationDBIO::ReadPTS(  const std::string &filename,
                                VO_Shape& oShape)
{
    oShape.SetAnnotationFileName(filename);

    std::fstream fp;
    fp.open(filename.c_str (), std::ios::in);

    std::string temp, oneLine;
    std::stringstream ss;
    float tempFloat = 0.0f;

    do
    {
        fp >> temp;
    }while (temp!="n_points:");

    fp >> temp;
    ss << temp;
    unsigned int NbOfPoints;
    ss >> NbOfPoints;
    ss.clear();
    oShape.Resize(2, NbOfPoints);

    fp >> temp;

    for (unsigned int i = 0; i < NbOfPoints; i++)
    {
        fp >> temp;
        // x refers to a row from left to right
        ss << temp;
        ss >> tempFloat;
        ss.clear();
        oShape(0, i) = tempFloat;
        fp >> temp;
        // y refers to a col from top to bottom
        ss << temp;
        ss >> tempFloat;
        ss.clear();
        // In sum, topleft is (0,0), right bottom is (720,576)
        oShape(1, i) = tempFloat;
    }

    fp.close ();fp.clear();
}
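// ReadPTS expects the usual .pts layout, the same one WritePTS below writes
// out (the point count and coordinates here are illustrative):
//
//   version: 1
//   n_points: 3
//   {
//   153.1 201.0
//   160.4 198.7
//   170.2 200.3
//   }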
/**
 * @author      YAO Wei, JIA Pei
 * @version     2010-05-20
 * @brief       ASM profile fitting at one pyramid level
 * @param       ioShape     Input and output - the input and output shape
 * @param       iImg        Input - image to be fitted
 * @param       iLev        Input - current pyramid level
 * @param       PClose      Input - percentage of converged points. Say, 0.9 means that if 90% of the points
 *                                  are judged as converged, the iteration at this pyramid level can stop
 * @param       epoch       Input - the maximum number of iterations
 * @param       profdim     Input - profile dimension used during fitting. For example, the trained data could be 4D, but the user may only use 1D
 * @note        Refer to "AAM Revisited", page 34, figure 13, particularly the steps listed there.
*/
void VO_FittingASMNDProfiles::PyramidFit(   VO_Shape& ioShape,
                                            const cv::Mat& iImg,
                                            unsigned int iLev,
                                            float PClose,
                                            unsigned int epoch,
                                            unsigned int profdim)
{
    VO_Shape tempShape = ioShape;
    int nGoodLandmarks = 0;
    float PyrScale = pow(2.0f, (float) (iLev-1.0f) );

    const int nQualifyingDisplacements = (int)(this->m_VOASMNDProfile->m_iNbOfPoints * PClose);

    for(unsigned int iter = 0; iter < epoch; iter++)
    {
        this->m_iIteration++;
        // estimate the best shape by profile matching the landmarks in tempShape
        nGoodLandmarks = VO_FittingASMNDProfiles::UpdateShape(  this->m_VOASMNDProfile,
                                                                iImg,
                                                                tempShape,
                                                                this->m_vShape2DInfo,
                                                                this->m_VOASMNDProfile->m_vvMeanNormalizedProfile[iLev],
                                                                this->m_VOASMNDProfile->m_vvvCVMInverseOfSg[iLev],
                                                                3,
                                                                profdim);

        // conform ioShape to the shape model
        this->m_VOASMNDProfile->VO_CalcAllParams4AnyShapeWithConstrain( tempShape,
                                                                        this->m_MatModelAlignedShapeParam,
                                                                        this->m_fScale,
                                                                        this->m_vRotateAngles,
                                                                        this->m_MatCenterOfGravity );
        tempShape.ConstrainShapeInImage(iImg);

        // the fitting result is good enough to stop the iteration
        if(nGoodLandmarks > nQualifyingDisplacements)
            break;
    }
    ioShape = tempShape;
}
/**
* @brief    Calculate face fitting effect
* @param    refShape    - input     reference shape
* @param    fittedShape - input     fitting result
* @param    deviation   - output    what is the deviation from refShape to fittedShape
* @param    ptErrorFreq - output    point error frequency
* @param    nb          - input     how many evaluation levels are to be used
* @param    ptErrPerPoint - output  optional per-point distances from the ground truth (may be NULL)
* @return   void
*/
void CRecognitionAlgs::CalcShapeFittingEffect(	const VO_Shape& refShape,
												const VO_Shape& fittedShape,
												float& deviation,
												vector<float>& ptErrorFreq,
												int nb,
												vector<float>* ptErrPerPoint)
{
    assert(refShape.GetNbOfDim() == fittedShape.GetNbOfDim());
    assert(refShape.GetNbOfPoints() == fittedShape.GetNbOfPoints());
    unsigned int NbOfShapeDim   = refShape.GetNbOfDim();
    unsigned int NbOfPoints     = refShape.GetNbOfPoints();

    vector<float> ptDists(NbOfPoints, 0.0f);
	
    for(unsigned int i = 0; i < NbOfPoints; i++)
    {
        for(unsigned int j = 0; j < NbOfShapeDim; j++)
        {
            ptDists[i] += pow(refShape.GetAShape(j*NbOfPoints+i) - fittedShape.GetAShape(j*NbOfPoints+i), 2.0f);
        }
        ptDists[i] = sqrt(ptDists[i]);
    }

    // zero-initialize before accumulating the frequencies
    ptErrorFreq.assign(nb, 0.0f);
    for(int i = 0; i < nb; i++)
    {
        for (unsigned int j = 0; j < NbOfPoints; j++)
        {
            if (ptDists[j] < i)
            {
                ptErrorFreq[i]++;
            }
        }
        ptErrorFreq[i] /= static_cast<float>(NbOfPoints);
    }

    float sumPtDist = 0.0f;
    for(unsigned int i = 0; i < NbOfPoints; ++i)
    {
        sumPtDist += ptDists[i];
    }
    printf("Avg ptDists = %f\n", sumPtDist/NbOfPoints);

    deviation = CRecognitionAlgs::ShapeDistance(refShape, fittedShape);
    if(ptErrPerPoint != NULL)
    {
        (*ptErrPerPoint) = ptDists;
    }
}
/**
 * @author      JIA Pei
 * @version     2010-02-07
 * @brief       Write all annotation data in VO_Shape to a file
 * @param       filename    output parameter, which .pts annotation file to write
 * @param       iAAMShape   input parameter, save annotation data from AAM shape data structure
*/
void CAnnotationDBIO::WritePTS( const std::string &filename,
                                const VO_Shape& iAAMShape)
{
    std::fstream fp;
    fp.open(filename.c_str (), std::ios::out);

    std::string temp, oneLine;
    std::stringstream ss;
    float tempFloat = 0.0f;
    unsigned int NbOfPoints = iAAMShape.GetNbOfPoints();

    fp << "version: 1" << std::endl
    << "n_points: " << NbOfPoints << std::endl
    << "{" << std::endl;

    for (unsigned int i = 0; i < NbOfPoints; i++)
    {
        fp << iAAMShape.GetA2DPoint(i).x << " " << iAAMShape.GetA2DPoint(i).y << std::endl;
    }

    fp << "}" << std::endl << std::endl;

    fp.close ();
}
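// Usage sketch: on this format WritePTS and ReadPTS are inverses, so a shape
// can be round-tripped through a file (variable names are illustrative, and
// this assumes the two functions are callable as static members as shown;
// otherwise call them on a CAnnotationDBIO instance):
//
//   VO_Shape original = /* ... some annotated shape ... */;
//   CAnnotationDBIO::WritePTS("roundtrip.pts", original);
//   VO_Shape reloaded;
//   CAnnotationDBIO::ReadPTS("roundtrip.pts", reloaded);
//   // 'reloaded' now matches 'original' up to float printing precision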
/**
 * @author      JIA Pei
 * @version     2010-02-22
 * @brief       Extract the local texture constraint (LTC) features around one annotated point
 * @param       iImg            Input    -- the concerned image
 * @param       theShape        Input    -- the concerned shape
 * @param       ptIdx           Input    -- which point?
 * @param       imgSize         Input    -- the image patch size
 * @param       vofeatures      Input and Output -- the feature extractor that generates the LTC
 * @param       shiftX          Input    -- shift in X direction
 * @param       shiftY          Input    -- shift in Y direction
 * @return      void
 */
void VO_ASMLTCs::VO_LoadLTC4OneAnnotatedPoint(  const cv::Mat& iImg,
                                                const VO_Shape& theShape,
                                                unsigned int ptIdx,
                                                cv::Size imgSize,
                                                VO_Features* vofeatures,
                                                int shiftX,
                                                int shiftY)
{
    cv::Point2f pt                 = theShape.GetA2DPoint(ptIdx);
    pt.x                        += shiftX;
    pt.y                        += shiftY;
    cv::Rect rect                 = VO_ASMLTCs::VO_CalcImagePatchRect(iImg, pt, imgSize);
    cv::Mat imgPatch             = iImg(rect);
    vofeatures->VO_GenerateAllFeatures(imgPatch);
}
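// VO_CalcImagePatchRect is not shown in this listing. A plausible minimal
// version, assuming it centers a patch of size imgSize at pt and clamps the
// rectangle so it stays inside the image -- this is an assumption for
// illustration, not the library's implementation (requires <algorithm>):
static cv::Rect CalcImagePatchRectSketch(const cv::Mat& img, const cv::Point2f& pt, cv::Size imgSize)
{
    int x = cvRound(pt.x) - imgSize.width  / 2;
    int y = cvRound(pt.y) - imgSize.height / 2;
    // clamp the patch so it lies completely inside the image
    x = std::max(0, std::min(x, img.cols - imgSize.width));
    y = std::max(0, std::min(y, img.rows - imgSize.height));
    return cv::Rect(x, y, imgSize.width, imgSize.height);
}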
/**
 * @author      JIA Pei
 * @version     2010-05-07
 * @brief       draw the shape mesh (edges and key points) on the image
 * @param       iShape          Input -- the input shape
 * @param       iModel          Input -- the model that provides the edge topology
 * @param       ioImg           Input and Output -- the image
 * @return      void
 */
void VO_Fitting2DSM::VO_DrawMesh(const VO_Shape& iShape, const VO_AXM* iModel, cv::Mat& ioImg)
{
    cv::Point iorg,idst;
    std::vector<VO_Edge> edges = iModel->GetEdge();
    unsigned int NbOfEdges = iModel->GetNbOfEdges();

    for (unsigned int i = 0; i < NbOfEdges; i++)
    {
        iorg = cvPointFrom32f( iShape.GetA2DPoint( edges[i].GetIndex1() ) );
        idst = cvPointFrom32f( iShape.GetA2DPoint( edges[i].GetIndex2() ) );
        // Edge
        cv::line( ioImg, iorg, idst, colors[8], 1, 0, 0 );
        // Key points
        cv::circle( ioImg, iorg, 2, colors[0], -1, 8, 0 );
        cv::circle( ioImg, idst, 2, colors[0], -1, 8, 0 );
    }
}
/**
 * @author      JIA Pei
 * @version     2010-02-22
 * @brief       Extract the local texture constraint (LTC) features around one annotated point
 * @param       iImg        Input    -- the concerned image
 * @param       theShape    Input    -- the concerned shape
 * @param       ptIdx       Input    -- which point?
 * @param       imgSize     Input    -- the image size
 * @param       mtd         Input    -- LTC method
 * @return      cv::Mat_<float>     Output    -- the extracted LTC
 */
cv::Mat_<float> VO_AFM::VO_LoadLTC4OneAnnotatedPoint(const cv::Mat& iImg,
                                                        const VO_Shape& theShape,
                                                        unsigned int ptIdx,
                                                        cv::Size imgSize,
                                                        unsigned int mtd)
{
    cv::Mat_<float> resLTC;
    cv::Point2f pt                     = theShape.GetA2DPoint(ptIdx);
    cv::Rect rect                     = this->VO_CalcImagePatchRect(iImg, pt, imgSize);
    cv::Mat imgPatch                 = iImg(rect);

    switch(mtd)
    {
        case VO_Features::LBP:
        {
            // per-pixel initialization of the image patch before the wavelet
            // transform would go here -- the loop body is an empty placeholder
            for(int i = 0; i < rect.height; ++i)
            {
                for(int j = 0; j < rect.width; ++j)
                {
                }
            }

            bool showWaveletImage = true;
            if(showWaveletImage)
            {
                imwrite("originalImage.jpg", imgPatch);
    //            this->VO_HardSaveWaveletSingleChannelImage("waveletImage.jpg", waveParamsGray, imgSize);
    //            this->VO_HardSaveWaveletSingleChannelImage("inverseWaveletImage.jpg", waveParamsGray, imgSize);
            }
        }
        break;
        default:
            break;
    }
    
    return resLTC;
}
/**
 * @brief First estimation of the fitted shape, by scaling and re-centering only
 * @param iShape -- input shape
 * @param rect   -- the rectangle to calculate the scalar
 * @return VO_Shape -- the scaled shape
 */
VO_Shape VO_Fitting2DSM::VO_FirstEstimationByScaling(   const VO_Shape& iShape,
        const cv::Rect& rect )
{
    VO_Shape res = iShape;
    cv::Rect_<float> rect0 = iShape.GetShapeRect();
    float fScaleX = (float)rect.width/rect0.width *0.80;
    float fScaleY = (float)rect.height/rect0.height *0.80;
    res.ScaleX(fScaleX);
    res.ScaleY(fScaleY);
    rect0 = iShape.GetShapeBoundRect();
    cv::Mat_<float> translation = cv::Mat_<float>::zeros(2, 1);
    float centerX = (float)rect.x + (float)rect.width/2.0f;
    float centerY = (float)rect.y + (float)rect.height/2.0f;
    float center0X = (float)rect0.x + (float)rect0.width/2.0f;
    float center0Y = (float)rect0.y + (float)rect0.height/2.0f;
    translation(0,0) = centerX - center0X;
    translation(1,0) = centerY - center0Y;
    res.Translate( translation );
    return res;
}
/**
 * @brief       Calculate some key points on the face
 * @param       oPoint      output  point list
 * @param       iShape      input   shape
 * @param       iFaceParts  input   faceparts
 * @param       ptType      input   point type
 * @return      void
 */
void VO_KeyPoint::CalcFaceKeyPoint( cv::Point2f& oPoint,
                                    const VO_Shape& iShape,
                                    const VO_FaceParts& iFaceParts,
                                    unsigned int ptType)
{
    std::vector<unsigned int> facePartsPoints;
    VO_Shape subiShape;
    // Very important, explained by JIA Pei:
    // "resize()" only changes the size; it doesn't necessarily reset what's
    // already inside the std::vector to "0".
    // Therefore, clear() is a must before resize().

    switch(ptType)
    {
    case CENTEROFGRAVITY:
        if (iShape.GetNbOfPoints() > 0)
            oPoint = iShape.GetA2DPoint( VO_Shape::CENTER);
        break;
    case LEFTEYELEFTCORNER:
        {
            facePartsPoints = iFaceParts.VO_GetOneFacePart(VO_FacePart::LEFTEYE).GetIndexes();
            if (facePartsPoints.size() > 0)
            {
                subiShape = iShape.GetSubShape(facePartsPoints);
                oPoint = subiShape.GetA2DPoint(VO_Shape::LEFTMOST);
            }
        }
        break;
    case LEFTEYERIGHTCORNER:
        {
            facePartsPoints = iFaceParts.VO_GetOneFacePart(VO_FacePart::LEFTEYE).GetIndexes();
            if (facePartsPoints.size() > 0)
            {
                subiShape = iShape.GetSubShape(facePartsPoints);
                oPoint = subiShape.GetA2DPoint(VO_Shape::RIGHTMOST);
            }
        }
        break;
    case LEFTEYECENTER:
        {
            facePartsPoints = iFaceParts.VO_GetOneFacePart(VO_FacePart::LEFTEYE).GetIndexes();
            if (facePartsPoints.size() > 0)
            {
                subiShape = iShape.GetSubShape(facePartsPoints);
                oPoint = subiShape.GetA2DPoint( VO_Shape::CENTER);
            }
        }
        break;
    case RIGHTEYELEFTCORNER:
        {
            facePartsPoints = iFaceParts.VO_GetOneFacePart(VO_FacePart::RIGHTEYE).GetIndexes();
            if (facePartsPoints.size() > 0)
            {
                subiShape = iShape.GetSubShape(facePartsPoints);
                oPoint = subiShape.GetA2DPoint(VO_Shape::LEFTMOST);
            }
        }
        break;
    case RIGHTEYERIGHTCORNER:
        {
            facePartsPoints = iFaceParts.VO_GetOneFacePart(VO_FacePart::RIGHTEYE).GetIndexes();
            if (facePartsPoints.size() > 0)
            {
                subiShape = iShape.GetSubShape(facePartsPoints);
                oPoint = subiShape.GetA2DPoint(VO_Shape::RIGHTMOST);
            }
        }
        break;
    case RIGHTEYECENTER:
        {
            facePartsPoints = iFaceParts.VO_GetOneFacePart(VO_FacePart::RIGHTEYE).GetIndexes();
            if (facePartsPoints.size() > 0)
            {
                subiShape = iShape.GetSubShape(facePartsPoints);
                oPoint = subiShape.GetA2DPoint( VO_Shape::CENTER);
            }
        }
        break;
    case NOSETIPKEY:
        {
            facePartsPoints = iFaceParts.VO_GetOneFacePart(VO_FacePart::NOSETIP).GetIndexes();    // Just one point
            if (facePartsPoints.size() == 1)
                oPoint = iShape.GetA2DPoint(facePartsPoints[0]);
        }
        break;
    case NOSTRILLEFT:
        {
            facePartsPoints = iFaceParts.VO_GetOneFacePart(VO_FacePart::NOSTRIL).GetIndexes();
            if (facePartsPoints.size() > 0)
            {
                subiShape = iShape.GetSubShape(facePartsPoints);
                oPoint = subiShape.GetA2DPoint(VO_Shape::LEFTMOST);
            }
        }
        break;
    case NOSTRILRIGHT:
        {
            facePartsPoints = iFaceParts.VO_GetOneFacePart(VO_FacePart::NOSTRIL).GetIndexes();
            if (facePartsPoints.size() > 0)
            {
                subiShape = iShape.GetSubShape(facePartsPoints);
                oPoint = subiShape.GetA2DPoint(VO_Shape::RIGHTMOST);
            }
        }
        break;
    case NOSECENTER:
        {
            facePartsPoints = iFaceParts.VO_GetOneFacePart(VO_FacePart::NOSE).GetIndexes();
            if (facePartsPoints.size() > 0)
            {
                subiShape = iShape.GetSubShape(facePartsPoints);
                oPoint = subiShape.GetA2DPoint( VO_Shape::CENTER);
            }
        }
        break;
    case MOUTHLEFTCORNER:
        {
            facePartsPoints = iFaceParts.VO_GetOneFacePart(VO_FacePart::LIPOUTERLINE).GetIndexes();
            if (facePartsPoints.size() > 0)
            {
                subiShape = iShape.GetSubShape(facePartsPoints);
                oPoint = subiShape.GetA2DPoint(VO_Shape::LEFTMOST);
            }
        }
        break;
    case MOUTHRIGHTCORNER:
        {
            facePartsPoints = iFaceParts.VO_GetOneFacePart(VO_FacePart::LIPOUTERLINE).GetIndexes();
            if (facePartsPoints.size() > 0)
            {
                subiShape = iShape.GetSubShape(facePartsPoints);
                oPoint = subiShape.GetA2DPoint(VO_Shape::RIGHTMOST);
            }
        }
        break;
    case MOUTHCENTER:
        {
            facePartsPoints = iFaceParts.VO_GetOneFacePart(VO_FacePart::LIPOUTERLINE).GetIndexes();
            if (facePartsPoints.size() > 0)
            {
                subiShape = iShape.GetSubShape(facePartsPoints);
                oPoint = subiShape.GetA2DPoint( VO_Shape::CENTER);
            }
        }
        break;
    case EARLOBELEFT:
        {
            facePartsPoints = iFaceParts.VO_GetOneFacePart(VO_FacePart::LEFTEAR).GetIndexes();
            if (facePartsPoints.size() > 0)
            {
                // earlobe localization not implemented yet
            }
        }
        break;
    case EARLOBERIGHT:
        {
            facePartsPoints = iFaceParts.VO_GetOneFacePart(VO_FacePart::RIGHTEAR).GetIndexes();
            if (facePartsPoints.size() > 0)
            {
                // earlobe localization not implemented yet
            }
        }
        break;
    }
}
/**
 * @author     	JIA Pei
 * @version    	2010-05-20
 * @brief      	Basic AAM Fitting, for dynamic image sequence
 * @param      	iImg			Input - image to be fitted
 * @param      	ioShape         Input and Output - the fitted shape
 * @param      	oImg            Output - the fitted image
 * @param		epoch			Input - the iteration epoch
*/
float VO_FittingAAMBasic::VO_BasicAAMFitting(const Mat& iImg,
											VO_Shape& ioShape,
											Mat& oImg,
											unsigned int epoch)
{
	this->m_VOFittingShape.clone(ioShape);
	double t = (double)cvGetTickCount();

    this->SetProcessingImage(iImg, this->m_VOAAMBasic);
    this->m_iIteration = 0;

    // Get m_MatModelAlignedShapeParam and m_fScale, m_vRotateAngles, m_MatCenterOfGravity
    this->m_VOAAMBasic->VO_CalcAllParams4AnyShapeWithConstrain(	this->m_VOFittingShape,
																this->m_MatModelAlignedShapeParam,
																this->m_fScale,
																this->m_vRotateAngles,
																this->m_MatCenterOfGravity);
	this->m_VOFittingShape.ConstrainShapeInImage(this->m_ImageProcessing);

	// Get m_MatModelNormalizedTextureParam
    VO_TextureModel::VO_LoadOneTextureFromShape(this->m_VOFittingShape,
												this->m_ImageProcessing,
												this->m_vTriangle2D,
												this->m_vPointWarpInfo,
												this->m_VOFittingTexture );
	// estimate the texture model parameters
    this->m_VOAAMBasic->VO_CalcAllParams4AnyTexture(this->m_VOFittingTexture, this->m_MatModelNormalizedTextureParam);

    // Calculate m_MatCurrentC
    this->m_VOAAMBasic->VO_SParamTParamProjectToCParam(	this->m_MatModelAlignedShapeParam,
														this->m_MatModelNormalizedTextureParam,
														this->m_MatCurrentC );
    // Set m_MatCurrentT, m_MatDeltaT, m_MatEstimatedT, m_MatDeltaC, m_MatEstimatedC, etc.
	this->m_MatCurrentT 	= Mat_<float>::zeros(this->m_MatCurrentT.size());
	this->m_MatDeltaT 		= Mat_<float>::zeros(this->m_MatDeltaT.size());
	this->m_MatEstimatedT 	= Mat_<float>::zeros(this->m_MatEstimatedT.size());
	this->m_MatDeltaC 		= Mat_<float>::zeros(this->m_MatDeltaC.size());
	this->m_MatEstimatedC 	= Mat_<float>::zeros(this->m_MatEstimatedC.size());
	
	//////////////////////////////////////////////////////////////////////////////////////////////////////
	// explained by JIA Pei. 2010-05-20
	// For the first round, this->m_VOFittingShape should not change after calling "VO_CParamTParam2FittingShape"
	// But this is not the case. why?
	// Before calling VO_CParamTParam2FittingShape, this->m_VOFittingShape is calculated by 
	// a) assigning m_VOTemplateAlignedShape
	// b) align to the real-size face using detected eyes and mouth
	// c) constrain the shape within the image
	// d) constrain the shape parameters and calculate those rigid transform parameters
	// cout << this->m_VOFittingShape << endl;
	//////////////////////////////////////////////////////////////////////////////////////////////////////
	// Estimate m_VOFittingShape and m_VOFittingTexture
	this->VO_CParamTParam2FittingShape(	this->m_MatCurrentC,
										this->m_MatCurrentT,
										this->m_VOModelNormalizedTexture,
										this->m_VOFittingShape,
										this->m_fScale,
										this->m_vRotateAngles,
										this->m_MatCenterOfGravity );
	this->m_VOFittingShape.ConstrainShapeInImage(this->m_ImageProcessing);		// Remember to call ConstrainShapeInImage() whenever you update m_VOFittingShape
	//////////////////////////////////////////////////////////////////////////////////////////////////////
	// When calling VO_CParamTParam2FittingShape, this->m_VOFittingShape is calculated by
	// a) c parameters to reconstruct shape parameters
	// b) shape parameters to reconstruct shape
	// c) align to the real-size face by global shape normalization
	// cout << this->m_VOFittingShape << endl;
	//////////////////////////////////////////////////////////////////////////////////////////////////////
	
	this->m_E_previous = this->m_E = this->VO_CalcErrorImage(this->m_ImageProcessing,
															this->m_VOFittingShape,
															this->m_VOModelNormalizedTexture,
															this->m_VOTextureError);

    do
    {
		float estScale = this->m_fScale;
		vector<float> estRotateAngles = this->m_vRotateAngles;
		Mat_<float> estCOG = this->m_MatCenterOfGravity.clone();
		bool cBetter 	= false;
		bool poseBetter = false;

        /** First the shape parameters, i.e. the c parameters; refer to equation (9.3)
        * in Cootes' "Statistical Models of Appearance for Computer Vision" */
        cv::gemm(this->m_VOTextureError.GetTheTextureInARow(), this->m_VOAAMBasic->m_MatRc, -1, Mat(), 0.0, this->m_MatDeltaC, GEMM_2_T);

        // damp -- C
        for(unsigned int i = 0; i < k_values.size(); i++)
        {
            // make damped c prediction
            cv::scaleAdd(this->m_MatDeltaC, k_values[i], this->m_MatCurrentC, this->m_MatEstimatedC);

            // make sure m_MatEstimatedC are constrained
			this->m_VOAAMBasic->VO_AppearanceParameterConstraint(this->m_MatEstimatedC);
			this->VO_CParamTParam2FittingShape(	this->m_MatEstimatedC,
												this->m_MatCurrentT,
												this->m_VOModelNormalizedTexture,
												this->m_VOEstimatedShape,
												estScale,
												estRotateAngles,
												estCOG);
			if ( !VO_ShapeModel::VO_IsShapeInsideImage(this->m_VOEstimatedShape, this->m_ImageProcessing) )
				continue;
			else
				this->m_E = this->VO_CalcErrorImage(this->m_ImageProcessing,
													this->m_VOEstimatedShape,
													this->m_VOModelNormalizedTexture,
													this->m_VOEstimatedTextureError);

            if (this->m_E < this->m_E_previous)
            {
                this->m_MatEstimatedC.copyTo(this->m_MatCurrentC);
				this->m_VOFittingShape.clone(this->m_VOEstimatedShape);
                this->m_VOTextureError.clone(this->m_VOEstimatedTextureError);
                this->m_E_previous = this->m_E;
                cBetter = true;
				this->m_fScale = estScale;
				this->m_vRotateAngles = estRotateAngles;
				this->m_MatCenterOfGravity = estCOG.clone();
                break;
            }
        }

		/** Second the pose, i.e. the t parameters; refer to equation (9.3)
		* in Cootes' "Statistical Models of Appearance for Computer Vision" */
        cv::gemm(this->m_VOTextureError.GetTheTextureInARow(), this->m_VOAAMBasic->m_MatRt, -1, Mat(), 0, this->m_MatDeltaT, GEMM_2_T);

        // damp -- T
        for(unsigned int i = 0; i < k_values.size(); i++)
        {
            // make damped c/pose prediction
            cv::scaleAdd(this->m_MatDeltaT, k_values[i], this->m_MatCurrentT, this->m_MatEstimatedT);
			this->VO_CParamTParam2FittingShape(	this->m_MatCurrentC,
												this->m_MatEstimatedT,
												this->m_VOModelNormalizedTexture,
												this->m_VOEstimatedShape,
												estScale,
												estRotateAngles,
												estCOG);
			if ( !VO_ShapeModel::VO_IsShapeInsideImage(this->m_VOEstimatedShape, this->m_ImageProcessing) )
				continue;
			else
				this->m_E = this->VO_CalcErrorImage(this->m_ImageProcessing,
													this->m_VOEstimatedShape,
													this->m_VOModelNormalizedTexture,
													this->m_VOEstimatedTextureError);

            if (this->m_E < this->m_E_previous)
            {
				// Since m_fScale, m_vRotateAngles and m_MatCenterOfGravity have been updated,
				// m_MatCurrentT should be assigned to 0 now!
				this->m_MatCurrentT = Mat_<float>::zeros(this->m_MatCurrentT.size());
				//                this->m_MatEstimatedT.copyTo(this->m_MatCurrentT);
				this->m_VOFittingShape.clone(this->m_VOEstimatedShape);
				this->m_VOTextureError.clone(this->m_VOEstimatedTextureError);
                this->m_E_previous = this->m_E;
                poseBetter = true;
				this->m_fScale = estScale;
				this->m_vRotateAngles = estRotateAngles;
				this->m_MatCenterOfGravity = estCOG.clone();
                break;
            }
        }

        if( cBetter || poseBetter)
        {
			ioShape.clone(this->m_VOFittingShape);
        }
        else
        break;

        ++this->m_iIteration;

    }while( ( fabs(this->m_E) > FLT_EPSILON ) && (this->m_iIteration < epoch)/* && (cv::norm(this->m_MatDeltaC) > FLT_EPSILON) */ );

	t = ((double)cvGetTickCount() - t) / (cvGetTickFrequency()*1000.);
	cout << "Basic fitting time cost: " << t << " millisec" << endl;
	this->m_fFittingTime = t;

	VO_Fitting2DSM::VO_DrawMesh(ioShape, this->m_VOAAMBasic, oImg);

	return t;
}
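// The two damping loops above step through k_values, trying successively
// smaller fractions of the predicted update until the error image improves.
// k_values itself is defined elsewhere in the library; Cootes' book suggests
// attempting step sizes such as 1, 0.5, 0.25, so a plausible definition would
// be (an assumption, not the library's actual values):
static const float k_values_sketch[] = { 1.0f, 0.5f, 0.25f, 0.125f };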
/**
 * @author      JIA Pei
 * @version     2010-05-20
 * @brief       CMU ICIA AAM Fitting, for dynamic image sequence
 * @param       iImg            Input - image to be fitted
 * @param       ioShape         Input and Output - the fitted shape
 * @param       oImg            Output - the fitted image
 * @param       epoch           Input - the iteration epoch
*/
float VO_FittingAAMInverseIA::VO_ICIAAAMFitting(const Mat& iImg,
                                                VO_Shape& ioShape,
                                                Mat& oImg,
                                                unsigned int epoch)
{
    this->m_VOFittingShape.clone(ioShape);
    this->m_VOEstimatedShape.clone(this->m_VOFittingShape);
    double t = (double)cvGetTickCount();

    this->SetProcessingImage(iImg, this->m_VOAAMInverseIA);
    this->m_iIteration = 0;

    // Get m_MatCurrentP and m_MatCurrentQ
    this->m_VOAAMInverseIA->VO_CalcAllParams4AnyShapeWithConstrain( this->m_VOFittingShape,
                                                                    this->m_MatCurrentP,
                                                                    this->m_fScale,
                                                                    this->m_vRotateAngles,
                                                                    this->m_MatCenterOfGravity);
    this->m_VOFittingShape.ConstrainShapeInImage(this->m_ImageProcessing);

    this->m_MatDeltaP       = Mat_<float>::zeros(this->m_MatDeltaP.size());
    this->m_MatDeltaQ       = Mat_<float>::zeros(this->m_MatDeltaQ.size());
    this->m_MatCurrentQ     = Mat_<float>::zeros(this->m_MatCurrentQ.size());
    this->m_MatDeltaPQ      = Mat_<float>::zeros(this->m_MatDeltaPQ.size());

    // Step (1) Warp I with W(x;p) followed by N(x;q) to compute I(N(W(x;p);q))
    this->VO_PParamQParam2FittingShape( this->m_MatCurrentP,
                                        this->m_MatCurrentQ,
                                        this->m_VOFittingShape,
                                        this->m_fScale,
                                        this->m_vRotateAngles,
                                        this->m_MatCenterOfGravity );
    this->m_VOFittingShape.ConstrainShapeInImage(this->m_ImageProcessing);

    // Step (2) Compute the error image I(N(W(x;p);q))-A0(x)
    this->m_E_previous = this->m_E = this->VO_CalcErrorImage(this->m_ImageProcessing,
                                                            this->m_VOFittingShape,
                                                            this->m_VOTemplateNormalizedTexture,
                                                            this->m_VOTextureError);

    do
    {
        ++this->m_iIteration;

        // Step (7) -- a bit modification
        cv::gemm(this->m_VOTextureError.GetTheTextureInARow(), this->m_VOAAMInverseIA->m_MatICIAPreMatrix, -1, Mat(), 0, this->m_MatDeltaPQ, GEMM_2_T);

        // Step (8) -- a bit modification. Get DeltaP DeltaQ respectively
        this->m_MatDeltaQ = this->m_MatDeltaPQ(Rect( 0, 0, this->m_MatDeltaQ.cols, 1));
        this->m_MatDeltaP = this->m_MatDeltaPQ(Rect( this->m_MatDeltaQ.cols, 0, this->m_MatDeltaP.cols, 1));

        // Step (9) -- CMU Inverse Compositional
        this->VO_CMUInverseCompositional( this->m_MatDeltaP, this->m_MatDeltaQ, this->m_VOFittingShape, this->m_VOEstimatedShape );
        
        // Ensure Inverse Compositional still satisfies global shape constraints
        this->m_VOAAMInverseIA->VO_CalcAllParams4AnyShapeWithConstrain( this->m_VOEstimatedShape,
                                                                        this->m_MatEstimatedP,
                                                                        this->m_fScale,
                                                                        this->m_vRotateAngles,
                                                                        this->m_MatCenterOfGravity);
        this->m_VOEstimatedShape.ConstrainShapeInImage(this->m_ImageProcessing);

        this->m_E = this->VO_CalcErrorImage(this->m_ImageProcessing,
                                            this->m_VOEstimatedShape,
                                            this->m_VOTemplateNormalizedTexture,
                                            this->m_VOEstimatedTextureError);

        if (this->m_E < this->m_E_previous)
        {
            // Unlike what's happening in Basic AAM, 
            // since m_fScale, m_vRotateAngles and m_MatCenterOfGravity have not been updated in ICIA,
            // m_MatCurrentT should not be assigned to 0 now!
//            this->m_MatCurrentQ = Mat_<float>::zeros(this->m_MatCurrentQ.size());
            this->m_VOFittingShape.clone(this->m_VOEstimatedShape);
            this->m_VOAAMInverseIA->VO_CalcAllParams4AnyShapeWithConstrain( this->m_VOFittingShape,
                                                                            this->m_MatCurrentP,
                                                                            this->m_fScale,
                                                                            this->m_vRotateAngles,
                                                                            this->m_MatCenterOfGravity);
            this->m_VOFittingShape.ConstrainShapeInImage(this->m_ImageProcessing);
            this->m_VOTextureError.clone(this->m_VOEstimatedTextureError);
            this->m_E_previous = this->m_E;

        }
        else
            break;

    }while( ( fabs(this->m_E) > FLT_EPSILON ) && ( this->m_iIteration < epoch ) );

    VO_Fitting2DSM::VO_DrawMesh(this->m_VOFittingShape, this->m_VOAAMInverseIA, oImg);

    // Recalculate all parameters finally, this is also optional.
    this->m_VOAAMInverseIA->VO_CalcAllParams4AnyShapeWithConstrain( this->m_VOFittingShape,
                                                                    this->m_MatCurrentP,
                                                                    this->m_fScale,
                                                                    this->m_vRotateAngles,
                                                                    this->m_MatCenterOfGravity );

    // Step (10) (Option step), Post-computation. Get m_MatModelNormalizedTextureParam
    VO_TextureModel::VO_LoadOneTextureFromShape(this->m_VOFittingShape,
                                                this->m_ImageProcessing,
                                                this->m_vTriangle2D,
                                                this->m_vPointWarpInfo,
                                                this->m_VOFittingTexture );
    // estimate the texture model parameters
    this->m_VOAAMInverseIA->VO_CalcAllParams4AnyTexture(this->m_VOFittingTexture, this->m_MatModelNormalizedTextureParam);
    ioShape.clone(this->m_VOFittingShape);

    t = ((double)cvGetTickCount() - t) / (cvGetTickFrequency()*1000.);
    cout << "ICIA AAM fitting time cost: " << t << " millisec" << endl;
    this->m_fFittingTime = t;

    return t;
}
/**
 * @author      JIA Pei, YAO Wei
 * @version     2010-05-20
 * @brief       Additive ASM ND Profiles Fitting, for dynamic image sequence
 * @param       iImg            Input - image to be fitted
 * @param       ioShape         Input and output - the shape
 * @param       oImg            Output - the fitted image
 * @param       dim             Input - profile dimension, 1, 2, 4 or 8
 * @param       epoch           Input - the iteration epoch
 * @param       pyramidlevel    Input - pyramid level, 1, 2, 3 or 4 at most
 * @note        Refer to "AAM Revisited, page 34, figure 13", particularly, those steps.
*/
float VO_FittingASMNDProfiles::VO_ASMNDProfileFitting(  const cv::Mat& iImg,
                                                        VO_Shape& ioShape,
                                                        cv::Mat& oImg,
                                                        unsigned int epoch,
                                                        unsigned int pyramidlevel,
                                                        unsigned int dim)
{
    this->m_VOFittingShape.clone(ioShape);
    double t = (double)cv::getTickCount();

    this->m_iNbOfPyramidLevels = pyramidlevel;
    this->SetProcessingImage(iImg, this->m_VOASMNDProfile);
    this->m_iIteration = 0;

    // Get m_MatModelAlignedShapeParam and m_fScale, m_vRotateAngles, m_MatCenterOfGravity
    this->m_VOASMNDProfile->VO_CalcAllParams4AnyShapeWithConstrain( this->m_VOFittingShape,
                                                                    this->m_MatModelAlignedShapeParam,
                                                                    this->m_fScale,
                                                                    this->m_vRotateAngles,
                                                                    this->m_MatCenterOfGravity);
    this->m_VOFittingShape.ConstrainShapeInImage(this->m_ImageProcessing);

    // Explained by YAO Wei, 2008-2-9.
    // Scale this->m_VOFittingShape, so face width is a constant StdFaceWidth.
    //this->m_fScale2 = this->m_VOASMNDProfile->m_VOReferenceShape.GetWidth() / this->m_VOFittingShape.GetWidth();
    this->m_fScale2 = this->m_VOASMNDProfile->m_VOReferenceShape.GetCentralizedShapeSize() / this->m_VOFittingShape.GetCentralizedShapeSize();
    this->m_VOFittingShape *= this->m_fScale2;

    int w = (int)(iImg.cols*this->m_fScale2);
    int h = (int)(iImg.rows*this->m_fScale2);
    cv::Mat SearchImage = cv::Mat(cv::Size( w, h ), this->m_ImageProcessing.type(), this->m_ImageProcessing.channels() );

    float PyrScale = pow(2.0f, (float) (this->m_iNbOfPyramidLevels-1.0f) );
    this->m_VOFittingShape /= PyrScale;

    const int nQualifyingDisplacements = (int)(this->m_VOASMNDProfile->m_iNbOfPoints * VO_Fitting2DSM::pClose);

    // for each level in the image pyramid
    for (int iLev = this->m_iNbOfPyramidLevels-1; iLev >= 0; iLev--)
    {
        // Set image roi, instead of cvCreateImage a new image to speed up
        cv::Mat siROI = SearchImage(cv::Rect(0, 0, (int)(w/PyrScale), (int)(h/PyrScale) ) );
        cv::resize(this->m_ImageProcessing, siROI, siROI.size());

        this->m_VOEstimatedShape = this->m_VOFittingShape;
        this->PyramidFit(   this->m_VOEstimatedShape,
                            SearchImage,
                            iLev,
                            VO_Fitting2DSM::pClose,
                            epoch,
                            dim);
        this->m_VOFittingShape = this->m_VOEstimatedShape;

        if (iLev != 0)
        {
            PyrScale /= 2.0f;
            this->m_VOFittingShape *= 2.0f;
        }
    }

    // Explained by YAO Wei, 2008-02-09.
    // this->m_fScale2 back to original size
    this->m_VOFittingShape /= this->m_fScale2;

    ioShape.clone(this->m_VOFittingShape);
    VO_Fitting2DSM::VO_DrawMesh(ioShape, this->m_VOASMNDProfile, oImg);

    // cv::getTickFrequency() returns ticks per *second* (unlike the C API's
    // cvGetTickFrequency(), which returns ticks per microsecond), so
    // milliseconds = tick delta * 1000 / frequency.
    t = ((double)cv::getTickCount() - t) * 1000. / cv::getTickFrequency();
    printf("MRASM fitting time cost: %.2f millisec\n", t);
    this->m_fFittingTime = safeDoubleToFloat(t);

    return this->m_fFittingTime;
}
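// A minimal usage sketch for VO_ASMNDProfileFitting above. Assumptions: the
// fitter's model has already been loaded (loading is outside this snippet);
// "RunASMOnFrame", "frame", "initShape", "canvas" and the numeric arguments
// are illustrative only, not part of the original API.
static float RunASMOnFrame( VO_FittingASMNDProfiles& fitter,
                            const cv::Mat& frame,
                            VO_Shape& initShape,
                            cv::Mat& canvas)
{
    // 30 iterations per pyramid level, 3 pyramid levels, 1D profiles
    return fitter.VO_ASMNDProfileFitting(frame, initShape, canvas, 30, 3, 1);
}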
// Refer to my PhD thesis, chapter 4
float CRecognitionAlgs::CalcFacePitch(  const VO_Shape& iShape,
                                        const VO_FaceParts& iFaceParts)
{
    float pitch = 0.0f;
    int dim = iShape.GetNbOfDim();
    float NNQ, ENQ, EQ, NO;

    // Theoretically, using the eye corners would be correct, but it is not stable.
    // It is better to use the two nostrils first, if the nostrils are defined in the face parts.
    ///////////////////////////////////////////////////////////////////////////////
    //     unsigned int nosetipBottom = 0;
    //     vector<unsigned int> nosePoints             = iFaceParts.GetNose().GetIndexes();
    //     vector<unsigned int> midlinePoints         = iFaceParts.GetMidlinePoints().GetIndexes();
    //     vector<unsigned int> pitchAxisPoints    = iFaceParts.GetPitchAxisLinePoints().GetIndexes();
    //     VO_Shape nose, midLine, pitchAxis;
    //     nose.SetDim(dim);
    //     midLine.SetDim(dim);
    //     pitchAxis.SetDim(dim);
    //     nose.SetSize( nosePoints.size()*dim );
    //     midLine.SetSize( midlinePoints.size()*dim );
    //     pitchAxis.SetSize(pitchAxisPoints.size()*dim );
    // 
    //     for(unsigned int i = 0; i < nosePoints.size(); ++i)
    //     {
    //         for(unsigned int j = 0; j < midlinePoints.size(); ++j)
    //         {
    //             if(nosePoints[i] == midlinePoints[j])
    //             {
    //                 nosetipBottom = nosePoints[i];
    //                 break;
    //             }
    //         }
    //     }
    // 
    //     Point2f ntPoint  = Point2f(iShape.GetAShape(dim*nosetipBottom), iShape.GetAShape(dim*nosetipBottom+1));
    //     Point2f paPoint1 = Point2f(iShape.GetAShape(dim*pitchAxisPoints[0]), iShape.GetAShape(dim*pitchAxisPoints[0]+1));
    //     Point2f paPoint2 = Point2f(iShape.GetAShape(dim*pitchAxisPoints[1]), iShape.GetAShape(dim*pitchAxisPoints[1]+1));
    // 
    //     float NNQ = ( (ntPoint.y - paPoint1.y) + (ntPoint.y - paPoint2.y) ) / 2.0f;
    //     float ENQ = fabs(ntPoint.x - paPoint1.x) > fabs(paPoint2.x - ntPoint.x) ? fabs(ntPoint.x - paPoint1.x) : fabs(paPoint2.x - ntPoint.x);
    //     float EQ = sqrt(ENQ*ENQ + NNQ*NNQ);
    //     float NO = sqrt(2.0f)/2.0f*EQ;
    ///////////////////////////////////////////////////////////////////////////////

    vector<unsigned int> nostrilPoints          = iFaceParts.VO_GetOneFacePart(VO_FacePart::NOSTRIL).GetIndexes();
    if(nostrilPoints.size() >= 2)   // both nostrils are indexed below
    {
        vector<unsigned int> pitchAxisPoints    = iFaceParts.VO_GetOneFacePart(VO_FacePart::PITCHAXISLINEPOINTS).GetIndexes();

        Point2f ntPoint1 = Point2f(iShape.GetAShape(dim*nostrilPoints[0]), iShape.GetAShape(dim*nostrilPoints[0]+1));
        Point2f ntPoint2 = Point2f(iShape.GetAShape(dim*nostrilPoints[1]), iShape.GetAShape(dim*nostrilPoints[1]+1));
        Point2f paPoint1 = Point2f(iShape.GetAShape(dim*pitchAxisPoints[0]), iShape.GetAShape(dim*pitchAxisPoints[0]+1));
        Point2f paPoint2 = Point2f(iShape.GetAShape(dim*pitchAxisPoints[1]), iShape.GetAShape(dim*pitchAxisPoints[1]+1));

        NNQ = ( (ntPoint1.y - paPoint1.y) + (ntPoint2.y - paPoint2.y) ) / 2.0f;
        ENQ = fabs(ntPoint1.x - paPoint1.x) > fabs(paPoint2.x - ntPoint2.x) ?
              fabs(ntPoint1.x - paPoint1.x + (ntPoint2.x - ntPoint1.x) / 2.0f) :
              fabs(paPoint2.x - ntPoint2.x + (ntPoint2.x - ntPoint1.x) / 2.0f);
        EQ = sqrt(ENQ*ENQ + NNQ*NNQ);
        NO = sqrt(2.0f)/2.0f*EQ;
    }
    else
    {
        unsigned int nosetipBottom = 0;
        vector<unsigned int> nosePoints         = iFaceParts.VO_GetOneFacePart(VO_FacePart::NOSE).GetIndexes();
        vector<unsigned int> midlinePoints      = iFaceParts.VO_GetOneFacePart(VO_FacePart::MIDLINEPOINTS).GetIndexes();
        vector<unsigned int> pitchAxisPoints    = iFaceParts.VO_GetOneFacePart(VO_FacePart::PITCHAXISLINEPOINTS).GetIndexes();

        for(unsigned int i = 0; i < nosePoints.size(); ++i)
        {
            for(unsigned int j = 0; j < midlinePoints.size(); ++j)
            {
                if(nosePoints[i] == midlinePoints[j])
                {
                    nosetipBottom = nosePoints[i];
                    break;
                }
            }
        }

        Point2f ntPoint  = Point2f(iShape.GetAShape(dim*nosetipBottom), iShape.GetAShape(dim*nosetipBottom+1));
        Point2f paPoint1 = Point2f(iShape.GetAShape(dim*pitchAxisPoints[0]), iShape.GetAShape(dim*pitchAxisPoints[0]+1));
        Point2f paPoint2 = Point2f(iShape.GetAShape(dim*pitchAxisPoints[1]), iShape.GetAShape(dim*pitchAxisPoints[1]+1));

        NNQ = ( (ntPoint.y - paPoint1.y) + (ntPoint.y - paPoint2.y) ) / 2.0f;
        ENQ = fabs(ntPoint.x - paPoint1.x) > fabs(paPoint2.x - ntPoint.x) ? fabs(ntPoint.x - paPoint1.x) : fabs(paPoint2.x - ntPoint.x);
        EQ = sqrt(ENQ*ENQ + NNQ*NNQ);
        NO = sqrt(2.0f)/2.0f*EQ;
    }

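    // asin() is only defined on [-1, 1]; for degenerate shapes NNQ/NO can fall
    // outside that range, hence the clamp to +/-90 degrees below.
    // Example: NNQ == 0 (nostrils level with the pitch axis) gives pitch == 0.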
    if( fabs(NNQ/NO) < 1.0f)
        pitch = asin ( NNQ / NO ) * safeDoubleToFloat(180.0 / CV_PI);
    else if (NNQ * NO < 0.0f)
        pitch = -90.0f;
    else
        pitch = 90.0f;

    return pitch;
}
// Estimate face absolute orientations
vector<float> CRecognitionAlgs::CalcAbsoluteOrientations(
    const VO_Shape& iShape2D,
    const VO_Shape& iShape3D,
    VO_Shape& oShape2D)
{
    assert (iShape2D.GetNbOfPoints() == iShape3D.GetNbOfPoints() );
    unsigned int NbOfPoints = iShape3D.GetNbOfPoints();
    Point3f pt3d;
    Point2f pt2d;
    float height1 = iShape2D.GetHeight();
    float height2 = iShape3D.GetHeight();
    VO_Shape tempShape2D = iShape2D;
    tempShape2D.Scale(height2/height1);

    //Create the model points
    std::vector<CvPoint3D32f> modelPoints;
    for(unsigned int i = 0; i < NbOfPoints; ++i)
    {
        pt3d = iShape3D.GetA3DPoint(i);
        modelPoints.push_back(cvPoint3D32f(pt3d.x, pt3d.y, pt3d.z));
    }

    //Create the image points
    std::vector<CvPoint2D32f> srcImagePoints;
    for(unsigned int i = 0; i < NbOfPoints; ++i)
    {
        pt2d = tempShape2D.GetA2DPoint(i);
        srcImagePoints.push_back(cvPoint2D32f(pt2d.x, pt2d.y));
    }

    //Create the POSIT object with the model points
    CvPOSITObject *positObject = cvCreatePOSITObject( &modelPoints[0], NbOfPoints );

    //Estimate the pose
    CvMatr32f rotation_matrix = new float[9];
    CvVect32f translation_vector = new float[3];
    CvTermCriteria criteria = cvTermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 100, 1.0e-4f);
    cvPOSIT( positObject, &srcImagePoints[0], FOCAL_LENGTH, criteria, rotation_matrix, translation_vector );

    //rotation_matrix to Euler angles, refer to VO_Shape::GetRotation
    float sin_beta  = -rotation_matrix[0 * 3 + 2];
    float tan_alpha = rotation_matrix[1 * 3 + 2] / rotation_matrix[2 * 3 + 2];
    float tan_gamma = rotation_matrix[0 * 3 + 1] / rotation_matrix[0 * 3 + 0];

    //Project the model points with the estimated pose
    oShape2D = tempShape2D;
    for ( unsigned int i=0; i < NbOfPoints; ++i )
    {
        pt3d.x = rotation_matrix[0] * modelPoints[i].x +
            rotation_matrix[1] * modelPoints[i].y +
            rotation_matrix[2] * modelPoints[i].z +
            translation_vector[0];
        pt3d.y = rotation_matrix[3] * modelPoints[i].x +
            rotation_matrix[4] * modelPoints[i].y +
            rotation_matrix[5] * modelPoints[i].z +
            translation_vector[1];
        pt3d.z = rotation_matrix[6] * modelPoints[i].x +
            rotation_matrix[7] * modelPoints[i].y +
            rotation_matrix[8] * modelPoints[i].z +
            translation_vector[2];
        if ( pt3d.z != 0 )
        {
            pt2d.x = FOCAL_LENGTH * pt3d.x / pt3d.z;
            pt2d.y = FOCAL_LENGTH * pt3d.y / pt3d.z;
        }
        // Note: if pt3d.z == 0, pt2d keeps its value from the previous iteration.
        oShape2D.SetA2DPoint(pt2d, i);
    }

    // Release POSIT resources
    cvReleasePOSITObject( &positObject );
    delete [] rotation_matrix;
    delete [] translation_vector;

    //return Euler angles
    vector<float> pos(3);
    pos[0] = atan(tan_alpha);    // yaw
    pos[1] = asin(sin_beta);     // pitch
    pos[2] = atan(tan_gamma);    // roll
    return pos;
}
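// A minimal usage sketch for CalcAbsoluteOrientations above. "shape2D" and
// "shape3D" stand for corresponding 2D/3D annotations and are illustrative;
// note that the returned angles are in radians (atan/asin results):
//
// VO_Shape projected2D;
// vector<float> angles = CRecognitionAlgs::CalcAbsoluteOrientations(shape2D, shape3D, projected2D);
// float yawDeg = angles[0] * safeDoubleToFloat(180.0 / CV_PI);   // radians -> degrees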
/**
 * @param	fd					- input		folder name
 * @param	fnIdx				- input		fitting result index
 * @param	deviation			- input		deviation from refShape to fittedShape
 * @param	ptDists				- input		per-point distances from ground truth
 * @param	ptErrorFreq			- input		for curve to display frequency -- point distance
 * @param	fittedShape			- input		fitting result
 * @param	gt_cP				- input		ground truth candidate points
 * @param	t_cP				- input		tested candidate points (left eye, right eye, mouth)
 * @param	fitTime				- input		fitting time cost
 */
void CRecognitionAlgs::SaveFittingResults(		const string& fd,
												const string& fnIdx,
												float deviation,
												vector<float>& ptDists,
												vector<float>& ptErrorFreq,
												const VO_Shape& fittedShape,
												cv::Point2f* gt_cP,
												cv::Point2f* t_cP,
												float fitTime)
{
    string fn;
    fn = fd + "/" + fnIdx + ".res";
    
    fstream fp;
    fp.open(fn.c_str (), ios::out);

	fp << "Error per point -- Distance from ground truth" << endl;
	for(unsigned int i = 0; i < ptDists.size(); ++i){
		fp << ptDists[i] << endl;
	}
	fp << endl;

	fp << "Total landmark error" << endl;
	float errSum = std::accumulate(ptDists.begin(),ptDists.end(),0.0f);
	fp << errSum << endl;
	fp << "Average landmark distance" << endl;
	fp << errSum / ptDists.size() << endl;
	fp << "Candidate point error (Left eye, Right eye, Mouth)" << endl;
	//messy distance, too lazy
	float le_dist = sqrt(pow(gt_cP[0].x - t_cP[0].x,2) + pow(gt_cP[0].y - t_cP[0].y,2));
	float re_dist = sqrt(pow(gt_cP[1].x - t_cP[1].x,2) + pow(gt_cP[1].y - t_cP[1].y,2));
	float m_dist = sqrt(pow(gt_cP[2].x - t_cP[2].x,2) + pow(gt_cP[2].y - t_cP[2].y,2));

	fp << le_dist << endl;
	fp << re_dist << endl;
	fp << m_dist << endl;
	fp << endl;
	fp << "Fitting time" << endl;
	fp << fitTime << endl;
	fp << endl;

    fp << "Total deviation" << endl << deviation << endl;				// deviation
    fp << "Point error -- Frequency" << endl;
    for(unsigned int i = 0; i < ptErrorFreq.size(); i++)
    {
        fp << ptErrorFreq[i] << " ";
    }
	fp << endl;
	fp << endl;
	fp << "Canidate points" << endl;
	fp << t_cP[0].x << " " << t_cP[0].y << endl;
	fp << t_cP[1].x << " " << t_cP[1].y << endl;
	fp << t_cP[2].x << " " << t_cP[2].y << endl;
	fp << "Fitted points" << endl;
	//output actual points along with error frequency
	unsigned int NbOfShapeDim   = fittedShape.GetNbOfDim();
	unsigned int NbOfPoints     = fittedShape.GetNbOfPoints();
	for(unsigned int i = 0; i < NbOfPoints; i++)
	{
		for(unsigned int j = 0; j < NbOfShapeDim; j++)
		{
			fp << fittedShape.GetAShape(j*NbOfPoints+i) << " ";
		}
		fp << endl;
	}
    fp << endl;
	
    fp.close();fp.clear();
}
/**
 * @author      YAO Wei, JIA Pei
 * @version     2010-05-20
 * @brief       Update the whole shape by finding the best profile-match offset for every point
 * @param       asmmodel        Input - the ASM model
 * @param       iImg            Input - image to be fitted
 * @param       ioShape         Input and output - the input and output shape
 * @param       iShapeInfo      Input - the shape information
 * @param       iMean           Input - mean profile
 * @param       iCovInverse     Input - covariance inverse
 * @param       offSetTolerance Input - offset tolerance, used to determine whether a point has converged or not
 * @param       profdim         Input - the profile dimension used when updating the shape.
 *                              Sometimes the trained data holds 4D profiles, but the user may only use 1D to test.
 * @note        Refer to "AAM Revisited", page 34, figure 13, in particular the steps listed there.
*/
int VO_FittingASMNDProfiles::UpdateShape(   const VO_ASMNDProfiles* asmmodel,
                                            const cv::Mat& iImg,
                                            VO_Shape& ioShape,
                                            const std::vector<VO_Shape2DInfo>& iShapeInfo,
                                            const std::vector< VO_Profile >& iMean,
                                            const std::vector< std::vector< cv::Mat_<float> > >& iCovInverse,
                                            unsigned int offSetTolerance,
                                            unsigned int profdim)
{
    int nGoodLandmarks = 0;
    std::vector<int> nBestOffset(profdim, 0);
    unsigned int NbOfPoints     = ioShape.GetNbOfPoints();
    unsigned int NbOfShapeDim   = ioShape.GetNbOfDim();
    unsigned int ProfileLength    = iMean[0].GetProfileLength();
    //std::vector<float> dists(NbOfPoints, 0.0f);
    cv::Point2f pt;

    // Take care of the 1st direction first.
    for (unsigned int i = 0; i < NbOfPoints; i++)
    {
        /////////////////////////////////////////////////////////////////////////////
        ///Calculate profile norm direction//////////////////////////////////////////
        /** Here, this is not compatible with 3D */
        cv::Point2f PrevPoint = ioShape.GetA2DPoint ( iShapeInfo[i].GetFrom() );
        cv::Point2f ThisPoint = ioShape.GetA2DPoint ( i );
        cv::Point2f NextPoint = ioShape.GetA2DPoint ( iShapeInfo[i].GetTo() );

        float deltaX, deltaY;
        float normX, normY;
        float sqrtsum;
        float bestXOffset, bestYOffset;

        // left side (connected from side)
        deltaX = ThisPoint.x - PrevPoint.x;
        deltaY = ThisPoint.y - PrevPoint.y;
        sqrtsum = sqrt ( deltaX*deltaX + deltaY*deltaY );
        if ( sqrtsum < FLT_EPSILON ) sqrtsum = 1.0f;
        deltaX /= sqrtsum; deltaY /= sqrtsum;         // Normalize
        // Firstly, normX normY record left side norm.
        normX = -deltaY;
        normY = deltaX;

        // right side (connected to side)
        deltaX = NextPoint.x - ThisPoint.x;
        deltaY = NextPoint.y - ThisPoint.y;
        sqrtsum = sqrt ( deltaX*deltaX + deltaY*deltaY );
        if ( sqrtsum < FLT_EPSILON ) sqrtsum = 1.0f;
        deltaX /= sqrtsum; deltaY /= sqrtsum;         // Normalize
        // Secondly, normX normY will average both left side and right side norm.
        normX += -deltaY;
        normY += deltaX;

        // Average left right side
        sqrtsum = sqrt ( normX*normX + normY*normY );
        if ( sqrtsum < FLT_EPSILON ) sqrtsum = 1.0f;
        normX /= sqrtsum;
        normY /= sqrtsum;                             // Final Normalize
        /////////////////////////////////////////////////////////////////////////////

        nBestOffset[0] = VO_FittingASMNDProfiles::VO_FindBestMatchingProfile1D( iImg,
                                                                                ThisPoint,
                                                                                iMean[i].Get1DimProfile(0),
                                                                                iCovInverse[i][0],
                                                                                ProfileLength,
                                                                                offSetTolerance,
                                                                                normX,
                                                                                normY);

        // set OutShape(iPoint) to best offset from current position
        // one dimensional profile: must move point along the whisker
        bestXOffset = nBestOffset[0] * normX;
        bestYOffset = nBestOffset[0] * normY;
        pt.x = ThisPoint.x + bestXOffset;
        pt.y = ThisPoint.y + bestYOffset;
        ioShape.SetA2DPoint(pt, i);
        //dists[i] = sqrt( pow( (double)bestXOffset, 2.0) + pow( (double)bestYOffset, 2.0) );

        //if (abs(nBestOffset[0]) <= offSetTolerance/2)
        if(profdim == 1)
        {
            if (abs(nBestOffset[0]) <= 1)
                nGoodLandmarks++;
        }
    }
    
    // JIA Pei's original contribution: now take care of the 2nd direction.
    if(profdim == 2)
    {
        for (unsigned int i = 0; i < NbOfPoints; i++)
        {
            /////////////////////////////////////////////////////////////////////////////
            ///Calculate profile norm direction//////////////////////////////////////////
            /** Here, this is not compatible with 3D */
            cv::Point2f PrevPoint = ioShape.GetA2DPoint ( iShapeInfo[i].GetFrom() );
            cv::Point2f ThisPoint = ioShape.GetA2DPoint ( i );
            cv::Point2f NextPoint = ioShape.GetA2DPoint ( iShapeInfo[i].GetTo() );

            float deltaX, deltaY;
            float normX, normY;
            float tangentX, tangentY;
            float sqrtsum;
            float bestXOffset, bestYOffset;

            // left side (connected from side)
            deltaX = ThisPoint.x - PrevPoint.x;
            deltaY = ThisPoint.y - PrevPoint.y;
            sqrtsum = sqrt ( deltaX*deltaX + deltaY*deltaY );
            if ( sqrtsum < FLT_EPSILON ) sqrtsum = 1.0f;
            deltaX /= sqrtsum; deltaY /= sqrtsum;         // Normalize
            // Firstly, normX normY record left side norm.
            normX = -deltaY;
            normY = deltaX;

            // right side (connected to side)
            deltaX = NextPoint.x - ThisPoint.x;
            deltaY = NextPoint.y - ThisPoint.y;
            sqrtsum = sqrt ( deltaX*deltaX + deltaY*deltaY );
            if ( sqrtsum < FLT_EPSILON ) sqrtsum = 1.0f;
            deltaX /= sqrtsum; deltaY /= sqrtsum;         // Normalize
            // Secondly, normX normY will average both left side and right side norm.
            normX += -deltaY;
            normY += deltaX;

            // Average left right side
            sqrtsum = sqrt ( normX*normX + normY*normY );
            if ( sqrtsum < FLT_EPSILON ) sqrtsum = 1.0f;
            normX /= sqrtsum;
            normY /= sqrtsum;                             // Final Normalize
            tangentX    = -normY;
            tangentY    = normX;                          // Final tangent
            /////////////////////////////////////////////////////////////////////////////

            nBestOffset[1] = VO_FittingASMNDProfiles::VO_FindBestMatchingProfile1D( iImg,
                                                                                    ThisPoint,
                                                                                    iMean[i].Get1DimProfile(1),
                                                                                    iCovInverse[i][1],
                                                                                    ProfileLength,
                                                                                    1,    // in tangent direction, offset = 1
                                                                                    tangentX,
                                                                                    tangentY);

            // set OutShape(iPoint) to best offset from current position
            // one dimensional profile: must move point along the whisker
            bestXOffset = nBestOffset[1] * tangentX;
            bestYOffset = nBestOffset[1] * tangentY;
            pt.x = ThisPoint.x + bestXOffset;
            pt.y = ThisPoint.y + bestYOffset;
            ioShape.SetA2DPoint(pt, i);
            //dists[i] += sqrt( pow((double)bestXOffset, 2.0) + pow((double)bestYOffset, 2.0) );

            //if (abs(nBestOffset) <= offSetTolerance/2)
            if (abs(nBestOffset[0]) <= 1 && abs(nBestOffset[1]) <= 1)
                nGoodLandmarks++;
        }
    }

    return nGoodLandmarks;
}
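// The profile-normal computation above is duplicated between the 1D pass and
// the tangent pass; a helper like this sketch could factor it out
// ("CalcLandmarkNormal" is an illustrative name, not part of the original API):
static cv::Point2f CalcLandmarkNormal( const cv::Point2f& prev,
                                       const cv::Point2f& curr,
                                       const cv::Point2f& next)
{
    // Unit normal of the "connected from" edge prev->curr
    float dx = curr.x - prev.x, dy = curr.y - prev.y;
    float len = sqrt(dx*dx + dy*dy);
    if (len < FLT_EPSILON) len = 1.0f;
    float normX = -dy/len, normY = dx/len;

    // Add the unit normal of the "connected to" edge curr->next
    dx = next.x - curr.x;  dy = next.y - curr.y;
    len = sqrt(dx*dx + dy*dy);
    if (len < FLT_EPSILON) len = 1.0f;
    normX += -dy/len;  normY += dx/len;

    // Renormalize the averaged direction
    len = sqrt(normX*normX + normY*normY);
    if (len < FLT_EPSILON) len = 1.0f;
    return cv::Point2f(normX/len, normY/len);
}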
/**
* @brief    Whether the tracked shape really describes a face.
*           If both eyes and the mouth can be detected in the windows
*           predicted by the shape (prior knowledge of the face layout),
*           we regard the shape as correctly describing a face.
* @param    fd          - input     face detection algorithm object
* @param    iImg        - input     input image
* @param    iShape      - input     the current tracked shape
* @param    iShapeInfo  - input     shape info
* @param    iFaceParts  - input     face parts
* @return   bool    whether the tracked shape is acceptable
*/
bool CRecognitionAlgs::EvaluateFaceTrackedByCascadeDetection(
    const CFaceDetectionAlgs* fd,
    const Mat& iImg,
    const VO_Shape& iShape,
    const vector<VO_Shape2DInfo>& iShapeInfo, 
    const VO_FaceParts& iFaceParts)
{
    double t = (double)cvGetTickCount();

    unsigned int ImgWidth       = iImg.cols;
    unsigned int ImgHeight      = iImg.rows;

    vector<unsigned int> leftEyePoints      = iFaceParts.VO_GetOneFacePart(VO_FacePart::LEFTEYE).GetIndexes();
    vector<unsigned int> rightEyePoints     = iFaceParts.VO_GetOneFacePart(VO_FacePart::RIGHTEYE).GetIndexes();
    vector<unsigned int> lipOuterLinerPoints= iFaceParts.VO_GetOneFacePart(VO_FacePart::LIPOUTERLINE).GetIndexes();

    VO_Shape leftEyeShape       = iShape.GetSubShape(leftEyePoints);
    VO_Shape rightEyeShape      = iShape.GetSubShape(rightEyePoints);
    VO_Shape lipOuterLinerShape = iShape.GetSubShape(lipOuterLinerPoints);

    float dolEye = 12.0f;
    float dolMouth = 12.0f;

    unsigned int possibleLeftEyeMinX    = 0.0f > (leftEyeShape.MinX() - dolEye) ? 0: (int)(leftEyeShape.MinX() - dolEye);
    unsigned int possibleLeftEyeMinY    = 0.0f > (leftEyeShape.MinY() - dolEye) ? 0: (int)(leftEyeShape.MinY() - dolEye);
    unsigned int possibleLeftEyeMaxX    = (leftEyeShape.MaxX() + dolEye) > ImgWidth ? ImgWidth : (int)(leftEyeShape.MaxX() + dolEye);
    unsigned int possibleLeftEyeMaxY    = (leftEyeShape.MaxY() + dolEye) > ImgHeight ? ImgHeight : (int)(leftEyeShape.MaxY() + dolEye);
    unsigned int possibleLeftEyeWidth   = possibleLeftEyeMaxX - possibleLeftEyeMinX;
    unsigned int possibleLeftEyeHeight  = possibleLeftEyeMaxY - possibleLeftEyeMinY;
    unsigned int possibleRightEyeMinX   = 0.0f > (rightEyeShape.MinX() - dolEye) ? 0: (int)(rightEyeShape.MinX() - dolEye);
    unsigned int possibleRightEyeMinY   = 0.0f > (rightEyeShape.MinY() - dolEye) ? 0: (int)(rightEyeShape.MinY() - dolEye);
    unsigned int possibleRightEyeMaxX   = (rightEyeShape.MaxX() + dolEye) > ImgWidth ? ImgWidth : (int)(rightEyeShape.MaxX() + dolEye);
    unsigned int possibleRightEyeMaxY   = (rightEyeShape.MaxY() + dolEye) > ImgHeight ? ImgHeight : (int)(rightEyeShape.MaxY() + dolEye);
    unsigned int possibleRightEyeWidth  = possibleRightEyeMaxX - possibleRightEyeMinX;
    unsigned int possibleRightEyeHeight = possibleRightEyeMaxY - possibleRightEyeMinY;
    unsigned int possibleMouthMinX      = 0.0f > (lipOuterLinerShape.MinX() - dolMouth) ? 0: (int)(lipOuterLinerShape.MinX() - dolMouth);
    unsigned int possibleMouthMinY      = 0.0f > (lipOuterLinerShape.MinY() - dolMouth) ? 0: (int)(lipOuterLinerShape.MinY() - dolMouth);
    unsigned int possibleMouthMaxX      = (lipOuterLinerShape.MaxX() + dolMouth) > ImgWidth ? ImgWidth : (int)(lipOuterLinerShape.MaxX() + dolMouth);
    unsigned int possibleMouthMaxY      = (lipOuterLinerShape.MaxY() + dolMouth) > ImgHeight ? ImgHeight : (int)(lipOuterLinerShape.MaxY() + dolMouth);
    unsigned int possibleMouthWidth     = possibleMouthMaxX - possibleMouthMinX;
    unsigned int possibleMouthHeight    = possibleMouthMaxY - possibleMouthMinY;

    Rect LeftEyePossibleWindow  = Rect( possibleLeftEyeMinX, possibleLeftEyeMinY, possibleLeftEyeWidth, possibleLeftEyeHeight );
    Rect RightEyePossibleWindow = Rect( possibleRightEyeMinX, possibleRightEyeMinY, possibleRightEyeWidth, possibleRightEyeHeight );
    Rect MouthPossibleWindow    = Rect( possibleMouthMinX, possibleMouthMinY, possibleMouthWidth, possibleMouthHeight );
    Rect CurrentWindow          = Rect( 0, 0, iImg.cols, iImg.rows );
    Rect DetectedLeftEyeWindow, DetectedRightEyeWindow, DetectedMouthWindow;

    bool LeftEyeDetected    = const_cast<CFaceDetectionAlgs*>(fd)->VO_FacePartDetection ( iImg, LeftEyePossibleWindow, DetectedLeftEyeWindow, VO_FacePart::LEFTEYE);
    bool RightEyeDetected   = const_cast<CFaceDetectionAlgs*>(fd)->VO_FacePartDetection ( iImg, RightEyePossibleWindow, DetectedRightEyeWindow, VO_FacePart::RIGHTEYE );
    bool MouthDetected      = const_cast<CFaceDetectionAlgs*>(fd)->VO_FacePartDetection ( iImg, MouthPossibleWindow, DetectedMouthWindow, VO_FacePart::LIPOUTERLINE );

    t = ((double)cvGetTickCount() - t)
        / (cvGetTickFrequency()*1000.);
    cout << "Detection Confirmation time cost: " << t << " millisec" << endl;

    return LeftEyeDetected && RightEyeDetected && MouthDetected;
}
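// The three padded search windows above follow the same clamp-to-image pattern;
// a helper like this sketch could build each one from a sub-shape
// ("MakeSearchWindow" is an illustrative name; requires <algorithm> for std::max/min):
static cv::Rect MakeSearchWindow( const VO_Shape& part, float margin,
                                  int imgWidth, int imgHeight)
{
    int minX = std::max(0, (int)(part.MinX() - margin));
    int minY = std::max(0, (int)(part.MinY() - margin));
    int maxX = std::min(imgWidth,  (int)(part.MaxX() + margin));
    int maxY = std::min(imgHeight, (int)(part.MaxY() + margin));
    // e.g. MakeSearchWindow(leftEyeShape, 12.0f, iImg.cols, iImg.rows)
    return cv::Rect(minX, minY, maxX - minX, maxY - minY);
}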
/**
* @param    shape1  - input     shape1
* @param    shape2  - input     shape2
* @return   the shape distance
*/
float CRecognitionAlgs::ShapeDistance(  const VO_Shape& shape1,
                                        const VO_Shape& shape2)
{
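    // const_cast is needed here presumably because VO_Shape::operator- is not
    // declared const in this codebase.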
    VO_Shape shapediff = const_cast<VO_Shape&>(shape1) - shape2;
    return shapediff.GetShapeNorm();
}
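// A usage sketch: ShapeDistance can drive a simple frame-to-frame stability test
// during tracking ("prevShape", "currShape" and the 2.0f threshold are
// illustrative, not from the original code):
//
// bool stable = CRecognitionAlgs::ShapeDistance(prevShape, currShape) < 2.0f;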