Example #1
0
/**
 *@author     JIA Pei
 *
 *@brief      Obtain the first shape instance
 *
 *@methods  - 1) three points' pairs are used to calculate affine transform
 *          - 2) use my own align transform - COG -> rotation -> scaling...
 *          - 3) SVD:  Y = AX
 *                x'     a00 a01 a02     x
 *               (y') = (a10 a11 a12) * (y) ,
 *                1      a20 a21 a22     1
 *               where  a00 = cos(theta), a01 = -sin(theta)
 *                      a10 = sin(theta), a11 = cons(theta)
 *                      a02 = tx, a12 = ty
 *                      a20 = a21 = 0; a22 = 1
 *            However, the above values are not guaranteed during calculation
**/
cv::Mat_<float> VO_Fitting2DSM::VO_FirstEstimationBySingleWarp(const VO_FaceParts& iFaceParts,
        const VO_Shape& iShape,
        const cv::Point2f& ptLeftEyeCenter,
        const cv::Point2f& ptRightEyeCenter,
        const cv::Point2f& ptMouthCenter )
{
    const unsigned int NbOfKeyPoints = 3;
    cv::Mat srcKeyPoints(1, NbOfKeyPoints, CV_32FC2);   // key points predicted by the model shape
    cv::Mat dstKeyPoints(1, NbOfKeyPoints, CV_32FC2);   // key points detected in the image

    // Source: left eye center, right eye center, mouth center of the model shape.
    cv::Point2f keyPoint;
    VO_KeyPoint::CalcFaceKeyPoint(keyPoint, iShape, iFaceParts, VO_KeyPoint::LEFTEYECENTER);
    srcKeyPoints.at<cv::Point2f>(0,0) = keyPoint;
    VO_KeyPoint::CalcFaceKeyPoint(keyPoint, iShape, iFaceParts, VO_KeyPoint::RIGHTEYECENTER);
    srcKeyPoints.at<cv::Point2f>(0,1) = keyPoint;
    VO_KeyPoint::CalcFaceKeyPoint(keyPoint, iShape, iFaceParts, VO_KeyPoint::MOUTHCENTER);
    srcKeyPoints.at<cv::Point2f>(0,2) = keyPoint;

    // Destination: the corresponding detected locations, in the same order.
    dstKeyPoints.at<cv::Point2f>(0,0) = ptLeftEyeCenter;
    dstKeyPoints.at<cv::Point2f>(0,1) = ptRightEyeCenter;
    dstKeyPoints.at<cv::Point2f>(0,2) = ptMouthCenter;

    // Explained by JIA Pei. For exactly 3 point pairs, "getAffineTransform" would also work;
    // "estimateRigidTransform" with fullAffine=true handles 3 or more pairs uniformly.
    cv::Mat_<float> matWarping = cv::estimateRigidTransform( srcKeyPoints, dstKeyPoints, true );

    return matWarping;
}
// A partial (similarity) affine transform has 4 DOF, so two point
// correspondences determine it exactly; verify it is recovered.
TEST_P(EstimateAffinePartial2D, test2Points)
{
    // repeat with many random transformations
    for (size_t iter = 0; iter < 500; ++iter)
    {
        const Mat groundTruth = rngPartialAffMat();

        // two distinct source points
        Mat src(1, 2, CV_32FC2);
        Mat dst(1, 2, CV_32FC2);
        src.at<Point2f>(0) = Point2f( rngIn(1,2), rngIn(5,6) );
        src.at<Point2f>(1) = Point2f( rngIn(3,4), rngIn(3,4) );

        transform(src, dst, groundTruth);

        vector<uchar> inliers;
        const Mat estimated = estimateAffinePartial2D(src, dst, inliers, GetParam() /* method */);

        // the estimate must match the ground truth ...
        EXPECT_NEAR(0., cvtest::norm(estimated, groundTruth, NORM_INF), 1e-3);

        // ... and both correspondences must be classified as inliers
        EXPECT_EQ(countNonZero(inliers), 2);
    }
}
// test conversion from other datatypes than float
TEST_P(EstimateAffinePartial2D, testConversion)
{
    Mat groundTruth = rngPartialAffMat();
    // integer transform so that integer input points map exactly
    groundTruth.convertTo(groundTruth, CV_32S);

    std::vector<Point> src(3), dst(3);
    src[0] = Point2f( rngIn(1,2), rngIn(5,6) );
    src[1] = Point2f( rngIn(3,4), rngIn(3,4) );
    src[2] = Point2f( rngIn(1,2), rngIn(3,4) );

    transform(src, dst, groundTruth);

    vector<uchar> inliers;
    const Mat estimated = estimateAffinePartial2D(src, dst, inliers, GetParam() /* method */);

    ASSERT_FALSE(estimated.empty());

    // need to convert back before comparing against the (double) estimate
    groundTruth.convertTo(groundTruth, CV_64F);
    EXPECT_NEAR(0., cvtest::norm(estimated, groundTruth, NORM_INF), 1e-3);

    // all three correspondences must be inliers
    EXPECT_EQ(countNonZero(inliers), 3);
}
// Performance of 2D affine estimation under a controlled outlier ratio.
PERF_TEST_P( EstimateAffine, EstimateAffine2D, ESTIMATE_PARAMS )
{
    const AffineParams params = GetParam();
    const int n             = get<0>(params);
    const double confidence = get<1>(params);
    const int method        = get<2>(params);
    const size_t refining   = get<3>(params);

    Mat groundTruth(2, 3, CV_64F);
    cv::randu(groundTruth, -2., 2.);

    // LMEDS can't handle more than 50% outliers (by design),
    // so keep 3/5 inliers for it and 2/5 otherwise.
    const int numInliers = (method == LMEDS) ? 3*n/5 : 2*n/5;
    const float shiftOutl = 15.f;
    const float noiseLevel = 20.f;

    Mat src(1, n, CV_32FC2);
    Mat dst(1, n, CV_32FC2);

    randu(src, 0., 100.);
    transform(src, dst, groundTruth);

    // corrupt the tail of dst: constant shift plus uniform noise
    Mat outliers = dst.colRange(numInliers, n);
    outliers.reshape(1) += shiftOutl;

    Mat noise(outliers.size(), outliers.type());
    randu(noise, 0., noiseLevel);
    outliers += noise;

    Mat estimated;
    vector<uchar> inliers(n);

    warmup(inliers, WARMUP_WRITE);
    warmup(src, WARMUP_READ);
    warmup(dst, WARMUP_READ);

    TEST_CYCLE()
    {
        estimated = estimateAffine2D(src, dst, inliers, method, 3, 2000, confidence, refining);
    }

    // accuracy is covered by the regression tests
    SANITY_CHECK_NOTHING();
}
// Recover a partial affine transform from many correspondences,
// a controlled fraction of which are corrupted into outliers.
TEST_P(EstimateAffinePartial2D, testNPoints)
{
    // repeat with many random transformations
    for (size_t iter = 0; iter < 500; ++iter)
    {
        Mat groundTruth = rngPartialAffMat();

        const int method = GetParam();
        const int n = 100;
        // LMEDS can't handle more than 50% outliers (by design),
        // so keep 3/5 inliers for it and 2/5 otherwise.
        const int m = (method == LMEDS) ? 3*n/5 : 2*n/5;
        const float shiftOutl = 15.f;
        const float noiseLevel = 20.f;

        Mat src(1, n, CV_32FC2);
        Mat dst(1, n, CV_32FC2);

        randu(src, 0., 100.);
        transform(src, dst, groundTruth);

        // corrupt the tail of dst: constant shift plus uniform noise
        Mat outliers = dst.colRange(m, n);
        outliers.reshape(1) += shiftOutl;

        Mat noise(outliers.size(), outliers.type());
        randu(noise, 0., noiseLevel);
        outliers += noise;

        vector<uchar> inlierMask;
        Mat estimated = estimateAffinePartial2D(src, dst, inlierMask, method);

        EXPECT_FALSE(estimated.empty());

        EXPECT_NEAR(0., cvtest::norm(estimated, groundTruth, NORM_INF), 1e-4);

        // exactly the first m correspondences (the uncorrupted ones)
        // must be flagged as inliers
        const bool inliersGood = count(inlierMask.begin(), inlierMask.end(), 1) == m &&
            m == accumulate(inlierMask.begin(), inlierMask.begin() + m, 0);

        EXPECT_TRUE(inliersGood);
    }
}