Example #1
File: P3P.cpp Project: sfabel/qtopencv
P3P::P3P( vector< Point3f > ObjPoints, vector< Point2i > ImgPoints, vector< double > initialGuess, double fl)
    : ObjectPoints(ObjPoints), ImagePoints(ImgPoints)
{
    // allocate the Armadillo field that will hold the three image points
    ImgPts.set_size(3);
    field< vec> objPts(3);

    vec pxPitch = ".0075 .00825 .0075"; // pixel-pitch parameters (Armadillo string initialisation)
    vec initial;
    initial.set_size(3);

    for (size_t i=0; i<3; ++i)
    {
        ImgPts(i).set_size(2);
        ImgPts(i)(0) = ImagePoints[i].x;
        ImgPts(i)(1) = ImagePoints[i].y;
    }

    for (size_t i=0; i<3; ++i)
    {
        objPts(i).set_size(3);
        objPts(i)(0) = ObjectPoints[i].x;
        objPts(i)(1) = ObjectPoints[i].y;
        objPts(i)(2) = ObjectPoints[i].z;
    }

    initial(0) = initialGuess.at(0);
    initial(1) = initialGuess.at(1);
    initial(2) = initialGuess.at(2);

    vec err_lim = "1e-55 1e-55 1e-55"; // error/convergence limits handed to the solver

    plm = new PoseLM( ImgPts, objPts, pxPitch, fl, initial, err_lim );
}
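A minimal usage sketch for the constructor above; the point coordinates, the initial guess and the focal length are illustrative placeholders, not values taken from the project:

// Hypothetical driver code; all numeric values are made up for illustration.
vector< Point3f > objPoints { Point3f(0, 0, 0), Point3f(50, 0, 0), Point3f(0, 50, 0) };
vector< Point2i > imgPoints { Point2i(320, 240), Point2i(420, 238), Point2i(322, 340) };
vector< double >  guess     { 500.0, 500.0, 500.0 };   // three-element initial guess

P3P p3p( objPoints, imgPoints, guess, 25.0 /* focal length */ );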
Example #2
File: P3P.cpp Project: sfabel/qtopencv
QMatrix4x4 P3P::getRegister(vector< Point3f > ObjPoints, vector< Point3f > estimatedPoints)
{
    field< vec > objPts(3);
    field< vec > estPts(3);

    for (size_t i=0; i<3; ++i)
    {
        objPts(i).set_size(3);
        objPts(i)(0) = ObjPoints[i].x;
        objPts(i)(1) = ObjPoints[i].y;
        objPts(i)(2) = ObjPoints[i].z;
    }

    for (size_t i=0; i<3; ++i)
    {
        estPts(i).set_size(3);
        estPts(i)(0) = estimatedPoints[i].x;
        estPts(i)(1) = estimatedPoints[i].y;
        estPts(i)(2) = estimatedPoints[i].z;
    }

    estPts.print("P3P with LM Result:");

    Reg3D A(objPts,estPts);
    mat44 homTransform = A.getTransformation();

    // QMatrix4x4 takes its 16 entries in row-major order, matching the
    // row-by-row read-out of the Armadillo 4x4 matrix below.
    QMatrix4x4 transMat(homTransform.at(0,0), homTransform.at(0,1), homTransform.at(0,2), homTransform.at(0,3),
                        homTransform.at(1,0), homTransform.at(1,1), homTransform.at(1,2), homTransform.at(1,3),
                        homTransform.at(2,0), homTransform.at(2,1), homTransform.at(2,2), homTransform.at(2,3),
                        homTransform.at(3,0), homTransform.at(3,1), homTransform.at(3,2), homTransform.at(3,3));

    return transMat;
}
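A hedged usage sketch for getRegister, assuming a P3P instance like the p3p object from the sketch above; the "estimated" points would normally come from the Levenberg-Marquardt pose solve, and the values here are placeholders:

// Hypothetical call; coordinates are made up for illustration.
vector< Point3f > objPoints { Point3f(0, 0, 0), Point3f(50, 0, 0), Point3f(0, 50, 0) };
vector< Point3f > estPoints { Point3f(2, 1, 480), Point3f(51, 2, 479), Point3f(1, 52, 481) };

QMatrix4x4 T = p3p.getRegister( objPoints, estPoints );   // 4x4 homogeneous transform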
void getHomographyRtMatrixAndRotationCenter(
        std::vector<cv::Point2f> corners, const cv::Size& imageSize,
        const cv::Size& boardSize, const double& squareSize,
        const int &horizon, const int &deadZone,
        const cv::Mat& cameraMatrix,
        const cv::Mat& distortionCoefficients,
        const double &chessboardHeight, cv::Mat& homography,
        cv::Point2d &rotationCenter, cv::Mat &rtMatrix) {
    // Size of the chessboard pattern on the output image
    cv::Size imageBoardSize((boardSize.width - 1) * squareSize,
            (boardSize.height - 1) * squareSize);

    std::vector<cv::Point2f> objPts(4), imgPts(4);
    // Describe where the chessboard corners should land on the output image
    objPts[0].x = imageSize.width / 2 - imageBoardSize.width / 2;
    objPts[0].y = imageSize.height - horizon - deadZone
            - imageBoardSize.height;
    objPts[1].x = imageSize.width / 2 + imageBoardSize.width / 2;
    objPts[1].y = imageSize.height - horizon - deadZone
            - imageBoardSize.height;
    objPts[2].x = imageSize.width / 2 - imageBoardSize.width / 2;
    objPts[2].y = imageSize.height - horizon - deadZone;
    objPts[3].x = imageSize.width / 2 + imageBoardSize.width / 2;
    objPts[3].y = imageSize.height - horizon - deadZone;
    // The only four corners used to compute the homography
    imgPts[0] = corners[0];
    imgPts[1] = corners[boardSize.width - 1];
    imgPts[2] = corners[(boardSize.height - 1) * boardSize.width];
    imgPts[3] = corners[(boardSize.height) * boardSize.width - 1];

    cv::Mat rvec, tvec;
    // Create the 3D chessboard model (one point per inner corner)
    std::vector<cv::Point3d> mCorners;
    for (int i = 0; i < boardSize.height; ++i) {
        for (int j = 0; j < boardSize.width; ++j) {
            mCorners.push_back(
                    cv::Point3d(i * squareSize, j * squareSize,
                            chessboardHeight));
        }
    }
    std::vector<cv::Point3d> p3pcorn(4);
    p3pcorn[0] = mCorners[0];
    p3pcorn[1] = mCorners[boardSize.width - 1];
    p3pcorn[2] = mCorners[(boardSize.height - 1) * boardSize.width];
    p3pcorn[3] = mCorners[(boardSize.height) * boardSize.width - 1];
    // Get rvec and tvec describing where the chessboard's first corner lies in the camera's coordinate system
    cv::solvePnP(p3pcorn, imgPts, cameraMatrix,
            distortionCoefficients, rvec, tvec, false, CV_ITERATIVE);

    cv::Mat rot;
    cv::Rodrigues(rvec, rot);

    rtMatrix = invRtTransformMatrix(rot, tvec);

    {
        cv::Mat invT = getT(rtMatrix);

        // Third column of the inverted transform; clone it so zeroing its z
        // component below does not modify the rtMatrix output parameter.
        cv::Mat z = rtMatrix(cv::Range(0, 3), cv::Range(2, 3)).clone();

        z.at<double>(2) = 0;
        cv::Mat zN = z / norm(cv::Vec3d(z));

        double gamma = atan2(zN.at<double>(1), zN.at<double>(0))
                - atan2(0, 1);
        gamma *= 180 / CV_PI;
        cv::Mat rMatN = cv::getRotationMatrix2D(objPts[0], -gamma, 1);
        cv::Mat rMatP = cv::getRotationMatrix2D(objPts[0], gamma, 1);

        cv::Mat homN;

        std::vector<cv::Point2f> rPointsN;
        cv::transform(objPts, rPointsN, rMatN);
        homN = cv::getPerspectiveTransform(rPointsN, imgPts);

        // VERY IMPORTANT: in bird's eye view, the x and y axes are swapped!
        rotationCenter = rPointsN[0]
                + cv::Point2f(rtMatrix.at<double>(1, 3),
                        rtMatrix.at<double>(0, 3));
        std::vector<cv::Point2f> rcenter, rrcenter;
        rcenter.push_back(rotationCenter);
        cv::transform(rcenter, rrcenter, rMatN);
        rotationCenter = rrcenter.front();

        homography = homN.clone();

    }

}
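A sketch of how this function might be driven, assuming the corners come from cv::findChessboardCorners and the intrinsics from a prior calibration; the board geometry, horizon, dead zone and square size below are placeholder values:

// Illustrative caller; cameraMatrix/distortionCoefficients are assumed to be
// loaded from an earlier calibration, and every numeric parameter is a placeholder.
cv::Mat cameraMatrix, distortionCoefficients;
cv::Mat frame = cv::imread("frame.png");
cv::Size boardSize(9, 6);                         // inner-corner count of the pattern

std::vector<cv::Point2f> corners;
if (cv::findChessboardCorners(frame, boardSize, corners)) {
    cv::Mat homography, rtMatrix;
    cv::Point2d rotationCenter;
    getHomographyRtMatrixAndRotationCenter(corners, frame.size(), boardSize,
            25.0 /*squareSize*/, 100 /*horizon*/, 10 /*deadZone*/,
            cameraMatrix, distortionCoefficients, 0.0 /*chessboardHeight*/,
            homography, rotationCenter, rtMatrix);
    // homography maps the rectified ground-plane view to the camera image,
    // so a bird's-eye image can be produced with cv::warpPerspective and
    // the cv::WARP_INVERSE_MAP flag.
}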