Example #1
void myConvexityDefects( InputArray _points, InputArray _hull, OutputArray _defects ) {
    Mat points = _points.getMat();
    int ptnum = points.checkVector(2, CV_32S);
    CV_Assert( ptnum > 3 );
    Mat hull = _hull.getMat();
    CV_Assert( hull.checkVector(1, CV_32S) > 2 );

    std::vector<CvConvexityDefect> seq;
    convexityDefects(_points, _hull, seq);

    if( seq.size() == 0 ) {
        _defects.release();
        return;
    }

    _defects.create(seq.size(), 1, CV_32SC4);
    Mat defects = _defects.getMat();

    auto it = seq.begin();
    CvPoint* ptorg = (CvPoint*)points.data;

    for( unsigned i = 0;  i < seq.size();  ++i, ++it ) {
        CvConvexityDefect& d = *it;
        int idx0 = (int)(d.start - ptorg);
        int idx1 = (int)(d.end - ptorg);
        int idx2 = (int)(d.depth_point - ptorg);
        CV_Assert( 0 <= idx0 && idx0 < ptnum );
        CV_Assert( 0 <= idx1 && idx1 < ptnum );
        CV_Assert( 0 <= idx2 && idx2 < ptnum );
        CV_Assert( d.depth >= 0 );
        int idepth = cvRound(d.depth*256);
        defects.at<Vec4i>(i) = Vec4i(idx0, idx1, idx2, idepth);
    }
}
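Almost every snippet in this collection starts with Mat::checkVector, which returns the element count when the matrix can be read as a 1D vector with the requested channel count (and, optionally, depth), and -1 otherwise. A minimal standalone sketch of that behaviour, assuming OpenCV 3.x-style headers (the demo code is ours, not part of the library):

#include <opencv2/core.hpp>
#include <cstdio>
#include <vector>

int main()
{
    // ten integer points; the Mat header views them as a 10x1 CV_32SC2 matrix
    std::vector<cv::Point> pts(10, cv::Point(1, 2));
    cv::Mat m(pts);

    std::printf("%d\n", m.checkVector(2, CV_32S)); // 10: valid 2-channel CV_32S vector
    std::printf("%d\n", m.checkVector(3, CV_32S)); // -1: wrong channel count
    std::printf("%d\n", m.checkVector(2, CV_32F)); // -1: wrong depth
    return 0;
}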
Example #2
void cv::convertPointsHomogeneous( const Mat& src, vector<Point3f>& dst )
{
    int srccn = src.checkVector(2) >= 0 ? 2 : src.checkVector(4) >= 0 ? 4 : -1;
    CV_Assert( srccn > 0 && (src.depth() == CV_32F || src.depth() == CV_32S));
    
    dst.resize(src.cols*src.rows*src.channels()/srccn);
    CvMat _src = src, _dst = Mat(dst);
    cvConvertPointsHomogeneous(&_src, &_dst);
}
Example #3
int solveP3P( InputArray _opoints, InputArray _ipoints,
              InputArray _cameraMatrix, InputArray _distCoeffs,
              OutputArrayOfArrays _rvecs, OutputArrayOfArrays _tvecs, int flags) {
    CV_INSTRUMENT_REGION();

    Mat opoints = _opoints.getMat(), ipoints = _ipoints.getMat();
    int npoints = std::max(opoints.checkVector(3, CV_32F), opoints.checkVector(3, CV_64F));
    CV_Assert( npoints == 3 && npoints == std::max(ipoints.checkVector(2, CV_32F), ipoints.checkVector(2, CV_64F)) );
    CV_Assert( flags == SOLVEPNP_P3P || flags == SOLVEPNP_AP3P );

    Mat cameraMatrix0 = _cameraMatrix.getMat();
    Mat distCoeffs0 = _distCoeffs.getMat();
    Mat cameraMatrix = Mat_<double>(cameraMatrix0);
    Mat distCoeffs = Mat_<double>(distCoeffs0);

    Mat undistortedPoints;
    undistortPoints(ipoints, undistortedPoints, cameraMatrix, distCoeffs);
    std::vector<Mat> Rs, ts;

    int solutions = 0;
    if (flags == SOLVEPNP_P3P)
    {
        p3p P3Psolver(cameraMatrix);
        solutions = P3Psolver.solve(Rs, ts, opoints, undistortedPoints);
    }
    else if (flags == SOLVEPNP_AP3P)
    {
        ap3p P3Psolver(cameraMatrix);
        solutions = P3Psolver.solve(Rs, ts, opoints, undistortedPoints);
    }

    if (solutions == 0) {
        return 0;
    }

    if (_rvecs.needed()) {
        _rvecs.create(solutions, 1, CV_64F);
    }

    if (_tvecs.needed()) {
        _tvecs.create(solutions, 1, CV_64F);
    }

    for (int i = 0; i < solutions; i++) {
        Mat rvec;
        Rodrigues(Rs[i], rvec);
        _tvecs.getMatRef(i) = ts[i];
        _rvecs.getMatRef(i) = rvec;
    }

    return solutions;
}
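A minimal usage sketch for solveP3P, assuming OpenCV >= 3.3, a pinhole camera with no distortion and exactly three 3D-2D correspondences (all numbers and the demo helper name are illustrative); the solver returns up to four candidate poses:

#include <opencv2/calib3d.hpp>
#include <vector>

void demoSolveP3P()
{
    std::vector<cv::Point3f> obj = { {0,0,0}, {1,0,0}, {0,1,0} };        // object points
    std::vector<cv::Point2f> img = { {320,240}, {420,245}, {315,340} };  // their projections
    cv::Mat K = (cv::Mat_<double>(3,3) << 800, 0, 320,  0, 800, 240,  0, 0, 1);

    std::vector<cv::Mat> rvecs, tvecs;
    int n = cv::solveP3P(obj, img, K, cv::noArray(), rvecs, tvecs, cv::SOLVEPNP_P3P);
    // n candidate solutions; each rvecs[i]/tvecs[i] is a 3x1 CV_64F vector
    (void)n;
}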
Example #4
void cv::computeCorrespondEpilines( InputArray _points, int whichImage,
                                    InputArray _Fmat, OutputArray _lines )
{
    Mat points = _points.getMat(), F = _Fmat.getMat();
    int npoints = points.checkVector(2);
    if( npoints < 0 )
        npoints = points.checkVector(3);
    CV_Assert( npoints >= 0 && (points.depth() == CV_32F || points.depth() == CV_32S));
    
    _lines.create(npoints, 1, CV_32FC3, -1, true);
    CvMat c_points = points, c_lines = _lines.getMat(), c_F = F;
    cvComputeCorrespondEpilines(&c_points, whichImage, &c_F, &c_lines);
}
bool cv::solvePnP( InputArray _opoints, InputArray _ipoints,
                  InputArray _cameraMatrix, InputArray _distCoeffs,
                  OutputArray _rvec, OutputArray _tvec, bool useExtrinsicGuess, int flags )
{
    Mat opoints = _opoints.getMat(), ipoints = _ipoints.getMat();
    int npoints = std::max(opoints.checkVector(3, CV_32F), opoints.checkVector(3, CV_64F));
    CV_Assert( npoints >= 0 && npoints == std::max(ipoints.checkVector(2, CV_32F), ipoints.checkVector(2, CV_64F)) ); 
    _rvec.create(3, 1, CV_64F);
    _tvec.create(3, 1, CV_64F);
    Mat cameraMatrix = _cameraMatrix.getMat(), distCoeffs = _distCoeffs.getMat();

    if (flags == CV_EPNP)
    {
        cv::Mat undistortedPoints;
        cv::undistortPoints(ipoints, undistortedPoints, cameraMatrix, distCoeffs);
        epnp PnP(cameraMatrix, opoints, undistortedPoints);

        cv::Mat R, rvec = _rvec.getMat(), tvec = _tvec.getMat();
        PnP.compute_pose(R, tvec);
        cv::Rodrigues(R, rvec);
        return true;
    }
    else if (flags == CV_P3P)
    {
        CV_Assert( npoints == 4 );
        cv::Mat undistortedPoints;
        cv::undistortPoints(ipoints, undistortedPoints, cameraMatrix, distCoeffs);
        p3p P3Psolver(cameraMatrix);

        cv::Mat R, rvec = _rvec.getMat(), tvec = _tvec.getMat();
        bool result = P3Psolver.solve(R, tvec, opoints, undistortedPoints);
        if (result)
            cv::Rodrigues(R, rvec);
        return result;
    }
    else if (flags == CV_ITERATIVE)
    {
        CvMat c_objectPoints = opoints, c_imagePoints = ipoints;
        CvMat c_cameraMatrix = cameraMatrix, c_distCoeffs = distCoeffs;
        CvMat c_rvec = _rvec.getMat(), c_tvec = _tvec.getMat();
        cvFindExtrinsicCameraParams2(&c_objectPoints, &c_imagePoints, &c_cameraMatrix,
                                     c_distCoeffs.rows*c_distCoeffs.cols ? &c_distCoeffs : 0,
                                     &c_rvec, &c_tvec, useExtrinsicGuess );
        return true;
    }
    else
        CV_Error(CV_StsBadArg, "The flags argument must be one of CV_ITERATIVE, CV_P3P or CV_EPNP");
    return false;
}
Example #6
// calculates length of a curve (e.g. contour perimeter)
double cv::arcLength( InputArray _curve, bool is_closed )
{
    Mat curve = _curve.getMat();
    int count = curve.checkVector(2);
    int depth = curve.depth();
    CV_Assert( count >= 0 && (depth == CV_32F || depth == CV_32S));
    double perimeter = 0;

    int i;

    if( count <= 1 )
        return 0.;

    bool is_float = depth == CV_32F;
    int last = is_closed ? count-1 : 0;
    const Point* pti = curve.ptr<Point>();
    const Point2f* ptf = curve.ptr<Point2f>();

    Point2f prev = is_float ? ptf[last] : Point2f((float)pti[last].x,(float)pti[last].y);

    for( i = 0; i < count; i++ )
    {
        Point2f p = is_float ? ptf[i] : Point2f((float)pti[i].x,(float)pti[i].y);
        float dx = p.x - prev.x, dy = p.y - prev.y;
        perimeter += std::sqrt(dx*dx + dy*dy);

        prev = p;
    }

    return perimeter;
}
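For reference, a hedged usage sketch of arcLength: the perimeter of a closed 10x10 square given as an integer contour (CV_32S and CV_32F point vectors are both accepted, per the assertion above):

#include <opencv2/imgproc.hpp>
#include <vector>

double demoArcLength()
{
    std::vector<cv::Point> square = { {0,0}, {10,0}, {10,10}, {0,10} };
    return cv::arcLength(square, /*is_closed=*/true); // 40.0 for this square
}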
Example #7
void cv::fitLine( InputArray _points, OutputArray _line, int distType,
                 double param, double reps, double aeps )
{
    Mat points = _points.getMat();
    
    float linebuf[6]={0.f};
    int npoints2 = points.checkVector(2, -1, false);
    int npoints3 = points.checkVector(3, -1, false);
    
    CV_Assert( npoints2 >= 0 || npoints3 >= 0 );
    
    if( points.depth() != CV_32F || !points.isContinuous() )
    {
        Mat temp;
        points.convertTo(temp, CV_32F);
        points = temp;
    }
    
    if( npoints2 >= 0 )
        fitLine2D( points.ptr<Point2f>(), npoints2, distType,
                   (float)param, (float)reps, (float)aeps, linebuf);
    else
        fitLine3D( points.ptr<Point3f>(), npoints3, distType,
                   (float)param, (float)reps, (float)aeps, linebuf);
    
    Mat(npoints2 >= 0 ? 4 : 6, 1, CV_32F, linebuf).copyTo(_line);
}
Example #8
// area of a whole sequence
double cv::contourArea( InputArray _contour, bool oriented )
{
    Mat contour = _contour.getMat();
    int npoints = contour.checkVector(2);
    int depth = contour.depth();
    CV_Assert(npoints >= 0 && (depth == CV_32F || depth == CV_32S));

    if( npoints == 0 )
        return 0.;

    double a00 = 0;
    bool is_float = depth == CV_32F;
    const Point* ptsi = (const Point*)contour.data;
    const Point2f* ptsf = (const Point2f*)contour.data;
    Point2f prev = is_float ? ptsf[npoints-1] : Point2f((float)ptsi[npoints-1].x, (float)ptsi[npoints-1].y);

    for( int i = 0; i < npoints; i++ )
    {
        Point2f p = is_float ? ptsf[i] : Point2f((float)ptsi[i].x, (float)ptsi[i].y);
        a00 += (double)prev.x * p.y - (double)prev.y * p.x;
        prev = p;
    }

    a00 *= 0.5;
    if( !oriented )
        a00 = fabs(a00);

    return a00;
}
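A usage sketch: the absolute area of a simple closed polygon; passing oriented=true would keep the sign that encodes the traversal direction.

#include <opencv2/imgproc.hpp>
#include <vector>

double demoContourArea()
{
    std::vector<cv::Point> square = { {0,0}, {10,0}, {10,10}, {0,10} };
    // default oriented=false: absolute value of the signed (shoelace) area
    return cv::contourArea(square); // 100.0
}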
Example #9
void cv::convertPointsToHomogeneous( InputArray _src, OutputArray _dst )
{
    Mat src = _src.getMat();
    int npoints = src.checkVector(2), cn = 2;
    if( npoints < 0 )
    {
        npoints = src.checkVector(3);
        if( npoints >= 0 )
            cn = 3;
    }
    CV_Assert( npoints >= 0 && (src.depth() == CV_32F || src.depth() == CV_32S));
    
    _dst.create(npoints, 1, CV_MAKETYPE(CV_32F, cn+1));
    CvMat c_src = src, c_dst = _dst.getMat();
    cvConvertPointsHomogeneous(&c_src, &c_dst);
}
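A usage sketch, assuming 2D float input; each (x, y) becomes (x, y, 1) in the 3-channel output.

#include <opencv2/calib3d.hpp>
#include <vector>

void demoConvertToHomogeneous()
{
    std::vector<cv::Point2f> src = { {1.f, 2.f}, {3.f, 4.f} };
    std::vector<cv::Point3f> dst;
    cv::convertPointsToHomogeneous(src, dst); // dst = { (1,2,1), (3,4,1) }
}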
Example #10
int cv::estimateAffine3D(InputArray _from, InputArray _to,
                         OutputArray _out, OutputArray _inliers,
                         double param1, double param2)
{
    Mat from = _from.getMat(), to = _to.getMat();
    int count = from.checkVector(3, CV_32F);
    
    CV_Assert( count >= 0 && to.checkVector(3, CV_32F) == count );

    _out.create(3, 4, CV_64F);
    Mat out = _out.getMat();
    
    _inliers.create(count, 1, CV_8U, -1, true);
    Mat inliers = _inliers.getMat();
    inliers = Scalar::all(1);

    Mat dFrom, dTo;
    from.convertTo(dFrom, CV_64F);
    to.convertTo(dTo, CV_64F);
    
    CvMat F3x4 = out;
    CvMat mask  = inliers;
    CvMat m1 = dFrom;
    CvMat m2 = dTo;
    
    const double epsilon = numeric_limits<double>::epsilon();        
    param1 = param1 <= 0 ? 3 : param1;
    param2 = (param2 < epsilon) ? 0.99 : (param2 > 1 - epsilon) ? 0.99 : param2;
            
    return Affine3DEstimator().runRANSAC(&m1, &m2, &F3x4, &mask, param1, param2 );    
}
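A hedged usage sketch: given two corresponding 3D point sets (CV_32FC3, at least four non-degenerate correspondences, supplied by the caller), the function estimates a 3x4 double affine transform and reports per-point inliers.

#include <opencv2/calib3d.hpp>
#include <vector>

void demoEstimateAffine3D(const std::vector<cv::Point3f>& from,
                          const std::vector<cv::Point3f>& to)
{
    cv::Mat affine;              // 3x4 CV_64F affine transform [A | t]
    std::vector<uchar> inliers;  // 1 where a correspondence fits the model
    int ok = cv::estimateAffine3D(from, to, affine, inliers, 3.0, 0.99);
    (void)ok;                    // non-zero on success
}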
Example #11
static void collectCalibrationData( InputArrayOfArrays objectPoints,
                                    InputArrayOfArrays imagePoints1,
                                    InputArrayOfArrays imagePoints2,
                                    Mat& objPtMat, Mat& imgPtMat1, Mat* imgPtMat2,
                                    Mat& npoints )
{
    int nimages = (int)objectPoints.total();
    int i, j = 0, ni = 0, total = 0;
    CV_Assert(nimages > 0 && nimages == (int)imagePoints1.total() &&
        (!imgPtMat2 || nimages == (int)imagePoints2.total()));

	cout << " Number of Frames:  " << nimages << endl;
    for( i = 0; i < nimages; i++ )
    {
		cout << endl << "Object Points: " << endl;
		printMatOBJ(objectPoints.getMat(i));
		cout << endl << "Image Points: " << endl;
		printMatIMG(imagePoints1.getMat(i));
        ni = objectPoints.getMat(i).checkVector(3, CV_32F);
        CV_Assert( ni >= 0 );
        total += ni;
    }

    npoints.create(1, (int)nimages, CV_32S);
    objPtMat.create(1, (int)total, CV_32FC3);
    imgPtMat1.create(1, (int)total, CV_32FC2);
    Point2f* imgPtData2 = 0;

    if( imgPtMat2 )
    {
        imgPtMat2->create(1, (int)total, CV_32FC2);
        imgPtData2 = imgPtMat2->ptr<Point2f>();
    }

    Point3f* objPtData = objPtMat.ptr<Point3f>();
    Point2f* imgPtData1 = imgPtMat1.ptr<Point2f>();

    for( i = 0; i < nimages; i++, j += ni )
    {
        Mat objpt = objectPoints.getMat(i);
        Mat imgpt1 = imagePoints1.getMat(i);
        ni = objpt.checkVector(3, CV_32F);
        int ni1 = imgpt1.checkVector(2, CV_32F);
        CV_Assert( ni > 0 && ni == ni1 );
        npoints.at<int>(i) = ni;
        memcpy( objPtData + j, objpt.data, ni*sizeof(objPtData[0]) );
        memcpy( imgPtData1 + j, imgpt1.data, ni*sizeof(imgPtData1[0]) );

        if( imgPtData2 )
        {
            Mat imgpt2 = imagePoints2.getMat(i);
            int ni2 = imgpt2.checkVector(2, CV_32F);
            CV_Assert( ni == ni2 );
            memcpy( imgPtData2 + j, imgpt2.data, ni*sizeof(imgPtData2[0]) );
        }
    }
}
Example #12
void cv::convertPointsHomogeneous( const Mat& src, vector<Point2f>& dst )
{
    CV_Assert(src.checkVector(3) >= 0 &&
              (src.depth() == CV_32F || src.depth() == CV_32S));
    
    dst.resize(src.cols*src.rows*src.channels()/3);
    CvMat _src = Mat(src), _dst = Mat(dst);
    cvConvertPointsHomogeneous(&_src, &_dst);
}
Example #13
void cv::solvePnP( InputArray _opoints, InputArray _ipoints,
                  InputArray _cameraMatrix, InputArray _distCoeffs,
                  OutputArray _rvec, OutputArray _tvec, bool useExtrinsicGuess )
{
    Mat opoints = _opoints.getMat(), ipoints = _ipoints.getMat();
    int npoints = std::max(opoints.checkVector(3, CV_32F), opoints.checkVector(3, CV_64F));
    CV_Assert( npoints >= 0 && npoints == std::max(ipoints.checkVector(2, CV_32F), ipoints.checkVector(2, CV_64F)) );
    
    _rvec.create(3, 1, CV_64F);
    _tvec.create(3, 1, CV_64F);
    Mat cameraMatrix = _cameraMatrix.getMat(), distCoeffs = _distCoeffs.getMat();
    CvMat c_objectPoints = opoints, c_imagePoints = ipoints;
    CvMat c_cameraMatrix = cameraMatrix, c_distCoeffs = distCoeffs;
    CvMat c_rvec = _rvec.getMat(), c_tvec = _tvec.getMat();
    cvFindExtrinsicCameraParams2(&c_objectPoints, &c_imagePoints, &c_cameraMatrix,
                                 c_distCoeffs.rows*c_distCoeffs.cols ? &c_distCoeffs : 0,
                                 &c_rvec, &c_tvec, useExtrinsicGuess );
}
Example #14
void cv::computeCorrespondEpilines( const Mat& points, int whichImage,
                                    const Mat& F, vector<Vec3f>& lines )
{
    CV_Assert(points.checkVector(2) >= 0 &&
              (points.depth() == CV_32F || points.depth() == CV_32S));
    
    lines.resize(points.cols*points.rows*points.channels()/2);
    CvMat _points = points, _lines = Mat(lines), matF = F;
    cvComputeCorrespondEpilines(&_points, whichImage, &matF, &_lines);
}
Example #15
static Mat _findFundamentalMat( const Mat& points1, const Mat& points2,
                               int method, double param1, double param2,
                               vector<uchar>* mask )
{
    CV_Assert(points1.checkVector(2) >= 0 && points2.checkVector(2) >= 0 &&
              (points1.depth() == CV_32F || points1.depth() == CV_32S) &&
              points1.depth() == points2.depth());
    
    Mat F(3, 3, CV_64F);
    CvMat _pt1 = Mat(points1), _pt2 = Mat(points2);
    CvMat matF = F, _mask, *pmask = 0;
    if( mask )
    {
        mask->resize(points1.cols*points1.rows*points1.channels()/2);
        pmask = &(_mask = cvMat(1, (int)mask->size(), CV_8U, (void*)&(*mask)[0]));
    }
    int n = cvFindFundamentalMat( &_pt1, &_pt2, &matF, method, param1, param2, pmask );
    if( n <= 0 )
        F = Scalar(0);
    return F;
}
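For context, a hedged sketch of the public cv::findFundamentalMat interface that helpers like this back (OpenCV 3.x argument order; the demo function name is ours): eight or more correspondences, RANSAC, and an optional inlier mask.

#include <opencv2/calib3d.hpp>
#include <vector>

cv::Mat demoFindFundamentalMat(const std::vector<cv::Point2f>& pts1,
                               const std::vector<cv::Point2f>& pts2)
{
    std::vector<uchar> mask; // 1 = inlier, 0 = outlier
    cv::Mat F = cv::findFundamentalMat(pts1, pts2, cv::FM_RANSAC, 3.0, 0.99, mask);
    return F;                // 3x3 CV_64F; all zeros when estimation fails
}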
        void calcPosition( InputArray _tvecs, InputArray _rvecs, InputArray _pts,
                           InputArray _cameraMatrices, InputArray _distortionMatrices,
                           OutputArray _state, OutputArray _covariance )
        {
            Ptr< PositionCalculator > p_pc = PositionCalculator::create();

            std::vector< Mat > tvecs, rvecs;
            _tvecs.getMatVector( tvecs );
            _rvecs.getMatVector( rvecs );

            CV_Assert( tvecs.size() >= 2 );
            CV_Assert( tvecs.size() == rvecs.size() );

            Mat pts = _pts.getMat();

            CV_Assert( ( tvecs.size() == pts.checkVector( 2, CV_32F, true ) ) );

            std::vector< Mat > camera_m, dist_m;
            if ( _cameraMatrices.kind() == _InputArray::STD_VECTOR_MAT )
            {
                _cameraMatrices.getMatVector( camera_m );
                CV_Assert( tvecs.size() == camera_m.size() );
            }
            else
            {
                camera_m.push_back( _cameraMatrices.getMat() );
                CV_Assert( ( camera_m[0].rows == 3 ) && ( camera_m[0].cols == 3 ) );
            }

            if ( _distortionMatrices.kind() == _InputArray::STD_VECTOR_MAT )
            {
                _distortionMatrices.getMatVector( dist_m );
                CV_Assert( tvecs.size() == dist_m.size() );
            }
            else
            {
                dist_m.push_back( _distortionMatrices.getMat() );
                CV_Assert( ( ( dist_m[0].rows == 5 ) && ( dist_m[0].cols == 1 ) ) || dist_m[0].empty() );
            }

            Mat camera = camera_m[0];
            Mat dist = dist_m[0];
            for ( size_t i = 0; i < tvecs.size(); ++i )
            {
                if ( camera_m.size() == tvecs.size() )
                    camera = camera_m[i];
                if ( dist_m.size() == tvecs.size() )
                    dist = dist_m[i];
                p_pc->addMeasurement( tvecs[i], rvecs[i], pts.at< Point2f >( i ), camera, dist );
            }
            p_pc->computeState( _state, _covariance );
        }
Example #17
void cv::cornerSubPix( InputArray _image, InputOutputArray _corners,
                       Size winSize, Size zeroZone,
                       TermCriteria criteria )
{
    Mat corners = _corners.getMat();
    int ncorners = corners.checkVector(2);
    CV_Assert( ncorners >= 0 && corners.depth() == CV_32F );
    Mat image = _image.getMat();
    CvMat c_image = image;
    
    cvFindCornerSubPix( &c_image, (CvPoint2D32f*)corners.data, ncorners,
                        winSize, zeroZone, criteria );
}
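A typical usage sketch: detect corners with goodFeaturesToTrack on a grayscale image (assumed CV_8UC1) and refine them in place; the corner array must hold CV_32F 2D points, as the assertion above requires.

#include <opencv2/imgproc.hpp>
#include <vector>

void demoCornerSubPix(const cv::Mat& gray) // gray: CV_8UC1 input image
{
    std::vector<cv::Point2f> corners;
    cv::goodFeaturesToTrack(gray, corners, 100, 0.01, 10);
    if (corners.empty())
        return;

    cv::cornerSubPix(gray, corners, cv::Size(5, 5), cv::Size(-1, -1),
                     cv::TermCriteria(cv::TermCriteria::EPS + cv::TermCriteria::COUNT, 30, 0.01));
    // corners now hold sub-pixel refined positions
}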
Example #18
bool isContourConvex( InputArray _contour )
{
    Mat contour = _contour.getMat();
    int total = contour.checkVector(2), depth = contour.depth();
    CV_Assert(total >= 0 && (depth == CV_32F || depth == CV_32S));

    if( total == 0 )
        return false;

    return depth == CV_32S ?
    isContourConvex_(contour.ptr<Point>(), total ) :
    isContourConvex_(contour.ptr<Point2f>(), total );
}
Example #19
cv::RotatedRect cv::minAreaRect( InputArray _points )
{
    Mat hull;
    Point2f out[3];
    RotatedRect box;

    convexHull(_points, hull, true, true);

    if( hull.depth() != CV_32F )
    {
        Mat temp;
        hull.convertTo(temp, CV_32F);
        hull = temp;
    }

    int n = hull.checkVector(2);
    const Point2f* hpoints = hull.ptr<Point2f>();

    if( n > 2 )
    {
        rotatingCalipers( hpoints, n, CALIPERS_MINAREARECT, (float*)out );
        box.center.x = out[0].x + (out[1].x + out[2].x)*0.5f;
        box.center.y = out[0].y + (out[1].y + out[2].y)*0.5f;
        box.size.width = (float)std::sqrt((double)out[1].x*out[1].x + (double)out[1].y*out[1].y);
        box.size.height = (float)std::sqrt((double)out[2].x*out[2].x + (double)out[2].y*out[2].y);
        box.angle = (float)atan2( (double)out[1].y, (double)out[1].x );
    }
    else if( n == 2 )
    {
        box.center.x = (hpoints[0].x + hpoints[1].x)*0.5f;
        box.center.y = (hpoints[0].y + hpoints[1].y)*0.5f;
        double dx = hpoints[1].x - hpoints[0].x;
        double dy = hpoints[1].y - hpoints[0].y;
        box.size.width = (float)std::sqrt(dx*dx + dy*dy);
        box.size.height = 0;
        box.angle = (float)atan2( dy, dx );
    }
    else
    {
        if( n == 1 )
            box.center = hpoints[0];
    }

    box.angle = (float)(box.angle*180/CV_PI);
    return box;
}
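A usage sketch: fit the minimum-area rotated rectangle to a contour and read back its corners (the exact corner order and the angle convention vary between OpenCV versions, so treat them as illustrative).

#include <opencv2/imgproc.hpp>
#include <vector>

void demoMinAreaRect(const std::vector<cv::Point>& contour)
{
    cv::RotatedRect box = cv::minAreaRect(contour);
    cv::Point2f corners[4];
    box.points(corners); // the four corners of the rotated rectangle
    // box.center, box.size and box.angle describe the same rectangle
}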
Example #20
// calculates length of a curve (e.g. contour perimeter)
double cv::arcLength( InputArray _curve, bool is_closed )
{
    Mat curve = _curve.getMat();
    int count = curve.checkVector(2);
    int depth = curve.depth();
    CV_Assert( count >= 0 && (depth == CV_32F || depth == CV_32S));
    double perimeter = 0;

    int i, j = 0;
    const int N = 16;
    float buf[N];

    if( count <= 1 )
        return 0.;

    bool is_float = depth == CV_32F;
    int last = is_closed ? count-1 : 0;
    const Point* pti = (const Point*)curve.data;
    const Point2f* ptf = (const Point2f*)curve.data;

    Point2f prev = is_float ? ptf[last] : Point2f((float)pti[last].x,(float)pti[last].y);

    for( i = 0; i < count; i++ )
    {
        Point2f p = is_float ? ptf[i] : Point2f((float)pti[i].x,(float)pti[i].y);
        float dx = p.x - prev.x, dy = p.y - prev.y;
        buf[j] = dx*dx + dy*dy;

        if( ++j == N || i == count-1 )
        {
            Mat bufmat(1, j, CV_32F, buf);
            sqrt(bufmat, bufmat);
            for( ; j > 0; j-- )
                perimeter += buf[j-1];
        }
        prev = p;
    }

    return perimeter;
}
Example #21
    /* Post: fill _err with projection errors */
    void computeError( InputArray _m1, InputArray _m2, InputArray _model, OutputArray _err ) const
    {

        Mat opoints = _m1.getMat(), ipoints = _m2.getMat(), model = _model.getMat();

        int i, count = opoints.checkVector(3);
        Mat _rvec = model.col(0);
        Mat _tvec = model.col(1);


        Mat projpoints(count, 2, CV_32FC1);
        projectPoints(opoints, _rvec, _tvec, cameraMatrix, distCoeffs, projpoints);

        const Point2f* ipoints_ptr = ipoints.ptr<Point2f>();
        const Point2f* projpoints_ptr = projpoints.ptr<Point2f>();

        _err.create(count, 1, CV_32FC1);
        float* err = _err.getMat().ptr<float>();

        for ( i = 0; i < count; ++i)
            err[i] = (float)norm( Matx21f(ipoints_ptr[i] - projpoints_ptr[i]), NORM_L2SQR );

    }
Example #22
bool solvePnP( InputArray _opoints, InputArray _ipoints,
               InputArray _cameraMatrix, InputArray _distCoeffs,
               OutputArray _rvec, OutputArray _tvec, bool useExtrinsicGuess, int flags )
{
    CV_INSTRUMENT_REGION()

    Mat opoints = _opoints.getMat(), ipoints = _ipoints.getMat();
    int npoints = std::max(opoints.checkVector(3, CV_32F), opoints.checkVector(3, CV_64F));
    CV_Assert( npoints >= 0 && npoints == std::max(ipoints.checkVector(2, CV_32F), ipoints.checkVector(2, CV_64F)) );

    Mat rvec, tvec;
    if( flags != SOLVEPNP_ITERATIVE )
        useExtrinsicGuess = false;

    if( useExtrinsicGuess )
    {
        int rtype = _rvec.type(), ttype = _tvec.type();
        Size rsize = _rvec.size(), tsize = _tvec.size();
        CV_Assert( (rtype == CV_32F || rtype == CV_64F) &&
                   (ttype == CV_32F || ttype == CV_64F) );
        CV_Assert( (rsize == Size(1, 3) || rsize == Size(3, 1)) &&
                   (tsize == Size(1, 3) || tsize == Size(3, 1)) );
    }
    else
    {
        int mtype = CV_64F;
        // use CV_32F if all PnP inputs are CV_32F and outputs are empty
        if (_ipoints.depth() == _cameraMatrix.depth() && _ipoints.depth() == _opoints.depth() &&
            _rvec.empty() && _tvec.empty())
            mtype = _opoints.depth();

        _rvec.create(3, 1, mtype);
        _tvec.create(3, 1, mtype);
    }
    rvec = _rvec.getMat();
    tvec = _tvec.getMat();

    Mat cameraMatrix0 = _cameraMatrix.getMat();
    Mat distCoeffs0 = _distCoeffs.getMat();
    Mat cameraMatrix = Mat_<double>(cameraMatrix0);
    Mat distCoeffs = Mat_<double>(distCoeffs0);
    bool result = false;

    if (flags == SOLVEPNP_EPNP || flags == SOLVEPNP_DLS || flags == SOLVEPNP_UPNP)
    {
        Mat undistortedPoints;
        undistortPoints(ipoints, undistortedPoints, cameraMatrix, distCoeffs);
        epnp PnP(cameraMatrix, opoints, undistortedPoints);

        Mat R;
        PnP.compute_pose(R, tvec);
        Rodrigues(R, rvec);
        result = true;
    }
    else if (flags == SOLVEPNP_P3P)
    {
        CV_Assert( npoints == 4);
        Mat undistortedPoints;
        undistortPoints(ipoints, undistortedPoints, cameraMatrix, distCoeffs);
        p3p P3Psolver(cameraMatrix);

        Mat R;
        result = P3Psolver.solve(R, tvec, opoints, undistortedPoints);
        if (result)
            Rodrigues(R, rvec);
    }
    else if (flags == SOLVEPNP_AP3P)
    {
        CV_Assert( npoints == 4);
        Mat undistortedPoints;
        undistortPoints(ipoints, undistortedPoints, cameraMatrix, distCoeffs);
        ap3p P3Psolver(cameraMatrix);

        Mat R;
        result = P3Psolver.solve(R, tvec, opoints, undistortedPoints);
        if (result)
            Rodrigues(R, rvec);
    }
    else if (flags == SOLVEPNP_ITERATIVE)
    {
        CvMat c_objectPoints = opoints, c_imagePoints = ipoints;
        CvMat c_cameraMatrix = cameraMatrix, c_distCoeffs = distCoeffs;
        CvMat c_rvec = rvec, c_tvec = tvec;
        cvFindExtrinsicCameraParams2(&c_objectPoints, &c_imagePoints, &c_cameraMatrix,
                                     c_distCoeffs.rows*c_distCoeffs.cols ? &c_distCoeffs : 0,
                                     &c_rvec, &c_tvec, useExtrinsicGuess );
        result = true;
    }
    /*else if (flags == SOLVEPNP_DLS)
    {
        Mat undistortedPoints;
        undistortPoints(ipoints, undistortedPoints, cameraMatrix, distCoeffs);

        dls PnP(opoints, undistortedPoints);

        Mat R, rvec = _rvec.getMat(), tvec = _tvec.getMat();
        bool result = PnP.compute_pose(R, tvec);
        if (result)
            Rodrigues(R, rvec);
        return result;
    }
    else if (flags == SOLVEPNP_UPNP)
    {
        upnp PnP(cameraMatrix, opoints, ipoints);

        Mat R, rvec = _rvec.getMat(), tvec = _tvec.getMat();
        PnP.compute_pose(R, tvec);
        Rodrigues(R, rvec);
        return true;
    }*/
    else
        CV_Error(CV_StsBadArg, "The flags argument must be one of SOLVEPNP_ITERATIVE, SOLVEPNP_P3P, SOLVEPNP_AP3P, SOLVEPNP_EPNP, SOLVEPNP_DLS or SOLVEPNP_UPNP");
    return result;
}
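A usage sketch for the public entry point above, assuming four coplanar object points, a known camera matrix and no distortion (all numbers are illustrative):

#include <opencv2/calib3d.hpp>
#include <vector>

void demoSolvePnP()
{
    std::vector<cv::Point3f> obj = { {0,0,0}, {1,0,0}, {1,1,0}, {0,1,0} };
    std::vector<cv::Point2f> img = { {310,230}, {410,235}, {415,335}, {305,330} };
    cv::Mat K = (cv::Mat_<double>(3,3) << 800, 0, 320,  0, 800, 240,  0, 0, 1);

    cv::Mat rvec, tvec;
    bool ok = cv::solvePnP(obj, img, K, cv::noArray(), rvec, tvec,
                           false, cv::SOLVEPNP_ITERATIVE);
    // on success, rvec/tvec are 3x1 CV_64F: a Rodrigues rotation vector and a translation
    (void)ok;
}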
Example #23
void convexityDefects( InputArray _points, InputArray _hull, OutputArray _defects )
{
    CV_INSTRUMENT_REGION()

    Mat points = _points.getMat();
    int i, j = 0, npoints = points.checkVector(2, CV_32S);
    CV_Assert( npoints >= 0 );

    if( npoints <= 3 )
    {
        _defects.release();
        return;
    }

    Mat hull = _hull.getMat();
    int hpoints = hull.checkVector(1, CV_32S);
    CV_Assert( hpoints > 0 );

    const Point* ptr = points.ptr<Point>();
    const int* hptr = hull.ptr<int>();
    std::vector<Vec4i> defects;
    if ( hpoints < 3 ) //if hull consists of one or two points, contour is always convex
    {
        _defects.release();
        return;
    }

    // 1. recognize co-orientation of the contour and its hull
    bool rev_orientation = ((hptr[1] > hptr[0]) + (hptr[2] > hptr[1]) + (hptr[0] > hptr[2])) != 2;

    // 2. cycle through points and hull, compute defects
    int hcurr = hptr[rev_orientation ? 0 : hpoints-1];
    CV_Assert( 0 <= hcurr && hcurr < npoints );

    for( i = 0; i < hpoints; i++ )
    {
        int hnext = hptr[rev_orientation ? hpoints - i - 1 : i];
        CV_Assert( 0 <= hnext && hnext < npoints );

        Point pt0 = ptr[hcurr], pt1 = ptr[hnext];
        double dx0 = pt1.x - pt0.x;
        double dy0 = pt1.y - pt0.y;
        double scale = dx0 == 0 && dy0 == 0 ? 0. : 1./std::sqrt(dx0*dx0 + dy0*dy0);

        int defect_deepest_point = -1;
        double defect_depth = 0;
        bool is_defect = false;
        j=hcurr;
        for(;;)
        {
            // go through the points until the next hull point is reached
            j++;
            j &= j >= npoints ? 0 : -1;
            if( j == hnext )
                break;

            // compute distance from current point to hull edge
            double dx = ptr[j].x - pt0.x;
            double dy = ptr[j].y - pt0.y;
            double dist = fabs(-dy0*dx + dx0*dy) * scale;

            if( dist > defect_depth )
            {
                defect_depth = dist;
                defect_deepest_point = j;
                is_defect = true;
            }
        }

        if( is_defect )
        {
            int idepth = cvRound(defect_depth*256);
            defects.push_back(Vec4i(hcurr, hnext, defect_deepest_point, idepth));
        }

        hcurr = hnext;
    }

    Mat(defects).copyTo(_defects);
}
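A usage sketch of the public API: compute the hull as indices (returnPoints=false), then the defects; each Vec4i holds (start_index, end_index, farthest_point_index, fixpt_depth), with the depth stored as depth*256 exactly as in the loop above.

#include <opencv2/imgproc.hpp>
#include <vector>

void demoConvexityDefects(const std::vector<cv::Point>& contour) // needs more than 3 points
{
    std::vector<int> hull;
    cv::convexHull(contour, hull, false, /*returnPoints=*/false); // hull as point indices

    std::vector<cv::Vec4i> defects;
    cv::convexityDefects(contour, hull, defects);

    for (size_t i = 0; i < defects.size(); i++)
    {
        float depth = defects[i][3] / 256.f; // back to pixel units
        (void)depth;
    }
}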
Example #24
double cv::pointPolygonTest( InputArray _contour, Point2f pt, bool measureDist )
{
    CV_INSTRUMENT_REGION();

    double result = 0;
    Mat contour = _contour.getMat();
    int i, total = contour.checkVector(2), counter = 0;
    int depth = contour.depth();
    CV_Assert( total >= 0 && (depth == CV_32S || depth == CV_32F));

    bool is_float = depth == CV_32F;
    double min_dist_num = FLT_MAX, min_dist_denom = 1;
    Point ip(cvRound(pt.x), cvRound(pt.y));

    if( total == 0 )
        return measureDist ? -DBL_MAX : -1;

    const Point* cnt = contour.ptr<Point>();
    const Point2f* cntf = (const Point2f*)cnt;

    if( !is_float && !measureDist && ip.x == pt.x && ip.y == pt.y )
    {
        // the fastest "purely integer" branch
        Point v0, v = cnt[total-1];

        for( i = 0; i < total; i++ )
        {
            v0 = v;
            v = cnt[i];

            if( (v0.y <= ip.y && v.y <= ip.y) ||
               (v0.y > ip.y && v.y > ip.y) ||
               (v0.x < ip.x && v.x < ip.x) )
            {
                if( ip.y == v.y && (ip.x == v.x || (ip.y == v0.y &&
                    ((v0.x <= ip.x && ip.x <= v.x) || (v.x <= ip.x && ip.x <= v0.x)))) )
                    return 0;
                continue;
            }

            int64 dist = static_cast<int64>(ip.y - v0.y)*(v.x - v0.x)
                       - static_cast<int64>(ip.x - v0.x)*(v.y - v0.y);
            if( dist == 0 )
                return 0;
            if( v.y < v0.y )
                dist = -dist;
            counter += dist > 0;
        }

        result = counter % 2 == 0 ? -1 : 1;
    }
    else
    {
        Point2f v0, v;
        Point iv;

        if( is_float )
        {
            v = cntf[total-1];
        }
        else
        {
            v = cnt[total-1];
        }

        if( !measureDist )
        {
            for( i = 0; i < total; i++ )
            {
                double dist;
                v0 = v;
                if( is_float )
                    v = cntf[i];
                else
                    v = cnt[i];

                if( (v0.y <= pt.y && v.y <= pt.y) ||
                   (v0.y > pt.y && v.y > pt.y) ||
                   (v0.x < pt.x && v.x < pt.x) )
                {
                    if( pt.y == v.y && (pt.x == v.x || (pt.y == v0.y &&
                        ((v0.x <= pt.x && pt.x <= v.x) || (v.x <= pt.x && pt.x <= v0.x)))) )
                        return 0;
                    continue;
                }

                dist = (double)(pt.y - v0.y)*(v.x - v0.x) - (double)(pt.x - v0.x)*(v.y - v0.y);
                if( dist == 0 )
                    return 0;
                if( v.y < v0.y )
                    dist = -dist;
                counter += dist > 0;
            }

            result = counter % 2 == 0 ? -1 : 1;
        }
        else
        {
            for( i = 0; i < total; i++ )
            {
                double dx, dy, dx1, dy1, dx2, dy2, dist_num, dist_denom = 1;

                v0 = v;
                if( is_float )
                    v = cntf[i];
                else
                    v = cnt[i];

                dx = v.x - v0.x; dy = v.y - v0.y;
                dx1 = pt.x - v0.x; dy1 = pt.y - v0.y;
                dx2 = pt.x - v.x; dy2 = pt.y - v.y;

                if( dx1*dx + dy1*dy <= 0 )
                    dist_num = dx1*dx1 + dy1*dy1;
                else if( dx2*dx + dy2*dy >= 0 )
                    dist_num = dx2*dx2 + dy2*dy2;
                else
                {
                    dist_num = (dy1*dx - dx1*dy);
                    dist_num *= dist_num;
                    dist_denom = dx*dx + dy*dy;
                }

                if( dist_num*min_dist_denom < min_dist_num*dist_denom )
                {
                    min_dist_num = dist_num;
                    min_dist_denom = dist_denom;
                    if( min_dist_num == 0 )
                        break;
                }

                if( (v0.y <= pt.y && v.y <= pt.y) ||
                   (v0.y > pt.y && v.y > pt.y) ||
                   (v0.x < pt.x && v.x < pt.x) )
                    continue;

                dist_num = dy1*dx - dx1*dy;
                if( dy < 0 )
                    dist_num = -dist_num;
                counter += dist_num > 0;
            }

            result = std::sqrt(min_dist_num/min_dist_denom);
            if( counter % 2 == 0 )
                result = -result;
        }
    }

    return result;
}
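A usage sketch: with measureDist=false the result is +1 (inside), -1 (outside) or 0 (on an edge); with measureDist=true it is the signed distance to the closest edge.

#include <opencv2/imgproc.hpp>
#include <vector>

void demoPointPolygonTest()
{
    std::vector<cv::Point> square = { {0,0}, {10,0}, {10,10}, {0,10} };

    double inside = cv::pointPolygonTest(square, cv::Point2f(5.f, 5.f), false); // +1
    double dist   = cv::pointPolygonTest(square, cv::Point2f(5.f, 5.f), true);  // +5: distance to the nearest edge
    (void)inside; (void)dist;
}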
Example #25
// Calculates bounding rectangle of a point set or retrieves the already calculated one
static Rect pointSetBoundingRect( const Mat& points )
{
    int npoints = points.checkVector(2);
    int depth = points.depth();
    CV_Assert(npoints >= 0 && (depth == CV_32F || depth == CV_32S));

    int  xmin = 0, ymin = 0, xmax = -1, ymax = -1, i;
    bool is_float = depth == CV_32F;

    if( npoints == 0 )
        return Rect();

    const Point* pts = (const Point*)points.data;
    Point pt = pts[0];

#if CV_SSE4_2
    if(cv::checkHardwareSupport(CV_CPU_SSE4_2))
    {
        if( !is_float )
        {
            __m128i minval, maxval;
            minval = maxval = _mm_loadl_epi64((const __m128i*)(&pt)); //min[0]=pt.x, min[1]=pt.y

            for( i = 1; i < npoints; i++ )
            {
                __m128i ptXY = _mm_loadl_epi64((const __m128i*)&pts[i]);
                minval = _mm_min_epi32(ptXY, minval);
                maxval = _mm_max_epi32(ptXY, maxval);
            }
            xmin = _mm_cvtsi128_si32(minval);
            ymin = _mm_cvtsi128_si32(_mm_srli_si128(minval, 4));
            xmax = _mm_cvtsi128_si32(maxval);
            ymax = _mm_cvtsi128_si32(_mm_srli_si128(maxval, 4));
        }
        else
        {
            __m128 minvalf, maxvalf, z = _mm_setzero_ps(), ptXY = _mm_setzero_ps();
            minvalf = maxvalf = _mm_loadl_pi(z, (const __m64*)(&pt));

            for( i = 1; i < npoints; i++ )
            {
                ptXY = _mm_loadl_pi(ptXY, (const __m64*)&pts[i]);

                minvalf = _mm_min_ps(minvalf, ptXY);
                maxvalf = _mm_max_ps(maxvalf, ptXY);
            }

            float xyminf[2], xymaxf[2];
            _mm_storel_pi((__m64*)xyminf, minvalf);
            _mm_storel_pi((__m64*)xymaxf, maxvalf);
            xmin = cvFloor(xyminf[0]);
            ymin = cvFloor(xyminf[1]);
            xmax = cvFloor(xymaxf[0]);
            ymax = cvFloor(xymaxf[1]);
        }
    }
    else
#endif
    {
        if( !is_float )
        {
            xmin = xmax = pt.x;
            ymin = ymax = pt.y;

            for( i = 1; i < npoints; i++ )
            {
                pt = pts[i];

                if( xmin > pt.x )
                    xmin = pt.x;

                if( xmax < pt.x )
                    xmax = pt.x;

                if( ymin > pt.y )
                    ymin = pt.y;

                if( ymax < pt.y )
                    ymax = pt.y;
            }
        }
        else
        {
            Cv32suf v;
            // init values
            xmin = xmax = CV_TOGGLE_FLT(pt.x);
            ymin = ymax = CV_TOGGLE_FLT(pt.y);

            for( i = 1; i < npoints; i++ )
            {
                pt = pts[i];
                pt.x = CV_TOGGLE_FLT(pt.x);
                pt.y = CV_TOGGLE_FLT(pt.y);

                if( xmin > pt.x )
                    xmin = pt.x;

                if( xmax < pt.x )
                    xmax = pt.x;

                if( ymin > pt.y )
                    ymin = pt.y;

                if( ymax < pt.y )
                    ymax = pt.y;
            }

            v.i = CV_TOGGLE_FLT(xmin); xmin = cvFloor(v.f);
            v.i = CV_TOGGLE_FLT(ymin); ymin = cvFloor(v.f);
            // because right and bottom sides of the bounding rectangle are not inclusive
            // (note +1 in width and height calculation below), cvFloor is used here instead of cvCeil
            v.i = CV_TOGGLE_FLT(xmax); xmax = cvFloor(v.f);
            v.i = CV_TOGGLE_FLT(ymax); ymax = cvFloor(v.f);
        }
    }

    return Rect(xmin, ymin, xmax - xmin + 1, ymax - ymin + 1);
}
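A sketch of the public cv::boundingRect built on helpers like this one; note the +1 in width and height, so the rectangle covers the extreme points inclusively.

#include <opencv2/imgproc.hpp>
#include <vector>

cv::Rect demoBoundingRect()
{
    std::vector<cv::Point> pts = { {2,3}, {10,7}, {5,1} };
    return cv::boundingRect(pts); // Rect(x=2, y=1, width=9, height=7)
}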
Example #26
cv::RotatedRect cv::fitEllipse( InputArray _points )
{
    Mat points = _points.getMat();
    int i, n = points.checkVector(2);
    int depth = points.depth();
    CV_Assert( n >= 0 && (depth == CV_32F || depth == CV_32S));

    RotatedRect box;

    if( n < 5 )
        CV_Error( CV_StsBadSize, "There should be at least 5 points to fit the ellipse" );

    // New fitellipse algorithm, contributed by Dr. Daniel Weiss
    Point2f c(0,0);
    double gfp[5], rp[5], t;
    const double min_eps = 1e-6;
    bool is_float = depth == CV_32F;
    const Point* ptsi = (const Point*)points.data;
    const Point2f* ptsf = (const Point2f*)points.data;

    AutoBuffer<double> _Ad(n*5), _bd(n);
    double *Ad = _Ad, *bd = _bd;

    // first fit for parameters A - E
    Mat A( n, 5, CV_64F, Ad );
    Mat b( n, 1, CV_64F, bd );
    Mat x( 5, 1, CV_64F, gfp );

    for( i = 0; i < n; i++ )
    {
        Point2f p = is_float ? ptsf[i] : Point2f((float)ptsi[i].x, (float)ptsi[i].y);
        c += p;
    }
    c.x /= n;
    c.y /= n;

    for( i = 0; i < n; i++ )
    {
        Point2f p = is_float ? ptsf[i] : Point2f((float)ptsi[i].x, (float)ptsi[i].y);
        p -= c;

        bd[i] = 10000.0; // 1.0?
        Ad[i*5] = -(double)p.x * p.x; // A - C signs inverted as proposed by APP
        Ad[i*5 + 1] = -(double)p.y * p.y;
        Ad[i*5 + 2] = -(double)p.x * p.y;
        Ad[i*5 + 3] = p.x;
        Ad[i*5 + 4] = p.y;
    }

    solve(A, b, x, DECOMP_SVD);

    // now use general-form parameters A - E to find the ellipse center:
    // differentiate general form wrt x/y to get two equations for cx and cy
    A = Mat( 2, 2, CV_64F, Ad );
    b = Mat( 2, 1, CV_64F, bd );
    x = Mat( 2, 1, CV_64F, rp );
    Ad[0] = 2 * gfp[0];
    Ad[1] = Ad[2] = gfp[2];
    Ad[3] = 2 * gfp[1];
    bd[0] = gfp[3];
    bd[1] = gfp[4];
    solve( A, b, x, DECOMP_SVD );

    // re-fit for parameters A - C with those center coordinates
    A = Mat( n, 3, CV_64F, Ad );
    b = Mat( n, 1, CV_64F, bd );
    x = Mat( 3, 1, CV_64F, gfp );
    for( i = 0; i < n; i++ )
    {
        Point2f p = is_float ? ptsf[i] : Point2f((float)ptsi[i].x, (float)ptsi[i].y);
        p -= c;
        bd[i] = 1.0;
        Ad[i * 3] = (p.x - rp[0]) * (p.x - rp[0]);
        Ad[i * 3 + 1] = (p.y - rp[1]) * (p.y - rp[1]);
        Ad[i * 3 + 2] = (p.x - rp[0]) * (p.y - rp[1]);
    }
    solve(A, b, x, DECOMP_SVD);

    // store angle and radii
    rp[4] = -0.5 * atan2(gfp[2], gfp[1] - gfp[0]); // convert from APP angle usage
    t = sin(-2.0 * rp[4]);
    if( fabs(t) > fabs(gfp[2])*min_eps )
        t = gfp[2]/t;
    else
        t = gfp[1] - gfp[0];
    rp[2] = fabs(gfp[0] + gfp[1] - t);
    if( rp[2] > min_eps )
        rp[2] = std::sqrt(2.0 / rp[2]);
    rp[3] = fabs(gfp[0] + gfp[1] + t);
    if( rp[3] > min_eps )
        rp[3] = std::sqrt(2.0 / rp[3]);

    box.center.x = (float)rp[0] + c.x;
    box.center.y = (float)rp[1] + c.y;
    box.size.width = (float)(rp[2]*2);
    box.size.height = (float)(rp[3]*2);
    if( box.size.width > box.size.height )
    {
        float tmp;
        CV_SWAP( box.size.width, box.size.height, tmp );
        box.angle = (float)(90 + rp[4]*180/CV_PI);
    }
    if( box.angle < -180 )
        box.angle += 360;
    if( box.angle > 360 )
        box.angle -= 360;

    return box;
}
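A usage sketch, sampling points from a known ellipse (the assertion above rejects inputs with fewer than 5 points); the numbers are illustrative.

#include <opencv2/imgproc.hpp>
#include <vector>
#include <cmath>

cv::RotatedRect demoFitEllipse()
{
    // points on an axis-aligned ellipse with semi-axes 40 and 20, centred at (100, 100)
    std::vector<cv::Point2f> pts;
    for (int i = 0; i < 12; i++)
    {
        double a = i * CV_PI / 6.0;
        pts.push_back(cv::Point2f(100.f + 40.f * (float)std::cos(a),
                                  100.f + 20.f * (float)std::sin(a)));
    }
    return cv::fitEllipse(pts); // centre ~ (100,100), axis lengths ~ 80 and 40
}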
Example #27
void cv::minEnclosingCircle( InputArray _points, Point2f& _center, float& _radius )
{
    int max_iters = 100;
    const float eps = FLT_EPSILON*2;
    bool result = false;
    Mat points = _points.getMat();
    int i, j, k, count = points.checkVector(2);
    int depth = points.depth();
    Point2f center;
    float radius = 0.f;
    CV_Assert(count >= 0 && (depth == CV_32F || depth == CV_32S));

    _center.x = _center.y = 0.f;
    _radius = 0.f;

    if( count == 0 )
        return;

    bool is_float = depth == CV_32F;
    const Point* ptsi = (const Point*)points.data;
    const Point2f* ptsf = (const Point2f*)points.data;

    Point2f pt = is_float ? ptsf[0] : Point2f((float)ptsi[0].x,(float)ptsi[0].y);
    Point2f pts[4] = {pt, pt, pt, pt};

    for( i = 1; i < count; i++ )
    {
        pt = is_float ? ptsf[i] : Point2f((float)ptsi[i].x, (float)ptsi[i].y);

        if( pt.x < pts[0].x )
            pts[0] = pt;
        if( pt.x > pts[1].x )
            pts[1] = pt;
        if( pt.y < pts[2].y )
            pts[2] = pt;
        if( pt.y > pts[3].y )
            pts[3] = pt;
    }

    for( k = 0; k < max_iters; k++ )
    {
        double min_delta = 0, delta;
        Point2f farAway(0,0);
        /* only for the first iteration, because the algorithm is repaired at the loop's foot */
        if( k == 0 )
            findEnslosingCicle4pts_32f( pts, center, radius );

        for( i = 0; i < count; i++ )
        {
            pt = is_float ? ptsf[i] : Point2f((float)ptsi[i].x,(float)ptsi[i].y);

            delta = pointInCircle( pt, center, radius );
            if( delta < min_delta )
            {
                min_delta = delta;
                farAway = pt;
            }
        }
        result = min_delta >= 0;
        if( result )
            break;

        Point2f ptsCopy[4];
        // find a good replacement partner for the point that is farthest away,
        // starting with the one that lies inside the current circle (i=3)
        for( i = 3; i >= 0; i-- )
        {
            for( j = 0; j < 4; j++ )
                ptsCopy[j] = i != j ? pts[j] : farAway;

            findEnslosingCicle4pts_32f( ptsCopy, center, radius );
            if( pointInCircle( pts[i], center, radius ) >= 0)
            {
                // replaced one again in the new circle?
                pts[i] = farAway;
                break;
            }
        }
    }

    if( !result )
    {
        radius = 0.f;
        for( i = 0; i < count; i++ )
        {
            pt = is_float ? ptsf[i] : Point2f((float)ptsi[i].x,(float)ptsi[i].y);
            float dx = center.x - pt.x, dy = center.y - pt.y;
            float t = dx*dx + dy*dy;
            radius = MAX(radius, t);
        }

        radius = (float)(std::sqrt(radius)*(1 + eps));
    }

    _center = center;
    _radius = radius;
}
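A usage sketch: the smallest circle containing all the points; for the corners of a 10x10 square the centre is the middle and the radius is half the diagonal.

#include <opencv2/imgproc.hpp>
#include <vector>

void demoMinEnclosingCircle()
{
    std::vector<cv::Point> square = { {0,0}, {10,0}, {10,10}, {0,10} };
    cv::Point2f center;
    float radius = 0.f;
    cv::minEnclosingCircle(square, center, radius);
    // center ~ (5, 5), radius ~ 7.07
}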
Example #28
CV_IMPL void
cvBoxPoints( CvBox2D box, CvPoint2D32f pt[4] )
{
    if( !pt )
        CV_Error( CV_StsNullPtr, "NULL vertex array pointer" );
    cv::RotatedRect(box).points((cv::Point2f*)pt);
}


double cv::pointPolygonTest( InputArray _contour, Point2f pt, bool measureDist )
{
    CV_INSTRUMENT_REGION()

    double result = 0;
    Mat contour = _contour.getMat();
    int i, total = contour.checkVector(2), counter = 0;
    int depth = contour.depth();
    CV_Assert( total >= 0 && (depth == CV_32S || depth == CV_32F));

    bool is_float = depth == CV_32F;
    double min_dist_num = FLT_MAX, min_dist_denom = 1;
    Point ip(cvRound(pt.x), cvRound(pt.y));

    if( total == 0 )
        return measureDist ? -DBL_MAX : -1;

    const Point* cnt = contour.ptr<Point>();
    const Point2f* cntf = (const Point2f*)cnt;

    if( !is_float && !measureDist && ip.x == pt.x && ip.y == pt.y )
    {
Example #29
static Moments contourMoments( const Mat& contour )
{
    Moments m;
    int lpt = contour.checkVector(2);
    int is_float = contour.depth() == CV_32F;
    const Point* ptsi = (const Point*)contour.data;
    const Point2f* ptsf = (const Point2f*)contour.data;

    CV_Assert( contour.depth() == CV_32S || contour.depth() == CV_32F );

    if( lpt == 0 )
        return m;

    double a00 = 0, a10 = 0, a01 = 0, a20 = 0, a11 = 0, a02 = 0, a30 = 0, a21 = 0, a12 = 0, a03 = 0;
    double xi, yi, xi2, yi2, xi_1, yi_1, xi_12, yi_12, dxy, xii_1, yii_1;

    if( !is_float )
    {
        xi_1 = ptsi[lpt-1].x;
        yi_1 = ptsi[lpt-1].y;
    }
    else
    {
        xi_1 = ptsf[lpt-1].x;
        yi_1 = ptsf[lpt-1].y;
    }

    xi_12 = xi_1 * xi_1;
    yi_12 = yi_1 * yi_1;

    for( int i = 0; i < lpt; i++ )
    {
        if( !is_float )
        {
            xi = ptsi[i].x;
            yi = ptsi[i].y;
        }
        else
        {
            xi = ptsf[i].x;
            yi = ptsf[i].y;
        }

        xi2 = xi * xi;
        yi2 = yi * yi;
        dxy = xi_1 * yi - xi * yi_1;
        xii_1 = xi_1 + xi;
        yii_1 = yi_1 + yi;

        a00 += dxy;
        a10 += dxy * xii_1;
        a01 += dxy * yii_1;
        a20 += dxy * (xi_1 * xii_1 + xi2);
        a11 += dxy * (xi_1 * (yii_1 + yi_1) + xi * (yii_1 + yi));
        a02 += dxy * (yi_1 * yii_1 + yi2);
        a30 += dxy * xii_1 * (xi_12 + xi2);
        a03 += dxy * yii_1 * (yi_12 + yi2);
        a21 += dxy * (xi_12 * (3 * yi_1 + yi) + 2 * xi * xi_1 * yii_1 +
                   xi2 * (yi_1 + 3 * yi));
        a12 += dxy * (yi_12 * (3 * xi_1 + xi) + 2 * yi * yi_1 * xii_1 +
                   yi2 * (xi_1 + 3 * xi));
        xi_1 = xi;
        yi_1 = yi;
        xi_12 = xi2;
        yi_12 = yi2;
    }

    if( fabs(a00) > FLT_EPSILON )
    {
        double db1_2, db1_6, db1_12, db1_24, db1_20, db1_60;

        if( a00 > 0 )
        {
            db1_2 = 0.5;
            db1_6 = 0.16666666666666666666666666666667;
            db1_12 = 0.083333333333333333333333333333333;
            db1_24 = 0.041666666666666666666666666666667;
            db1_20 = 0.05;
            db1_60 = 0.016666666666666666666666666666667;
        }
        else
        {
            db1_2 = -0.5;
            db1_6 = -0.16666666666666666666666666666667;
            db1_12 = -0.083333333333333333333333333333333;
            db1_24 = -0.041666666666666666666666666666667;
            db1_20 = -0.05;
            db1_60 = -0.016666666666666666666666666666667;
        }

        // spatial moments
        m.m00 = a00 * db1_2;
        m.m10 = a10 * db1_6;
        m.m01 = a01 * db1_6;
        m.m20 = a20 * db1_12;
        m.m11 = a11 * db1_24;
        m.m02 = a02 * db1_12;
        m.m30 = a30 * db1_20;
        m.m21 = a21 * db1_60;
        m.m12 = a12 * db1_60;
        m.m03 = a03 * db1_20;

        completeMomentState( &m );
    }
    return m;
}
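A sketch of calling the public cv::moments, which dispatches to contour moment code like the helper above; the centroid follows from the spatial moments (the demo function name is ours).

#include <opencv2/imgproc.hpp>
#include <vector>

cv::Point2f demoContourCentroid(const std::vector<cv::Point>& contour)
{
    cv::Moments m = cv::moments(contour);
    if (m.m00 == 0.0)                       // degenerate contour
        return cv::Point2f(0.f, 0.f);
    return cv::Point2f((float)(m.m10 / m.m00), (float)(m.m01 / m.m00));
}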
Example #30
bool cv::solvePnPRansac(InputArray _opoints, InputArray _ipoints,
                        InputArray _cameraMatrix, InputArray _distCoeffs,
                        OutputArray _rvec, OutputArray _tvec, bool useExtrinsicGuess,
                        int iterationsCount, float reprojectionError, double confidence,
                        OutputArray _inliers, int flags)
{

    Mat opoints = _opoints.getMat(), ipoints = _ipoints.getMat();

    int npoints = std::max(opoints.checkVector(3, CV_32F), opoints.checkVector(3, CV_64F));
    CV_Assert( npoints >= 0 && npoints == std::max(ipoints.checkVector(2, CV_32F), ipoints.checkVector(2, CV_64F)) );

    CV_Assert(opoints.isContinuous());
    CV_Assert(opoints.depth() == CV_32F || opoints.depth() == CV_64F);
    CV_Assert((opoints.rows == 1 && opoints.channels() == 3) || opoints.cols*opoints.channels() == 3);
    CV_Assert(ipoints.isContinuous());
    CV_Assert(ipoints.depth() == CV_32F || ipoints.depth() == CV_64F);
    CV_Assert((ipoints.rows == 1 && ipoints.channels() == 2) || ipoints.cols*ipoints.channels() == 2);

    _rvec.create(3, 1, CV_64FC1);
    _tvec.create(3, 1, CV_64FC1);

    Mat rvec = useExtrinsicGuess ? _rvec.getMat() : Mat(3, 1, CV_64FC1);
    Mat tvec = useExtrinsicGuess ? _tvec.getMat() : Mat(3, 1, CV_64FC1);
    Mat cameraMatrix = _cameraMatrix.getMat(), distCoeffs = _distCoeffs.getMat();

    Ptr<PointSetRegistrator::Callback> cb; // pointer to callback
    cb = makePtr<PnPRansacCallback>( cameraMatrix, distCoeffs, flags, useExtrinsicGuess, rvec, tvec);

    int model_points = 4;                             // minimum number of model points
    if( flags == cv::SOLVEPNP_ITERATIVE ) model_points = 6;
    else if( flags == cv::SOLVEPNP_UPNP ) model_points = 6;
    else if( flags == cv::SOLVEPNP_EPNP ) model_points = 5;

    double param1 = reprojectionError;                // reprojection error
    double param2 = confidence;                       // confidence
    int param3 = iterationsCount;                     // maximum number of iterations

    cv::Mat _local_model(3, 2, CV_64FC1);
    cv::Mat _mask_local_inliers(1, opoints.rows, CV_8UC1);

    // call Ransac
    int result = createRANSACPointSetRegistrator(cb, model_points, param1, param2, param3)->run(opoints, ipoints, _local_model, _mask_local_inliers);

    if( result <= 0 || _local_model.rows <= 0)
    {
        _rvec.assign(rvec);    // output rotation vector
        _tvec.assign(tvec);    // output translation vector

        if( _inliers.needed() )
            _inliers.release();

        return false;
    }
    else
    {
        _rvec.assign(_local_model.col(0));    // output rotation vector
        _tvec.assign(_local_model.col(1));    // output translation vector
    }

    if(_inliers.needed())
    {
        Mat _local_inliers;
        int count = 0;
        for (int i = 0; i < _mask_local_inliers.rows; ++i)
        {
            if((int)_mask_local_inliers.at<uchar>(i) == 1) // inliers mask
            {
                _local_inliers.push_back(count);    // output inliers vector
                count++;
            }
        }
        _local_inliers.copyTo(_inliers);
    }
    return true;
}
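Finally, a usage sketch for solvePnPRansac, assuming a larger correspondence set that may contain outliers; the inlier indices come back through the optional output array.

#include <opencv2/calib3d.hpp>
#include <vector>

void demoSolvePnPRansac(const std::vector<cv::Point3f>& obj,
                        const std::vector<cv::Point2f>& img,
                        const cv::Mat& K)
{
    cv::Mat rvec, tvec;
    std::vector<int> inliers;
    bool ok = cv::solvePnPRansac(obj, img, K, cv::noArray(), rvec, tvec,
                                 false,   // useExtrinsicGuess
                                 100,     // iterationsCount
                                 8.0f,    // reprojectionError in pixels
                                 0.99,    // confidence
                                 inliers, // indices of inlier correspondences
                                 cv::SOLVEPNP_ITERATIVE);
    (void)ok;
}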