void cv::hconcat(const Mat* src, size_t nsrc, OutputArray _dst)
{
    CV_INSTRUMENT_REGION();

    if( nsrc == 0 || !src )
    {
        _dst.release();
        return;
    }

    int totalCols = 0, cols = 0;
    for( size_t i = 0; i < nsrc; i++ )
    {
        CV_Assert( src[i].dims <= 2 &&
                   src[i].rows == src[0].rows &&
                   src[i].type() == src[0].type());
        totalCols += src[i].cols;
    }
    _dst.create( src[0].rows, totalCols, src[0].type());
    Mat dst = _dst.getMat();
    for( size_t i = 0; i < nsrc; i++ )
    {
        Mat dpart = dst(Rect(cols, 0, src[i].cols, src[i].rows));
        src[i].copyTo(dpart);
        cols += src[i].cols;
    }
}
void cv::hconcat(InputArray src1, InputArray src2, OutputArray dst)
{
    CV_INSTRUMENT_REGION();

    Mat src[] = {src1.getMat(), src2.getMat()};
    hconcat(src, 2, dst);
}
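A minimal caller sketch for the two hconcat overloads above (illustrative only, not part of the original listing); the matrix sizes are arbitrary, but inputs must share row count and type:

#include <opencv2/core.hpp>

static cv::Mat hconcatExample()
{
    cv::Mat a = cv::Mat::ones(2, 3, CV_8UC1);    // 2x3
    cv::Mat b = cv::Mat::zeros(2, 2, CV_8UC1);   // 2x2, same rows and type as 'a'
    cv::Mat ab;
    cv::hconcat(a, b, ab);                       // ab becomes 2x5
    CV_Assert(ab.rows == 2 && ab.cols == 5);
    return ab;
}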
Example #3
void sqrt64f(const double* src, double* dst, int len)
{
    CV_INSTRUMENT_REGION();

    int i = 0;

#if CV_SIMD_64F
    const int VECSZ = v_float64::nlanes;
    for( ; i < len; i += VECSZ*2 )
    {
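        // Tail handling: when fewer than 2*VECSZ elements remain, step back and
        // reprocess the last full 2*VECSZ block. Recomputing the overlap is harmless
        // out of place, but would double-apply sqrt in place (src == dst) and would
        // underflow the index when len < 2*VECSZ, so those cases fall through to the
        // scalar loop below.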
        if( i + VECSZ*2 > len )
        {
            if( i == 0 || src == dst )
                break;
            i = len - VECSZ*2;
        }
        v_float64 t0 = vx_load(src + i), t1 = vx_load(src + i + VECSZ);
        t0 = v_sqrt(t0);
        t1 = v_sqrt(t1);
        v_store(dst + i, t0); v_store(dst + i + VECSZ, t1);
    }
    vx_cleanup();
#endif

    for( ; i < len; i++ )
        dst[i] = std::sqrt(src[i]);
}
Example #4
void magnitude64f(const double* x, const double* y, double* mag, int len)
{
    CV_INSTRUMENT_REGION();

    int i = 0;

#if CV_SIMD_64F
    const int VECSZ = v_float64::nlanes;
    for( ; i < len; i += VECSZ*2 )
    {
        if( i + VECSZ*2 > len )
        {
            if( i == 0 || mag == x || mag == y )
                break;
            i = len - VECSZ*2;
        }
        v_float64 x0 = vx_load(x + i), x1 = vx_load(x + i + VECSZ);
        v_float64 y0 = vx_load(y + i), y1 = vx_load(y + i + VECSZ);
        x0 = v_sqrt(v_muladd(x0, x0, y0*y0));
        x1 = v_sqrt(v_muladd(x1, x1, y1*y1));
        v_store(mag + i, x0);
        v_store(mag + i + VECSZ, x1);
    }
    vx_cleanup();
#endif

    for( ; i < len; i++ )
    {
        double x0 = x[i], y0 = y[i];
        mag[i] = std::sqrt(x0*x0 + y0*y0);
    }
}
//void RhoanaGainCompensator::apply(int index, Point /*corner*/, Mat &image, const Mat &/*mask*/)
void RhoanaGainCompensator::apply(int index, Point /*corner*/, InputOutputArray image, InputArray /*mask*/)
{
    //image *= gains_(index, 0);
    CV_INSTRUMENT_REGION()

    multiply(image, gains_(index, 0), image);
}
Example #6
cv::String getcwd()
{
    CV_INSTRUMENT_REGION();
    cv::AutoBuffer<char, 4096> buf;
#if defined WIN32 || defined _WIN32 || defined WINCE
#ifdef WINRT
    return cv::String();
#else
    DWORD sz = GetCurrentDirectoryA(0, NULL);
    buf.allocate((size_t)sz);
    sz = GetCurrentDirectoryA((DWORD)buf.size(), buf.data());
    return cv::String(buf.data(), (size_t)sz);
#endif
#elif defined __linux__ || defined __APPLE__ || defined __HAIKU__ || defined __FreeBSD__
    for(;;)
    {
        char* p = ::getcwd(buf.data(), buf.size());
        if (p == NULL)
        {
            if (errno == ERANGE)
            {
                buf.allocate(buf.size() * 2);
                continue;
            }
            return cv::String();
        }
        break;
    }
    return cv::String(buf.data(), (size_t)strlen(buf.data()));
#else
    return cv::String();
#endif
}
Example #7
void KeyPoint::convert(const std::vector<KeyPoint>& keypoints, std::vector<Point2f>& points2f,
                       const std::vector<int>& keypointIndexes)
{
    CV_INSTRUMENT_REGION();

    if( keypointIndexes.empty() )
    {
        points2f.resize( keypoints.size() );
        for( size_t i = 0; i < keypoints.size(); i++ )
            points2f[i] = keypoints[i].pt;
    }
    else
    {
        points2f.resize( keypointIndexes.size() );
        for( size_t i = 0; i < keypointIndexes.size(); i++ )
        {
            int idx = keypointIndexes[i];
            if( idx >= 0 )
                points2f[i] = keypoints[idx].pt;
            else
            {
                CV_Error( CV_StsBadArg, "keypointIndexes has element < 0. TODO: process this case" );
                //points2f[i] = Point2f(-1, -1);
            }
        }
    }
}
Example #8
int normHamming(const uchar* a, const uchar* b, int n)
{
    CV_INSTRUMENT_REGION()

    CV_CPU_DISPATCH(normHamming, (a, b, n),
        CV_CPU_DISPATCH_MODES_ALL);
}
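For reference, a hypothetical way this kernel is reached from the public API, assuming two CV_8U descriptors of equal size:

#include <opencv2/core.hpp>

static double hammingExample(const cv::Mat& descA, const cv::Mat& descB)
{
    // NORM_HAMMING on 8-bit data ends up in a popcount kernel like the one above.
    return cv::norm(descA, descB, cv::NORM_HAMMING);
}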
Example #9
void drawKeypoints( InputArray image, const std::vector<KeyPoint>& keypoints, InputOutputArray outImage,
                    const Scalar& _color, DrawMatchesFlags flags )
{
    CV_INSTRUMENT_REGION();

    if( !(flags & DrawMatchesFlags::DRAW_OVER_OUTIMG) )
    {
        if (image.type() == CV_8UC3 || image.type() == CV_8UC4)
        {
            image.copyTo(outImage);
        }
        else if( image.type() == CV_8UC1 )
        {
            cvtColor( image, outImage, COLOR_GRAY2BGR );
        }
        else
        {
            CV_Error( Error::StsBadArg, "Incorrect type of input image: " + typeToString(image.type()) );
        }
    }

    RNG& rng=theRNG();
    bool isRandColor = _color == Scalar::all(-1);

    CV_Assert( !outImage.empty() );
    std::vector<KeyPoint>::const_iterator it = keypoints.begin(),
                                     end = keypoints.end();
    for( ; it != end; ++it )
    {
        Scalar color = isRandColor ? Scalar( rng(256), rng(256), rng(256), 255 ) : _color;
        _drawKeypoint( outImage, *it, color, flags );
    }
}
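A hedged usage sketch for drawKeypoints above; the ORB detector and the output file name are assumptions, not taken from the source:

#include <vector>
#include <opencv2/features2d.hpp>
#include <opencv2/imgcodecs.hpp>

static void drawKeypointsExample(const cv::Mat& img)       // CV_8UC1, CV_8UC3 or CV_8UC4
{
    std::vector<cv::KeyPoint> kps;
    cv::Ptr<cv::ORB> orb = cv::ORB::create();
    orb->detect(img, kps);

    cv::Mat vis;
    // Scalar::all(-1) requests a random color per keypoint, as handled above.
    cv::drawKeypoints(img, kps, vis, cv::Scalar::all(-1),
                      cv::DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
    cv::imwrite("keypoints.png", vis);                      // placeholder output path
}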
void RhoanaBlocksGainCompensator::apply(int index, Point /*corner*/, InputOutputArray _image, InputArray /*mask*/)
{
    CV_INSTRUMENT_REGION()

    ///CV_Assert(image.type() == CV_8UC3);
    CV_Assert(_image.type() == CV_8UC1);

    UMat u_gain_map;
    if (gain_maps_[index].size() == _image.size())
        u_gain_map = gain_maps_[index];
    else
        resize(gain_maps_[index], u_gain_map, _image.size(), 0, 0, INTER_LINEAR);

    Mat_<float> gain_map = u_gain_map.getMat(ACCESS_READ);
    Mat image = _image.getMat();
    for (int y = 0; y < image.rows; ++y)
    {
        const float* gain_row = gain_map.ptr<float>(y);
        ///Point3_<uchar>* row = image.ptr<Point3_<uchar> >(y);
        uchar* row = image.ptr<uchar>(y);
        for (int x = 0; x < image.cols; ++x)
        {
            ///row[x].x = saturate_cast<uchar>(row[x].x * gain_row[x]);
            ///row[x].y = saturate_cast<uchar>(row[x].y * gain_row[x]);
            ///row[x].z = saturate_cast<uchar>(row[x].z * gain_row[x]);
            row[x] = saturate_cast<uchar>(row[x] * gain_row[x]);
        }
    }
}
Example #11
bool VideoCapture::retrieve(OutputArray image, int channel)
{
    CV_INSTRUMENT_REGION();
    if (!icap.empty())
        return icap->retrieveFrame(channel, image);
    return false;
}
Example #12
void invSqrt32f(const float* src, float* dst, int len)
{
    CV_INSTRUMENT_REGION();

    int i = 0;

#if CV_SIMD
    const int VECSZ = v_float32::nlanes;
    for( ; i < len; i += VECSZ*2 )
    {
        if( i + VECSZ*2 > len )
        {
            if( i == 0 || src == dst )
                break;
            i = len - VECSZ*2;
        }
        v_float32 t0 = vx_load(src + i), t1 = vx_load(src + i + VECSZ);
        t0 = v_invsqrt(t0);
        t1 = v_invsqrt(t1);
        v_store(dst + i, t0); v_store(dst + i + VECSZ, t1);
    }
    vx_cleanup();
#endif

    for( ; i < len; i++ )
        dst[i] = 1/std::sqrt(src[i]);
}
Example #13
    void detect( InputArray _image, std::vector<KeyPoint>& keypoints, InputArray _mask )
    {
        CV_INSTRUMENT_REGION()

        std::vector<Point2f> corners;

        if (_image.isUMat())
        {
            UMat ugrayImage;
            if( _image.type() != CV_8U )
                cvtColor( _image, ugrayImage, COLOR_BGR2GRAY );
            else
                ugrayImage = _image.getUMat();

            goodFeaturesToTrack( ugrayImage, corners, nfeatures, qualityLevel, minDistance, _mask,
                                 blockSize, useHarrisDetector, k );
        }
        else
        {
            Mat image = _image.getMat(), grayImage = image;
            if( image.type() != CV_8U )
                cvtColor( image, grayImage, COLOR_BGR2GRAY );

            goodFeaturesToTrack( grayImage, corners, nfeatures, qualityLevel, minDistance, _mask,
                                blockSize, useHarrisDetector, k );
        }

        keypoints.resize(corners.size());
        std::vector<Point2f>::const_iterator corner_it = corners.begin();
        std::vector<KeyPoint>::iterator keypoint_it = keypoints.begin();
        for( ; corner_it != corners.end(); ++corner_it, ++keypoint_it )
            *keypoint_it = KeyPoint( *corner_it, (float)blockSize );

    }
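The unnamed class above appears to implement a good-features-to-track detector; a sketch of driving it through the Feature2D interface, assuming it corresponds to cv::GFTTDetector:

#include <vector>
#include <opencv2/features2d.hpp>

static std::vector<cv::KeyPoint> gfttExample(const cv::Mat& gray)
{
    // Parameter values are illustrative; GFTTDetector forwards them to goodFeaturesToTrack.
    cv::Ptr<cv::GFTTDetector> gftt =
        cv::GFTTDetector::create(500 /*maxCorners*/, 0.01 /*qualityLevel*/, 10.0 /*minDistance*/);
    std::vector<cv::KeyPoint> kps;
    gftt->detect(gray, kps);
    return kps;
}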
Example #14
bool p3p::solve(cv::Mat& R, cv::Mat& tvec, const cv::Mat& opoints, const cv::Mat& ipoints)
{
    CV_INSTRUMENT_REGION();

    double rotation_matrix[3][3] = {}, translation[3] = {};
    std::vector<double> points;
    if (opoints.depth() == ipoints.depth())
    {
        if (opoints.depth() == CV_32F)
            extract_points<cv::Point3f,cv::Point2f>(opoints, ipoints, points);
        else
            extract_points<cv::Point3d,cv::Point2d>(opoints, ipoints, points);
    }
    else if (opoints.depth() == CV_32F)
        extract_points<cv::Point3f,cv::Point2d>(opoints, ipoints, points);
    else
        extract_points<cv::Point3d,cv::Point2f>(opoints, ipoints, points);

    bool result = solve(rotation_matrix, translation,
                        points[0], points[1], points[2], points[3], points[4],
                        points[5], points[6], points[7], points[8], points[9],
                        points[10], points[11], points[12], points[13], points[14],
                        points[15], points[16], points[17], points[18], points[19]);
    cv::Mat(3, 1, CV_64F, translation).copyTo(tvec);
    cv::Mat(3, 3, CV_64F, rotation_matrix).copyTo(R);
    return result;
}
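This internal solver is normally reached through the public solvePnP entry point; a minimal, hypothetical call (SOLVEPNP_P3P expects exactly four point correspondences):

#include <vector>
#include <opencv2/calib3d.hpp>

static bool p3pExample(const std::vector<cv::Point3f>& obj4,   // exactly 4 object points
                       const std::vector<cv::Point2f>& img4,   // their 4 projections
                       const cv::Mat& K)                       // 3x3 camera matrix
{
    cv::Mat rvec, tvec;
    return cv::solvePnP(obj4, img4, K, cv::noArray(), rvec, tvec,
                        false, cv::SOLVEPNP_P3P);
}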
Example #15
bool VideoCapture::grab()
{
    CV_INSTRUMENT_REGION();

    if (!icap.empty())
        return icap->grabFrame();
    return cvGrabFrame(cap) != 0;
}
void cv::hconcat(InputArray _src, OutputArray dst)
{
    CV_INSTRUMENT_REGION();

    std::vector<Mat> src;
    _src.getMatVector(src);
    hconcat(!src.empty() ? &src[0] : 0, src.size(), dst);
}
Example #17
void KeyPoint::convert( const std::vector<Point2f>& points2f, std::vector<KeyPoint>& keypoints,
                        float size, float response, int octave, int class_id )
{
    CV_INSTRUMENT_REGION();

    keypoints.resize(points2f.size());
    for( size_t i = 0; i < points2f.size(); i++ )
        keypoints[i] = KeyPoint(points2f[i], size, -1, response, octave, class_id);
}
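An illustrative round trip through the two KeyPoint::convert overloads in this listing; the keypoint size passed on the way back is an arbitrary choice:

#include <vector>
#include <opencv2/core.hpp>

static void keypointRoundTrip(const std::vector<cv::KeyPoint>& kps)
{
    std::vector<cv::Point2f> pts;
    cv::KeyPoint::convert(kps, pts);             // keypoints -> bare 2D points

    std::vector<cv::KeyPoint> rebuilt;
    cv::KeyPoint::convert(pts, rebuilt, 7.f);    // points -> keypoints with size 7
    CV_Assert(rebuilt.size() == kps.size());
}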
Example #18
Stitcher::Status Stitcher::stitch(InputArrayOfArrays images, InputArrayOfArrays masks, OutputArray pano)
{
    CV_INSTRUMENT_REGION();

    Status status = estimateTransform(images, masks);
    if (status != OK)
        return status;
    return composePanorama(pano);
}
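A brief, hypothetical call into the high-level Stitcher API above; it assumes a reasonably recent OpenCV where Stitcher::create takes a mode:

#include <vector>
#include <opencv2/stitching.hpp>

static bool stitchExample(const std::vector<cv::Mat>& images, cv::Mat& pano)
{
    cv::Ptr<cv::Stitcher> stitcher = cv::Stitcher::create(cv::Stitcher::PANORAMA);
    cv::Stitcher::Status status = stitcher->stitch(images, pano);  // estimate + compose
    return status == cv::Stitcher::OK;
}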
void fastAtan64f(const double *Y, const double *X, double *angle, int len, bool angleInDegrees)
{
    CV_INSTRUMENT_REGION()

    CALL_HAL(fastAtan64f, cv_hal_fastAtan64f, Y, X, angle, len, angleInDegrees);

    CV_CPU_DISPATCH(fastAtan64f, (Y, X, angle, len, angleInDegrees),
        CV_CPU_DISPATCH_MODES_ALL);
}
Example #20
void log64f(const double *src, double *dst, int n)
{
    CV_INSTRUMENT_REGION();

    for (int i = 0; i < n; i++)
    {
        dst[i] = std::log(src[i]);
    }
}
Example #21
void KeyPointsFilter::runByPixelsMask( std::vector<KeyPoint>& keypoints, const Mat& mask )
{
    CV_INSTRUMENT_REGION();

    if( mask.empty() )
        return;

    keypoints.erase(std::remove_if(keypoints.begin(), keypoints.end(), MaskPredicate(mask)), keypoints.end());
}
Example #22
void log32f(const float *src, float *dst, int n)
{
    CV_INSTRUMENT_REGION();

    for (int i = 0; i < n; i++)
    {
        dst[i] = std::log(src[i]);
    }
}
void log64f(const double *src, double *dst, int n)
{
    CV_INSTRUMENT_REGION()
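    // Dispatch pattern shared by the math wrappers in this listing: CALL_HAL
    // returns early when an external HAL implementation reports success,
    // CV_IPP_RUN_FAST returns early when the Intel IPP call succeeds, and
    // CV_CPU_DISPATCH otherwise forwards to the best CPU-feature-specific
    // build of the kernel.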

    CALL_HAL(log64f, cv_hal_log64f, src, dst, n);
    CV_IPP_RUN_FAST(CV_INSTRUMENT_FUN_IPP(ippsLn_64f_A50, src, dst, n) >= 0);

    CV_CPU_DISPATCH(log64f, (src, dst, n),
        CV_CPU_DISPATCH_MODES_ALL);
}
void log32f(const float *src, float *dst, int n)
{
    CV_INSTRUMENT_REGION()

    CALL_HAL(log32f, cv_hal_log32f, src, dst, n);
    CV_IPP_RUN_FAST(CV_INSTRUMENT_FUN_IPP(ippsLn_32f_A21, src, dst, n) >= 0);

    CV_CPU_DISPATCH(log32f, (src, dst, n),
        CV_CPU_DISPATCH_MODES_ALL);
}
Example #25
bool VideoCapture::read(OutputArray image)
{
    CV_INSTRUMENT_REGION();

    if(grab())
        retrieve(image);
    else
        image.release();
    return !image.empty();
}
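A conventional capture loop built on the grab/retrieve/read methods shown above; the camera index, window name and exit key are arbitrary choices:

#include <opencv2/videoio.hpp>
#include <opencv2/highgui.hpp>

static void captureExample()
{
    cv::VideoCapture cap(0);             // default camera (index assumed)
    if (!cap.isOpened())
        return;

    cv::Mat frame;
    while (cap.read(frame))              // read() == grab() followed by retrieve()
    {
        cv::imshow("frame", frame);
        if (cv::waitKey(1) == 27)        // ESC quits
            break;
    }
}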
void magnitude32f(const float* x, const float* y, float* mag, int len)
{
    CV_INSTRUMENT_REGION()

    CALL_HAL(magnitude32f, cv_hal_magnitude32f, x, y, mag, len);
    CV_IPP_RUN_FAST(CV_INSTRUMENT_FUN_IPP(ippsMagnitude_32f, x, y, mag, len) >= 0);

    CV_CPU_DISPATCH(magnitude32f, (x, y, mag, len),
        CV_CPU_DISPATCH_MODES_ALL);
}
void magnitude64f(const double* x, const double* y, double* mag, int len)
{
    CV_INSTRUMENT_REGION()

    CALL_HAL(magnitude64f, cv_hal_magnitude64f, x, y, mag, len);
    CV_IPP_RUN_FAST(CV_INSTRUMENT_FUN_IPP(ippsMagnitude_64f, x, y, mag, len) >= 0);

    CV_CPU_DISPATCH(magnitude64f, (x, y, mag, len),
        CV_CPU_DISPATCH_MODES_ALL);
}
void invSqrt64f(const double* src, double* dst, int len)
{
    CV_INSTRUMENT_REGION()

    CALL_HAL(invSqrt64f, cv_hal_invSqrt64f, src, dst, len);
    CV_IPP_RUN_FAST(CV_INSTRUMENT_FUN_IPP(ippsInvSqrt_64f_A50, src, dst, len) >= 0);

    CV_CPU_DISPATCH(invSqrt64f, (src, dst, len),
        CV_CPU_DISPATCH_MODES_ALL);
}
void sqrt32f(const float* src, float* dst, int len)
{
    CV_INSTRUMENT_REGION()

    CALL_HAL(sqrt32f, cv_hal_sqrt32f, src, dst, len);
    CV_IPP_RUN_FAST(CV_INSTRUMENT_FUN_IPP(ippsSqrt_32f_A21, src, dst, len) >= 0);

    CV_CPU_DISPATCH(sqrt32f, (src, dst, len),
        CV_CPU_DISPATCH_MODES_ALL);
}
Example #30
void Index::build(InputArray _data, const IndexParams& params, flann_distance_t _distType)
{
    CV_INSTRUMENT_REGION()

    release();
    algo = getParam<flann_algorithm_t>(params, "algorithm", FLANN_INDEX_LINEAR);
    if( algo == FLANN_INDEX_SAVED )
    {
        load(_data, getParam<String>(params, "filename", String()));
        return;
    }

    Mat data = _data.getMat();
    index = 0;
    featureType = data.type();
    distType = _distType;

    if ( algo == FLANN_INDEX_LSH)
    {
        distType = FLANN_DIST_HAMMING;
    }

    switch( distType )
    {
    case FLANN_DIST_HAMMING:
        buildIndex< HammingDistance >(index, data, params);
        break;
    case FLANN_DIST_L2:
        buildIndex< ::cvflann::L2<float> >(index, data, params);
        break;
    case FLANN_DIST_L1:
        buildIndex< ::cvflann::L1<float> >(index, data, params);
        break;
#if MINIFLANN_SUPPORT_EXOTIC_DISTANCE_TYPES
    case FLANN_DIST_MAX:
        buildIndex< ::cvflann::MaxDistance<float> >(index, data, params);
        break;
    case FLANN_DIST_HIST_INTERSECT:
        buildIndex< ::cvflann::HistIntersectionDistance<float> >(index, data, params);
        break;
    case FLANN_DIST_HELLINGER:
        buildIndex< ::cvflann::HellingerDistance<float> >(index, data, params);
        break;
    case FLANN_DIST_CHI_SQUARE:
        buildIndex< ::cvflann::ChiSquareDistance<float> >(index, data, params);
        break;
    case FLANN_DIST_KL:
        buildIndex< ::cvflann::KL_Divergence<float> >(index, data, params);
        break;
#endif
    default:
        CV_Error(Error::StsBadArg, "Unknown/unsupported distance type");
    }
}
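A hedged sketch of how the build path above is usually reached through cv::flann::Index; the data layout and search parameters are illustrative only:

#include <opencv2/flann.hpp>

static void flannExample(const cv::Mat& features)   // CV_32F, one row per sample
{
    // KDTreeIndexParams sets the algorithm to FLANN_INDEX_KDTREE; with the default
    // FLANN_DIST_L2 distance this lands in the L2 branch of build() above.
    cv::flann::Index index(features, cv::flann::KDTreeIndexParams(4));

    cv::Mat indices, dists;
    index.knnSearch(features.row(0), indices, dists, 3, cv::flann::SearchParams(32));
}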