vector< pair< int, int > > FeatureShiCorner::genPoints( InputArray _image )
    {
        // Collect the coordinates of every positive corner-response pixel,
        // skipping a descRadius-wide margin around the image border so that
        // descriptor windows around the returned points stay inside the image.
        const Mat img = _image.getMat();
        const int yEnd = img.rows - descRadius;
        const int xEnd = img.cols - descRadius;

        auto corners = vector< pair< int, int > >();

        for( int row = descRadius; row < yEnd; ++row )
        {
            for( int col = descRadius; col < xEnd; ++col )
            {
                // Only keep pixels whose response is strictly positive.
                if( img.at< int32_t >( row, col ) > 0 )
                {
                    corners.push_back( make_pair( col, row ) );
                }
            }
        }

        return corners;
    }
Пример #2
0
bool libfacerec::isSymmetric(InputArray src, double eps) {
    // Dispatch the symmetry test on the single-channel element type.
    // eps is only used for the floating-point instantiations; integer
    // matrices are compared exactly.
    // Fix: removed the unreachable `break` statements that followed each
    // `return` (dead code).
    Mat m = src.getMat();
    switch (m.type()) {
    case CV_8SC1:  return isSymmetric_<char>(m);
    case CV_8UC1:  return isSymmetric_<unsigned char>(m);
    case CV_16SC1: return isSymmetric_<short>(m);
    case CV_16UC1: return isSymmetric_<unsigned short>(m);
    case CV_32SC1: return isSymmetric_<int>(m);
    case CV_32FC1: return isSymmetric_<float>(m, eps);
    case CV_64FC1: return isSymmetric_<double>(m, eps);
    default:
        // Unsupported element type (multi-channel or unknown depth).
        break;
    }
    return false;
}
Пример #3
0
int Dictionary::getDistanceToId(InputArray bits, int id, bool allRotations) const {

    CV_Assert(id >= 0 && id < bytesList.rows);

    // Compare against all four 90-degree rotations, or just the canonical
    // orientation when allRotations is false.
    unsigned int nRotations = allRotations ? 4 : 1;

    Mat candidateBytes = getByteListFromBits(bits.getMat());
    // Seed the minimum with a value larger than any reachable distance.
    int best = int(bits.total() * bits.total());
    for(unsigned int rot = 0; rot < nRotations; rot++) {
        // Hamming distance between the stored marker (at this rotation)
        // and the candidate byte list.
        const int dist = cv::hal::normHamming(
                bytesList.ptr(id) + rot*candidateBytes.cols,
                candidateBytes.ptr(),
                candidateBytes.cols);

        if(dist < best)
            best = dist;
    }
    return best;
}
Пример #4
0
void removeBorder(InputArray _src, OutputArray _dst, int top, int bottom, int left, int right)
{
    // Crop the given margins off each side of _src into _dst.
    CV_Assert( top >= 0 && bottom >= 0 && left >= 0 && right >= 0 );

    Mat src = _src.getMat();

    _dst.create( src.rows - top - bottom, src.cols - left - right, src.type() );
    Mat dst = _dst.getMat();

    // Skip the copy when the destination already aliases the source data
    // with identical layout.
    if( src.data != dst.data || src.step != dst.step )
    {
        const Range rowRange( top, src.rows - bottom );
        const Range colRange( left, src.cols - right );
        src( rowRange, colRange ).copyTo( dst );
    }
}
Пример #5
0
void Fisherfaces::predict(InputArray _src, Ptr<PredictCollector> collector) const {
    // Project the query image into the LDA subspace and feed the L2
    // distance to every training projection into the collector.
    Mat src = _src.getMat();
    // check data alignment just for clearer exception messages
    if(_projections.empty()) {
        // throw error if no data (or simply return -1?)
        String error_message = "This Fisherfaces model is not computed yet. Did you call Fisherfaces::train?";
        CV_Error(Error::StsBadArg, error_message);
    } else if(src.total() != (size_t) _eigenvectors.rows) {
        // Fix: src.total() is size_t; passing it to a %d conversion is
        // undefined behavior where size_t != int. Cast explicitly.
        String error_message = format("Wrong input image size. Reason: Training and Test images must be of equal size! Expected an image with %d elements, but got %d.", _eigenvectors.rows, (int)src.total());
        CV_Error(Error::StsBadArg, error_message);
    }
    // project into LDA subspace
    Mat q = LDA::subspaceProject(_eigenvectors, _mean, src.reshape(1,1));
    // find 1-nearest neighbor: hand every sample's distance to the
    // collector until it signals it has seen enough.
    collector->init((int)_projections.size());
    for (size_t sampleIdx = 0; sampleIdx < _projections.size(); sampleIdx++) {
        double dist = norm(_projections[sampleIdx], q, NORM_L2);
        int label = _labels.at<int>((int)sampleIdx);
        if (!collector->collect(label, dist))return;
    }
}
Пример #6
0
    void diffSign(InputArray _src1, OutputArray _src2, OutputArray _dst)
    {
        CV_OCL_RUN(_dst.isUMat(),
                   ocl_diffSign(_src1, _src2, _dst))

        // CPU fallback: element-wise sign-of-difference over float data.
        Mat a = _src1.getMat(), b = _src2.getMat();
        _dst.create(a.size(), a.type());
        Mat result = _dst.getMat();

        // Treat each row as a flat array of channel values.
        const int elemsPerRow = a.cols * a.channels();

        for (int row = 0; row < a.rows; ++row)
        {
            const float * const pa = a.ptr<float>(row);
            const float * const pb = b.ptr<float>(row);
            float* pr = result.ptr<float>(row);

            for (int col = 0; col < elemsPerRow; ++col)
                pr[col] = diffSign(pa[col], pb[col]);
        }
    }
Пример #7
0
void
isotropicPreconditionerFromPoints( InputArray _points,
                                   OutputArray _T )
{
  // Compute a 3x3 isotropic normalization transform for the given point
  // set, dispatching on the floating-point depth of the input.
  const Mat points = _points.getMat();
  const int depth = points.depth();
  CV_Assert((points.dims == 2 || points.dims == 3) && (depth == CV_32F || depth == CV_64F));

  _T.create(3, 3, depth);
  Mat T = _T.getMat();

  // Only CV_32F and CV_64F reach this point (asserted above).
  if ( depth == CV_32F )
    isotropicPreconditionerFromPoints<float>(points, T);
  else
    isotropicPreconditionerFromPoints<double>(points, T);
}
Пример #8
0
// calculates length of a curve (e.g. contour perimeter)
double cv::arcLength( InputArray _curve, bool is_closed )
{
    Mat curve = _curve.getMat();
    // Number of 2-element points; checkVector returns -1 for an invalid layout.
    int count = curve.checkVector(2);
    int depth = curve.depth();
    CV_Assert( count >= 0 && (depth == CV_32F || depth == CV_32S));
    double perimeter = 0;

    // Squared segment lengths are accumulated N at a time so sqrt() can be
    // applied to a whole buffer in one call instead of per element.
    int i, j = 0;
    const int N = 16;
    float buf[N];

    if( count <= 1 )
        return 0.;

    bool is_float = depth == CV_32F;
    // For a closed curve the first segment connects the last point back to
    // the first; for an open curve 'prev' starts at point 0, making the
    // first segment zero-length.
    int last = is_closed ? count-1 : 0;
    const Point* pti = (const Point*)curve.data;
    const Point2f* ptf = (const Point2f*)curve.data;

    Point2f prev = is_float ? ptf[last] : Point2f((float)pti[last].x,(float)pti[last].y);

    for( i = 0; i < count; i++ )
    {
        // Promote integer points to float on the fly.
        Point2f p = is_float ? ptf[i] : Point2f((float)pti[i].x,(float)pti[i].y);
        float dx = p.x - prev.x, dy = p.y - prev.y;
        buf[j] = dx*dx + dy*dy;

        // Flush the batch when it is full or at the final point.
        if( ++j == N || i == count-1 )
        {
            Mat bufmat(1, j, CV_32F, buf);
            sqrt(bufmat, bufmat);
            for( ; j > 0; j-- )
                perimeter += buf[j-1];
        }
        prev = p;
    }

    return perimeter;
}
Пример #9
0
// Interactive demo: repeatedly bilateral-filters the source, blends the
// result with the original, upsamples by resizeFactor and displays it,
// until the user presses 'q'.
void guiBilateralUpsample(InputArray srcimage, OutputArray dest, int resizeFactor)
{
	string windowName = "bilateral";
	namedWindow(windowName);
	Mat src = srcimage.getMat();

	// Trackbar-backed parameters, updated live by the HighGUI sliders.
	int alpha = 0; createTrackbar("a",windowName, &alpha, 100);

	int r = 3; createTrackbar("r",windowName, &r, 30);
	int sc = 30; createTrackbar("sigma_color",windowName, &sc, 255);
	int ss = 30; createTrackbar("sigma_space",windowName, &ss, 255);
	int iter = 3; createTrackbar("iteration",windowName, &iter, 10);

	int key = 0;
	while(key!='q')
	{
		// Apply the bilateral filter 'iter' times in sequence
		// (kernel diameter 2*r+1).
		Mat srctemp;
		src.copyTo(srctemp);
		for(int i=0;i<iter;i++)
		{
			Mat tmp;
			bilateralFilter(srctemp, tmp, 2*r+1, sc, ss, BORDER_REPLICATE);
			tmp.copyTo(srctemp);
		}

		// Blend the filtered result with the original.
		// NOTE(review): alphaBlend's weight semantics are defined elsewhere;
		// presumably alpha/100 weights srcimage — verify against alphaBlend.
		alphaBlend(srcimage, srctemp, alpha/100.0, srctemp);
		

		resize(srctemp, dest, Size(src.cols*resizeFactor, src.rows*resizeFactor), 0,0, INTER_CUBIC);

		imshow(windowName, dest);
		key = waitKey(30);
		if(key=='f')
		{
			// NOTE(review): maps nonzero->100 and 0->0, so pressing 'f'
			// never turns a zero alpha on — looks like an intended toggle
			// (alpha = alpha ? 0 : 100); confirm before changing.
			alpha = (alpha != 0) ? 100:0;
			setTrackbarPos("a", windowName, alpha);
		}
	}
	destroyWindow(windowName);
}
Пример #10
0
	// Interactive linear-contrast demo: displays (a/10)*src + b and lets the
	// user adjust gain (a) and offset (b) via trackbars or the j/l and i/k
	// keys. Runs until 'q' is pressed, then closes the window.
	void guiContrast(InputArray src_)
	{
		string window_name = "contrast";
		Mat src = src_.getMat();
		namedWindow(window_name);
		// 'a' stores 10x the gain so the integer trackbar gets 0.1 resolution.
		int a = 10;
		int b = 0;
		cv::createTrackbar("a/10", window_name, &a, 1024);
		cv::createTrackbar("b", window_name, &b, 256);
		int key = 0;
		cv::Mat show;
		while (key != 'q')
		{
			// Linear transform: gain a/10, offset b.
			show = a / 10.0*src + b;
			imshow(window_name, show);
			key = waitKey(33);

			// Keyboard adjustments are mirrored back to the trackbars.
			if (key == 'l')
			{
				a--;
				setTrackbarPos("a/10", window_name, a);
			}
			if (key == 'j')
			{
				a++;
				setTrackbarPos("a/10", window_name, a);
			}
			if (key == 'i')
			{
				b++;
				setTrackbarPos("b", window_name, b);
			}
			if (key == 'k')
			{
				b--;
				setTrackbarPos("b", window_name, b);
			}
		}
		destroyWindow(window_name);
	}
Пример #11
0
// Masked copy: copies *this into _dst wherever the mask is non-zero.
void Mat::copyTo( OutputArray _dst, InputArray _mask ) const
{
    Mat mask = _mask.getMat();
    // No mask supplied: fall back to the unconditional copy overload.
    if( !mask.data )
    {
        copyTo(_dst);
        return;
    }

    int cn = channels(), mcn = mask.channels();
    // Mask must be 8-bit, either single-channel or matching channel count.
    CV_Assert( mask.depth() == CV_8U && (mcn == 1 || mcn == cn) );
    bool colorMask = mcn > 1;

    // A multi-channel mask gates individual channel values (element size 1
    // channel); a single-channel mask gates whole pixels.
    size_t esz = colorMask ? elemSize1() : elemSize();
    BinaryFunc copymask = getCopyMaskFunc(esz);

    // Remember the old buffer so we can tell whether create() reallocated.
    uchar* data0 = _dst.getMat().data;
    _dst.create( dims, size, type() );
    Mat dst = _dst.getMat();

    if( dst.data != data0 ) // do not leave dst uninitialized
        dst = Scalar(0);

    // Fast path for 1-D/2-D matrices: a single (possibly merged) pass.
    if( dims <= 2 )
    {
        CV_Assert( size() == mask.size() );
        Size sz = getContinuousSize(*this, dst, mask, mcn);
        copymask(data, step, mask.data, mask.step, dst.data, dst.step, sz, &esz);
        return;
    }

    // Generic n-dimensional path: iterate over contiguous planes.
    const Mat* arrays[] = { this, &dst, &mask, 0 };
    uchar* ptrs[3];
    NAryMatIterator it(arrays, ptrs);
    Size sz((int)(it.size*mcn), 1);

    for( size_t i = 0; i < it.nplanes; i++, ++it )
        copymask(ptrs[0], 0, ptrs[2], 0, ptrs[1], 0, sz, &esz);
}
Пример #12
0
// gliese581h suggested filling a cv::Mat with descriptors to enable BFmatcher compatibility
// speed-ups and enhancements by gliese581h
// Computes one LUCID descriptor row per keypoint: the blurred BGR values of
// the (2*l_kernel+1)^2 patch around the keypoint, sorted ascending per row.
void LUCIDImpl::compute(InputArray _src, std::vector<KeyPoint> &keypoints, OutputArray _desc) {
    cv::Mat src_input = _src.getMat();
    if (src_input.empty())
        return;
    CV_Assert(src_input.depth() == CV_8U && src_input.channels() == 3);

    Mat_<Vec3b> src;

    // Box-blur the input before sampling patch intensities.
    blur(src_input, src, cv::Size(b_kernel, b_kernel));

    // m = descriptor length: 3 channel values for every pixel of the patch.
    int x, y, j, d, p, m = (l_kernel*2+1)*(l_kernel*2+1)*3, width = src.cols, height = src.rows, r, c;

    Mat_<uchar> desc(static_cast<int>(keypoints.size()), m);

    for (std::size_t i = 0; i < keypoints.size(); ++i) {
        // x,y: patch top-left corner; d,p: right/bottom patch bounds;
        // j: column to rewind x to at each new row; r: descriptor row;
        // c: write cursor within that row.
        x = static_cast<int>(keypoints[i].pt.x)-l_kernel, y = static_cast<int>(keypoints[i].pt.y)-l_kernel, d = x+2*l_kernel, p = y+2*l_kernel, j = x, r = static_cast<int>(i), c = 0;

        while (x <= d) {
            // Coordinates outside the image wrap around to the opposite edge.
            Vec3b &pix = src((y < 0 ? height+y : y >= height ? y-height : y), (x < 0 ? width+x : x >= width ? x-width : x));

            desc(r, c++) = pix[0];
            desc(r, c++) = pix[1];
            desc(r, c++) = pix[2];

            ++x;
            if (x > d) {
                // End of a patch row: advance to the next row or finish.
                if (y < p) {
                    ++y;
                    x = j;
                }
                else
                    break;
            }
        }
    }

    // Sorting each row produces the order-permutation form LUCID matches on.
    if (_desc.needed())
        sort(desc, _desc, SORT_EVERY_ROW | SORT_ASCENDING);
}
    FeatureValue FeatureShiCorner::genDescriptor( InputArray _image, vector< pair< int, int > > points )
    {
        //TODO: finish

        // Build one descriptor value per pixel of a kSize x kSize window
        // (kSize = 2*descRadius + 1) centered on each corner point,
        // concatenated over all points.
        auto out = FeatureValue();

        // Loop-invariant data hoisted out of the per-point loop (the
        // original re-fetched the Mat header for every point).
        const auto image  = _image.getMat();
        const auto height = image.rows;
        const auto width  = image.cols;

        for( auto point : points )
        {
            const auto x = point.first;
            const auto y = point.second;

            // Fix: iterate over the window *radius* (descRadius), not the
            // full kernel size. The old bounds (-kSize..kSize with
            // kSize = 2*descRadius+1) sampled a (4*descRadius+3)^2 area
            // instead of the intended kSize x kSize window, which also
            // contradicted the border note below.
            for( int fy = -descRadius; fy <= descRadius; fy++ )
            {
                for( int fx = -descRadius; fx <= descRadius; fx++ )
                {
                    const int xD = x + fx;
                    const int yD = y + fy;

                    // not really needed since genPoints removes corners too
                    // close to the image edges
                    if( (xD < 0) || (xD >= width) || (yD < 0) || (yD >= height) )
                    {
                        out.push_back( 0 );
                    }
                    else
                    {
                        out.push_back( (double) image.at< int32_t >( yD, xD ) );
                    }
                }
            }
        }

        return out;
    }
Пример #14
0
void BriefDescriptorExtractor::computeImpl(InputArray image, std::vector<KeyPoint>& keypoints, OutputArray descriptors) const
{
    // BRIEF works on 8-bit gray data; convert color input first.
    Mat grayImage = image.getMat();
    if( image.type() != CV_8U ) cvtColor( image, grayImage, COLOR_BGR2GRAY );

    ///TODO allow the user to pass in a precomputed integral image
    //if(image.type() == CV_32S)
    //  sum = image;
    //else

    // Construct integral image for fast smoothing (box filter) inside test_fn_.
    Mat sum;
    integral( grayImage, sum, CV_32S);

    //Remove keypoints very close to the border
    KeyPointsFilter::runByImageBorder(keypoints, image.size(), PATCH_SIZE/2 + KERNEL_SIZE/2);

    // One bytes_-wide descriptor row per surviving keypoint, zeroed first.
    descriptors.create((int)keypoints.size(), bytes_, CV_8U);
    descriptors.setTo(Scalar::all(0));
    test_fn_(sum, keypoints, descriptors);
}
Пример #15
0
// Copies any supported InputArray kind into this OpenGL buffer.
void cv::GlBuffer::copyFrom(InputArray mat_)
{
#ifndef HAVE_OPENGL
    // Built without OpenGL support: any use raises the no-GL error.
    (void)mat_;
    throw_nogl;
#else
    int kind = mat_.kind();
    Size _size = mat_.size();
    int _type = mat_.type();

    // (Re)allocate this buffer to match the source dimensions and type.
    create(_size, _type);

    switch (kind)
    {
    case _InputArray::OPENGL_BUFFER:
        {
            // Source is already a GL buffer: plain assignment takes it over.
            GlBuffer buf = mat_.getGlBuffer();
            *this = buf;
            break;
        }
    case _InputArray::GPU_MAT:
        {
            // Device matrix: device-side copy, only available with CUDA.
            #if !defined HAVE_CUDA || defined(CUDA_DISABLER)
                throw_nocuda;
            #else
                GpuMat d_mat = mat_.getGpuMat();
                impl_->copyFrom(d_mat);
            #endif

            break;
        }
    default:
        {
            // Host matrix: upload through the implementation object.
            Mat mat = mat_.getMat();
            impl_->copyFrom(mat, usage_);
        }
    }
#endif
}
Пример #16
0
 static std::vector<Mat> extractMatVector(InputArray in)
 {
     // Normalize any supported InputArray kind into a std::vector<Mat>.
     if (in.isMatVector())
     {
         // Already the right container: reuse it directly.
         return *static_cast<const std::vector<Mat>*>(in.getObj());
     }
     if (in.isUMatVector())
     {
         // Convert each UMat to a Mat header.
         std::vector<Mat> mats;
         in.getMatVector(mats);
         return mats;
     }
     if (in.isMat() || in.isUMat())
     {
         // Single matrix: wrap it in a one-element vector.
         return std::vector<Mat>(1, in.getMat());
     }
     // Unsupported kind: fail the assertion, return empty for safety.
     CV_Assert(in.isMat() || in.isMatVector() || in.isUMat() || in.isUMatVector());
     return std::vector<Mat>();
 }
Пример #17
0
// Concatenates per-cell histograms of a grid_x x grid_y partition of the
// input into a single 1 x (grid_x*grid_y*numPatterns) CV_32F feature row.
static Mat spatial_histogram(InputArray _src, int numPatterns, int grid_x, int grid_y, bool /*normed*/)
{
    Mat src    = _src.getMat();

    // Cell dimensions of the grid partition.
    const int cellW = src.cols/grid_x;
    const int cellH = src.rows/grid_y;

    // One histogram row per grid cell, zero-initialized.
    Mat result = Mat::zeros(grid_x * grid_y, numPatterns, CV_32FC1);

    // Empty input: return the all-zero feature vector.
    if (src.empty())
        return result.reshape(1,1);

    int rowIdx = 0;

    // Walk the grid row by row, cell by cell.
    for (int gy = 0; gy < grid_y; gy++)
    {
        for (int gx = 0; gx < grid_x; gx++)
        {
            // Histogram of the current cell over [0, numPatterns-1].
            Mat cell = Mat(src, Range(gy*cellH,(gy+1)*cellH), Range(gx*cellW,(gx+1)*cellW));
            Mat cellHist = histc(cell, 0, (numPatterns-1), true);

            // Store it as one float row of the result matrix.
            Mat dstRow = result.row(rowIdx);
            cellHist.reshape(1,1).convertTo(dstRow, CV_32FC1);

            rowIdx++;
        }
    }

    // Flatten to a single feature row.
    return result.reshape(1,1);
}
Пример #18
0
// Writes a trajectory of 4x4 poses to numbered files: files_format is a
// printf-style pattern, numbering starts at max(0, start).
void cv::viz::writeTrajectory(InputArray _traj, const String& files_format, int start, const String& tag)
{
    // Case 1: a vector of per-pose matrices, one file per pose.
    if (_traj.kind() == _InputArray::STD_VECTOR_MAT)
    {
#if CV_MAJOR_VERSION < 3
        std::vector<Mat>& v = *(std::vector<Mat>*)_traj.obj;
#else
        std::vector<Mat>& v = *(std::vector<Mat>*)_traj.getObj();
#endif

        for(size_t i = 0, index = max(0, start); i < v.size(); ++i, ++index)
        {
            Affine3d affine;
            Mat pose = v[i];
            // Each pose must be a single 16-channel element (4x4 transform).
            CV_Assert(pose.type() == CV_32FC(16) || pose.type() == CV_64FC(16));
            pose.copyTo(affine.matrix);
            writePose(cv::format(files_format.c_str(), index), affine, tag);
        }
        return;
    }

    // Case 2: a single matrix/vector of 16-channel elements, one pose each.
    if (_traj.kind() == _InputArray::STD_VECTOR || _traj.kind() == _InputArray::MAT)
    {
        CV_Assert(_traj.type() == CV_32FC(16) || _traj.type() == CV_64FC(16));

        Mat traj = _traj.getMat();

        // Element depth selects the Affine3f/Affine3d accessor.
        if (traj.depth() == CV_32F)
            for(size_t i = 0, index = max(0, start); i < traj.total(); ++i, ++index)
                writePose(cv::format(files_format.c_str(), index), traj.at<Affine3f>((int)i), tag);

        if (traj.depth() == CV_64F)
            for(size_t i = 0, index = max(0, start); i < traj.total(); ++i, ++index)
                writePose(cv::format(files_format.c_str(), index), traj.at<Affine3d>((int)i), tag);
        return;
    }

    CV_Error(Error::StsError, "Unsupported array kind");
}
Пример #19
0
void FeatureExtractorLch3D::extractBlockHist(InputArray iBlock, OutputArray oFeature) {
    // Builds a normalized 3-D HSV color histogram (_h_bin x _s_bin x _v_bin,
    // flattened to a _dim x 1 CV_32F vector) of an interleaved HSV block.
    const static float MAX_H = 180.0f;
    const static float MAX_S = 255.0f;
    const static float MAX_V = 255.0f;

    Mat block = iBlock.getMat();
    int w = block.size().width;
    int h = block.size().height;

    // Bin widths per channel.
    float h_step = MAX_H / _h_bin;
    float s_step = MAX_S / _s_bin;
    float v_step = MAX_V / _v_bin;
    int count = 0;

    oFeature.create(_dim, 1, CV_32FC1);
    Mat feature = oFeature.getMat();
    feature.setTo(0);

    for (int y=0; y<h; ++y) {
        const unsigned char* ptr = block.ptr<unsigned char>(y);
        for (int x=0; x<w; ++x) {
            int xx =  3 * x;
            // Fix: renamed from h/s/v — the old 'h' shadowed the block
            // height declared above.
            unsigned char hue = ptr[xx];
            unsigned char sat = ptr[xx + 1];
            unsigned char val = ptr[xx + 2];

            // Clamp to the last bin so boundary values stay in range.
            int hi = min((int) floor(hue/h_step), _h_bin - 1);
            int si = min((int) floor(sat/s_step), _s_bin - 1);
            int vi = min((int) floor(val/v_step), _v_bin - 1);
            // Flat bin index: v | s | h bit fields.
            int i = (vi << (_h_bit + _s_bit)) + (si << _h_bit) + hi;
            ++feature.at<float>(i);
            ++count;
        }
    }
    // Fix: guard against division by zero when the block has no pixels;
    // the zeroed feature vector is returned as-is.
    if (count == 0)
        return;
    for (int i=0; i < _dim; ++i) {
        feature.at<float>(i) /= count;
    }
}
Пример #20
0
// Non-local-means denoising of an 8-bit color image, performed in Lab space
// so luminance (h) and chrominance (hForColorComponents) can be filtered
// with different strengths.
void cv::fastNlMeansDenoisingColored( InputArray _src, OutputArray _dst,
                                      float h, float hForColorComponents,
                                      int templateWindowSize, int searchWindowSize)
{
    int type = _src.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);

    if (type != CV_8UC3 && type != CV_8UC4)
    {
        CV_Error(Error::StsBadArg, "Type of input image should be CV_8UC3!");
        return;
    }

    // Try the OpenCL implementation first when either side is a UMat.
    CV_OCL_RUN(_src.dims() <= 2 && (_dst.isUMat() || _src.isUMat()),
                ocl_fastNlMeansDenoisingColored(_src, _dst, h, hForColorComponents,
                                                templateWindowSize, searchWindowSize))

    Mat src = _src.getMat();
    _dst.create(src.size(), type);
    Mat dst = _dst.getMat();

    Mat src_lab;
    cvtColor(src, src_lab, COLOR_LBGR2Lab);

    // Split L from the ab channel pair.
    Mat l(src.size(), CV_8U);
    Mat ab(src.size(), CV_8UC2);
    Mat l_ab[] = { l, ab };
    int from_to[] = { 0,0, 1,1, 2,2 };
    mixChannels(&src_lab, 1, l_ab, 2, from_to, 3);

    // Denoise luminance and chrominance independently.
    fastNlMeansDenoising(l, l, h, templateWindowSize, searchWindowSize);
    fastNlMeansDenoising(ab, ab, hForColorComponents, templateWindowSize, searchWindowSize);

    // Recombine the planes and convert back to the original color space.
    Mat l_ab_denoised[] = { l, ab };
    Mat dst_lab(src.size(), CV_MAKE_TYPE(depth, 3));
    mixChannels(l_ab_denoised, 2, &dst_lab, 1, from_to, 3);

    cvtColor(dst_lab, dst, COLOR_Lab2LBGR, cn);
}
Пример #21
0
//returns the point of the brightest pixel, on the channel of the passed color
Point findLED(InputArray _imgFrame, int colorChnl){

    Mat imgFrame = _imgFrame.getMat();

    // Split the frame into per-channel planes, then locate the global
    // maximum on the requested channel only.
    vector<Mat> channels;
    split(imgFrame, channels);

    double lowest, highest;
    Point lowestAt, brightestAt;
    minMaxLoc( channels[ colorChnl ], &lowest, &highest, &lowestAt, &brightestAt);

    return brightestAt;
}
Пример #22
0
Mat SpatialHistogramReco::spatial_histogram(InputArray _src) const {
    // Stack one per-cell histogram row for every cell of the
    // _grid_x x _grid_y partition of the input image.
    Mat src = _src.getMat();
    if(src.empty())
        return Mat();

    // Cell dimensions of the partition.
    const int cellW = src.cols/_grid_x;
    const int cellH = src.rows/_grid_y;

    Mat result = Mat::zeros(0, 0, hist_type);
    for(int gy = 0; gy < _grid_y; gy++) {
        for(int gx = 0; gx < _grid_x; gx++) {
            // View (no copy) of the current grid cell.
            Mat cell(src, Range(gy*cellH,(gy+1)*cellH), Range(gx*cellW,(gx+1)*cellW));
            Mat hist = Mat::zeros(1,hist_len,hist_type);

            // Subclass-specific histogram operator fills 'hist'.
            oper(cell,hist);

            result.push_back(hist);
        }
    }
    return result;
}
Пример #23
0
void show_hist(InputArray img) {
    // Renders a 256-bin intensity histogram of the image into a 200x256
    // window named "Histo".
    // NOTE(review): indexes raw .data linearly, which assumes a continuous
    // single-channel 8-bit image — confirm at call sites.

    int height = 200;

    Mat hist(height, 256, CV_8UC3, Scalar(255,255,255));

    // Fix: 256 bins, not 255 — pixel value 255 indexed past the end (UB).
    vector<int> bins (256);

    // Hoisted: the original called img.getMat() once per pixel.
    Mat data = img.getMat();

    int max = 0;
    for (int i = 0; i < img.size().height * img.size().width ; i++) {
        auto val = data.data[i];
        bins[val] ++;
        if (bins[val] > max) {
            max = bins[val];
        }
    }

    // Fix: avoid division by zero on an empty image (max stays 0).
    if (max == 0) {
        max = 1;
    }

    // Fix: the loop counter was uninitialized ('int i;'), which is UB.
    for (int i = 0; i <= 255; i++) {
        line(hist, Point(i,height), Point(i, height - (int)(bins[i] * (double)(height)/max) ), Scalar(0,0,0), 1);
    }

    imshow("Histo", hist);
}
Пример #24
0
// Uploads an 8-bit 1/3/4-channel cv::Mat into this source's vtkImageData.
void cv::viz::vtkImageMatSource::SetImage(InputArray _image)
{
    CV_Assert(_image.depth() == CV_8U && (_image.channels() == 1 || _image.channels() == 3 || _image.channels() == 4));

    Mat image = _image.getMat();

    // Single z-slice with the image's width and height.
    this->ImageData->SetDimensions(image.cols, image.rows, 1);
#if VTK_MAJOR_VERSION <= 5
    // Legacy VTK 5 allocation API.
    this->ImageData->SetNumberOfScalarComponents(image.channels());
    this->ImageData->SetScalarTypeToUnsignedChar();
    this->ImageData->AllocateScalars();
#else
    this->ImageData->AllocateScalars(VTK_UNSIGNED_CHAR, image.channels());
#endif

    // Channel count selects the copy routine (gray / RGB / RGBA).
    switch(image.channels())
    {
    case 1: copyGrayImage(image, this->ImageData); break;
    case 3: copyRGBImage (image, this->ImageData); break;
    case 4: copyRGBAImage(image, this->ImageData); break;
    }
    // Notify VTK that the image contents changed.
    this->ImageData->Modified();
}
Пример #25
0
void IPPE::IPPERot2vec(InputArray _R, OutputArray _r)
{
    // Converts a 3x3 rotation matrix R (CV_64F) into an axis-angle
    // (Rodrigues) vector r: direction = rotation axis, norm = angle.
    cv::Mat R = _R.getMat();
    cv::Mat rvec = _r.getMat();
    double trace = R.at<double>(0,0) + R.at<double>(1,1) + R.at<double>(2,2);
    // Fix: clamp the cosine argument — numeric noise can push (trace-1)/2
    // slightly outside [-1, 1], making acos() return NaN.
    double cosw = (trace-1.0)/2.0;
    if (cosw > 1.0) cosw = 1.0;
    if (cosw < -1.0) cosw = -1.0;
    double w_norm = acos(cosw);
    double eps =  std::numeric_limits<double>::epsilon();
    if (w_norm < eps) //rotation is the identity
    {
        rvec.setTo(0);
    }
    else
    {
        // Fix: compute the 1/(2*sin) scale only after the identity check;
        // the original divided by 2*sin(w_norm) ~ 0 unconditionally.
        // NOTE(review): w_norm == pi (trace == -1) still divides by ~0;
        // handle separately if 180-degree rotations can occur here.
        double d = 1/(2*sin(w_norm))*w_norm;
        double c0 = R.at<double>(2,1)-R.at<double>(1,2);
        double c1 = R.at<double>(0,2)-R.at<double>(2,0);
        double c2 = R.at<double>(1,0)-R.at<double>(0,1);
        rvec.at<double>(0) = d*c0;
        rvec.at<double>(1) = d*c1;
        rvec.at<double>(2) = d*c2;
    }
}
Пример #26
0
void LBPH::predict(InputArray _src, Ptr<PredictCollector> collector) const {
    // Fail early when the model has never been trained.
    if(_histograms.empty()) {
        // throw error if no data (or simply return -1?)
        String error_message = "This LBPH model is not computed yet. Did you call the train method?";
        CV_Error(Error::StsBadArg, error_message);
    }
    Mat src = _src.getMat();

    // Describe the query image: extended LBP image first, then its
    // normalized spatial histogram over the configured grid.
    Mat lbp_image = elbp(src, _radius, _neighbors);
    const int numPatterns = static_cast<int>(std::pow(2.0, static_cast<double>(_neighbors)));
    Mat query = spatial_histogram(
            lbp_image, /* lbp_image */
            numPatterns, /* number of possible patterns */
            _grid_x, /* grid size x */
            _grid_y, /* grid size y */
            true /* normed histograms */);

    // find 1-nearest neighbor: hand every training sample's chi-square
    // distance to the collector until it signals it is done.
    collector->init((int)_histograms.size());
    for (size_t idx = 0; idx < _histograms.size(); idx++) {
        const double dist = compareHist(_histograms[idx], query, HISTCMP_CHISQR_ALT);
        const int label = _labels.at<int>((int)idx);
        if (!collector->collect(label, dist))
            return;
    }
}
Пример #27
0
// Updates the mixture-of-Gaussians background model with one frame and
// produces the 8-bit foreground mask.
void BackgroundSubtractorMOG::operator()(InputArray _image, OutputArray _fgmask, double learningRate)
{
    Mat image = _image.getMat();
    // Re-initialize on the first frame, on a full-relearn request
    // (learningRate >= 1), or when the frame geometry/type changes.
    bool needToInitialize = nframes == 0 || learningRate >= 1 || image.size() != frameSize || image.type() != frameType;

    if( needToInitialize )
        initialize(image.size(), image.type());

    CV_Assert( image.depth() == CV_8U );
    _fgmask.create( image.size(), CV_8U );
    Mat fgmask = _fgmask.getMat();

    ++nframes;
    // Negative learningRate (and the first frame) selects the automatic
    // rate 1/min(nframes, history).
    learningRate = learningRate >= 0 && nframes > 1 ? learningRate : 1./min( nframes, history );
    CV_Assert(learningRate >= 0);

    // Dispatch on channel count; only 8-bit 1- and 3-channel input is supported.
    if( image.type() == CV_8UC1 )
        process8uC1( image, fgmask, learningRate, bgmodel, nmixtures, backgroundRatio, varThreshold, noiseSigma );
    else if( image.type() == CV_8UC3 )
        process8uC3( image, fgmask, learningRate, bgmodel, nmixtures, backgroundRatio, varThreshold, noiseSigma );
    else
        CV_Error( CV_StsUnsupportedFormat, "Only 1- and 3-channel 8-bit images are supported in BackgroundSubtractorMOG" );
}
Пример #28
0
Vec2d EM::predict(InputArray _sample, OutputArray _probs) const
{
    // Predicts the mixture component for a single sample; optionally
    // returns the per-cluster probabilities in _probs (1 x nclusters, CV_64F).
    Mat sample = _sample.getMat();
    CV_Assert(isTrained());

    CV_Assert(!sample.empty());
    // Convert to double precision if needed.
    if(sample.type() != CV_64FC1)
    {
        Mat tmp;
        sample.convertTo(tmp, CV_64FC1);
        sample = tmp;
    }
    // Fix: Mat::reshape returns a new header and does not modify the matrix
    // in place — the original discarded the result, so the flatten to a
    // single row was a no-op.
    sample = sample.reshape(1, 1);

    Mat probs;
    if( _probs.needed() )
    {
        _probs.create(1, nclusters, CV_64FC1);
        probs = _probs.getMat();
    }

    return computeProbabilities(sample, !probs.empty() ? &probs : 0);
}
Пример #29
0
//------------------------------------------------------------------------------
// cv::elbp
//------------------------------------------------------------------------------
// Extended (circular) LBP: for each pixel, compares 'neighbors' bilinearly
// interpolated samples on a circle of the given radius against the center
// pixel and packs the comparison bits into a CV_32S code image.
template <typename _Tp> static
inline void elbp_(InputArray _src, OutputArray _dst, int radius, int neighbors) {
    //get matrices
    Mat src = _src.getMat();
    // allocate memory for result (a radius-wide border is cropped off)
    _dst.create(src.rows-2*radius, src.cols-2*radius, CV_32SC1);
    Mat dst = _dst.getMat();
    // zero
    dst.setTo(0);
    for(int n=0; n<neighbors; n++) {
        // sample points: n-th position on the circle (y negated so the
        // samples advance in image coordinates)
        float x = static_cast<float>(radius * cos(2.0*CV_PI*n/static_cast<float>(neighbors)));
        float y = static_cast<float>(-radius * sin(2.0*CV_PI*n/static_cast<float>(neighbors)));
        // relative indices: the 4 integer pixels surrounding the sample
        int fx = static_cast<int>(floor(x));
        int fy = static_cast<int>(floor(y));
        int cx = static_cast<int>(ceil(x));
        int cy = static_cast<int>(ceil(y));
        // fractional part
        float ty = y - fy;
        float tx = x - fx;
        // set interpolation weights (standard bilinear weights)
        float w1 = (1 - tx) * (1 - ty);
        float w2 =      tx  * (1 - ty);
        float w3 = (1 - tx) *      ty;
        float w4 =      tx  *      ty;
        // iterate through your data
        for(int i=radius; i < src.rows-radius;i++) {
            for(int j=radius;j < src.cols-radius;j++) {
                // calculate interpolated value
                float t = static_cast<float>(w1*src.at<_Tp>(i+fy,j+fx) + w2*src.at<_Tp>(i+fy,j+cx) + w3*src.at<_Tp>(i+cy,j+fx) + w4*src.at<_Tp>(i+cy,j+cx));
                // floating point precision, so check some machine-dependent epsilon
                // (bit n is set when the sample >= center within epsilon)
                dst.at<int>(i-radius,j-radius) += ((t > src.at<_Tp>(i,j)) || (std::abs(t-src.at<_Tp>(i,j)) < std::numeric_limits<float>::epsilon())) << n;
            }
        }
    }
}
Пример #30
0
// Updates the KNN background model with one frame and produces the 8-bit
// foreground mask.
void BackgroundSubtractorKNNImpl::apply(InputArray _image, OutputArray _fgmask, double learningRate)
{
    Mat image = _image.getMat();
    // Re-initialize on the first frame, on a full-relearn request
    // (learningRate >= 1), or when the frame geometry/type changes.
    bool needToInitialize = nframes == 0 || learningRate >= 1 || image.size() != frameSize || image.type() != frameType;

    if( needToInitialize )
        initialize(image.size(), image.type());

    _fgmask.create( image.size(), CV_8U );
    Mat fgmask = _fgmask.getMat();

    ++nframes;
    // Negative learningRate (and the first frame) selects the automatic
    // rate 1/min(2*nframes, history).
    learningRate = learningRate >= 0 && nframes > 1 ? learningRate : 1./std::min( 2*nframes, history );
    CV_Assert(learningRate >= 0);

    //parallel_for_(Range(0, image.rows),
    //              KNNInvoker(image, fgmask,
    // Single-pass KNN background update over the whole frame, driven by the
    // long/mid/short-term model state and shadow-detection settings.
    icvUpdatePixelBackgroundNP(image, fgmask,
            bgmodel,
            nNextLongUpdate,
            nNextMidUpdate,
            nNextShortUpdate,
            aModelIndexLong,
            aModelIndexMid,
            aModelIndexShort,
            nLongCounter,
            nMidCounter,
            nShortCounter,
            nN,
            (float)learningRate,
            fTb,
            nkNN,
            fTau,
            bShadowDetection,
            nShadowDetection
            );
}