// Computes the shadow masks, i.e. the occluded pixels where the model cannot be reconstructed
void GrayCodePattern_Impl::computeShadowMasks( InputArrayOfArrays blackImages, InputArrayOfArrays whiteImages,
                                               OutputArrayOfArrays shadowMasks ) const
{
  std::vector<Mat>& whiteImages_ = *( std::vector<Mat>* ) whiteImages.getObj();
  std::vector<Mat>& blackImages_ = *( std::vector<Mat>* ) blackImages.getObj();
  std::vector<Mat>& shadowMasks_ = *( std::vector<Mat>* ) shadowMasks.getObj();

  shadowMasks_.resize( whiteImages_.size() );

  int cam_width = whiteImages_[0].cols;
  int cam_height = whiteImages_[0].rows;

  // TODO: parallelize for
  for( int k = 0; k < (int) shadowMasks_.size(); k++ )
  {
    shadowMasks_[k] = Mat( cam_height, cam_width, CV_8U );
    for( int i = 0; i < cam_width; i++ )
    {
      for( int j = 0; j < cam_height; j++ )
      {
        double white = whiteImages_[k].at<uchar>( Point( i, j ) );
        double black = blackImages_[k].at<uchar>( Point( i, j ) );

        if( std::abs( white - black ) > blackThreshold )
        {
          shadowMasks_[k].at<uchar>( Point( i, j ) ) = ( uchar ) 1;
        }
        else
        {
          shadowMasks_[k].at<uchar>( Point( i, j ) ) = ( uchar ) 0;
        }
      }
    }
  }
}
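The per-pixel loops above can be expressed more compactly with OpenCV's array operations. A minimal sketch, reusing whiteImages_, blackImages_, shadowMasks_ and blackThreshold from the function above:

// Vectorized equivalent of the inner i/j loops for image k:
// absdiff + compare yields a 0/255 mask, which is rescaled to the
// 0/1 values written by the loop.
cv::Mat diff;
cv::absdiff( whiteImages_[k], blackImages_[k], diff );
cv::Mat mask = diff > blackThreshold;                 // CV_8U, 0 or 255
mask.convertTo( shadowMasks_[k], CV_8U, 1.0 / 255 );  // CV_8U, 0 or 1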
Example #2
  virtual void run(InputArrayOfArrays _points2d)
  {
    std::vector<Mat> points2d;
    _points2d.getMatVector(points2d);
    CV_Assert( _points2d.total() >= 2 );

    // Parse 2d points to Tracks
    Tracks tracks;
    parser_2D_tracks(points2d, tracks);

    // Set the libmv logging level
    libmv_initLogging("");

    if (libmv_reconstruction_options_.verbosity_level >= 0)
    {
      libmv_startDebugLogging();
      libmv_setLoggingVerbosity(
        libmv_reconstruction_options_.verbosity_level);
    }

    // Perform reconstruction
    libmv_reconstruction_ =
      *libmv_solveReconstruction(tracks,
                                 &libmv_camera_intrinsics_options_,
                                 &libmv_reconstruction_options_);
  }
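The expected input here is one 2xN matrix of tracked image points per view, which parser_2D_tracks converts into libmv Tracks. A minimal sketch of building such an input (the view and track counts are illustrative assumptions):

// Three views, 50 tracked points each, all at the origin.
std::vector<cv::Mat> views;
for( int v = 0; v < 3; ++v )
    views.push_back( cv::Mat::zeros( 2, 50, CV_64F ) );
// reconstructor->run( views );  // 'reconstructor' is an instance of this impl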
Example #3
  //  Reconstruction function for API
  void
  reconstruct(const InputArrayOfArrays points2d, OutputArrayOfArrays projection_matrices, OutputArray points3d,
              bool is_projective, bool has_outliers, bool is_sequence)
  {

    int nviews = (int)points2d.total();
    cv::Mat F;

    // OpenCV data types
    std::vector<cv::Mat> pts2d;
    points2d.getMatVector(pts2d);
    int depth = pts2d[0].depth();

    // Projective reconstruction

    if (is_projective)
    {

      // Two view reconstruction

      if (nviews == 2)
      {

        // Get fundamental matrix
        fundamental8Point(pts2d[0], pts2d[1], F, has_outliers);

        // Get Projection matrices
        cv::Mat P, Pp;
        projectionsFromFundamental(F, P, Pp);
        projection_matrices.create(2, 1, depth);
        P.copyTo(projection_matrices.getMatRef(0));
        Pp.copyTo(projection_matrices.getMatRef(1));

        //  Triangulate and find 3D points using inliers
        triangulatePoints(points2d, projection_matrices, points3d);
      }
    }

    // Affine reconstruction

    else
    {

      // Two view reconstruction

      if (nviews == 2)
      {
        // TODO: affine two-view reconstruction not implemented
      }
      else
      {
        // TODO: affine n-view reconstruction not implemented
      }

    }

  }
Example #4
 void descriptorExtractor::extract(InputArrayOfArrays inputimg, OutputArray feature, String feature_blob)
 {
     if (net_ready)
     {
         Blob<float>* input_layer = convnet->input_blobs()[0];
         input_layer->Reshape(1, num_channels,
         input_geometry.height, input_geometry.width);
         /* Forward dimension change to all layers. */
         convnet->Reshape();
         std::vector<cv::Mat> input_channels;
         wrapInput(&input_channels);
         if (inputimg.kind() == _InputArray::MAT)  /* 65536 == _InputArray::MAT */
         {/* this is a Mat */
             Mat img = inputimg.getMat();
             preprocess(img, &input_channels);
             convnet->ForwardPrefilled();
             /* Copy the output layer to a std::vector */
             Blob<float>* output_layer = convnet->blob_by_name(feature_blob).get();
             const float* begin = output_layer->cpu_data();
             const float* end = begin + output_layer->channels();
             std::vector<float> featureVec = std::vector<float>(begin, end);
             cv::Mat feature_mat = cv::Mat(featureVec, true).t();
             feature_mat.copyTo(feature);
         }
         else
         {/* This is a vector<Mat> */
             vector<Mat> img;
             inputimg.getMatVector(img);
             Mat feature_vector;
             for (unsigned int i = 0; i < img.size(); ++i)
             {
                 preprocess(img[i], &input_channels);
                 convnet->ForwardPrefilled();
                 /* Copy the output layer to a std::vector */
                 Blob<float>* output_layer = convnet->blob_by_name(feature_blob).get();
                 const float* begin = output_layer->cpu_data();
                 const float* end = begin + output_layer->channels();
                 std::vector<float> featureVec = std::vector<float>(begin, end);
                 if (i == 0)
                 {
                     feature_vector = cv::Mat(featureVec, true).t();
                     int dim_feature = feature_vector.cols;
                     feature_vector.resize(img.size(), dim_feature);
                 }
                 // copyTo writes through the shared row header (a plain '=' is easy to misread as a header rebind)
                 cv::Mat(featureVec, true).t().copyTo(feature_vector.row(i));
             }
             feature_vector.copyTo(feature);
         }
     }
     else
         std::cerr << "Device must be set properly using constructor and the net must be set in advance using loadNet." << std::endl;
 }
Example #5
static void collectCalibrationData( InputArrayOfArrays objectPoints,
                                    InputArrayOfArrays imagePoints1,
                                    InputArrayOfArrays imagePoints2,
                                    Mat& objPtMat, Mat& imgPtMat1, Mat* imgPtMat2,
                                    Mat& npoints )
{
    int nimages = (int)objectPoints.total();
    int i, j = 0, ni = 0, total = 0;
    CV_Assert(nimages > 0 && nimages == (int)imagePoints1.total() &&
        (!imgPtMat2 || nimages == (int)imagePoints2.total()));

    cout << " Number of Frames:  " << nimages << endl;
    for( i = 0; i < nimages; i++ )
    {
        cout << endl << "Object Points: " << endl;
        printMatOBJ(objectPoints.getMat(i));
        cout << endl << "Image Points: " << endl;
        printMatIMG(imagePoints1.getMat(i));
        ni = objectPoints.getMat(i).checkVector(3, CV_32F);
        CV_Assert( ni >= 0 );
        total += ni;
    }

    npoints.create(1, (int)nimages, CV_32S);
    objPtMat.create(1, (int)total, CV_32FC3);
    imgPtMat1.create(1, (int)total, CV_32FC2);
    Point2f* imgPtData2 = 0;

    if( imgPtMat2 )
    {
        imgPtMat2->create(1, (int)total, CV_32FC2);
        imgPtData2 = imgPtMat2->ptr<Point2f>();
    }

    Point3f* objPtData = objPtMat.ptr<Point3f>();
    Point2f* imgPtData1 = imgPtMat1.ptr<Point2f>();

    for( i = 0; i < nimages; i++, j += ni )
    {
        Mat objpt = objectPoints.getMat(i);
        Mat imgpt1 = imagePoints1.getMat(i);
        ni = objpt.checkVector(3, CV_32F);
        int ni1 = imgpt1.checkVector(2, CV_32F);
        CV_Assert( ni > 0 && ni == ni1 );
        npoints.at<int>(i) = ni;
        memcpy( objPtData + j, objpt.data, ni*sizeof(objPtData[0]) );
        memcpy( imgPtData1 + j, imgpt1.data, ni*sizeof(imgPtData1[0]) );

        if( imgPtData2 )
        {
            Mat imgpt2 = imagePoints2.getMat(i);
            int ni2 = imgpt2.checkVector(2, CV_32F);
            CV_Assert( ni == ni2 );
            memcpy( imgPtData2 + j, imgpt2.data, ni*sizeof(imgPtData2[0]) );
        }
    }
}
Example #6
static Mat asRowMatrix(InputArrayOfArrays src, int rtype, double alpha=1, double beta=0) {
    // make sure the input data is a vector of matrices or vector of vector
    if(src.kind() != _InputArray::STD_VECTOR_MAT && src.kind() != _InputArray::STD_VECTOR_VECTOR) {
        String error_message = "The data is expected as InputArray::STD_VECTOR_MAT (a std::vector<Mat>) or _InputArray::STD_VECTOR_VECTOR (a std::vector< std::vector<...> >).";
        CV_Error(Error::StsBadArg, error_message);
    }
    // number of samples
    size_t n = src.total();
    // return empty matrix if no matrices given
    if(n == 0)
        return Mat();
    // dimensionality of (reshaped) samples
    size_t d = src.getMat(0).total();
    // create data matrix
    Mat data((int)n, (int)d, rtype);
    // now copy data
    for(unsigned int i = 0; i < n; i++) {
        // make sure data can be reshaped, throw exception if not!
        if(src.getMat(i).total() != d) {
            String error_message = format("Wrong number of elements in matrix #%d! Expected %d, but was %d.", (int)i, (int)d, (int)src.getMat(i).total());
            CV_Error(Error::StsBadArg, error_message);
        }
        // get a hold of the current row
        Mat xi = data.row(i);
        // make reshape happy by cloning for non-continuous matrices
        if(src.getMat(i).isContinuous()) {
            src.getMat(i).reshape(1, 1).convertTo(xi, rtype, alpha, beta);
        } else {
            src.getMat(i).clone().reshape(1, 1).convertTo(xi, rtype, alpha, beta);
        }
    }
    return data;
}
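A short usage sketch for asRowMatrix: two equally sized sample images become two rows of a double-precision data matrix (the sample contents are placeholders):

std::vector<cv::Mat> samples;
samples.push_back( cv::Mat::ones( 4, 4, CV_8U ) );
samples.push_back( cv::Mat::zeros( 4, 4, CV_8U ) );
// data is 2 x 16: one flattened, converted sample per row.
cv::Mat data = asRowMatrix( samples, CV_64FC1 );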
Example #7
//------------------------------------------------------------------------------
// libfacerec::asColumnMatrix
//------------------------------------------------------------------------------
Mat libfacerec::asColumnMatrix(InputArrayOfArrays src, int rtype, double alpha, double beta) {
    // make sure the input data is a vector of matrices or vector of vector
    if(src.kind() != _InputArray::STD_VECTOR_MAT && src.kind() != _InputArray::STD_VECTOR_VECTOR) {
        CV_Error(CV_StsBadArg, "The data is expected as InputArray::STD_VECTOR_MAT (a std::vector<Mat>) or _InputArray::STD_VECTOR_VECTOR (a std::vector< vector<...> >).");
    }
    int n = (int) src.total();
    // return empty matrix if no data given
    if(n == 0)
        return Mat();
    // dimensionality of samples
    int d = (int)src.getMat(0).total();
    // create data matrix
    Mat data(d, n, rtype);
    // now copy data
    for(int i = 0; i < n; i++) {
        // make sure data can be reshaped, throw exception if not!
        if(src.getMat(i).total() != (size_t)d) {
            string error_message = format("Wrong number of elements in matrix #%d! Expected %d, but was %d.", i, d, (int)src.getMat(i).total());
            CV_Error(CV_StsBadArg, error_message);
        }
        // get a hold of the current column
        Mat yi = data.col(i);
        // make reshape happy by cloning for non-continuous matrices
        if(src.getMat(i).isContinuous()) {
            src.getMat(i).reshape(1, d).convertTo(yi, rtype, alpha, beta);
        } else {
            src.getMat(i).clone().reshape(1, d).convertTo(yi, rtype, alpha, beta);
        }
    }
    return data;
}
Example #8
//------------------------------------------------------------------------------
// Eigenfaces
//------------------------------------------------------------------------------
void Eigenfaces::train(InputArrayOfArrays _src, InputArray _local_labels) {
    if(_src.total() == 0) {
        String error_message = format("Empty training data was given. You'll need more than one sample to learn a model.");
        CV_Error(Error::StsBadArg, error_message);
    } else if(_local_labels.getMat().type() != CV_32SC1) {
        String error_message = format("Labels must be given as integer (CV_32SC1). Expected %d, but was %d.", CV_32SC1, _local_labels.type());
        CV_Error(Error::StsBadArg, error_message);
    }
    // make sure data has correct size
    if(_src.total() > 1) {
        for(int i = 1; i < static_cast<int>(_src.total()); i++) {
            if(_src.getMat(i-1).total() != _src.getMat(i).total()) {
                String error_message = format("In the Eigenfaces method all input samples (training images) must be of equal size! Expected %d pixels, but was %d pixels.", (int)_src.getMat(i-1).total(), (int)_src.getMat(i).total());
                CV_Error(Error::StsUnsupportedFormat, error_message);
            }
        }
    }
    // get labels
    Mat labels = _local_labels.getMat();
    // observations in row
    Mat data = asRowMatrix(_src, CV_64FC1);

    // number of samples
    int n = data.rows;
    // assert there are as many samples as labels
    if(static_cast<int>(labels.total()) != n) {
        String error_message = format("The number of samples (src) must equal the number of labels (labels)! len(src)=%d, len(labels)=%d.", n, (int)labels.total());
        CV_Error(Error::StsBadArg, error_message);
    }
    // clear existing model data
    _labels.release();
    _projections.clear();
    // clip number of components to be valid
    if((_num_components <= 0) || (_num_components > n))
        _num_components = n;

    // perform the PCA
    PCA pca(data, Mat(), PCA::DATA_AS_ROW, _num_components);
    // copy the PCA results
    _mean = pca.mean.reshape(1,1); // store the mean vector
    _eigenvalues = pca.eigenvalues.clone(); // eigenvalues by row
    transpose(pca.eigenvectors, _eigenvectors); // eigenvectors by column
    // store labels for prediction
    _labels = labels.clone();
    // save projections
    for(int sampleIdx = 0; sampleIdx < data.rows; sampleIdx++) {
        Mat p = subspaceProject(_eigenvectors, _mean, data.row(sampleIdx));
        _projections.push_back(p);
    }
}
Example #9
void cv::mixChannels(InputArrayOfArrays src, InputArrayOfArrays dst,
                     const vector<int>& fromTo)
{
    if(fromTo.empty())
        return;
    int i, nsrc = (int)src.total(), ndst = (int)dst.total();
    CV_Assert(fromTo.size()%2 == 0 && nsrc > 0 && ndst > 0);
    cv::AutoBuffer<Mat> _buf(nsrc + ndst);
    Mat* buf = _buf;
    for( i = 0; i < nsrc; i++ )
        buf[i] = src.getMat(i);
    for( i = 0; i < ndst; i++ )
        buf[nsrc + i] = dst.getMat(i);
    mixChannels(&buf[0], nsrc, &buf[nsrc], ndst, &fromTo[0], fromTo.size()/2);
}
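Usage sketch for this wrapper: each pair in fromTo maps a global source channel index to a global destination channel index, so {0,0, 1,1, 2,2} splits a BGR image into three single-channel planes (the image content is a placeholder):

cv::Mat bgr( 4, 4, CV_8UC3, cv::Scalar( 10, 20, 30 ) );
std::vector<cv::Mat> src( 1, bgr );
std::vector<cv::Mat> planes( 3 );
for( size_t i = 0; i < planes.size(); i++ )
    planes[i].create( bgr.size(), CV_8UC1 );
int pairs[] = { 0,0, 1,1, 2,2 };
std::vector<int> fromTo( pairs, pairs + 6 );
cv::mixChannels( src, planes, fromTo );  // planes[0]=B, planes[1]=G, planes[2]=R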
Example #10
  void
  reconstruct(InputArrayOfArrays points2d, OutputArray Rs, OutputArray Ts, InputOutputArray K,
              OutputArray points3d, bool is_projective)
  {
    const int nviews = (int)points2d.total();
    CV_Assert( nviews >= 2 );

    // Projective reconstruction

    if (is_projective)
    {

      // calls simple pipeline
      reconstruct_(points2d, Rs, Ts, K, points3d);

    }

    // Affine reconstruction

    else
    {
      // TODO: implement me
    }

  }
Example #11
void Feature2D::compute( InputArrayOfArrays _images,
                         std::vector<std::vector<KeyPoint> >& keypoints,
                         OutputArrayOfArrays _descriptors )
{
    CV_INSTRUMENT_REGION();

    if( !_descriptors.needed() )
        return;

    vector<Mat> images;

    _images.getMatVector(images);
    size_t i, nimages = images.size();

    CV_Assert( keypoints.size() == nimages );
    CV_Assert( _descriptors.kind() == _InputArray::STD_VECTOR_MAT );

    vector<Mat>& descriptors = *(vector<Mat>*)_descriptors.getObj();
    descriptors.resize(nimages);

    for( i = 0; i < nimages; i++ )
    {
        compute(images[i], keypoints[i], descriptors[i]);
    }
}
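A batch usage sketch with ORB; blank images are used as stand-ins, so the keypoint lists simply come back empty:

cv::Ptr<cv::ORB> orb = cv::ORB::create();
std::vector<cv::Mat> imgs( 2, cv::Mat( 64, 64, CV_8UC1, cv::Scalar( 0 ) ) );
std::vector<std::vector<cv::KeyPoint> > kps;
std::vector<cv::Mat> descs;
orb->detect( imgs, kps );          // one keypoint vector per image
orb->compute( imgs, kps, descs );  // one descriptor Mat per image, as above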
Example #12
void ACFFeatureEvaluatorImpl::setChannels(InputArrayOfArrays channels)
{
    channels_.clear();
    vector<Mat> ch;
    channels.getMatVector(ch);

    for( size_t i = 0; i < ch.size(); ++i )
    {
        const Mat &channel = ch[i];
        Mat_<int> acf_channel = Mat_<int>::zeros(channel.rows / 4, channel.cols / 4);
        for( int row = 0; row < channel.rows; row += 4 )
        {
            for( int col = 0; col < channel.cols; col += 4 )
            {
                int sum = 0;
                for( int cell_row = row; cell_row < row + 4; ++cell_row )
                    for( int cell_col = col; cell_col < col + 4; ++cell_col )
                        sum += (int)channel.at<float>(cell_row, cell_col);

                acf_channel(row / 4, col / 4) = sum;
            }
        }

        channels_.push_back(acf_channel.clone());
    }
}
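The 4x4 box sums above can also be obtained with an area resize, up to the per-pixel float-to-int truncation in the loop; a sketch reusing 'channel' from the loop (INTER_AREA on an exact 4x downscale averages each 4x4 block, so scaling by 16 recovers the block sum):

cv::Mat small, acf;
cv::resize( channel, small, cv::Size( channel.cols / 4, channel.rows / 4 ),
            0, 0, cv::INTER_AREA );
small.convertTo( acf, CV_32S, 16.0 );  // approximately equals acf_channel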
Example #13
void LBPH::update(InputArrayOfArrays _in_src, InputArray _in_labels) {
    // got no data, just return
    if(_in_src.total() == 0)
        return;

    this->train(_in_src, _in_labels, true);
}
Example #14
// For a (x,y) pixel of the camera, returns the corresponding projector pixel
bool GrayCodePattern_Impl::getProjPixel( InputArrayOfArrays patternImages, int x, int y, Point &projPix ) const
{
  std::vector<Mat>& _patternImages = *( std::vector<Mat>* ) patternImages.getObj();
  std::vector<uchar> grayCol;
  std::vector<uchar> grayRow;

  bool error = false;
  int xDec, yDec;

  // process column images
  for( size_t count = 0; count < numOfColImgs; count++ )
  {
    // get pixel intensity for regular pattern projection and its inverse
    double val1 = _patternImages[count * 2].at<uchar>( Point( x, y ) );
    double val2 = _patternImages[count * 2 + 1].at<uchar>( Point( x, y ) );

    // check if the intensity difference between the values of the normal and its inverse projection image is in a valid range
    if( std::abs( val1 - val2 ) < whiteThreshold )
      error = true;

    // determine if projection pixel is on or off
    if( val1 > val2 )
      grayCol.push_back( 1 );
    else
      grayCol.push_back( 0 );
  }

  xDec = grayToDec( grayCol );

  // process row images
  for( size_t count = 0; count < numOfRowImgs; count++ )
  {
    // get pixel intensity for regular pattern projection and its inverse
    double val1 = _patternImages[count * 2 + numOfColImgs * 2].at<uchar>( Point( x, y ) );
    double val2 = _patternImages[count * 2 + numOfColImgs * 2 + 1].at<uchar>( Point( x, y ) );

    // check if the intensity difference between the values of the normal and its inverse projection image is in a valid range
    if( std::abs( val1 - val2 ) < whiteThreshold )
      error = true;

    // determine if projection pixel is on or off
    if( val1 > val2 )
      grayRow.push_back( 1 );
    else
      grayRow.push_back( 0 );
  }

  yDec = grayToDec( grayRow );

  if( (yDec >= params.height || xDec >= params.width) )
  {
    error = true;
  }

  projPix.x = xDec;
  projPix.y = yDec;

  return error;
}
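grayToDec is not shown in this example; a minimal sketch of the assumed behavior (standard Gray-to-binary conversion, most significant bit first, matching the order the bits are pushed above):

static int grayToDecSketch( const std::vector<uchar>& gray )
{
  int dec = 0;
  uchar bit = 0;
  for( size_t i = 0; i < gray.size(); i++ )
  {
    bit = bit ^ gray[i];        // binary bit i is the XOR prefix of Gray bits
    dec = ( dec << 1 ) | bit;   // append the decoded bit
  }
  return dec;
}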
Example #15
Stitcher::Status Stitcher::estimateTransform(InputArrayOfArrays images, InputArrayOfArrays masks)
{
    CV_INSTRUMENT_REGION();

    images.getUMatVector(imgs_);
    masks.getUMatVector(masks_);

    Status status;

    if ((status = matchImages()) != OK)
        return status;

    if ((status = estimateCameraParams()) != OK)
        return status;

    return OK;
}
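A typical call sequence through the public Stitcher API, for context (the input images are placeholders; overlapping photographs are needed for matching to succeed):

cv::Ptr<cv::Stitcher> stitcher = cv::Stitcher::create();
std::vector<cv::Mat> photos;  // fill with overlapping images
cv::Mat pano;
cv::Stitcher::Status status = stitcher->estimateTransform( photos );
if( status == cv::Stitcher::OK )
    status = stitcher->composePanorama( pano );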
Example #16
void FisherFaceRecognizer::update(InputArrayOfArrays _in_src, InputArray _inm_labels)
{
    // got no data, just return
    if (_in_src.total() == 0)
        return;

    this->train(_in_src, _inm_labels, true);
}
Example #17
void LBPH::train(InputArrayOfArrays _in_src, InputArray _in_labels, bool preserveData) {
    if(_in_src.kind() != _InputArray::STD_VECTOR_MAT && _in_src.kind() != _InputArray::STD_VECTOR_VECTOR) {
        String error_message = "The images are expected as InputArray::STD_VECTOR_MAT (a std::vector<Mat>) or _InputArray::STD_VECTOR_VECTOR (a std::vector< std::vector<...> >).";
        CV_Error(Error::StsBadArg, error_message);
    }
    if(_in_src.total() == 0) {
        String error_message = format("Empty training data was given. You'll need more than one sample to learn a model.");
        CV_Error(Error::StsUnsupportedFormat, error_message);
    } else if(_in_labels.getMat().type() != CV_32SC1) {
        String error_message = format("Labels must be given as integer (CV_32SC1). Expected %d, but was %d.", CV_32SC1, _in_labels.type());
        CV_Error(Error::StsUnsupportedFormat, error_message);
    }
    // get the vector of matrices
    std::vector<Mat> src;
    _in_src.getMatVector(src);
    // get the label matrix
    Mat labels = _in_labels.getMat();
    // check if data is well-aligned
    if(labels.total() != src.size()) {
        String error_message = format("The number of samples (src) must equal the number of labels (labels). Was len(samples)=%d, len(labels)=%d.", (int)src.size(), (int)labels.total());
        CV_Error(Error::StsBadArg, error_message);
    }
    }
    // if this model should be trained without preserving old data, delete old model data
    if(!preserveData) {
        _labels.release();
        _histograms.clear();
    }
    // append labels to _labels matrix
    for(size_t labelIdx = 0; labelIdx < labels.total(); labelIdx++) {
        _labels.push_back(labels.at<int>((int)labelIdx));
    }
    // store the spatial histograms of the original data
    for(size_t sampleIdx = 0; sampleIdx < src.size(); sampleIdx++) {
        // calculate lbp image
        Mat lbp_image = elbp(src[sampleIdx], _radius, _neighbors);
        // get spatial histogram from this lbp image
        Mat p = spatial_histogram(
                lbp_image, /* lbp_image */
                static_cast<int>(std::pow(2.0, static_cast<double>(_neighbors))), /* number of possible patterns */
                _grid_x, /* grid size x */
                _grid_y, /* grid size y */
                true);
        // add to templates
        _histograms.push_back(p);
    }
}
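Through the public face module this training path is reached via the factory; a sketch with placeholder data (two blank faces, labels 0 and 1):

cv::Ptr<cv::face::LBPHFaceRecognizer> model =
    cv::face::LBPHFaceRecognizer::create( /*radius=*/1, /*neighbors=*/8 );
std::vector<cv::Mat> images( 2, cv::Mat::zeros( 32, 32, CV_8U ) );
cv::Mat labels = ( cv::Mat_<int>( 2, 1 ) << 0, 1 );  // CV_32SC1, one label per image
model->train( images, labels );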
Example #18
    void process(InputArrayOfArrays src, OutputArray dst, InputArray _times)
    {
        std::vector<Mat> images;
        src.getMatVector(images);
        Mat times = _times.getMat();

        CV_Assert(images.size() == times.total());
        checkImageDimensions(images);
        CV_Assert(images[0].depth() == CV_8U);

        int channels = images[0].channels();
        int CV_32FCC = CV_MAKETYPE(CV_32F, channels);

        dst.create(LDR_SIZE, 1, CV_32FCC);
        Mat response = dst.getMat();
        response = linearResponse(3) / (LDR_SIZE / 2.0f);

        Mat card = Mat::zeros(LDR_SIZE, 1, CV_32FCC);
        for(size_t i = 0; i < images.size(); i++) {
           uchar *ptr = images[i].ptr();
           for(size_t pos = 0; pos < images[i].total(); pos++) {
               for(int c = 0; c < channels; c++, ptr++) {
                   card.at<Vec3f>(*ptr)[c] += 1;
               }
           }
        }
        card = 1.0 / card;

        Ptr<MergeRobertson> merge = createMergeRobertson();
        for(int iter = 0; iter < max_iter; iter++) {

            radiance = Mat::zeros(images[0].size(), CV_32FCC);
            merge->process(images, radiance, times, response);

            Mat new_response = Mat::zeros(LDR_SIZE, 1, CV_32FC3);
            for(size_t i = 0; i < images.size(); i++) {
                uchar *ptr = images[i].ptr();
                float* rad_ptr = radiance.ptr<float>();
                for(size_t pos = 0; pos < images[i].total(); pos++) {
                    for(int c = 0; c < channels; c++, ptr++, rad_ptr++) {
                        new_response.at<Vec3f>(*ptr)[c] += times.at<float>((int)i) * *rad_ptr;
                    }
                }
            }
            new_response = new_response.mul(card);
            for(int c = 0; c < 3; c++) {
                float middle = new_response.at<Vec3f>(LDR_SIZE / 2)[c];
                for(int i = 0; i < LDR_SIZE; i++) {
                    new_response.at<Vec3f>(i)[c] /= middle;
                }
            }
            float diff = static_cast<float>(sum(sum(abs(new_response - response)))[0] / channels);
            new_response.copyTo(response);
            if(diff < threshold) {
                break;
            }
        }
    }
Example #19
static Mat asRowMatrix(InputArrayOfArrays src, int rtype, double alpha=1, double beta=0)
{
    // number of samples
    int n = (int) src.total();
    // return empty matrix if no data given
    if(n == 0)
        return Mat();
    // dimensionality of samples
    int d = (int)src.getMat(0).total();
    // create data matrix
    Mat data(n, d, rtype);
    // copy data
    for(int i = 0; i < n; i++) {
        Mat xi = data.row(i);
        src.getMat(i).reshape(1, 1).convertTo(xi, rtype, alpha, beta);
    }
    return data;
}
Example #20
void cv::fastNlMeansDenoisingColoredMulti( InputArrayOfArrays _srcImgs, OutputArray _dst,
                                           int imgToDenoiseIndex, int temporalWindowSize,
                                           float h, float hForColorComponents,
                                           int templateWindowSize, int searchWindowSize)
{
    std::vector<Mat> srcImgs;
    _srcImgs.getMatVector(srcImgs);

    fastNlMeansDenoisingMultiCheckPreconditions(
        srcImgs, imgToDenoiseIndex,
        temporalWindowSize, templateWindowSize, searchWindowSize);

    _dst.create(srcImgs[0].size(), srcImgs[0].type());
    Mat dst = _dst.getMat();

    int src_imgs_size = static_cast<int>(srcImgs.size());

    if (srcImgs[0].type() != CV_8UC3)
    {
        CV_Error(Error::StsBadArg, "Type of input images should be CV_8UC3!");
        return;
    }

    int from_to[] = { 0,0, 1,1, 2,2 };

    // TODO convert only required images
    std::vector<Mat> src_lab(src_imgs_size);
    std::vector<Mat> l(src_imgs_size);
    std::vector<Mat> ab(src_imgs_size);
    for (int i = 0; i < src_imgs_size; i++)
    {
        src_lab[i] = Mat::zeros(srcImgs[0].size(), CV_8UC3);
        l[i] = Mat::zeros(srcImgs[0].size(), CV_8UC1);
        ab[i] = Mat::zeros(srcImgs[0].size(), CV_8UC2);
        cvtColor(srcImgs[i], src_lab[i], COLOR_LBGR2Lab);

        Mat l_ab[] = { l[i], ab[i] };
        mixChannels(&src_lab[i], 1, l_ab, 2, from_to, 3);
    }

    Mat dst_l;
    Mat dst_ab;

    fastNlMeansDenoisingMulti(
        l, dst_l, imgToDenoiseIndex, temporalWindowSize,
        h, templateWindowSize, searchWindowSize);

    fastNlMeansDenoisingMulti(
        ab, dst_ab, imgToDenoiseIndex, temporalWindowSize,
        hForColorComponents, templateWindowSize, searchWindowSize);

    Mat l_ab_denoised[] = { dst_l, dst_ab };
    Mat dst_lab(srcImgs[0].size(), srcImgs[0].type());
    mixChannels(l_ab_denoised, 2, &dst_lab, 1, from_to, 3);

    cvtColor(dst_lab, dst, COLOR_Lab2LBGR);
}
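Usage sketch: denoise the middle frame of a three-frame temporal window (the frames are placeholders; they must be CV_8UC3 and equally sized):

std::vector<cv::Mat> frames( 3, cv::Mat::zeros( 64, 64, CV_8UC3 ) );
cv::Mat denoised;
cv::fastNlMeansDenoisingColoredMulti( frames, denoised,
                                      1 /*imgToDenoiseIndex*/,
                                      3 /*temporalWindowSize*/ );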
Example #21
void DescriptorExtractor::compute( InputArrayOfArrays _imageCollection, std::vector<std::vector<KeyPoint> >& pointCollection, OutputArrayOfArrays _descCollection ) const
{
    std::vector<Mat> imageCollection;
    _imageCollection.getMatVector(imageCollection);
    // bind to the caller's vector so the computed descriptors propagate back
    CV_Assert( _descCollection.kind() == _InputArray::STD_VECTOR_MAT );
    std::vector<Mat>& descCollection = *( std::vector<Mat>* ) _descCollection.getObj();
    CV_Assert( imageCollection.size() == pointCollection.size() );
    descCollection.resize( imageCollection.size() );
    for( size_t i = 0; i < imageCollection.size(); i++ )
        compute( imageCollection[i], pointCollection[i], descCollection[i] );
}
Example #22
double mycalibrateCamera( InputArrayOfArrays _objectPoints,
                            InputArrayOfArrays _imagePoints,
                            Size imageSize, InputOutputArray _cameraMatrix, InputOutputArray _distCoeffs,
                            OutputArrayOfArrays _rvecs, OutputArrayOfArrays _tvecs, int flags, TermCriteria criteria )
{
    int rtype = CV_64F;
    Mat cameraMatrix = _cameraMatrix.getMat();
    cameraMatrix = prepareCameraMatrix(cameraMatrix, rtype);
    Mat distCoeffs = _distCoeffs.getMat();
    distCoeffs = prepareDistCoeffs(distCoeffs, rtype);
    if( !(flags & CALIB_RATIONAL_MODEL) )
        distCoeffs = distCoeffs.rows == 1 ? distCoeffs.colRange(0, 5) : distCoeffs.rowRange(0, 5);

    int i;
    size_t nimages = _objectPoints.total();
    CV_Assert( nimages > 0 );
    Mat objPt, imgPt, npoints, rvecM((int)nimages, 3, CV_64FC1), tvecM((int)nimages, 3, CV_64FC1);
    collectCalibrationData( _objectPoints, _imagePoints, noArray(),
                            objPt, imgPt, 0, npoints );
    CvMat c_objPt = objPt, c_imgPt = imgPt, c_npoints = npoints;
    CvMat c_cameraMatrix = cameraMatrix, c_distCoeffs = distCoeffs;
    CvMat c_rvecM = rvecM, c_tvecM = tvecM;

    double reprojErr = cvCalibrateCamera2(&c_objPt, &c_imgPt, &c_npoints, imageSize,
                                          &c_cameraMatrix, &c_distCoeffs, &c_rvecM,
                                          &c_tvecM, flags, criteria );

    bool rvecs_needed = _rvecs.needed(), tvecs_needed = _tvecs.needed();

    if( rvecs_needed )
        _rvecs.create((int)nimages, 1, CV_64FC3);
    if( tvecs_needed )
        _tvecs.create((int)nimages, 1, CV_64FC3);

    for( i = 0; i < (int)nimages; i++ )
    {
        if( rvecs_needed )
        {
            _rvecs.create(3, 1, CV_64F, i, true);
            Mat rv = _rvecs.getMat(i);
            memcpy(rv.data, rvecM.ptr<double>(i), 3*sizeof(double));
        }
        if( tvecs_needed )
        {
            _tvecs.create(3, 1, CV_64F, i, true);
            Mat tv = _tvecs.getMat(i);
            memcpy(tv.data, tvecM.ptr<double>(i), 3*sizeof(double));
        }
    }
    cameraMatrix.copyTo(_cameraMatrix);
    distCoeffs.copyTo(_distCoeffs);

    return reprojErr;
}
Example #23
void GaborLbp_Algorithm::train(InputArrayOfArrays _in_src, InputArray _in_labels)
{
	if(_in_src.kind() != _InputArray::STD_VECTOR_MAT && _in_src.kind() != _InputArray::STD_VECTOR_VECTOR) {
		string error_message = "The images are expected as InputArray::STD_VECTOR_MAT (a std::vector<Mat>) or _InputArray::STD_VECTOR_VECTOR (a std::vector< vector<...> >).";
		CV_Error(CV_StsBadArg, error_message);
	}
	if(_in_src.total() == 0) {
		string error_message = format("Empty training data was given. You'll need more than one sample to learn a model.");
		CV_Error(CV_StsUnsupportedFormat, error_message);
	} else if(_in_labels.getMat().type() != CV_32SC1) {
		string error_message = format("Labels must be given as integer (CV_32SC1). Expected %d, but was %d.", CV_32SC1, _in_labels.type());
		CV_Error(CV_StsUnsupportedFormat, error_message);
	}
	vector<Mat> src;
	_in_src.getMatVector(src);
	Mat labels = _in_labels.getMat();
	if(labels.total() != src.size()) {
		string error_message = format("The number of samples (src) must equal the number of labels (labels). Was len(samples)=%d, len(labels)=%d.", (int)src.size(), (int)labels.total());
		CV_Error(CV_StsBadArg, error_message);
	}
	double m_kmax = CV_PI/2;
	double m_f = sqrt(double(2));
	double m_sigma = 2*CV_PI;
	for (size_t sampleIdx = 0; sampleIdx < src.size(); sampleIdx++)
	{
		int row = src[sampleIdx].rows;
		int col = src[sampleIdx].cols;
		if (row <= 0 || col <= 0)
		{
			continue;
		}
		m_labels.push_back(labels.at<int>((int)sampleIdx));

		ZGabor m_gabor;
		m_gabor.InitGabor();
		m_gabor.GetFeature(src[sampleIdx], 1, 8, 8, 8);
		cout << sampleIdx << endl;
		m_projection.push_back(m_gabor.m_eigenvector);
	}
}
Example #24
Mosaic::Status Mosaic::estimateTransform(InputArrayOfArrays images) {
    images.getUMatVector(imgs_);

    Status status;

    if ((status = matchImages()) != OK)
        return status;

    if ((status = estimateCameraParams()) != OK)
        return status;

    return OK;
}
Example #25
void ICFFeatureEvaluatorImpl::setChannels(InputArrayOfArrays channels)
{
    channels_.clear();
    vector<Mat> ch;
    channels.getMatVector(ch);

    for( size_t i = 0; i < ch.size(); ++i )
    {
        const Mat &channel = ch[i];
        Mat integral_channel;
        integral(channel, integral_channel, CV_32F);
        integral_channel.convertTo(integral_channel, CV_32S);
        channels_.push_back(integral_channel.clone());
    }
}
Example #26
void Feature2D::detect( InputArrayOfArrays _images,
                        std::vector<std::vector<KeyPoint> >& keypoints,
                        InputArrayOfArrays _masks )
{
    CV_INSTRUMENT_REGION();

    vector<Mat> images, masks;

    _images.getMatVector(images);
    size_t i, nimages = images.size();

    if( !_masks.empty() )
    {
        _masks.getMatVector(masks);
        CV_Assert(masks.size() == nimages);
    }

    keypoints.resize(nimages);

    for( i = 0; i < nimages; i++ )
    {
        detect(images[i], keypoints[i], masks.empty() ? Mat() : masks[i] );
    }
}
Example #27
    void process(InputArrayOfArrays _src, std::vector<Mat>& dst)
    {
        std::vector<Mat> src;
        _src.getMatVector(src);

        checkImageDimensions(src);
        dst.resize(src.size());

        size_t pivot = src.size() / 2;
        dst[pivot] = src[pivot];
        Mat gray_base;
        cvtColor(src[pivot], gray_base, COLOR_RGB2GRAY);
        std::vector<Point> shifts;

        for(size_t i = 0; i < src.size(); i++) {
            if(i == pivot) {
                shifts.push_back(Point(0, 0));
                continue;
            }
            Mat gray;
            cvtColor(src[i], gray, COLOR_RGB2GRAY);
            Point shift = calculateShift(gray_base, gray);
            shifts.push_back(shift);
            shiftMat(src[i], dst[i], shift);
        }
        if(cut) {
            Point max(0, 0), min(0, 0);
            for(size_t i = 0; i < shifts.size(); i++) {
                if(shifts[i].x > max.x) {
                    max.x = shifts[i].x;
                }
                if(shifts[i].y > max.y) {
                    max.y = shifts[i].y;
                }
                if(shifts[i].x < min.x) {
                    min.x = shifts[i].x;
                }
                if(shifts[i].y < min.y) {
                    min.y = shifts[i].y;
                }
            }
            Point size = dst[0].size();
            for(size_t i = 0; i < dst.size(); i++) {
                dst[i] = dst[i](Rect(max, min + size));
            }
        }
    }
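Public-API usage sketch for this alignment step (the exposures are placeholders; real bracketed shots are needed for meaningful shifts):

cv::Ptr<cv::AlignMTB> align = cv::createAlignMTB();
std::vector<cv::Mat> exposures( 3, cv::Mat( 64, 64, CV_8UC3, cv::Scalar::all( 128 ) ) );
std::vector<cv::Mat> aligned;
align->process( exposures, aligned );  // aligned[i] is exposures[i] shifted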
Example #28
void cv::fastNlMeansDenoisingMulti( InputArrayOfArrays _srcImgs, OutputArray _dst,
                                    int imgToDenoiseIndex, int temporalWindowSize,
                                    float h, int templateWindowSize, int searchWindowSize)
{
    std::vector<Mat> srcImgs;
    _srcImgs.getMatVector(srcImgs);

    fastNlMeansDenoisingMultiCheckPreconditions(
        srcImgs, imgToDenoiseIndex,
        temporalWindowSize, templateWindowSize, searchWindowSize);

    _dst.create(srcImgs[0].size(), srcImgs[0].type());
    Mat dst = _dst.getMat();

    switch (srcImgs[0].type())
    {
        case CV_8U:
            parallel_for_(cv::Range(0, srcImgs[0].rows),
                FastNlMeansMultiDenoisingInvoker<uchar>(
                    srcImgs, imgToDenoiseIndex, temporalWindowSize,
                    dst, templateWindowSize, searchWindowSize, h));
            break;
        case CV_8UC2:
            parallel_for_(cv::Range(0, srcImgs[0].rows),
                FastNlMeansMultiDenoisingInvoker<cv::Vec2b>(
                    srcImgs, imgToDenoiseIndex, temporalWindowSize,
                    dst, templateWindowSize, searchWindowSize, h));
            break;
        case CV_8UC3:
            parallel_for_(cv::Range(0, srcImgs[0].rows),
                FastNlMeansMultiDenoisingInvoker<cv::Vec3b>(
                    srcImgs, imgToDenoiseIndex, temporalWindowSize,
                    dst, templateWindowSize, searchWindowSize, h));
            break;
        default:
            CV_Error(Error::StsBadArg,
                "Unsupported matrix format! Only uchar, Vec2b, Vec3b are supported");
    }
}
Example #29
    bool forward_ocl(InputArrayOfArrays inps, OutputArrayOfArrays outs, OutputArrayOfArrays internals)
    {
        std::vector<UMat> inputs;
        std::vector<UMat> outputs;

        inps.getUMatVector(inputs);
        outs.getUMatVector(outputs);

        int _layerWidth = inputs[0].size[3];
        int _layerHeight = inputs[0].size[2];

        int _imageWidth = inputs[1].size[3];
        int _imageHeight = inputs[1].size[2];

        float stepX, stepY;
        if (_stepX == 0 || _stepY == 0)
        {
            stepX = static_cast<float>(_imageWidth) / _layerWidth;
            stepY = static_cast<float>(_imageHeight) / _layerHeight;
        } else {
            stepX = _stepX;
            stepY = _stepY;
        }

        if (umat_offsetsX.empty())
        {
            Mat offsetsX(1, (int)_offsetsX.size(), CV_32FC1, &_offsetsX[0]);
            Mat offsetsY(1, (int)_offsetsX.size(), CV_32FC1, &_offsetsY[0]);
            Mat aspectRatios(1, (int)_aspectRatios.size(), CV_32FC1, &_aspectRatios[0]);
            Mat variance(1, (int)_variance.size(), CV_32FC1, &_variance[0]);

            offsetsX.copyTo(umat_offsetsX);
            offsetsY.copyTo(umat_offsetsY);
            aspectRatios.copyTo(umat_aspectRatios);
            variance.copyTo(umat_variance);

            int real_numPriors = _numPriors >> (_offsetsX.size() - 1);
            umat_scales = UMat(1, &real_numPriors, CV_32F, 1.0f);
        }
Example #30
    void process(InputArrayOfArrays src, OutputArray dst, InputArray _times, InputArray input_response)
    {
        std::vector<Mat> images;
        src.getMatVector(images);
        Mat times = _times.getMat();

        CV_Assert(images.size() == times.total());
        checkImageDimensions(images);
        CV_Assert(images[0].depth() == CV_8U);

        int channels = images[0].channels();
        int CV_32FCC = CV_MAKETYPE(CV_32F, channels);

        dst.create(images[0].size(), CV_32FCC);
        Mat result = dst.getMat();

        Mat response = input_response.getMat();
        if(response.empty()) {
            float middle = LDR_SIZE / 2.0f;
            response = linearResponse(channels) / middle;
        }
        CV_Assert(response.rows == LDR_SIZE && response.cols == 1 &&
                  response.channels() == channels);

        result = Mat::zeros(images[0].size(), CV_32FCC);
        Mat wsum = Mat::zeros(images[0].size(), CV_32FCC);
        for(size_t i = 0; i < images.size(); i++) {
            Mat im, w;
            LUT(images[i], weight, w);
            LUT(images[i], response, im);

            result += times.at<float>((int)i) * w.mul(im);
            wsum += times.at<float>((int)i) * times.at<float>((int)i) * w;
        }
        result = result.mul(1 / wsum);
    }
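The corresponding public entry point, sketched with placeholder shots (times are exposure durations in seconds):

cv::Ptr<cv::MergeRobertson> merge = cv::createMergeRobertson();
std::vector<cv::Mat> shots( 3, cv::Mat( 64, 64, CV_8UC3, cv::Scalar::all( 128 ) ) );
std::vector<float> times;
times.push_back( 1.0f / 30 );
times.push_back( 1.0f / 15 );
times.push_back( 1.0f / 8 );
cv::Mat hdr;
merge->process( shots, hdr, times );  // reaches the implementation above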