// Computes the shadow masks: marks the pixels occluded by shadows, where the model cannot be reconstructed
void GrayCodePattern_Impl::computeShadowMasks( InputArrayOfArrays blackImages, InputArrayOfArrays whiteImages,
                                               OutputArrayOfArrays shadowMasks ) const
{
  std::vector<Mat>& whiteImages_ = *( std::vector<Mat>* ) whiteImages.getObj();
  std::vector<Mat>& blackImages_ = *( std::vector<Mat>* ) blackImages.getObj();
  std::vector<Mat>& shadowMasks_ = *( std::vector<Mat>* ) shadowMasks.getObj();

  shadowMasks_.resize( whiteImages_.size() );

  int cam_width = whiteImages_[0].cols;
  int cam_height = whiteImages_[0].rows;

  // TODO: parallelize for
  for( int k = 0; k < (int) shadowMasks_.size(); k++ )
  {
    shadowMasks_[k] = Mat( cam_height, cam_width, CV_8U );
    for( int i = 0; i < cam_width; i++ )
    {
      for( int j = 0; j < cam_height; j++ )
      {
        double white = whiteImages_[k].at<uchar>( Point( i, j ) );
        double black = blackImages_[k].at<uchar>( Point( i, j ) );

        if( std::abs( white - black ) > blackThreshold )
        {
          shadowMasks_[k].at<uchar>( Point( i, j ) ) = ( uchar ) 1;
        }
        else
        {
          shadowMasks_[k].at<uchar>( Point( i, j ) ) = ( uchar ) 0;
        }
      }
    }
  }
}
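
The per-pixel loop above (note the TODO about parallelizing it) can also be expressed with whole-matrix operations, which OpenCV parallelizes internally. Below is a minimal sketch under the same assumptions (8-bit single-channel black/white images); the function name is illustrative, not part of the module:

#include <opencv2/core.hpp>

// Illustrative alternative: build one shadow mask with absdiff + compare
// instead of a per-pixel loop; keeps the same 0/1 mask convention.
static void computeShadowMaskFast( const cv::Mat& white, const cv::Mat& black,
                                   double blackThreshold, cv::Mat& mask )
{
  cv::Mat diff;
  cv::absdiff( white, black, diff );                      // |white - black| per pixel
  cv::compare( diff, blackThreshold, mask, cv::CMP_GT );  // 255 where the difference exceeds the threshold
  mask /= 255;                                            // map 255 -> 1 to match the loop above
}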
// Checks that every image in src has the same size and depth, returning them through sz and depth
void checkSameSizeAndDepth(InputArrayOfArrays src, Size &sz, int &depth)
{
    CV_Assert(src.isMat() || src.isUMat() || src.isMatVector() || src.isUMatVector());

    if (src.isMat() || src.isUMat())
    {
        CV_Assert(!src.empty());
        sz = src.size();
        depth = src.depth();
    }
    else if (src.isMatVector())
    {
        const std::vector<Mat>& srcv = *static_cast<const std::vector<Mat>*>(src.getObj());
        CV_Assert(srcv.size() > 0);
        for (unsigned i = 0; i < srcv.size(); i++)
        {
            CV_Assert(srcv[i].depth() == srcv[0].depth());
            CV_Assert(srcv[i].size() == srcv[0].size());
        }
        sz = srcv[0].size();
        depth = srcv[0].depth();
    }
    else if (src.isUMatVector())
    {
        const std::vector<UMat>& srcv = *static_cast<const std::vector<UMat>*>(src.getObj());
        CV_Assert(srcv.size() > 0);
        for (unsigned i = 0; i < srcv.size(); i++)
        {
            CV_Assert(srcv[i].depth() == srcv[0].depth());
            CV_Assert(srcv[i].size() == srcv[0].size());
        }
        sz = srcv[0].size();
        depth = srcv[0].depth();
    }
}
// Returns the sum of the channel counts over all images in src
int getTotalNumberOfChannels(InputArrayOfArrays src)
{
    CV_Assert(src.isMat() || src.isUMat() || src.isMatVector() || src.isUMatVector());

    if (src.isMat() || src.isUMat())
    {
        return src.channels();
    }
    else if (src.isMatVector())
    {
        int cnNum = 0;
        const std::vector<Mat>& srcv = *static_cast<const std::vector<Mat>*>(src.getObj());
        for (unsigned i = 0; i < srcv.size(); i++)
            cnNum += srcv[i].channels();
        return cnNum;
    }
    else if (src.isUMatVector())
    {
        int cnNum = 0;
        const std::vector<UMat>& srcv = *static_cast<const std::vector<UMat>*>(src.getObj());
        for (unsigned i = 0; i < srcv.size(); i++)
            cnNum += srcv[i].channels();
        return cnNum;
    }
    else
    {
        return 0;
    }
}
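
A quick usage sketch of the two helpers above; the frame sizes and the wrapper function are illustrative:

#include <opencv2/core.hpp>
#include <vector>

void exampleUsage()
{
    // three identical 640x480 BGR frames; both helpers accept a std::vector<cv::Mat>
    std::vector<cv::Mat> frames( 3, cv::Mat::zeros( 480, 640, CV_8UC3 ) );

    cv::Size sz;
    int depth = -1;
    checkSameSizeAndDepth( frames, sz, depth );      // sz = [640 x 480], depth = CV_8U
    int cnNum = getTotalNumberOfChannels( frames );  // 3 images x 3 channels = 9
    CV_Assert( sz == cv::Size( 640, 480 ) && depth == CV_8U && cnNum == 9 );
}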
// For an (x, y) pixel of the camera, computes the corresponding projector pixel;
// returns true if the code could not be decoded reliably, false on success
bool GrayCodePattern_Impl::getProjPixel( InputArrayOfArrays patternImages, int x, int y, Point &projPix ) const
{
  std::vector<Mat>& _patternImages = *( std::vector<Mat>* ) patternImages.getObj();
  std::vector<uchar> grayCol;
  std::vector<uchar> grayRow;

  bool error = false;
  int xDec, yDec;

  // process column images
  for( size_t count = 0; count < numOfColImgs; count++ )
  {
    // get pixel intensity for regular pattern projection and its inverse
    double val1 = _patternImages[count * 2].at<uchar>( Point( x, y ) );
    double val2 = _patternImages[count * 2 + 1].at<uchar>( Point( x, y ) );

    // flag an error if the intensity difference between the pattern and its inverse is below the reliability threshold
    if( std::abs( val1 - val2 ) < whiteThreshold )
      error = true;

    // determine if projection pixel is on or off
    if( val1 > val2 )
      grayCol.push_back( 1 );
    else
      grayCol.push_back( 0 );
  }

  xDec = grayToDec( grayCol );

  // process row images
  for( size_t count = 0; count < numOfRowImgs; count++ )
  {
    // get pixel intensity for regular pattern projection and its inverse
    double val1 = _patternImages[count * 2 + numOfColImgs * 2].at<uchar>( Point( x, y ) );
    double val2 = _patternImages[count * 2 + numOfColImgs * 2 + 1].at<uchar>( Point( x, y ) );

    // flag an error if the intensity difference between the pattern and its inverse is below the reliability threshold
    if( std::abs( val1 - val2 ) < whiteThreshold )
      error = true;

    // determine if projection pixel is on or off
    if( val1 > val2 )
      grayRow.push_back( 1 );
    else
      grayRow.push_back( 0 );
  }

  yDec = grayToDec( grayRow );

  // the decoded coordinates must lie within the projector resolution
  if( xDec >= params.width || yDec >= params.height )
  {
    error = true;
  }

  projPix.x = xDec;
  projPix.y = yDec;

  return error;
}
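
getProjPixel relies on grayToDec, which is not shown in this listing: it converts the captured Gray-code bit sequence (most significant bit first) into a decimal index. Here is a minimal sketch of the standard conversion, where each binary bit is the XOR of the previously decoded binary bit and the current Gray bit; the function name is illustrative:

#include <opencv2/core.hpp>
#include <vector>

// Illustrative Gray-to-decimal conversion, MSB first:
// binary[0] = gray[0]; binary[i] = binary[i-1] XOR gray[i].
static int grayToDecSketch( const std::vector<uchar>& gray )
{
  int dec = 0;
  uchar bin = 0;
  for( size_t i = 0; i < gray.size(); i++ )
  {
    bin ^= gray[i];          // carry the XOR chain forward
    dec = (dec << 1) | bin;  // append the decoded binary bit
  }
  return dec;
}

For example, the Gray code {1, 0, 1} decodes to binary 110, i.e. 6.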
Example #5
    void BTVL1_Base::process(InputArrayOfArrays _src, OutputArray _dst, InputArrayOfArrays _forwardMotions,
                             InputArrayOfArrays _backwardMotions, int baseIdx)
    {
        CV_Assert( scale_ > 1 );
        CV_Assert( iterations_ > 0 );
        CV_Assert( tau_ > 0.0 );
        CV_Assert( alpha_ > 0.0 );
        CV_Assert( btvKernelSize_ > 0 );
        CV_Assert( blurKernelSize_ > 0 );
        CV_Assert( blurSigma_ >= 0.0 );

        CV_OCL_RUN(_src.isUMatVector() && _dst.isUMat() && _forwardMotions.isUMatVector() &&
                   _backwardMotions.isUMatVector(),
                   ocl_process(_src, _dst, _forwardMotions, _backwardMotions, baseIdx))

        std::vector<Mat> & src = *(std::vector<Mat> *)_src.getObj(),
                & forwardMotions = *(std::vector<Mat> *)_forwardMotions.getObj(),
                & backwardMotions = *(std::vector<Mat> *)_backwardMotions.getObj();

        // update blur filter and btv weights
        if (!filter_ || blurKernelSize_ != curBlurKernelSize_ || blurSigma_ != curBlurSigma_ || src[0].type() != curSrcType_)
        {
            filter_ = createGaussianFilter(src[0].type(), Size(blurKernelSize_, blurKernelSize_), blurSigma_);
            curBlurKernelSize_ = blurKernelSize_;
            curBlurSigma_ = blurSigma_;
            curSrcType_ = src[0].type();
        }

        if (btvWeights_.empty() || btvKernelSize_ != curBtvKernelSize_ || alpha_ != curAlpha_)
        {
            calcBtvWeights(btvKernelSize_, alpha_, btvWeights_);
            curBtvKernelSize_ = btvKernelSize_;
            curAlpha_ = alpha_;
        }

        // calc high res motions
        calcRelativeMotions(forwardMotions, backwardMotions, lowResForwardMotions_, lowResBackwardMotions_, baseIdx, src[0].size());

        upscaleMotions(lowResForwardMotions_, highResForwardMotions_, scale_);
        upscaleMotions(lowResBackwardMotions_, highResBackwardMotions_, scale_);

        forwardMaps_.resize(highResForwardMotions_.size());
        backwardMaps_.resize(highResForwardMotions_.size());
        for (size_t i = 0; i < highResForwardMotions_.size(); ++i)
            buildMotionMaps(highResForwardMotions_[i], highResBackwardMotions_[i], forwardMaps_[i], backwardMaps_[i]);

        // initial estimation
        const Size lowResSize = src[0].size();
        const Size highResSize(lowResSize.width * scale_, lowResSize.height * scale_);

        resize(src[baseIdx], highRes_, highResSize, 0, 0, INTER_CUBIC);

        // iterations
        diffTerm_.create(highResSize, highRes_.type());
        a_.create(highResSize, highRes_.type());
        b_.create(highResSize, highRes_.type());
        c_.create(lowResSize, highRes_.type());

        for (int i = 0; i < iterations_; ++i)
        {
            diffTerm_.setTo(Scalar::all(0));

            for (size_t k = 0; k < src.size(); ++k)
            {
                // a = M * Ih
                remap(highRes_, a_, backwardMaps_[k], noArray(), INTER_NEAREST);
                // b = HM * Ih
                filter_->apply(a_, b_);
                // c = DHM * Ih
                resize(b_, c_, lowResSize, 0, 0, INTER_NEAREST);

                diffSign(src[k], c_, c_);

                // a = Dt * diff
                upscale(c_, a_, scale_);
                // b = HtDt * diff
                filter_->apply(a_, b_);
                // a = MtHtDt * diff
                remap(b_, a_, forwardMaps_[k], noArray(), INTER_NEAREST);

                add(diffTerm_, a_, diffTerm_);
            }

            if (lambda_ > 0)
            {
                calcBtvRegularization(highRes_, regTerm_, btvKernelSize_, btvWeights_, ubtvWeights_);
                addWeighted(diffTerm_, 1.0, regTerm_, -lambda_, 0.0, diffTerm_);
            }

            addWeighted(highRes_, 1.0, diffTerm_, tau_, 0.0, highRes_);
        }

        Rect inner(btvKernelSize_, btvKernelSize_, highRes_.cols - 2 * btvKernelSize_, highRes_.rows - 2 * btvKernelSize_);
        highRes_(inner).copyTo(_dst);
    }
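
diffSign, called in the inner loop above, is not shown in this listing; it computes the per-pixel sign of (src1 - src2), i.e. +1, -1, or 0, which is the subgradient of the L1 data term. A minimal scalar sketch, assuming single-channel CV_32F frames (the module's real implementation is vectorized and handles multiple channels):

#include <opencv2/core.hpp>

// Illustrative scalar version of diffSign: dst = sign(a - b), elementwise.
static void diffSignSketch( const cv::Mat& a, const cv::Mat& b, cv::Mat& dst )
{
    CV_Assert( a.type() == CV_32FC1 && a.size() == b.size() && b.type() == a.type() );
    dst.create( a.size(), a.type() );
    for( int y = 0; y < a.rows; ++y )
    {
        const float* pa = a.ptr<float>( y );
        const float* pb = b.ptr<float>( y );
        float* pd = dst.ptr<float>( y );
        for( int x = 0; x < a.cols; ++x )
        {
            float d = pa[x] - pb[x];
            pd[x] = d > 0.0f ? 1.0f : ( d < 0.0f ? -1.0f : 0.0f );
        }
    }
}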
Example #6
    bool BTVL1_Base::ocl_process(InputArrayOfArrays _src, OutputArray _dst, InputArrayOfArrays _forwardMotions,
                                 InputArrayOfArrays _backwardMotions, int baseIdx)
    {
        std::vector<UMat> & src = *(std::vector<UMat> *)_src.getObj(),
                & forwardMotions = *(std::vector<UMat> *)_forwardMotions.getObj(),
                & backwardMotions = *(std::vector<UMat> *)_backwardMotions.getObj();

        // update blur filter and btv weights
        if (!filter_ || blurKernelSize_ != curBlurKernelSize_ || blurSigma_ != curBlurSigma_ || src[0].type() != curSrcType_)
        {
            filter_ = createGaussianFilter(src[0].type(), Size(blurKernelSize_, blurKernelSize_), blurSigma_);
            curBlurKernelSize_ = blurKernelSize_;
            curBlurSigma_ = blurSigma_;
            curSrcType_ = src[0].type();
        }

        if (btvWeights_.empty() || btvKernelSize_ != curBtvKernelSize_ || alpha_ != curAlpha_)
        {
            calcBtvWeights(btvKernelSize_, alpha_, btvWeights_);
            Mat(btvWeights_, true).copyTo(ubtvWeights_);

            curBtvKernelSize_ = btvKernelSize_;
            curAlpha_ = alpha_;
        }

        // calc high res motions
        calcRelativeMotions(forwardMotions, backwardMotions, ulowResForwardMotions_, ulowResBackwardMotions_, baseIdx, src[0].size());

        upscaleMotions(ulowResForwardMotions_, uhighResForwardMotions_, scale_);
        upscaleMotions(ulowResBackwardMotions_, uhighResBackwardMotions_, scale_);

        uforwardMaps_.resize(uhighResForwardMotions_.size());
        ubackwardMaps_.resize(uhighResForwardMotions_.size());
        for (size_t i = 0; i < uhighResForwardMotions_.size(); ++i)
            buildMotionMaps(uhighResForwardMotions_[i], uhighResBackwardMotions_[i], uforwardMaps_[i], ubackwardMaps_[i]);

        // initial estimation
        const Size lowResSize = src[0].size();
        const Size highResSize(lowResSize.width * scale_, lowResSize.height * scale_);

        resize(src[baseIdx], uhighRes_, highResSize, 0, 0, INTER_LINEAR); // TODO

        // iterations
        udiffTerm_.create(highResSize, uhighRes_.type());
        ua_.create(highResSize, uhighRes_.type());
        ub_.create(highResSize, uhighRes_.type());
        uc_.create(lowResSize, uhighRes_.type());

        for (int i = 0; i < iterations_; ++i)
        {
            udiffTerm_.setTo(Scalar::all(0));

            for (size_t k = 0; k < src.size(); ++k)
            {
                // a = M * Ih
                remap(uhighRes_, ua_, ubackwardMaps_[k], noArray(), INTER_NEAREST);
                // b = HM * Ih
                GaussianBlur(ua_, ub_, Size(blurKernelSize_, blurKernelSize_), blurSigma_);
                // c = DHM * Ih
                resize(ub_, uc_, lowResSize, 0, 0, INTER_NEAREST);

                diffSign(src[k], uc_, uc_);

                // a = Dt * diff
                upscale(uc_, ua_, scale_);

                // b = HtDt * diff
                GaussianBlur(ua_, ub_, Size(blurKernelSize_, blurKernelSize_), blurSigma_);
                // a = MtHtDt * diff
                remap(ub_, ua_, uforwardMaps_[k], noArray(), INTER_NEAREST);

                add(udiffTerm_, ua_, udiffTerm_);
            }

            if (lambda_ > 0)
            {
                calcBtvRegularization(uhighRes_, uregTerm_, btvKernelSize_, btvWeights_, ubtvWeights_);
                addWeighted(udiffTerm_, 1.0, uregTerm_, -lambda_, 0.0, udiffTerm_);
            }

            addWeighted(uhighRes_, 1.0, udiffTerm_, tau_, 0.0, uhighRes_);
        }

        Rect inner(btvKernelSize_, btvKernelSize_, uhighRes_.cols - 2 * btvKernelSize_, uhighRes_.rows - 2 * btvKernelSize_);
        uhighRes_(inner).copyTo(_dst);

        return true;
    }
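
For context, a hedged sketch of driving this BTV-L1 pipeline through the public superres API; the file name and parameter values are illustrative:

#include <opencv2/superres.hpp>

void runBtvL1Example()
{
    cv::Ptr<cv::superres::SuperResolution> sr = cv::superres::createSuperResolution_BTVL1();
    sr->setScale( 4 );           // upscaling factor (scale_ above)
    sr->setIterations( 100 );    // gradient-descent iterations (iterations_ above)
    sr->setInput( cv::superres::createFrameSource_Video( "input.avi" ) );

    cv::Mat result;
    sr->nextFrame( result );     // eventually reaches the process()/ocl_process() code shown above
}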
bool GrayCodePattern_Impl::decode( InputArrayOfArrays patternImages, OutputArray disparityMap,
                                   InputArrayOfArrays blackImages, InputArrayOfArrays whiteImages, int flags ) const
{
    std::vector<std::vector<Mat> >& acquired_pattern = *( std::vector<std::vector<Mat> >* ) patternImages.getObj();

    if( flags == DECODE_3D_UNDERWORLD )
    {
        // Compute the shadow masks
        std::vector<Mat> shadowMasks;
        computeShadowMasks( blackImages, whiteImages, shadowMasks );

        int cam_width = acquired_pattern[0][0].cols;
        int cam_height = acquired_pattern[0][0].rows;

        Point projPixel;

        // Storage for the pixels of the two cameras that correspond to the same projector pixel
        std::vector<std::vector<std::vector<Point> > > camsPixels;
        camsPixels.resize( acquired_pattern.size() );

        // TODO: parallelize for (k and j)
        for( size_t k = 0; k < acquired_pattern.size(); k++ )
        {
            camsPixels[k].resize( params.height * params.width );
            for( int i = 0; i < cam_width; i++ )
            {
                for( int j = 0; j < cam_height; j++ )
                {
                    // if the pixel is not shadowed, try to reconstruct it
                    if( shadowMasks[k].at<uchar>( j, i ) )
                    {
                        // for an (i, j) camera pixel, get the corresponding projector pixel by decoding the Gray code into a decimal index
                        bool error = getProjPixel( acquired_pattern[k], i, j, projPixel );

                        if( error )
                        {
                            continue;
                        }

                        camsPixels[k][projPixel.x * params.height + projPixel.y].push_back( Point( i, j ) );
                    }
                }
            }
        }

        std::vector<Point> cam1Pixs, cam2Pixs;

        Mat& disparityMap_ = *( Mat* ) disparityMap.getObj();
        disparityMap_ = Mat( cam_height, cam_width, CV_64F, double( 0 ) );

        // per-camera counts of reconstructed pixels (accumulated below but not used further in this excerpt)
        double number_of_pixels_cam1 = 0;
        double number_of_pixels_cam2 = 0;

        for( int i = 0; i < params.width; i++ )
        {
            for( int j = 0; j < params.height; j++ )
            {
                cam1Pixs = camsPixels[0][i * params.height + j];
                cam2Pixs = camsPixels[1][i * params.height + j];

                if( cam1Pixs.size() == 0 || cam2Pixs.size() == 0 )
                    continue;

                Point p1;
                Point p2;

                double sump1x = 0;
                double sump2x = 0;

                number_of_pixels_cam1 += cam1Pixs.size();
                number_of_pixels_cam2 += cam2Pixs.size();
                for( int c1 = 0; c1 < (int) cam1Pixs.size(); c1++ )
                {
                    p1 = cam1Pixs[c1];
                    sump1x += p1.x;
                }
                for( int c2 = 0; c2 < (int) cam2Pixs.size(); c2++ )
                {
                    p2 = cam2Pixs[c2];
                    sump2x += p2.x;
                }

                sump2x /= cam2Pixs.size();
                sump1x /= cam1Pixs.size();
                for( int c1 = 0; c1 < (int) cam1Pixs.size(); c1++ )
                {
                    p1 = cam1Pixs[c1];
                    disparityMap_.at<double>( p1.y, p1.x ) = ( double ) (sump2x - sump1x);
                }

            }
        }

        return true;
    }  // end if flags

    return false;
}
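
A hedged end-to-end sketch of calling decode through the public API; the projector resolution is illustrative, and the caller must supply, for each of the two cameras, the captured pattern images plus the all-black and all-white reference images:

#include <opencv2/structured_light.hpp>
#include <vector>

void decodeExample( const std::vector<std::vector<cv::Mat> >& captured,  // one vector of pattern images per camera
                    const std::vector<cv::Mat>& blackImages,
                    const std::vector<cv::Mat>& whiteImages )
{
    cv::structured_light::GrayCodePattern::Params params;
    params.width = 1024;   // projector resolution (illustrative)
    params.height = 768;
    cv::Ptr<cv::structured_light::GrayCodePattern> pattern =
        cv::structured_light::GrayCodePattern::create( params );

    cv::Mat disparity;
    bool decoded = pattern->decode( captured, disparity, blackImages, whiteImages,
                                    cv::structured_light::DECODE_3D_UNDERWORLD );
    CV_Assert( decoded );
}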