Example #1
void StereoVar::operator ()( const Mat& left, const Mat& right, Mat& disp )
{
    CV_Assert(left.size() == right.size() && left.type() == right.type());
    Size imgSize = left.size();
    int MaxD = MAX(labs(minDisp), labs(maxDisp));
    int SignD = 1; if (MIN(minDisp, maxDisp) < 0) SignD = -1;
    if (minDisp >= maxDisp) {MaxD = 256; SignD = 1;}

    Mat u;
    if ((flags & USE_INITIAL_DISPARITY) && (!disp.empty())) {
        CV_Assert(disp.size() == left.size() && disp.type() == CV_8UC1);
        disp.convertTo(u, CV_32FC1, static_cast<double>(SignD * MaxD) / 256);
    } else {
        u.create(imgSize, CV_32FC1);
        u.setTo(0);
    }

    // Preprocessing
    Mat leftgray, rightgray;
    if (left.type() != CV_8UC1) {
        cvtColor(left, leftgray, CV_BGR2GRAY);
        cvtColor(right, rightgray, CV_BGR2GRAY);
    } else {
        left.copyTo(leftgray);
        right.copyTo(rightgray);
    }
    if (flags & USE_EQUALIZE_HIST) {
        equalizeHist(leftgray, leftgray);
        equalizeHist(rightgray, rightgray);
    }
    if (poly_sigma > 0.0001) {
        GaussianBlur(leftgray, leftgray, Size(poly_n, poly_n), poly_sigma);
        GaussianBlur(rightgray, rightgray, Size(poly_n, poly_n), poly_sigma);
    }

    if (flags & USE_AUTO_PARAMS) {
        penalization = PENALIZATION_TICHONOV;
        autoParams();
    }

    Mat I1, I2;
    leftgray.convertTo(I1, CV_32FC1);
    rightgray.convertTo(I2, CV_32FC1);
    leftgray.release();
    rightgray.release();

    // horizontal derivative of the right image, needed by the variational solver
    Mat I2x = diffX(I2);

    // full multigrid cycle: refines the disparity estimate u over all pyramid levels
    FMG(I1, I2, I2x, u, levels - 1);

    I1.release();
    I2.release();
    I2x.release();

    disp.create( left.size(), CV_8UC1 );
    u = abs(u);
    // scale |disparity| back to the 8-bit output range; floating-point division avoids
    // truncating the factor when MaxD does not evenly divide 256
    u.convertTo(disp, disp.type(), 256.0 / MaxD, 0);

    u.release();
}
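
A minimal usage sketch for the operator above (assumes the OpenCV 2.4 contrib cv::StereoVar interface; file names and parameter values are illustrative, not tuned):

#include <opencv2/contrib/contrib.hpp>
#include <opencv2/highgui/highgui.hpp>

int main()
{
    cv::Mat left  = cv::imread("left.png");
    cv::Mat right = cv::imread("right.png");

    cv::StereoVar var;
    var.levels  = 4;                               // pyramid levels used by FMG
    var.minDisp = -64;                             // disparity search range
    var.maxDisp = 0;
    var.flags   = cv::StereoVar::USE_AUTO_PARAMS;  // let autoParams() pick the rest

    cv::Mat disp;
    var(left, right, disp);                        // runs the operator() shown above
    cv::imwrite("disp.png", disp);
    return 0;
}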
Example #2
    void BTVL1_Base::process(InputArrayOfArrays _src, OutputArray _dst, InputArrayOfArrays _forwardMotions,
                             InputArrayOfArrays _backwardMotions, int baseIdx)
    {
        CV_Assert( scale_ > 1 );
        CV_Assert( iterations_ > 0 );
        CV_Assert( tau_ > 0.0 );
        CV_Assert( alpha_ > 0.0 );
        CV_Assert( btvKernelSize_ > 0 );
        CV_Assert( blurKernelSize_ > 0 );
        CV_Assert( blurSigma_ >= 0.0 );

        CV_OCL_RUN(_src.isUMatVector() && _dst.isUMat() && _forwardMotions.isUMatVector() &&
                   _backwardMotions.isUMatVector(),
                   ocl_process(_src, _dst, _forwardMotions, _backwardMotions, baseIdx))

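        // _src, _forwardMotions and _backwardMotions wrap std::vector<Mat> here;
        // recover references to the underlying vectors (no copies are made)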
        std::vector<Mat> & src = *(std::vector<Mat> *)_src.getObj(),
                & forwardMotions = *(std::vector<Mat> *)_forwardMotions.getObj(),
                & backwardMotions = *(std::vector<Mat> *)_backwardMotions.getObj();

        // update blur filter and btv weights
        if (blurKernelSize_ != curBlurKernelSize_ || blurSigma_ != curBlurSigma_ || src[0].type() != curSrcType_)
        {
            //filter_ = createGaussianFilter(src[0].type(), Size(blurKernelSize_, blurKernelSize_), blurSigma_);
            curBlurKernelSize_ = blurKernelSize_;
            curBlurSigma_ = blurSigma_;
            curSrcType_ = src[0].type();
        }

        if (btvWeights_.empty() || btvKernelSize_ != curBtvKernelSize_ || alpha_ != curAlpha_)
        {
            calcBtvWeights(btvKernelSize_, alpha_, btvWeights_);
            curBtvKernelSize_ = btvKernelSize_;
            curAlpha_ = alpha_;
        }

        // calc high res motions
        calcRelativeMotions(forwardMotions, backwardMotions, lowResForwardMotions_, lowResBackwardMotions_, baseIdx, src[0].size());

        upscaleMotions(lowResForwardMotions_, highResForwardMotions_, scale_);
        upscaleMotions(lowResBackwardMotions_, highResBackwardMotions_, scale_);

        forwardMaps_.resize(highResForwardMotions_.size());
        backwardMaps_.resize(highResForwardMotions_.size());
        for (size_t i = 0; i < highResForwardMotions_.size(); ++i)
            buildMotionMaps(highResForwardMotions_[i], highResBackwardMotions_[i], forwardMaps_[i], backwardMaps_[i]);

        // initial estimation
        const Size lowResSize = src[0].size();
        const Size highResSize(lowResSize.width * scale_, lowResSize.height * scale_);

        resize(src[baseIdx], highRes_, highResSize, 0, 0, INTER_CUBIC);

        // iterations
        diffTerm_.create(highResSize, highRes_.type());
        a_.create(highResSize, highRes_.type());
        b_.create(highResSize, highRes_.type());
        c_.create(lowResSize, highRes_.type());

        for (int i = 0; i < iterations_; ++i)
        {
            diffTerm_.setTo(Scalar::all(0));

            for (size_t k = 0; k < src.size(); ++k)
            {
                // a = M * Ih
                remap(highRes_, a_, backwardMaps_[k], noArray(), INTER_NEAREST);
                // b = HM * Ih
                GaussianBlur(a_, b_, Size(blurKernelSize_, blurKernelSize_), blurSigma_);
                // c = DHM * Ih
                resize(b_, c_, lowResSize, 0, 0, INTER_NEAREST);

                diffSign(src[k], c_, c_);

                // a = Dt * diff
                upscale(c_, a_, scale_);
                // b = HtDt * diff
                GaussianBlur(a_, b_, Size(blurKernelSize_, blurKernelSize_), blurSigma_);
                // a = MtHtDt * diff
                remap(b_, a_, forwardMaps_[k], noArray(), INTER_NEAREST);

                add(diffTerm_, a_, diffTerm_);
            }

            if (lambda_ > 0)
            {
                calcBtvRegularization(highRes_, regTerm_, btvKernelSize_, btvWeights_, ubtvWeights_);
                addWeighted(diffTerm_, 1.0, regTerm_, -lambda_, 0.0, diffTerm_);
            }

            addWeighted(highRes_, 1.0, diffTerm_, tau_, 0.0, highRes_);
        }

        Rect inner(btvKernelSize_, btvKernelSize_, highRes_.cols - 2 * btvKernelSize_, highRes_.rows - 2 * btvKernelSize_);
        highRes_(inner).copyTo(_dst);
    }
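
BTVL1_Base::process() is internal to the superres module; at the user level the same pipeline is normally driven through the public SuperResolution interface. A minimal sketch, assuming the OpenCV 3.x superres public API (video file name and parameter values are illustrative):

#include <opencv2/superres.hpp>

int main()
{
    using namespace cv;
    using namespace cv::superres;

    Ptr<SuperResolution> sr = createSuperResolution_BTVL1();
    sr->setScale(2);               // upscaling factor (scale_ above)
    sr->setIterations(100);        // gradient-descent iterations (iterations_ above)
    sr->setTemporalAreaRadius(4);  // how many neighbouring frames feed each output frame

    sr->setInput(createFrameSource_Video("input.avi"));

    Mat result;
    sr->nextFrame(result);         // internally ends up in BTVL1_Base::process()
    return 0;
}

Example #3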
//---------------------------------------------------------------------------------------------------------
// this helper function comes from the *Odometry implementation
static
int computeCorresps(const Mat& K, const Mat& K_inv, const Mat& Rt,
                    const Mat& depth0, const Mat& validMask0,
                    const Mat& depth1, const Mat& selectMask1, float maxDepthDiff,
                    Mat& corresps)
{
    CV_Assert(K.type() == CV_64FC1);
    CV_Assert(K_inv.type() == CV_64FC1);
    CV_Assert(Rt.type() == CV_64FC1);

    corresps.create(depth1.size(), CV_32SC1);
    corresps.setTo(-1);

    Rect r(0, 0, depth1.cols, depth1.rows);
    Mat Kt = Rt(Rect(3,0,1,3)).clone();
    Kt = K * Kt;
    const double * Kt_ptr = reinterpret_cast<const double *>(Kt.ptr());

    AutoBuffer<float> buf(3 * (depth1.cols + depth1.rows));
    float *KRK_inv0_u1 = buf;
    float *KRK_inv1_v1_plus_KRK_inv2 = KRK_inv0_u1 + depth1.cols;
    float *KRK_inv3_u1 = KRK_inv1_v1_plus_KRK_inv2 + depth1.rows;
    float *KRK_inv4_v1_plus_KRK_inv5 = KRK_inv3_u1 + depth1.cols;
    float *KRK_inv6_u1 = KRK_inv4_v1_plus_KRK_inv5 + depth1.rows;
    float *KRK_inv7_v1_plus_KRK_inv8 = KRK_inv6_u1 + depth1.cols;
    {
        Mat R = Rt(Rect(0,0,3,3)).clone();

        Mat KRK_inv = K * R * K_inv;
        const double * KRK_inv_ptr = reinterpret_cast<const double *>(KRK_inv.ptr());
        for(int u1 = 0; u1 < depth1.cols; u1++)
        {
            KRK_inv0_u1[u1] = (float)(KRK_inv_ptr[0] * u1);
            KRK_inv3_u1[u1] = (float)(KRK_inv_ptr[3] * u1);
            KRK_inv6_u1[u1] = (float)(KRK_inv_ptr[6] * u1);
        }

        for(int v1 = 0; v1 < depth1.rows; v1++)
        {
            KRK_inv1_v1_plus_KRK_inv2[v1] = (float)(KRK_inv_ptr[1] * v1 + KRK_inv_ptr[2]);
            KRK_inv4_v1_plus_KRK_inv5[v1] = (float)(KRK_inv_ptr[4] * v1 + KRK_inv_ptr[5]);
            KRK_inv7_v1_plus_KRK_inv8[v1] = (float)(KRK_inv_ptr[7] * v1 + KRK_inv_ptr[8]);
        }
    }

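    // for every selected pixel in frame 1, project it into frame 0 using the
    // precomputed row/column terms; keep at most one correspondence per frame-0
    // pixel, preferring the candidate with the smaller transformed depth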
    int correspCount = 0;
    for(int v1 = 0; v1 < depth1.rows; v1++)
    {
        const float *depth1_row = depth1.ptr<float>(v1);
        const uchar *mask1_row = selectMask1.ptr<uchar>(v1);
        for(int u1 = 0; u1 < depth1.cols; u1++)
        {
            float d1 = depth1_row[u1];
            if(mask1_row[u1])
            {
                CV_DbgAssert(!cvIsNaN(d1));
                float transformed_d1 = static_cast<float>(d1 * (KRK_inv6_u1[u1] + KRK_inv7_v1_plus_KRK_inv8[v1]) + Kt_ptr[2]);
                if(transformed_d1 > 0)
                {
                    float transformed_d1_inv = 1.f / transformed_d1;
                    int u0 = cvRound(transformed_d1_inv * (d1 * (KRK_inv0_u1[u1] + KRK_inv1_v1_plus_KRK_inv2[v1]) + Kt_ptr[0]));
                    int v0 = cvRound(transformed_d1_inv * (d1 * (KRK_inv3_u1[u1] + KRK_inv4_v1_plus_KRK_inv5[v1]) + Kt_ptr[1]));

                    if(r.contains(Point(u0,v0)))
                    {
                        float d0 = depth0.at<float>(v0,u0);
                        if(validMask0.at<uchar>(v0, u0) && std::abs(transformed_d1 - d0) <= maxDepthDiff)
                        {
                            CV_DbgAssert(!cvIsNaN(d0));
                            int c = corresps.at<int>(v0,u0);
                            if(c != -1)
                            {
                                int exist_u1, exist_v1;
                                get2shorts(c, exist_u1, exist_v1);

                                float exist_d1 = (float)(depth1.at<float>(exist_v1,exist_u1) *
                                    (KRK_inv6_u1[exist_u1] + KRK_inv7_v1_plus_KRK_inv8[exist_v1]) + Kt_ptr[2]);

                                if(transformed_d1 > exist_d1)
                                    continue;
                            }
                            else
                                correspCount++;

                            set2shorts(corresps.at<int>(v0,u0), u1, v1);
                        }
                    }
                }
            }
        }
    }
    return correspCount;
}
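
A hypothetical call sketch for the helper above, using an identity pose and synthetic flat depth maps (the intrinsics and image size are illustrative):

    Mat K = (Mat_<double>(3, 3) << 525.0,   0.0, 319.5,
                                     0.0, 525.0, 239.5,
                                     0.0,   0.0,   1.0);
    Mat K_inv = K.inv();
    Mat Rt = Mat::eye(4, 4, CV_64FC1);               // identity camera motion

    Mat depth0(480, 640, CV_32FC1, Scalar(1.f));     // 1 m everywhere
    Mat depth1 = depth0.clone();
    Mat validMask0(depth0.size(), CV_8UC1, Scalar(255));
    Mat selectMask1 = validMask0.clone();

    Mat corresps;
    int count = computeCorresps(K, K_inv, Rt, depth0, validMask0,
                                depth1, selectMask1, 0.07f, corresps);
    // with an identity pose every selected pixel maps onto itself,
    // so count equals depth1.total() here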
Example #4
File: Dip2.cpp Project: kziel1/dip
// checks basic properties of the convolution result
void Dip2::test_spatialConvolution(void){

   Mat input = Mat::ones(9,9, CV_32FC1);
   input.at<float>(4,4) = 255;
   Mat kernel = Mat(3,3, CV_32FC1, 1./9.);

   Mat output = spatialConvolution(input, kernel);
   
   if ( (input.cols != output.cols) || (input.rows != output.rows) ){
      cout << "ERROR: Dip2::spatialConvolution(): input.size != output.size --> Wrong border handling?" << endl;
      return;
   }
   if ( (sum(output.row(0) < 0).val[0] > 0) or
        (sum(output.row(0) > 255).val[0] > 0) or
        (sum(output.row(8) < 0).val[0] > 0) or
        (sum(output.row(8) > 255).val[0] > 0) or
        (sum(output.col(0) < 0).val[0] > 0) or
        (sum(output.col(0) > 255).val[0] > 0) or
        (sum(output.col(8) < 0).val[0] > 0) or
        (sum(output.col(8) > 255).val[0] > 0) ){
      cout << "ERROR: Dip2::spatialConvolution(): Border of convolution result contains too large/small values --> Wrong border handling?" << endl;
      return;
   }else{
      if ( (sum(output < 0).val[0] > 0) or
           (sum(output > 255).val[0] > 0) ){
         cout << "ERROR: Dip2::spatialConvolution(): Convolution result contains too large/small values!" << endl;
         return;
      }
   }
   float ref[9][9] = {{0, 0, 0, 0, 0, 0, 0, 0, 0},
                      {0, 1, 1, 1, 1, 1, 1, 1, 0},
                      {0, 1, 1, 1, 1, 1, 1, 1, 0},
                      {0, 1, 1, (8+255)/9., (8+255)/9., (8+255)/9., 1, 1, 0},
                      {0, 1, 1, (8+255)/9., (8+255)/9., (8+255)/9., 1, 1, 0},
                      {0, 1, 1, (8+255)/9., (8+255)/9., (8+255)/9., 1, 1, 0},
                      {0, 1, 1, 1, 1, 1, 1, 1, 0},
                      {0, 1, 1, 1, 1, 1, 1, 1, 0},
                      {0, 0, 0, 0, 0, 0, 0, 0, 0}};
   for(int y=1; y<8; y++){
      for(int x=1; x<8; x++){
         if (abs(output.at<float>(y,x) - ref[y][x]) > 0.0001){
            cout << "ERROR: Dip2::spatialConvolution(): Convolution result contains wrong values!" << endl;
            return;
         }
      }
   }
   input.setTo(0);
   input.at<float>(4,4) = 255;
   kernel.setTo(0);
   kernel.at<float>(0,0) = -1;
   output = spatialConvolution(input, kernel);
   if ( abs(output.at<float>(5,5) + 255.) < 0.0001 ){
      cout << "ERROR: Dip2::spatialConvolution(): Is filter kernel \"flipped\" during convolution? (Check lecture/exercise slides)" << endl;
      return;
   }
   if ( ( abs(output.at<float>(2,2) + 255.) < 0.0001 ) || ( abs(output.at<float>(4,4) + 255.) < 0.0001 ) ){
      cout << "ERROR: Dip2::spatialConvolution(): Is anchor point of convolution the centre of the filter kernel? (Check lecture/exercise slides)" << endl;
      return;
   }
   cout << "Message: Dip2::spatialConvolution() seems to be correct" << endl;
}
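
For reference, a minimal convolution that would satisfy the checks above could look like the sketch below (hypothetical, not necessarily the project's actual Dip2::spatialConvolution): the kernel is flipped, the anchor is the kernel centre, and border pixels are left at zero.

Mat spatialConvolutionSketch(const Mat& src, const Mat& kernel)
{
   Mat dst = Mat::zeros(src.size(), CV_32FC1);
   int kr = kernel.rows / 2;
   int kc = kernel.cols / 2;
   for (int y = kr; y < src.rows - kr; y++){
      for (int x = kc; x < src.cols - kc; x++){
         float sum = 0.f;
         for (int i = -kr; i <= kr; i++){
            for (int j = -kc; j <= kc; j++){
               // flipped kernel indices: convolution rather than correlation
               sum += src.at<float>(y + i, x + j) * kernel.at<float>(kr - i, kc - j);
            }
         }
         dst.at<float>(y, x) = sum;
      }
   }
   return dst;
}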
Example #5
vector<bool> CharacterAnalysis::filterBetweenLines(Mat img, vector<vector<Point> > contours, vector<Vec4i> hierarchy, vector<Point> outerPolygon, vector<bool> goodIndices)
{
    static const float MIN_AREA_PERCENT_WITHIN_LINES = 0.88f;

    vector<bool> includedIndices(contours.size(), false);


    if (outerPolygon.size() == 0)
        return includedIndices;

    vector<Point> validPoints;

    // Figure out the line height
    LineSegment topLine(outerPolygon[0].x, outerPolygon[0].y, outerPolygon[1].x, outerPolygon[1].y);
    LineSegment bottomLine(outerPolygon[3].x, outerPolygon[3].y, outerPolygon[2].x, outerPolygon[2].y);

    float x = ((float) img.cols) / 2;
    Point midpoint = Point(x, bottomLine.getPointAt(x));
    Point acrossFromMidpoint = topLine.closestPointOnSegmentTo(midpoint);
    float lineHeight = distanceBetweenPoints(midpoint, acrossFromMidpoint);

    // Create a white mask for the area inside the polygon
    Mat outerMask = Mat::zeros(img.size(), CV_8U);
    Mat innerArea = Mat::zeros(img.size(), CV_8U);
    fillConvexPoly(outerMask, outerPolygon.data(), outerPolygon.size(), Scalar(255,255,255));


    for (int i = 0; i < contours.size(); i++)
    {
        if (goodIndices[i] == false)
            continue;

        // draw this contour, filled in white, onto the blank inner-area mask
        drawContours(innerArea, contours,
                     i, // draw this contour
                     cv::Scalar(255,255,255), // in white
                     CV_FILLED,
                     8,
                     hierarchy,
                     0
                    );


        bitwise_and(innerArea, outerMask, innerArea);


        vector<vector<Point> > tempContours;
        findContours(innerArea, tempContours,
                     CV_RETR_EXTERNAL,         // retrieve only the external contours
                     CV_CHAIN_APPROX_SIMPLE ); // compress contour segments

        double totalArea = contourArea(contours[i]);
        double areaBetweenLines = 0;

        for (int tempContourIdx = 0; tempContourIdx < tempContours.size(); tempContourIdx++)
        {
            areaBetweenLines += contourArea(tempContours[tempContourIdx]);

        }


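        // keep this contour only if enough of its area lies inside the
        // polygon spanned by the top and bottom plate lines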
        if (areaBetweenLines / totalArea >= MIN_AREA_PERCENT_WITHIN_LINES)
        {
            includedIndices[i] = true;
        }

        innerArea.setTo(Scalar(0,0,0));
    }

    return includedIndices;
}
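
A hypothetical call sketch (assumes an existing CharacterAnalysis instance from the OpenALPR pipeline, here called analysis; the input image and polygon are illustrative):

    Mat thresholded = imread("plate_threshold.png", 0);   // binary character image

    vector<vector<Point> > contours;
    vector<Vec4i> hierarchy;
    findContours(thresholded.clone(), contours, hierarchy,
                 CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE);

    // 4-point polygon between the top and bottom plate lines, ordered
    // top-left, top-right, bottom-right, bottom-left as the function expects
    vector<Point> outerPolygon;
    outerPolygon.push_back(Point(0, 10));
    outerPolygon.push_back(Point(thresholded.cols - 1, 10));
    outerPolygon.push_back(Point(thresholded.cols - 1, thresholded.rows - 10));
    outerPolygon.push_back(Point(0, thresholded.rows - 10));

    vector<bool> goodIndices(contours.size(), true);       // start by keeping every contour

    vector<bool> kept = analysis.filterBetweenLines(thresholded, contours, hierarchy,
                                                    outerPolygon, goodIndices);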