Example #1
    void AKAZE::computeImpl(InputArray image, std::vector<KeyPoint>& keypoints, OutputArray descriptors) const
    {
        cv::Mat img = image.getMat();
        if (img.type() != CV_8UC1)
            cvtColor(image, img, COLOR_BGR2GRAY);

        Mat img1_32;
        img.convertTo(img1_32, CV_32F, 1.0 / 255.0, 0);

        cv::Mat& desc = descriptors.getMatRef();

        AKAZEOptions options;
        options.descriptor = static_cast<DESCRIPTOR_TYPE>(descriptor);
        options.descriptor_channels = descriptor_channels;
        options.descriptor_size = descriptor_size;
        options.nsublevels = nsublevels;
        options.dthreshold = dthreshold;
        options.img_width = img.cols;
        options.img_height = img.rows;

        AKAZEFeatures impl(options);
        impl.Create_Nonlinear_Scale_Space(img1_32);
        impl.Compute_Descriptors(keypoints, desc);

        CV_Assert((!desc.rows || desc.cols == descriptorSize()));
        CV_Assert((!desc.rows || (desc.type() == descriptorType())));
    }
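Every example on this page leans on the same contract: getMatRef() returns a writable cv::Mat& bound to whatever container the caller passed as the OutputArray. A minimal sketch of that contract, with a made-up helper name (fillIdentity), not code from any of the snippets here:

#include <opencv2/core.hpp>

// Hypothetical helper: allocate the caller's container, then write through
// the reference that getMatRef() returns.
static void fillIdentity(cv::OutputArray dst, int n)
{
    dst.create(n, n, CV_64F);           // allocates the caller's Mat
    cv::Mat& m = dst.getMatRef();       // reference to that same Mat
    m.setTo(0);
    for (int i = 0; i < n; ++i)
        m.at<double>(i, i) = 1.0;       // writes land in the caller's data
}

int main()
{
    cv::Mat out;
    fillIdentity(out, 3);               // out is now a 3x3 identity matrix
    return out.at<double>(2, 2) == 1.0 ? 0 : 1;
}

Keep in mind that getMatRef() is only valid when the OutputArray actually wraps a cv::Mat; the CUDA and BRISQUE examples further down branch on the container kind before touching it.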
Example #2
void cv::superres::Simple::calc(InputArray _frame0, InputArray _frame1, OutputArray _flow1, OutputArray _flow2)
{
    Mat frame0 = ::getMat(_frame0, buf[0]);
    Mat frame1 = ::getMat(_frame1, buf[1]);

    CV_DbgAssert( frame1.type() == frame0.type() );
    CV_DbgAssert( frame1.size() == frame0.size() );

    Mat input0 = ::convertToType(frame0, CV_8U, 3, buf[2], buf[3]);
    Mat input1 = ::convertToType(frame1, CV_8U, 3, buf[4], buf[5]);

    if (!_flow2.needed() && _flow1.kind() == _InputArray::MAT)
    {
        call(input0, input1, _flow1.getMatRef());
        return;
    }

    call(input0, input1, flow);

    if (!_flow2.needed())
    {
        ::copy(_flow1, flow);
    }
    else
    {
        split(flow, flows);

        ::copy(_flow1, flows[0]);
        ::copy(_flow2, flows[1]);
    }
}
Example #3
  virtual void
  getCameras(OutputArray Rs, OutputArray Ts) {
    const size_t n_views =
      libmv_reconstruction_.reconstruction.AllCameras().size();

    Rs.create(n_views, 1, CV_64F);
    Ts.create(n_views, 1, CV_64F);

    Matx33d R;
    Vec3d t;
    for(size_t i = 0; i < n_views; ++i)
    {
      eigen2cv(libmv_reconstruction_.reconstruction.AllCameras()[i].R, R);
      eigen2cv(libmv_reconstruction_.reconstruction.AllCameras()[i].t, t);
      Mat(R).copyTo(Rs.getMatRef(i));
      Mat(t).copyTo(Ts.getMatRef(i));
    }
  }
Example #4
void SparsePyrLkOptFlowEstimatorGpu::run(
        InputArray frame0, InputArray frame1, InputArray points0, InputOutputArray points1,
        OutputArray status, OutputArray errors)
{
    frame0_.upload(frame0.getMat());
    frame1_.upload(frame1.getMat());
    points0_.upload(points0.getMat());

    if (errors.needed())
    {
        run(frame0_, frame1_, points0_, points1_, status_, errors_);
        errors_.download(errors.getMatRef());
    }
    else
        run(frame0_, frame1_, points0_, points1_, status_);

    points1_.download(points1.getMatRef());
    status_.download(status.getMatRef());
}
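The errors.needed() test above is the standard way to gate an optional output: it returns false when the caller passed cv::noArray(). A small sketch of the idiom under that assumption (compute is a made-up name):

#include <opencv2/core.hpp>

// Hypothetical function with a mandatory and an optional output.
static void compute(cv::OutputArray result, cv::OutputArray errors)
{
    result.create(1, 3, CV_32F);
    result.getMatRef().setTo(0);

    if (errors.needed())            // false when the caller passed noArray()
    {
        errors.create(1, 3, CV_32F);
        errors.getMatRef().setTo(1);
    }
}

int main()
{
    cv::Mat r;
    compute(r, cv::noArray());      // the error buffer is never allocated
    return 0;
}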
Example #5
        void detectAndCompute(InputArray image, InputArray mask,
                              std::vector<KeyPoint>& keypoints,
                              OutputArray descriptors,
                              bool useProvidedKeypoints)
        {
            Mat img = image.getMat();

            if (img_width != img.cols) {
                img_width = img.cols;
                impl.release();
            }

            if (img_height != img.rows) {
                img_height = img.rows;
                impl.release();
            }

            if (impl.empty()) {
                AKAZEOptionsV2 options;
                options.descriptor = descriptor;
                options.descriptor_channels = descriptor_channels;
                options.descriptor_size = descriptor_size;
                options.img_width = img_width;
                options.img_height = img_height;
                options.dthreshold = threshold;
                options.omax = octaves;
                options.nsublevels = sublevels;
                options.diffusivity = diffusivity;

                impl = makePtr<AKAZEFeaturesV2>(options);
            }

            impl->Create_Nonlinear_Scale_Space(img);

            if (!useProvidedKeypoints)
            {
                impl->Feature_Detection(keypoints);
            }

            if (!mask.empty())
            {
                KeyPointsFilter::runByPixelsMask(keypoints, mask.getMat());
            }

            if( descriptors.needed() )
            {
                Mat& desc = descriptors.getMatRef();
                impl->Compute_Descriptors(keypoints, desc);

                CV_Assert((!desc.rows || desc.cols == descriptorSize()));
                CV_Assert((!desc.rows || (desc.type() == descriptorType())));
            }
        }
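For reference, this is roughly how such a detector is driven from the outside through the cv::Feature2D interface; the sketch uses the stock cv::AKAZE rather than the V2 fork above, and the image path is a placeholder:

#include <opencv2/features2d.hpp>
#include <opencv2/imgcodecs.hpp>
#include <vector>

int main()
{
    cv::Mat img = cv::imread("scene.png", cv::IMREAD_GRAYSCALE); // placeholder
    if (img.empty())
        return 1;

    cv::Ptr<cv::AKAZE> akaze = cv::AKAZE::create();
    std::vector<cv::KeyPoint> kpts;
    cv::Mat desc;                  // bound to the descriptors OutputArray
    akaze->detectAndCompute(img, cv::noArray(), kpts, desc);
    return desc.rows == (int)kpts.size() ? 0 : 1;
}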
Example #6
        void detectAndCompute(InputArray image, InputArray mask,
                              std::vector<KeyPoint>& keypoints,
                              OutputArray descriptors,
                              bool useProvidedKeypoints)
        {
            Mat img = image.getMat();
            if (img.type() != CV_8UC1 && img.type() != CV_16UC1)
                cvtColor(image, img, COLOR_BGR2GRAY);

            Mat img1_32;
            if ( img.depth() == CV_32F )
                img1_32 = img;
            else if ( img.depth() == CV_8U )
                img.convertTo(img1_32, CV_32F, 1.0 / 255.0, 0);
            else if ( img.depth() == CV_16U )
                img.convertTo(img1_32, CV_32F, 1.0 / 65535.0, 0);

            CV_Assert( ! img1_32.empty() );

            AKAZEOptions options;
            options.descriptor = descriptor;
            options.descriptor_channels = descriptor_channels;
            options.descriptor_size = descriptor_size;
            options.img_width = img.cols;
            options.img_height = img.rows;
            options.dthreshold = threshold;
            options.omax = octaves;
            options.nsublevels = sublevels;
            options.diffusivity = diffusivity;

            AKAZEFeatures impl(options);
            impl.Create_Nonlinear_Scale_Space(img1_32);

            if (!useProvidedKeypoints)
            {
                impl.Feature_Detection(keypoints);
            }

            if (!mask.empty())
            {
                KeyPointsFilter::runByPixelsMask(keypoints, mask.getMat());
            }

            if( descriptors.needed() )
            {
                Mat& desc = descriptors.getMatRef();
                impl.Compute_Descriptors(keypoints, desc);

                CV_Assert((!desc.rows || desc.cols == descriptorSize()));
                CV_Assert((!desc.rows || (desc.type() == descriptorType())));
            }
        }
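As an aside, the three branches above normalize each supported depth onto [0, 1] floats. A quick standalone check of that scaling, not part of the original code:

#include <opencv2/core.hpp>

int main()
{
    cv::Mat u8(1, 1, CV_8UC1, cv::Scalar(255));
    cv::Mat u16(1, 1, CV_16UC1, cv::Scalar(65535));
    cv::Mat f8, f16;

    u8.convertTo(f8, CV_32F, 1.0 / 255.0);      // 255   -> 1.0f
    u16.convertTo(f16, CV_32F, 1.0 / 65535.0);  // 65535 -> 1.0f

    return (f8.at<float>(0) == 1.0f && f16.at<float>(0) == 1.0f) ? 0 : 1;
}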
Example #7
/*******************************************************************************
* Function:      subtractBGMedian
* Description:   BG subtraction via opening with diagonal structuring elements
* Arguments:
	inImg           -   input image
	bgsImg          -   BG subtracted image
	threshVal       -   threshold value for converting to binary image
	seLength        -   length of structuring elements
	
* Returns:       void
* Comments:
* Revision: 
*******************************************************************************/
void
FGExtraction::subtractBGMedian(InputArray src, OutputArray dst, int threshVal, int seLength)
{
	Mat inImg = src.getMat();
	Mat medImg;

	// median filter
	Mat tempImg = inImg.clone();
	medianBlur(tempImg, medImg, 31);
	//showImage("median", medImg);

	Mat bin;
	threshold(medImg, bin, threshVal, 255, THRESH_BINARY);
	
	dst.getMatRef() = bin;
}
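One caveat on the final line: dst.getMatRef() = bin; rebinds the caller's header so it shares bin's reference-counted buffer. That is safe here because bin owns its data; when the source wraps an external buffer, as in the BRISQUE example further down, the data has to be copied instead, e.g. with bin.copyTo(dst);.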
Example #8
  virtual void
  getPoints(OutputArray points3d) {
    const size_t n_points =
      libmv_reconstruction_.reconstruction.AllPoints().size();

    points3d.create(n_points, 1, CV_64F);

    Vec3d point3d;
    for ( size_t i = 0; i < n_points; ++i )
    {
      for ( int j = 0; j < 3; ++j )
        point3d[j] =
          libmv_reconstruction_.reconstruction.AllPoints()[i].X[j];
      Mat(point3d).copyTo(points3d.getMatRef(i));
    }

  }
Example #9
  void
  reconstruct(const std::vector<cv::String> images, OutputArray Ps, OutputArray points3d,
              InputOutputArray K, bool is_projective)
  {
    const int nviews = static_cast<int>(images.size());
    CV_Assert( nviews >= 2 );

    Matx33d Ka = K.getMat();
    const int depth = Mat(Ka).depth();

    // Projective reconstruction

    if ( is_projective )
    {
      std::vector<Mat> Rs, Ts;
      reconstruct(images, Rs, Ts, Ka, points3d, is_projective);

      // From Rs and Ts, extract Ps

      const int nviews_est = static_cast<int>(Rs.size());
      Ps.create(nviews_est, 1, depth);

      Matx34d P;
      for (int i = 0; i < nviews_est; ++i)
      {
        projectionFromKRt(Ka, Rs[i], Vec3d(Ts[i]), P);
        Mat(P).copyTo(Ps.getMatRef(i));
      }

      Mat(Ka).copyTo(K.getMat());
    }

    // Affine reconstruction

    else
    {
      // TODO: implement me
      CV_Error(Error::StsNotImplemented, "Affine reconstruction not yet implemented");
    }

  }
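The create(n, 1, depth) / getMatRef(i) pairing used in these sfm snippets assumes the caller passes a std::vector<cv::Mat>: create() resizes the vector and getMatRef(i) returns a reference to element i. A minimal sketch under that assumption (makeRotations is a made-up name):

#include <opencv2/core.hpp>
#include <vector>

// Hypothetical writer for a vector-of-Mat output parameter.
static void makeRotations(cv::OutputArray Rs, int n)
{
    Rs.create(n, 1, CV_64F);        // resizes the underlying vector to n
    cv::Mat R = cv::Mat::eye(3, 3, CV_64F);
    for (int i = 0; i < n; ++i)
        R.copyTo(Rs.getMatRef(i));  // allocates and fills element i
}

int main()
{
    std::vector<cv::Mat> Rs;
    makeRotations(Rs, 4);           // Rs now holds four 3x3 identity matrices
    return (int)Rs.size() == 4 ? 0 : 1;
}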
Example #10
void cv::cuda::createContinuous(int rows, int cols, int type, OutputArray arr)
{
    switch (arr.kind())
    {
    case _InputArray::MAT:
        ::createContinuousImpl(rows, cols, type, arr.getMatRef());
        break;

    case _InputArray::GPU_MAT:
        ::createContinuousImpl(rows, cols, type, arr.getGpuMatRef());
        break;

    case _InputArray::CUDA_MEM:
        ::createContinuousImpl(rows, cols, type, arr.getCudaMemRef());
        break;

    default:
        arr.create(rows, cols, type);
    }
}
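A minimal usage sketch for the function above, written against the current cv::cuda API (the dispatch shown is from an older revision in which HostMem was still called CudaMem) and assuming a CUDA-enabled build:

#include <opencv2/core/cuda.hpp>

int main()
{
    cv::cuda::GpuMat buf;
    cv::cuda::createContinuous(480, 640, CV_8UC1, buf);
    return buf.isContinuous() ? 0 : 1;  // rows are laid out contiguously
}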
Example #11
void DensePyrLkOptFlowEstimatorGpu::run(
        InputArray frame0, InputArray frame1, InputOutputArray flowX, InputOutputArray flowY,
        OutputArray errors)
{
    frame0_.upload(frame0.getMat());
    frame1_.upload(frame1.getMat());

    optFlowEstimator_.winSize = winSize_;
    optFlowEstimator_.maxLevel = maxLevel_;
    if (errors.needed())
    {
        optFlowEstimator_.dense(frame0_, frame1_, flowX_, flowY_, &errors_);
        errors_.download(errors.getMatRef());
    }
    else
        optFlowEstimator_.dense(frame0_, frame1_, flowX_, flowY_);

    flowX_.download(flowX.getMatRef());
    flowY_.download(flowY.getMatRef());
}
Example #12
void cv::cuda::ensureSizeIsEnough(int rows, int cols, int type, OutputArray arr)
{
    switch (arr.kind())
    {
    case _InputArray::MAT:
        ::ensureSizeIsEnoughImpl(rows, cols, type, arr.getMatRef());
        break;

    case _InputArray::CUDA_GPU_MAT:
        ::ensureSizeIsEnoughImpl(rows, cols, type, arr.getGpuMatRef());
        break;

    case _InputArray::CUDA_HOST_MEM:
        ::ensureSizeIsEnoughImpl(rows, cols, type, arr.getHostMemRef());
        break;

    default:
        arr.create(rows, cols, type);
    }
}
Example #13
// static
void QualityBRISQUE::computeFeatures(InputArray img, OutputArray features)
{
    CV_Assert(features.needed());
    CV_Assert(img.isMat());
    CV_Assert(!img.getMat().empty());

    auto mat = mat_convert(img.getMat());

    const auto vals = ComputeBrisqueFeature(mat);
    cv::Mat valmat( cv::Size( (int)vals.size(), 1 ), CV_32FC1, (void*)vals.data()); // create row vector, type depends on brisque_calc_element_type

    if (features.isUMat())
        valmat.copyTo(features.getUMatRef());
    else if (features.isMat())
        // how to move data instead?
        // if calling this:
        //      features.getMatRef() = valmat;
        //  then shared data is erased when valmat is released, corrupting the data in the outputarray for the caller
        valmat.copyTo(features.getMatRef());
    else
        CV_Error(cv::Error::StsNotImplemented, "Unsupported output type");
}
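A sketch of how a caller would exercise both branches; the image path is a placeholder and the opencv_contrib quality module is assumed:

#include <opencv2/quality.hpp>
#include <opencv2/imgcodecs.hpp>

int main()
{
    cv::Mat img = cv::imread("input.png");  // placeholder path
    if (img.empty())
        return 1;

    cv::Mat featMat;                        // Mat path -> getMatRef()
    cv::quality::QualityBRISQUE::computeFeatures(img, featMat);

    cv::UMat featUMat;                      // UMat path -> getUMatRef()
    cv::quality::QualityBRISQUE::computeFeatures(img, featUMat);
    return 0;
}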
Example #14
  //  Reconstruction function for API
  void
  reconstruct(InputArrayOfArrays points2d, OutputArray Ps, OutputArray points3d, InputOutputArray K,
              bool is_projective)
  {
    const int nviews = static_cast<int>(points2d.total());
    CV_Assert( nviews >= 2 );

    // OpenCV data types
    std::vector<Mat> pts2d;
    points2d.getMatVector(pts2d);
    const int depth = pts2d[0].depth();

    Matx33d Ka = K.getMat();

    // Projective reconstruction

    if (is_projective)
    {

      if ( nviews == 2 )
      {
        // Get Projection matrices
        Matx33d F;
        Matx34d P, Pp;

        normalizedEightPointSolver(pts2d[0], pts2d[1], F);
        projectionsFromFundamental(F, P, Pp);
        Ps.create(2, 1, depth);
        Mat(P).copyTo(Ps.getMatRef(0));
        Mat(Pp).copyTo(Ps.getMatRef(1));

        // Triangulate and find 3D points using inliers
        triangulatePoints(points2d, Ps, points3d);
      }
      else
      {
        std::vector<Mat> Rs, Ts;
        reconstruct(points2d, Rs, Ts, Ka, points3d, is_projective);

        // From Rs and Ts, extract Ps
        const int nviews_est = static_cast<int>(Rs.size());
        Ps.create(nviews_est, 1, depth);

        Matx34d P;
        for (int i = 0; i < nviews_est; ++i)
        {
          projectionFromKRt(Ka, Rs[i], Vec3d(Ts[i]), P);
          Mat(P).copyTo(Ps.getMatRef(i));
        }

        Mat(Ka).copyTo(K.getMat());
      }

    }


    // Affine reconstruction

    else
    {
      // TODO: implement me
      CV_Error(Error::StsNotImplemented, "Affine reconstruction not yet implemented");
    }

  }
Example #15
    void KAZE::operator()(InputArray _image, InputArray _mask, vector<KeyPoint>& _keypoints,
        OutputArray _descriptors, bool useProvidedKeypoints) const
    {

        bool do_keypoints = !useProvidedKeypoints;
        bool do_descriptors = _descriptors.needed();

        if( (!do_keypoints && !do_descriptors) || _image.empty() )
            return;
        
        cv::Mat img1_8, img1_32;

        // Convert to grayscale image and float image
        if (_image.getMat().channels() == 3)
            cv::cvtColor(_image, img1_8, CV_RGB2GRAY);
        else
            _image.getMat().copyTo(img1_8);

        img1_8.convertTo(img1_32, CV_32F, 1.0 / 255.0, 0);

        // Construct KAZE
        toptions opt = options;
        opt.img_width = img1_32.cols;
        opt.img_height = img1_32.rows;

        ::KAZE kazeEvolution(opt);

        // Create nonlinear scale space
        kazeEvolution.Create_Nonlinear_Scale_Space(img1_32);        

        // Feature detection
        std::vector<Ipoint> kazePoints;

        if (do_keypoints)
        {
            kazeEvolution.Feature_Detection(kazePoints);
            filterDuplicated(kazePoints);

            if (!_mask.empty())
            {
                filterByPixelsMask(kazePoints, _mask.getMat());
            }

            if (opt.nfeatures > 0)
            {
                filterRetainBest(kazePoints, opt.nfeatures);
            }
            
        }
        else
        {
            kazePoints.resize(_keypoints.size());

            #pragma omp parallel for
            for (int i = 0; i < (int)kazePoints.size(); i++)
            {
                convertPoint(_keypoints[i], kazePoints[i]);    
            }
        }
        
        // Descriptor calculation
        if (do_descriptors)
        {
            kazeEvolution.Feature_Description(kazePoints);

            cv::Mat& descriptors = _descriptors.getMatRef();
            descriptors.create((int)kazePoints.size(), descriptorSize(), descriptorType());

            for (size_t i = 0; i < kazePoints.size(); i++)
            {
                std::copy(kazePoints[i].descriptor.begin(), kazePoints[i].descriptor.end(), (float*)descriptors.row(i).data);
            }
        }

        // Transfer from KAZE::Ipoint to cv::KeyPoint
        if (do_keypoints)
        {
            _keypoints.resize(kazePoints.size());

            #pragma omp parallel for
            for (int i = 0; i < (int)kazePoints.size(); i++)
            {
                convertPoint(kazePoints[i], _keypoints[i]);
            }
        }
        
    }
Example #16
        bool PositionCalculatorImpl::computeStateImpl( double /*time*/, OutputArray _state, OutputArray _covariance )
        {
            _state.create( 3, 1, CV_64F );
            Mat& state = _state.getMatRef();

            const int num = static_cast< int >(positions.size());
            F.create( 2 * num, 3, CV_64F );
            b.create( 2 * num, 1, CV_64F );

            for ( int i = 0; i < num; ++i )
            {
                F.at< double >( i, 0 ) = sin( angles[i].x );
                F.at< double >( i, 1 ) = -cos( angles[i].x );
                F.at< double >( i, 2 ) = 0;

                F.at< double >( num + i, 0 ) = -sin( angles[i].y ) * cos( angles[i].x );
                F.at< double >( num + i, 1 ) = -sin( angles[i].y ) * sin( angles[i].x );
                F.at< double >( num + i, 2 ) = cos( angles[i].y );

                b.at< double >( i ) = Mat( F( Rect( 0, i, 3, 1 ) ) * positions[i] ).at< double >( 0 );
                b.at< double >( num + i ) = Mat( F( Rect( 0, num + i, 3, 1 ) ) * positions[i] ).at< double >( 0 );
            }

            Mat iple;
            solve( F, b, iple, DECOMP_SVD );

            G.create( 2 * num, 3, CV_64F );
            W.create( 2 * num, 2 * num, CV_64F );
            W.setTo( 0 );

            for ( int i = 0; i < num; ++i )
            {
                const Point2d new_ang = getAzEl( positions[i], iple );
                G.at< double >( i, 0 ) = sin( new_ang.x );
                G.at< double >( i, 1 ) = -cos( new_ang.x );
                G.at< double >( i, 2 ) = 0;

                G.at< double >( num + i, 0 ) = -sin( new_ang.y ) * cos( new_ang.x );
                G.at< double >( num + i, 1 ) = -sin( new_ang.y ) * sin( new_ang.x );
                G.at< double >( num + i, 2 ) = cos( new_ang.y );

                Point2f diff2;
                Point3f diff3;

                diff2.x = ( iple.at< double >( 0 ) - positions[i].at< double >( 0 ) );
                diff2.y = ( iple.at< double >( 1 ) - positions[i].at< double >( 1 ) );
                diff3.x = diff2.x;
                diff3.y = diff2.y;
                diff3.z = ( iple.at< double >( 2 ) - positions[i].at< double >( 2 ) );
                W.at< double >( i, i ) = norm( diff2 );
                W.at< double >( i + num, i + num ) = ( norm( diff3 ) - W.at< double >( i, i ) );
            }

            state = Mat( G.t() * W * F ).inv() * G.t() * W * b;

            if ( _covariance.needed() )
            {
                Mat& cov = _covariance.getMatRef();
                cov = G.t() * F / ( num / 2.0 );
                multiply( cov, sum( F * state - b )( 0 ), cov );
            }
            return true;
        }
Example #17
Stitcher::Status Stitcher::composePanorama(InputArray images, OutputArray pano)
{
    LOGLN("Warping images (auxiliary)... ");

    vector<Mat> imgs;
    images.getMatVector(imgs);
    if (!imgs.empty())
    {
        CV_Assert(imgs.size() == imgs_.size());

        Mat img;
        seam_est_imgs_.resize(imgs.size());

        for (size_t i = 0; i < imgs.size(); ++i)
        {
            imgs_[i] = imgs[i];
            resize(imgs[i], img, Size(), seam_scale_, seam_scale_);
            seam_est_imgs_[i] = img.clone();
        }

        vector<Mat> seam_est_imgs_subset;
        vector<Mat> imgs_subset;

        for (size_t i = 0; i < indices_.size(); ++i)
        {
            imgs_subset.push_back(imgs_[indices_[i]]);
            seam_est_imgs_subset.push_back(seam_est_imgs_[indices_[i]]);
        }

        seam_est_imgs_ = seam_est_imgs_subset;
        imgs_ = imgs_subset;
    }

    Mat &pano_ = pano.getMatRef();

    int64 t = getTickCount();

    vector<Point> corners(imgs_.size());
    vector<Mat> masks_warped(imgs_.size());
    vector<Mat> images_warped(imgs_.size());
    vector<Size> sizes(imgs_.size());
    vector<Mat> masks(imgs_.size());

    // Prepare image masks
    for (size_t i = 0; i < imgs_.size(); ++i)
    {
        masks[i].create(seam_est_imgs_[i].size(), CV_8U);
        masks[i].setTo(Scalar::all(255));
    }

    // Warp images and their masks
    Ptr<detail::RotationWarper> w = warper_->create(float(warped_image_scale_ * seam_work_aspect_));
    for (size_t i = 0; i < imgs_.size(); ++i)
    {
        Mat_<float> K;
        cameras_[i].K().convertTo(K, CV_32F);
        K(0,0) *= (float)seam_work_aspect_;
        K(0,2) *= (float)seam_work_aspect_;
        K(1,1) *= (float)seam_work_aspect_;
        K(1,2) *= (float)seam_work_aspect_;

        corners[i] = w->warp(seam_est_imgs_[i], K, cameras_[i].R, INTER_LINEAR, BORDER_REFLECT, images_warped[i]);
        sizes[i] = images_warped[i].size();

        w->warp(masks[i], K, cameras_[i].R, INTER_NEAREST, BORDER_CONSTANT, masks_warped[i]);
    }

    vector<Mat> images_warped_f(imgs_.size());
    for (size_t i = 0; i < imgs_.size(); ++i)
        images_warped[i].convertTo(images_warped_f[i], CV_32F);

    LOGLN("Warping images, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");

    // Find seams
    exposure_comp_->feed(corners, images_warped, masks_warped);
    seam_finder_->find(images_warped_f, corners, masks_warped);

    // Release unused memory
    seam_est_imgs_.clear();
    images_warped.clear();
    images_warped_f.clear();
    masks.clear();

    LOGLN("Compositing...");
    t = getTickCount();

    Mat img_warped, img_warped_s;
    Mat dilated_mask, seam_mask, mask, mask_warped;

    //double compose_seam_aspect = 1;
    double compose_work_aspect = 1;
    bool is_blender_prepared = false;

    double compose_scale = 1;
    bool is_compose_scale_set = false;

    Mat full_img, img;
    for (size_t img_idx = 0; img_idx < imgs_.size(); ++img_idx)
    {
        LOGLN("Compositing image #" << indices_[img_idx] + 1);

        // Read image and resize it if necessary
        full_img = imgs_[img_idx];
        if (!is_compose_scale_set)
        {
            if (compose_resol_ > 0)
                compose_scale = min(1.0, sqrt(compose_resol_ * 1e6 / full_img.size().area()));
            is_compose_scale_set = true;

            // Compute relative scales
            //compose_seam_aspect = compose_scale / seam_scale_;
            compose_work_aspect = compose_scale / work_scale_;

            // Update warped image scale
            warped_image_scale_ *= static_cast<float>(compose_work_aspect);
            w = warper_->create((float)warped_image_scale_);

            // Update corners and sizes
            for (size_t i = 0; i < imgs_.size(); ++i)
            {
                // Update intrinsics
                cameras_[i].focal *= compose_work_aspect;
                cameras_[i].ppx *= compose_work_aspect;
                cameras_[i].ppy *= compose_work_aspect;

                // Update corner and size
                Size sz = full_img_sizes_[i];
                if (std::abs(compose_scale - 1) > 1e-1)
                {
                    sz.width = cvRound(full_img_sizes_[i].width * compose_scale);
                    sz.height = cvRound(full_img_sizes_[i].height * compose_scale);
                }

                Mat K;
                cameras_[i].K().convertTo(K, CV_32F);
                Rect roi = w->warpRoi(sz, K, cameras_[i].R);
                corners[i] = roi.tl();
                sizes[i] = roi.size();
            }
        }
        if (std::abs(compose_scale - 1) > 1e-1)
            resize(full_img, img, Size(), compose_scale, compose_scale);
        else
            img = full_img;
        full_img.release();
        Size img_size = img.size();

        Mat K;
        cameras_[img_idx].K().convertTo(K, CV_32F);

        // Warp the current image
        w->warp(img, K, cameras_[img_idx].R, INTER_LINEAR, BORDER_REFLECT, img_warped);

        // Warp the current image mask
        mask.create(img_size, CV_8U);
        mask.setTo(Scalar::all(255));
        w->warp(mask, K, cameras_[img_idx].R, INTER_NEAREST, BORDER_CONSTANT, mask_warped);

        // Compensate exposure
        exposure_comp_->apply((int)img_idx, corners[img_idx], img_warped, mask_warped);

        img_warped.convertTo(img_warped_s, CV_16S);
        img_warped.release();
        img.release();
        mask.release();

        // Make sure seam mask has proper size
        dilate(masks_warped[img_idx], dilated_mask, Mat());
        resize(dilated_mask, seam_mask, mask_warped.size());

        mask_warped = seam_mask & mask_warped;

        if (!is_blender_prepared)
        {
            blender_->prepare(corners, sizes);
            is_blender_prepared = true;
        }

        // Blend the current image
        blender_->feed(img_warped_s, mask_warped, corners[img_idx]);
    }

    Mat result, result_mask;
    blender_->blend(result, result_mask);

    LOGLN("Compositing, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");

    // Preliminary result is in CV_16SC3 format, but all values are in [0, 255] range,
    // so convert it to CV_8U to avoid confusing the user
    result.convertTo(pano_, CV_8U);

    return OK;
}
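For completeness, a sketch of the usual route into composePanorama(): Stitcher::stitch() runs estimateTransform() followed by this function, with pano bound to the OutputArray. It assumes OpenCV 3.3+ (where Stitcher::create() exists) and placeholder image paths:

#include <opencv2/stitching.hpp>
#include <opencv2/imgcodecs.hpp>
#include <vector>

int main()
{
    std::vector<cv::Mat> imgs;
    imgs.push_back(cv::imread("left.jpg"));   // placeholder paths
    imgs.push_back(cv::imread("right.jpg"));

    cv::Ptr<cv::Stitcher> stitcher = cv::Stitcher::create();
    cv::Mat pano;                             // filled through pano.getMatRef()
    cv::Stitcher::Status status = stitcher->stitch(imgs, pano);
    return status == cv::Stitcher::OK ? 0 : 1;
}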