Example #1
void FastCornerDetector::detect
(
  const image::Image<unsigned char> & ima,
  std::vector<PointFeature> & regions
)
{
  using FastDetectorCall =
    xy* (*) (const unsigned char *, int, int, int, int, int *);

  FastDetectorCall detector = nullptr;
  if (size_ ==  9) detector =  fast9_detect_nonmax;
  if (size_ == 10) detector = fast10_detect_nonmax;
  if (size_ == 11) detector = fast11_detect_nonmax;
  if (size_ == 12) detector = fast12_detect_nonmax;
  if (!detector)
  {
    std::cout << "Invalid size for FAST detector: " << size_ << std::endl;
    return;
  }

  int num_corners = 0;
  xy* detections = detector(ima.data(),
     ima.Width(), ima.Height(), ima.Width(),
     threshold_, &num_corners);
  regions.clear();
  regions.reserve(num_corners);
  for (int i = 0; i < num_corners; ++i)
  {
    regions.emplace_back(detections[i].x, detections[i].y);
  }
  free( detections );
}
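
A minimal call-site sketch for the detector above (the header path and the FastCornerDetector(size, threshold) constructor are assumptions in the openMVG style, not shown in this excerpt):

#include <vector>
#include "openMVG/features/fast/fast_detector.hpp" // assumed header path

int main()
{
  openMVG::image::Image<unsigned char> ima; // assume the image was loaded elsewhere
  openMVG::features::FastCornerDetector detector(9, 30); // FAST-9 with threshold 30
  std::vector<openMVG::features::PointFeature> corners;
  detector.detect(ima, corners); // one PointFeature per detected corner
  return 0;
}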
Example #2
template <typename PixelType>
void LinearFilterWx1<PixelType>::applyTo(const Image::Image<PixelType> & srcImage, Image::Image<PixelType> & dstImage) const {

	typedef typename PixelType::DataType        PixelDataType;
	typedef typename PixelType::ComputationType PixelComputationType;

	BaseLinearFilterParametersType<PixelDataType,PixelComputationType> parameters(
		getFilterData().getDataView(),
		getXoffset(),
		getYoffset(),
		srcImage.getWidth(),
		getTotalColor()
	);

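	// NOTE: constructing the temporary below is what applies the filter; the
	// AlgorithmWx1 constructor presumably walks the source data view and
	// writes the filtered result into the destination view.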
	Algorithm::AlgorithmWx1<
		SimpleWx1dataOperationBaseAlgorithm<
			BaseLinearFilterAlgorithm<
				PixelDataType,
				PixelComputationType,
				BaseLinearFilterParametersType<PixelDataType,PixelComputationType>,
				Algorithm::BaseOperationTempType<PixelDataType,PixelComputationType>
			>,
			PixelDataType,
			PixelComputationType,
			BaseLinearFilterParametersType<PixelDataType,PixelComputationType>,
			Algorithm::BaseOperationTempType<PixelDataType,PixelComputationType>
		>,
		PixelDataType,
		BaseLinearFilterParametersType<PixelDataType,PixelComputationType>
	>(
		srcImage.getDataView(),
		dstImage.getDataView(),
		parameters
	);

}
Example #3

void GradientsDescriptor::fillHisto( const image::Image& _dx, const image::Image& _dy, double* _histo, const jblas::vec2& _startPoint, const jblas::vec2& _direction, double _lineAngle, int _length, double _coef  )
{
  jblas::vec2 currentPoint = _startPoint + 3.0 * _direction;
  double lastNorm = DBL_MAX;
  for(int i = 0; i < _length; ++i)
//   while(true)
  {
    int ix = int(currentPoint(0));
    int iy = int(currentPoint(1));
    if( not check(ix, _dx.width() - 1) or not check(iy, _dx.height() - 1) ) return;
    int dx = _dx.getPixelValue<short>( ix, iy, 0 );
    int dy = _dy.getPixelValue<short>( ix, iy, 0 );
//     double dx = _dx.getSubPixelValue<short>( ix, iy, 0, JfrImage_INTERP_CUBIC );
//     double dy = _dy.getSubPixelValue<short>( ix, iy, 0, JfrImage_INTERP_CUBIC );
    double norm = sqrt( dx * dx + dy * dy );
//     if( norm > lastNorm) return ;
    double angle = atan2(dy, dx);
//     double diff = cos( angle - _lineAngle);
    double correctedAngle = ( angle - _lineAngle);
//     int idx = int( (m_count) * ( 1.0 + diff) * 0.5);
//     if( idx >= m_count ) idx = m_count - 1;
//     if( idx != 0 and idx != (m_count - 1 ) )
    for(int j = 0; j < m_count; ++j )
    {
//       double lnorm = norm * ( 1.0 - fabs( fpow( -1.0 + 2.0 * j / (m_count-1) - diff ), 2.0) );
//       double lnorm = norm * exp( -pow( -1.0 + 2.0 * j / (m_count-1) - diff, 2.0) );
//       JFR_DEBUG( j << " " << exp( -pow2( cos( M_PI * j / (m_count-1) - 0.5 * correctedAngle ) ) ) );
      double lnorm = norm * _coef * exp( -pow2( cos( M_PI * j / (m_count-1) - 0.5 * correctedAngle ) ) );
      _histo[ j ] += lnorm;
    }
    lastNorm = norm;
    currentPoint += _direction;
  }
}
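
The check and pow2 helpers used above are not part of this excerpt; plausible definitions consistent with how they are called (assumptions, not the original source):

inline double pow2( double x ) { return x * x; }                    // squared value
inline bool check( int v, int max ) { return v >= 0 && v <= max; }  // inclusive bounds test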
Example #4
template <typename PixelType>
void AIL_DLL_EXPORT BoxFilterWx1<PixelType>::applyTo(const Image::Image<PixelType> & srcImage, Image::Image<PixelType> & dstImage) const {

	typedef typename PixelType::DataType        PixelDataType;
	typedef typename PixelType::ComputationType PixelComputationType;

	BoxFilterWx1parametersType<PixelDataType,PixelComputationType> parameters(xOffset,filterWidth,PixelType::ComputationRange::getMinPixel(),PixelComputationType(filterWidth));

	Algorithm::AlgorithmWx1<
		Algorithm::BasicWx1baseAlgorithm<
			BaseBoxFilterAlgorithm<
				PixelDataType,
				PixelComputationType,
				BoxFilterWx1parametersType<PixelDataType,PixelComputationType>,
				Algorithm::BaseOperationTempType<PixelDataType,PixelComputationType>
			>,
			PixelDataType,
			PixelComputationType,
			BoxFilterWx1parametersType<PixelDataType,PixelComputationType>,
			Algorithm::BaseOperationTempType<PixelDataType,PixelComputationType>
		>,
		PixelDataType,
		BoxFilterWx1parametersType<PixelDataType,PixelComputationType>
	>(
		srcImage.getDataView(),
		dstImage.getDataView(),
		parameters
	);

}
Example #5

void DirectSegmentsBase::coloriage( double x_1, double y_1, double x_2, double y_2, image::Image& inasegment, int _value)
{
  double xStep = (x_2 - x_1);
  double yStep = (y_2 - y_1);
  double norm_steps = sqrt( xStep * xStep + yStep * yStep );
  xStep /= norm_steps;
  yStep /= norm_steps;
  int max_coloriage = (int)norm_steps +1;
  for( int k = 0; k < max_coloriage; ++k )
  {
    int x = (int)( x_1 + k * xStep );
    int y = (int)( y_1 + k * yStep );
    for( int j = -1; j <= 1; ++j)
    {
      for( int i = -1; i <= 1; ++i)
      {
        if( check( x + i, inasegment.width() - 1 ) and check( y + j, inasegment.height() - 1 ) )
        {
          inasegment.setPixelValue<int>( _value, x+i, y+j, 0 );
        }
      }
    }
  }
#if 0
  double xStep = (x_2 - x_1);
  double yStep = (y_2 - y_1);
  double norm_steps = sqrt( xStep * xStep + yStep * yStep );
  xStep /= norm_steps;
  yStep /= norm_steps;
  int max_coloriage = (int)norm_steps +1;
  int p_x = x_1;
  int p_y = y_1;
  if( check( p_x,  inasegment.width() - 1 ) and check( p_y, inasegment.height() - 1 ) )
  {
    inasegment.setPixelValue<int>( _value, p_x, p_y, 0 );
  }
  for( int k = -1; k <= max_coloriage; ++k )
  {
    int x = (int)( x_1 + k * xStep );
    int y = (int)( y_1 + k * yStep );
    if( check( p_x, inasegment.width() - 1 ) and check( y, inasegment.height() - 1 ) )
    {
      inasegment.setPixelValue<int>( _value, p_x, y, 0 );
    }
    if( check( x, inasegment.width() - 1 ) )
    {
      if( check( p_y, inasegment.height() - 1 ) )
      {
        inasegment.setPixelValue<int>( _value, x, p_y, 0 );
      }
      if( check( y, inasegment.height() - 1 ) )
      {
        inasegment.setPixelValue<int>( _value, x, y, 0 );
      }
    }
    p_x = x;
    p_y = y;
  }
#endif
}
Example #6
  // suggest new feature points for tracking (up to count points are kept)
  bool detect
  (
    const image::Image<unsigned char> & ima,
    std::vector<features::PointFeature> & pt_to_track,
    const size_t count
  ) const override
  {
    cv::Mat current_img;
    cv::eigen2cv(ima.GetMat(), current_img);
    std::vector<cv::KeyPoint> m_nextKeypoints;

    cv::Ptr<cv::FeatureDetector> m_detector = cv::GFTTDetector::create(count);
    if (m_detector == NULL)
      return false;

    m_detector->detect(current_img, m_nextKeypoints);

    if (m_nextKeypoints.size() >= count)
    {
      // shuffle to avoid sampling from only one bucket
      std::mt19937 gen(std::mt19937::default_seed);
      std::shuffle(m_nextKeypoints.begin(), m_nextKeypoints.end(), gen);
    }
    const size_t kept_kp_count =  std::min(m_nextKeypoints.size(), count);
    m_nextKeypoints.resize(kept_kp_count);

    pt_to_track.resize(kept_kp_count);
    for (size_t i = 0; i  < kept_kp_count; ++i)
      pt_to_track[i] = features::PointFeature(m_nextKeypoints[i].pt.x, m_nextKeypoints[i].pt.y);

    return kept_kp_count != 0;
    // Return false if no point can be added
  }
Example #7
  /// Try to track the current point set in the provided image;
  /// returns false when tracking failed (=> send the frame to relocalization)
  bool track
  (
    const image::Image<unsigned char> & ima,
    const std::vector<features::PointFeature> & pt_to_track,
    std::vector<features::PointFeature> & pt_tracked,
    std::vector<bool> & status
  ) override
  {
    cv::eigen2cv(ima.GetMat(), current_img_);
    if (!pt_to_track.empty())
    {
      prevPts_.resize(pt_to_track.size());
      nextPts_.resize(pt_to_track.size());
      pt_tracked.resize(pt_to_track.size()); // the output is written by index below, so it must match the input size

      for (size_t i=0; i < pt_to_track.size(); ++i)
      {
        prevPts_[i].x = pt_to_track[i].x();
        prevPts_[i].y = pt_to_track[i].y();
      }

      std::vector<unsigned char> status_uchar;
      cv::calcOpticalFlowPyrLK(prev_img_, current_img_, prevPts_, nextPts_, status_uchar, error_);
      status.assign(status_uchar.begin(), status_uchar.end());

      for (size_t i=0; i < nextPts_.size(); ++i)
      {
        pt_tracked[i].coords() << nextPts_[i].x, nextPts_[i].y;
      }
    }
    // swap frame for next tracking iteration
    current_img_.copyTo(prev_img_);

    const size_t tracked_point_count = std::accumulate(status.begin(), status.end(), 0);
    return (tracked_point_count != 0);
  }
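
A hypothetical driver combining the detect()/track() interfaces of Examples #6 and #7 (the tracker object and the frame images are assumptions):

    std::vector<features::PointFeature> pts, tracked;
    std::vector<bool> status;
    tracker.detect(frame0, pts, 500);            // seed up to 500 points
    tracker.track(frame1, pts, tracked, status); // status[i] tells whether pts[i] was followed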
Example #8
    /**
     * Fill mask from corresponding points (each point pictured by a disk of radius _radius)
     *
     * \param[out] maskLeft Mask of the left image (initialized to corresponding image size).
     * \param[out] maskRight  Mask of the right image  (initialized to corresponding image size).
     *
     * \return True if some pixels have been set to true.
     */
    virtual bool computeMask( image::Image< unsigned char > & maskLeft, image::Image< unsigned char > & maskRight )
    {
        maskLeft.fill(0);
        maskRight.fill(0);
        for( std::vector< matching::IndMatch >::const_iterator
                iter_putativeMatches = _vec_PutativeMatches.begin();
                iter_putativeMatches != _vec_PutativeMatches.end();
                ++iter_putativeMatches )
        {
            const features::SIOPointFeature & L = _vec_featsL[ iter_putativeMatches->i_ ];
            const features::SIOPointFeature & R = _vec_featsR[ iter_putativeMatches->j_ ];

            image::FilledCircle( L.x(), L.y(), ( int )_radius, ( unsigned char ) 255, &maskLeft );
            image::FilledCircle( R.x(), R.y(), ( int )_radius, ( unsigned char ) 255, &maskRight );
        }
        return _vec_PutativeMatches.size() > 0;
    }
Example #9
	double Zncc::compute(image::Image const& im1, image::Image const& im2, float const* weightMatrix)
	{
		JFR_PRECOND(im1.depth() == im2.depth(), "The depth of both images is different");
		switch(im1.depth())
		{
// 			case CV_1U:
// 				if (weightMatrix == NULL)
// 					return computeTpl<CV_1U, bool,bool,0,1,true,false>(im1,im2);
// 				else
// 					return computeTpl<CV_1U, bool,bool,0,1,true,true>(im1,im2,weightMatrix);
			case CV_8U:
				if (weightMatrix == NULL)
					return computeTpl<CV_8U, uint8_t,uint8_t,0,255,true,false>(im1,im2);
				else
					return computeTpl<CV_8U, uint8_t,uint8_t,0,255,true,true>(im1,im2,weightMatrix);
			case CV_8S:
				if (weightMatrix == NULL)
					return computeTpl<CV_8S, int8_t,int8_t, -128,127,true,false>(im1,im2);
				else
					return computeTpl<CV_8S, int8_t,int8_t, -128,127,true,true>(im1,im2,weightMatrix);
			case CV_16U:
				if (weightMatrix == NULL)
					return computeTpl<CV_16U, uint16_t,uint16_t, 0,65535,true,false>(im1,im2);
				else
					return computeTpl<CV_16U, uint16_t,uint16_t, 0,65535,true,true>(im1,im2,weightMatrix);
			case CV_16S:
				if (weightMatrix == NULL)
					return computeTpl<CV_16S, int16_t,int16_t, -32768,32767,true,false>(im1,im2);
				else
					return computeTpl<CV_16S, int16_t,int16_t, -32768,32767,true,true>(im1,im2,weightMatrix);
			case CV_32F:
				if (weightMatrix == NULL) // bool and no bounds, because a float cannot be used as a template parameter, and bounds would be useless here anyway
					return computeTpl<CV_32F, float,bool, 0,0,false,false>(im1,im2);
				else
					return computeTpl<CV_32F, float,bool, 0,0,false,true>(im1,im2,weightMatrix);
			case CV_64F:
				if (weightMatrix == NULL) // bool and no bounds, because a float cannot be used as a template parameter, and bounds would be useless here anyway
					return computeTpl<CV_64F, double,bool, 0,0,false,false>(im1,im2);
				else
					return computeTpl<CV_64F, double,bool, 0,0,false,true>(im1,im2,weightMatrix);
			default:
				JFR_PRECOND(false, "Unknown image depth");
				return FP_NAN;
		}
	}
Example #10

  /**
  @brief Detect regions on the image and compute their attributes (description)
  @param image Image.
  @param regions The detected regions and attributes (the caller must delete the allocated data)
  @param mask 8-bit gray image for keypoint filtering (optional).
     Non-zero values depict the region of interest.
  */
  bool Describe(const image::Image<unsigned char>& image,
    std::unique_ptr<Regions> &regions,
    const image::Image<unsigned char> * mask = nullptr)
  {
    // Convert for opencv
    cv::Mat img;
    cv::eigen2cv(image.GetMat(), img);

    // Convert mask image into cv::Mat
    cv::Mat m_mask;
    if(mask != nullptr) {
      cv::eigen2cv(mask->GetMat(), m_mask);
    }

    // Create a SIFT detector
    std::vector< cv::KeyPoint > v_keypoints;
    cv::Mat m_desc;
    cv::Ptr<cv::Feature2D> siftdetector = cv::xfeatures2d::SIFT::create();

    // Process SIFT computation
    siftdetector->detectAndCompute(img, m_mask, v_keypoints, m_desc);

    Allocate(regions);

    // Build alias to cached data
    SIFT_Regions * regionsCasted = dynamic_cast<SIFT_Regions*>(regions.get());
    // reserve some memory for faster keypoint saving
    regionsCasted->Features().reserve(v_keypoints.size());
    regionsCasted->Descriptors().reserve(v_keypoints.size());

    // Prepare a column vector with the sum of each descriptor
    cv::Mat m_siftsum;
    cv::reduce(m_desc, m_siftsum, 1, cv::REDUCE_SUM);

    // Copy keypoints and descriptors in the regions
    int cpt = 0;
    for(std::vector< cv::KeyPoint >::const_iterator i_kp = v_keypoints.begin();
        i_kp != v_keypoints.end();
        ++i_kp, ++cpt)
    {
      SIOPointFeature feat((*i_kp).pt.x, (*i_kp).pt.y, (*i_kp).size, (*i_kp).angle);
      regionsCasted->Features().push_back(feat);

      Descriptor<unsigned char, 128> desc;
      for(int j = 0; j < 128; j++)
      {
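        // RootSIFT-style mapping: L1-normalize against the row sum, take the
        // square root, then scale into the 8-bit descriptor range.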
        desc[j] = static_cast<unsigned char>(512.0*sqrt(m_desc.at<float>(cpt, j)/m_siftsum.at<float>(cpt, 0)));
      }
      regionsCasted->Descriptors().push_back(desc);
    }

    return true;
  };
Example #11
void snapshot(unsigned call_no) {
    if (!drawable ||
        (!snapshot_prefix && !compare_prefix)) {
        return;
    }

    Image::Image *ref = NULL;

    if (compare_prefix) {
        char filename[PATH_MAX];
        snprintf(filename, sizeof filename, "%s%010u.png", compare_prefix, call_no);
        ref = Image::readPNG(filename);
        if (!ref) {
            return;
        }
        if (retrace::verbosity >= 0) {
            std::cout << "Read " << filename << "\n";
        }
    }

    Image::Image *src = glstate::getDrawBufferImage(GL_RGBA);
    if (!src) {
        return;
    }

    if (snapshot_prefix) {
        char filename[PATH_MAX];
        snprintf(filename, sizeof filename, "%s%010u.png", snapshot_prefix, call_no);
        if (src->writePNG(filename) && retrace::verbosity >= 0) {
            std::cout << "Wrote " << filename << "\n";
        }
    }

    if (ref) {
        std::cout << "Snapshot " << call_no << " average precision of " << src->compare(*ref) << " bits\n";
        delete ref;
    }

    delete src;
}
Example #12
int main( int argc , char** argv ){ 
#ifdef DEBUG
	MEM_ON();
	TRACE_OFF();
#endif

	//total execution timer
	Timer totalTimer;
	totalTimer.start();


	std::cout<<"****************************************************************"<<std::endl;
	std::cout<<"*            OpenMP execution with "<<omp_get_max_threads()<<" threads                   *"<<std::endl;
	std::cout<<"****************************************************************"<<std::endl;

	std::cout<<"\n\n\n";



	std::vector<std::string> imageName;
	
	

	std::stringstream input(argv[4]);
	double factor;
	input >> factor;


	int succeded = 0;
	int failed = 0;

	std::string operation( argv[3] );
	//parallel code timer
	Timer parallelTimer;


	// how many images to run in parallel; number of threads created for the program
	unsigned int parallelImages = omp_get_max_threads();
	// counter for total files across parallel iterations
	unsigned int counter = 0;
	// how many iterations to run in parallel
	unsigned int parallelIterations = parallelImages;
	


	if( GetDirFileNames ( argv[1] , imageName ) )
	
	try {
		parallelTimer.start();
		for( std::vector<std::string>::iterator it = imageName.begin() ; it < imageName.end() ; it += parallelImages ){
				
			counter += parallelImages;
                        parallelIterations = parallelImages;
                        if( counter > imageName.size() )
                                parallelIterations =  imageName.size() - ( counter - parallelImages );

			#pragma omp parallel for
			for( unsigned int i = 0 ; i < parallelIterations ; i++ ) {				

			
				std::cout<<(*(it + i))<<"\n";
				IMAGE::Image* oldImage = NULL;
				IMAGE::Image* newImage = NULL;
				std::string oldName  = argv[1] + (*(it + i));
				std::string newName  = argv[2] + (*(it + i));
				try{

					oldImage= IMAGE::Image::createInstance( oldName );
					newImage = IMAGE::Image::createInstance( newName ); 
				
					////////////////////
					try{
						oldImage->open( oldName, 'r' );
						newImage->open( newName , 'w' );
						oldImage->readImageRaster();
						newImage->raster.createRaster( oldImage->raster );
					
						//check which operation to do
						if( operation == REVERSE ){
	                                                IMAGE::PROCESS::reverseColor( newImage->raster );
        	                                }
						else if( operation == BRIGHTNESS ) {
							IMAGE::PROCESS::adjustBrightness( newImage->raster , atoi( argv[4] ));
						}
						else if( operation == CONTRAST ) {
							IMAGE::PROCESS::adjustContrast( newImage->raster , atoi( argv[4] ) );
						}
                        	                else if( operation == RGB2GREY ) {
                                	                IMAGE::FILTERS::convertRGB2GREY( newImage->raster , atoi( argv[4] ));
                                        	}
						else if( operation == RGB2BW ) {
							IMAGE::FILTERS::convertRGB2BW( newImage->raster );
						}
						else if( operation == RGB2SEPIA ) {
							IMAGE::FILTERS::convertRGB2SEPIA( newImage->raster );
						}
						else if( operation == BLUR ) {
							IMAGE::PROCESS::blurImage( newImage->raster , atoi(argv[4]) );
						}
						else if( operation == ROTATE ) {
							IMAGE::PROCESS::rotateImage( newImage->raster , argv[4] ) ;
						}
						else if( operation == ZOOM ) {
							IMAGE::PROCESS::zoomImage( newImage->raster , 1 , 1 , 400 , 300 );
						}
						else if( operation == SCALE ) {
							IMAGE::PROCESS::scaleImage( newImage->raster , factor );
						}
                        	                else {
                                	                std::cout<<"Not a valid operation \n";
                                        	        exit(0);
                                       	 	}





						newImage->writeRasterToImage();
						#pragma omp atomic
						succeded++;

					}
					catch( IMAGE::file_io_failed& e ) {
						std::cout<<e.what()<<"\n";
						#pragma omp atomic
						failed++;
					}
					catch ( IMAGE::image_format_error& e ) {
						std::cout<<e.what()<<"\n";
						#pragma omp atomic
						failed++;
					}		
					catch( IMAGE::empty_image& e ){
						std::cout<<e.what()<<"\n";
						#pragma omp atomic
						failed++;
					}	
					catch ( IMAGE::empty_raster& e ) {
						std::cout<<e.what()<<"\n";
						#pragma omp atomic
						failed++;
					}
				

						oldImage->close();
						newImage->close();
						delete oldImage;
						delete newImage;

					
			
				}
			catch( IMAGE::not_supported_format& e ) {
				std::cout<<e.what()<<"\n";
				#pragma omp atomic
				failed++;
			}
			
		}//end of omp_parallel_for


		}//end of for
		
	}
	catch( IMAGE::bad_alloc& e ){
		std::cout<<e.what()<<std::endl;
		std::cout<<"Exiting program...\n";
		exit(1);

	}
	catch(...){
		std::cout<<"caught unexpected exception...\n";
		std::cout<<"Exiting program...\n";
		exit(1);
	}



        totalTimer.stop();
        parallelTimer.stop();


	//final output         //////////////////////////////////////////////////////////////

        std::cout<<"\n\n\n";
        std::cout<<"*************************************************************"<<std::endl;
        std::cout<<"*                          Results                          *"<<std::endl;

        std::cout<<"* total files      :  "<<imageName.size()<<"                                     *"<<std::endl;
        std::cout<<"* Succeeded images :  "<<succeded<<"                                     *"<<std::endl;
        std::cout<<"* Failed images    :  "<<failed<<"                                     *"<<std::endl;


        std::cout<<"*                                                           *"<<std::endl;
        std::cout<<"* Total execution time             :  "<<totalTimer.getReal()<<" sec          *"<<std::endl;
        std::cout<<"* Parallel partial execution time  :  "<<parallelTimer.getReal()<<" sec          *"<<std::endl;

        std::cout<<"*                                                           *"<<std::endl;
        std::cout<<"*                        END OF PROGRAM                     *"<<std::endl;
        std::cout<<"*************************************************************"<<std::endl;


	///////////////////////////////////////////////////////////////////////////////////////


#ifdef DEBUG
	MEM_OFF();
#endif


	return 0;
}
Example #13
	double Zncc::compute8noborne(image::Image const& im1, image::Image const& im2)
	{
		JFR_PRECOND(im1.depth() == im2.depth(), "The depth of both images is different");
		JFR_PRECOND(im1.depth() == CV_8U, "The depth of images must be CV_8U");
		return computeTpl<CV_8U, uint8_t,uint8_t,0,255,false,false>(im1,im2);
	}
Example #14
  /**
   * Put masks to white, images are conserved
   *
   * \param[out] maskLeft Mask of the left image (initialized to corresponding image size).
   * \param[out] maskRight  Mask of the right image (initialized to corresponding image size).
   *
   * \return True.
   */
  virtual bool computeMask(
    image::Image< unsigned char > & maskLeft,
    image::Image< unsigned char > & maskRight )
  {
    std::vector< matching::IndMatch > vec_KVLDMatches;

    image::Image< unsigned char > imageL, imageR;
    image::ReadImage( _sLeftImage.c_str(), &imageL );
    image::ReadImage( _sRightImage.c_str(), &imageR );

    image::Image< float > imgA ( imageL.GetMat().cast< float >() );
    image::Image< float > imgB(imageR.GetMat().cast< float >());

    std::vector< Pair > matchesFiltered, matchesPair;

    for( std::vector< matching::IndMatch >::const_iterator iter_match = _vec_PutativeMatches.begin();
          iter_match != _vec_PutativeMatches.end();
          ++iter_match )
    {
      matchesPair.push_back( std::make_pair( iter_match->i_, iter_match->j_ ) );
    }

    std::vector< double > vec_score;

    // In order to illustrate the gvld (or vld) consistent neighbors, the following two parameters have been externalized as inputs of the function KVLD.
    openMVG::Mat E = openMVG::Mat::Ones( _vec_PutativeMatches.size(), _vec_PutativeMatches.size() ) * ( -1 );
    // gvld-consistency matrix, initialized to -1; >0 consistency value, -1 = unknown, -2 = false
    std::vector< bool > valide( _vec_PutativeMatches.size(), true );// flags for the initial matches; if still true at the end of KVLD, the match is kept.

    size_t it_num = 0;
    KvldParameters kvldparameters;//initial parameters of KVLD
    //kvldparameters.K = 5;
    while (
      it_num < 5 &&
      kvldparameters.inlierRate >
      KVLD(
        imgA, imgB,
        _vec_featsL, _vec_featsR,
        matchesPair, matchesFiltered,
        vec_score, E, valide, kvldparameters ) )
    {
      kvldparameters.inlierRate /= 2;
      std::cout<<"low inlier rate, re-select matches with new rate="<<kvldparameters.inlierRate<<std::endl;
      kvldparameters.K = 2;
      it_num++;
    }

    bool bOk = false;
    if( !matchesPair.empty())
    {
      // Get mask
      getKVLDMask(
        &maskLeft, &maskRight,
        _vec_featsL, _vec_featsR,
        matchesPair,
        valide,
        E);
      bOk = true;
    }
    else{
      maskLeft.fill( 0 );
      maskRight.fill( 0 );
    }

    return bOk;
  }
Example #15
      /**
      * @brief Extract MSER regions
      * @param img Input image
      * @param[out] regions Output regions
      */
      void MSERExtractor::Extract( const image::Image<unsigned char> & img , std::vector<MSERRegion> & regions ) const
      {
        // Compute minimum and maximum region area relative to this image
        const int minRegArea = img.Width() * img.Height() * m_minimum_area;
        const int maxRegArea = img.Width() * img.Height() * m_maximum_area;

        // List of processed pixels (maybe we can use a more efficient structure)
        std::vector<std::vector<bool >> processed;
        processed.resize( img.Width() );
        for (int i = 0; i < img.Width(); ++i )
        {
          processed[ i ].resize( img.Height() );
          std::fill( processed[ i ].begin() , processed[ i ].end() , false );
        }

        // Holds the boundary of given grayscale value (boundary[0] -> pixels in the boundary with 0 grayscale value)
        std::vector<PixelStackElt> boundary[ 256 ];

        // List of regions computed so far (not only valid MSER regions)
        std::vector<MSERRegion *> regionStack;

        // Push an empty region
        regionStack.push_back( new MSERRegion );

        // Start processing from top left pixel
        PixelStackElt cur_pix;
        cur_pix.pix_x = 0;
        cur_pix.pix_y = 0;
        cur_pix.pix_level = img( 0 , 0 );
        cur_pix.edge_index = PIXEL_RIGHT;

        processed[ cur_pix.pix_x ][ cur_pix.pix_y ] = true;

        regionStack.push_back( new MSERRegion( cur_pix.pix_level , cur_pix.pix_x , cur_pix.pix_y ) );

        int priority = 256;

        // Start process
        while (1)
        {
          bool restart = false;

          // Process neighbors to see if there is something to search at a lower grayscale level
          for ( PixelNeighborsDirection curDir = cur_pix.edge_index;
                curDir <= PIXEL_BOTTOM_RIGHT;
                curDir = NextDirection( curDir , m_connectivity ) )
          {
            int nx , ny;
            GetNeighbor( cur_pix.pix_x , cur_pix.pix_y , curDir , img.Width() , img.Height() , nx , ny );

            // Pixel was not processed before
            if (ValidPixel( nx , ny , img.Width() , img.Height() ) && ! processed[ nx ][ ny ] )
            {
              const int nLevel = img( ny , nx );
              processed[ nx ][ ny ] = true;

              // Info of the neighboring pixel
              PixelStackElt n_elt;
              n_elt.pix_x = nx;
              n_elt.pix_y = ny;
              n_elt.pix_level = nLevel;
              n_elt.edge_index = PIXEL_RIGHT;

              // Now decide from which pixel we have to continue
              if (nLevel >= cur_pix.pix_level )
              {
                // Continue from the same pixel
                boundary[ nLevel ].push_back( n_elt );

                // Store the lowest value so far
                priority = std::min( nLevel , priority );
              }
              else
              {
                // Go on with the neighboring pixel (go down)
                cur_pix.edge_index = NextDirection( curDir , m_connectivity ); // Next time we have to process the next boundary pixel
                boundary[ cur_pix.pix_level ].push_back( cur_pix );

                // Store the lowest value so far
                priority = std::min( cur_pix.pix_level , priority );

                // Push the next pixel to process
                cur_pix = n_elt;
                restart = true;
                break;
              }
            }
          }
          // Do we have to restart from a new pixel?
          if (restart )
          {
            // If so, it's because we found a lower grayscale value, so start a new region
            regionStack.push_back( new MSERRegion( cur_pix.pix_level , cur_pix.pix_x , cur_pix.pix_y ) );
            continue;
          }

          // We have processed all the neighboring pixels; the current pixel is
          // the lowest we have found so far, so process it now
          regionStack.back()->AppendPixel( cur_pix.pix_x , cur_pix.pix_y );

          // End of the process: no boundary pixels remain, compute MSER from the region graph
          if (priority == 256 )
          {
            regionStack.back()->ComputeMSER( m_delta , minRegArea , maxRegArea , m_max_variation , m_min_diversity , regions );
            break;
          }

          PixelStackElt next_pix = boundary[ priority ].back();
          boundary[ priority ].pop_back();

          // Get the next pixel level
          while (( priority < 256 ) && boundary[ priority ].empty())
          {
            ++priority;
          }

          // Grayscale level of the next pixel to process
          const int newLevel = next_pix.pix_level;

          // Process the current stack of pixels if the next pixel to process is not at the same current level
          if (newLevel != cur_pix.pix_level )
          {
            // Try to merge the regions to form a tree
            ProcessStack( newLevel , next_pix.pix_x , next_pix.pix_y , regionStack );
          }

          // Update next pixel for processing
          cur_pix = next_pix;
        }

        // Clear region stack created so far
        for (size_t i = 0; i < regionStack.size(); ++i )
        {
          delete regionStack[ i ];
        }
      }
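
A hypothetical call site (the MSERExtractor constructor, its default thresholds, and the namespace qualifiers are assumptions; only Extract() is shown above):

  MSERExtractor extractor;               // assumed default delta/area/variation settings
  std::vector<MSERRegion> regions;
  extractor.Extract(grayImage, regions); // grayImage: an image::Image<unsigned char>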
Example #16
		double Explorer<Correl>::exploreTranslation(image::Image const& im1, image::Image const& im2_, int xmin, int xmax, int xstep, int ymin, int ymax, int ystep, double &xres, double &yres, float const* weightMatrix)
		{
			cv::Rect roi = im1.getROI();
//			image::Image im2(im2_, cv::Rect(0,0,im2_.width(),im2_.height()));
			image::Image im2(im2_);
			double score;
			double best_score = -1.;
			int bestx = -1, besty = -1;
			
			if (xmin < 0) xmin = 0; if (xmax >= im2.width ()) xmax = im2.width ()-1;
			if (ymin < 0) ymin = 0; if (ymax >= im2.height()) ymax = im2.height()-1;
			
			int sa_w = (xmax-xmin+1), sa_h = (ymax-ymin+1); // search area
			if (sa_w < 5) xstep = 1; if (sa_h < 5) ystep = 1;
			int nresults = (sa_w+2)*(sa_h+2);
			double *results = new double[nresults]; // add 1 border for interpolation
			for(int i = 0; i < nresults; i++) results[i] = -1e6;
			
			// explore
			for(int y = ymin; y <= ymax; y += ystep)
			for(int x = xmin; x <= xmax; x += xstep)
				DO_CORRELATION(im1, im2, weightMatrix, x, y, score, best_score, bestx, besty, roi);

			// refine
// JFR_DEBUG("refine (" << bestx << "," << besty << " " << best_score << ")");
			// TODO refine several local maxima
			// TODO refine by dichotomy for large steps ?
			int newbestx = bestx, newbesty = besty;
			for(int y = besty-ystep+1; y <= besty+ystep-1; y++)
			for(int x = bestx-xstep+1; x <= bestx+xstep-1; x++)
			{
				if (x == bestx && y == besty) continue;
				DO_CORRELATION(im1, im2, weightMatrix, x, y, score, best_score, newbestx, newbesty, roi);
			}
			
			// ensure that all values that will be used by interpolation are computed
			int newnewbestx = newbestx, newnewbesty = newbesty;
/*			if (((newbestx == bestx-xstep+1 || newbestx == bestx+xstep-1) && (newbesty-ymin)%ystep) ||
			    ((newbesty == besty-ystep+1 || newbesty == besty+ystep-1) && (newbestx-xmin)%xstep))
			{
				if (newbestx == bestx-xstep+1) DO_CORRELATION(im1, im2, weightMatrix, newbestx-1, newbesty, score, best_score, newnewbestx, newnewbesty, roi);
				if (newbestx == bestx+xstep-1) DO_CORRELATION(im1, im2, weightMatrix, newbestx+1, newbesty, score, best_score, newnewbestx, newnewbesty, roi);
				if (newbesty == besty-ystep+1) DO_CORRELATION(im1, im2, weightMatrix, newbestx, newbesty-1, score, best_score, newnewbestx, newnewbesty, roi);
				if (newbesty == besty+ystep-1) DO_CORRELATION(im1, im2, weightMatrix, newbestx, newbesty+1, score, best_score, newnewbestx, newnewbesty, roi);
			}*/
// JFR_DEBUG("extra interpol (" << newbestx << "," << newbesty << " " << best_score << ")");
			do {
				newbestx = newnewbestx, newbesty = newnewbesty;
				if (newbestx>0 && RESULTS(newbesty,newbestx-1)<-1e5)
					DO_CORRELATION(im1, im2, weightMatrix, newbestx-1, newbesty, score, best_score, newnewbestx, newnewbesty, roi);
				if (newbestx<im2.width()-1 && RESULTS(newbesty,newbestx+1)<-1e5)
					DO_CORRELATION(im1, im2, weightMatrix, newbestx+1, newbesty, score, best_score, newnewbestx, newnewbesty, roi);
				if (newbesty>0 && RESULTS(newbesty-1,newbestx)<-1e5)
					DO_CORRELATION(im1, im2, weightMatrix, newbestx, newbesty-1, score, best_score, newnewbestx, newnewbesty, roi);
				if (newbesty<im2.height()-1 && RESULTS(newbesty+1,newbestx)<-1e5)
					DO_CORRELATION(im1, im2, weightMatrix, newbestx, newbesty+1, score, best_score, newnewbestx, newnewbesty, roi);
			} while (newbestx != newnewbestx || newbesty != newnewbesty);
			// FIXME this could go out of bounds
// JFR_DEBUG("final : " << newnewbestx << "," << newnewbesty << " " << best_score);
			
			bestx = newbestx;
			besty = newbesty;
			
			// TODO interpolate the score as well
			// interpolate x
			
			double a1 = RESULTS(besty,bestx-1), a2 = RESULTS(besty,bestx-0), a3 = RESULTS(besty,bestx+1);
			if (a1 > -1e5 && a3 > -1e5) jmath::parabolicInterpolation(a1,a2,a3, xres); else xres = 0;
// JFR_DEBUG("interpolating " << a1 << " " << a2 << " " << a3 << " gives shift " << xres << " plus " << bestx+0.5);
			xres += bestx+0.5;
			// interpolate y
			a1 = RESULTS(besty-1,bestx), a2 = RESULTS(besty-0,bestx), a3 = RESULTS(besty+1,bestx);
			if (a1 > -1e5 && a3 > -1e5) jmath::parabolicInterpolation(a1,a2,a3, yres); else yres = 0;
// JFR_DEBUG("interpolating " << a1 << " " << a2 << " " << a3 << " gives shift " << yres << " plus " << besty+0.5);
			yres += besty+0.5;
			
			delete[] results;
			return best_score;
		}
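
For reference, the standard three-point parabolic peak refinement, which is presumably what jmath::parabolicInterpolation computes (an assumption; the jmath source is not shown here):

		inline void parabolicInterpolation(double a1, double a2, double a3, double &shift)
		{
			// Vertex of the parabola through (-1,a1), (0,a2), (+1,a3); when a2 is
			// the largest of the three samples, the offset lies in [-0.5, 0.5].
			const double denom = a1 - 2.0 * a2 + a3;
			shift = (denom != 0.0) ? 0.5 * (a1 - a3) / denom : 0.0;
		}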
Example #17
	double Zncc::computeTpl(image::Image const& im1_, image::Image const& im2_, float const* weightMatrix)
	{
		// preconds
		JFR_PRECOND( im1_.depth() == depth, "Image 1 depth is different from the template parameter" );
		JFR_PRECOND( im2_.depth() == depth, "Image 2 depth is different from the template parameter" );
		JFR_PRECOND( im1_.channels() == im2_.channels(), "The channels number of both images are different" );
		JFR_PRECOND( !useWeightMatrix || weightMatrix, "Template parameter tells to use weightMatrix but no one is given" );
		
		// adjust ROIs to match size, assuming that it is reduced when set out of the image
		// FIXME weightMatrix should be a cv::Mat in order to have a ROI too, and to adjust it
		cv::Size size1; cv::Rect roi1 = im1_.getROI(size1);
		cv::Size size2; cv::Rect roi2 = im2_.getROI(size2);
		int dw = roi1.width - roi2.width, dh = roi1.height - roi2.height;
		if (dw != 0)
		{
			cv::Rect &roiA = (dw<0 ? roi1 : roi2), &roiB = (dw<0 ? roi2 : roi1);
			cv::Size &sizeA = (dw<0 ? size1 : size2);
			if (roiA.x == 0) { roiB.x += dw; roiB.width -= dw; } else
			if (roiA.x+roiA.width == sizeA.width) { roiB.width -= dw; }
		}
		if (dh != 0)
		{
			cv::Rect &roiA = (dh<0 ? roi1 : roi2), &roiB = (dh<0 ? roi2 : roi1);
			cv::Size &sizeA = (dh<0 ? size1 : size2);
			if (roiA.y == 0) { roiB.y += dh; roiB.height -= dh; } else
			if (roiA.y+roiA.height == sizeA.height) { roiB.height -= dh; }
		}
		image::Image im1(im1_); im1.setROI(roi1);
		image::Image im2(im2_); im2.setROI(roi2);

		// some variables initialization
		int height = im1.height();
		int width = im1.width();
		int step1 = im1.step1() - width;
		int step2 = im2.step1() - width;
		
		double mean1 = 0., mean2 = 0.;
		double sigma1 = 0., sigma2 = 0., sigma12 = 0.;
		double zncc_sum = 0.;
		double zncc_count = 0.;
		double zncc_total = 0.;
		
		worktype const* im1ptr = reinterpret_cast<worktype const*>(im1.data());
		worktype const* im2ptr = reinterpret_cast<worktype const*>(im2.data());
		
		float const* wptr = weightMatrix;
		double w;
		
		// start the loops
		for(int i = 0; i < height; ++i) 
		{
			for(int j = 0; j < width; ++j) 
			{
				worktype im1v = *(im1ptr++);
				worktype im2v = *(im2ptr++);
				if (useWeightMatrix) w = *(wptr++); else w = 1;
				if (useBornes) zncc_total += w;
				
//std::cout << "will correl ? " << useBornes << ", " << (int)im1v << ", " << (int)im2v << std::endl;
				if (!useBornes || (im1v != borneinf && im1v != bornesup && im2v != borneinf && im2v != bornesup))
				{
//std::cout << "correl one pixel" << std::endl;
#if 0
					double im1vw, im2vw;
					if (useWeightMatrix)
						{ im1vw = im1v * w; im2vw = im2v * w; } else
						{ im1vw = im1v;     im2vw = im2v;     }
					zncc_count += w;
					mean1 += im1vw;
					mean2 += im2vw;
					sigma1 += im1v * im1vw;
					sigma2 += im2v * im2vw;
					zncc_sum += im1v * im2vw;
#else
					zncc_count += w;
					mean1 += im1v * w;
					mean2 += im2v * w;
					sigma1 += im1v * im1v * w;
					sigma2 += im2v * im2v * w;
					zncc_sum += im1v * im2v * w;
#endif
				}
			}
			im1ptr += step1;
			im2ptr += step2;
		}
		
		if (useBornes) if (zncc_count / zncc_total < 0.75)
			{ /*std::cout << "zncc failed: " << zncc_count << "," << zncc_total << std::endl;*/ return -3; }
		
		// finish
		mean1 /= zncc_count;
		mean2 /= zncc_count;
		sigma1 = sigma1/zncc_count - mean1*mean1;
		sigma2 = sigma2/zncc_count - mean2*mean2;
		sigma1 = sigma1 > 0.0 ? sqrt(sigma1) : 0.0; // test for numerical rounding errors to avoid nan
		sigma2 = sigma2 > 0.0 ? sqrt(sigma2) : 0.0;
		sigma12 = sigma1*sigma2;
// std::cout << "normal: zncc_sum " << zncc_sum << ", count " << zncc_count << ", mean12 " << mean1*mean2 << ", sigma12 " << sigma1*sigma2 << std::endl;
		zncc_sum = (sigma12 < 1e-6 ? -1 : (zncc_sum/zncc_count - mean1*mean2) / sigma12);
		
		JFR_ASSERT(zncc_sum >= -1.01, "");
		return zncc_sum;
	}
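
The quantity accumulated above is the standard zero-mean normalized cross-correlation: with (weighted) means m1, m2 and standard deviations s1, s2 over the counted pixels, the returned score is (zncc_sum/zncc_count - m1*m2) / (s1*s2), which lies in [-1, 1]; the early return of -3 flags that more than 25% of the pixels hit the saturation bounds.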
Example #18

  /**
  @brief Detect regions on the image and compute their attributes (description)
  @param image Image.
  @param regions The detected regions and attributes (the caller must delete the allocated data)
  @param mask 8-bit gray image for keypoint filtering (optional).
     Non-zero values depict the region of interest.
  */
  bool Describe
  (
    const image::Image<unsigned char>& image,
    std::unique_ptr<Regions> &regions,
    const image::Image<unsigned char> * mask = nullptr
  ) override
  {
    const int w = image.Width(), h = image.Height();
    // Convert to float in range [0;1]
    const image::Image<float> If(image.GetMat().cast<float>()/255.0f);

    // compute sift keypoints
    Allocate(regions);

    // Build alias to cached data
    SIFT_Regions * regionsCasted = dynamic_cast<SIFT_Regions*>(regions.get());
    {
      using namespace openMVG::features::sift;
      const int supplementary_images = 3;
      // => in order to ensure each gaussian slice is used in the process 3 extra images are required:
      // +1 for dog computation
      // +2 for 3d discrete extrema definition

      HierarchicalGaussianScaleSpace octave_gen(
        params_.num_octaves_,
        params_.num_scales_,
        (params_.first_octave_ == -1)
        ? GaussianScaleSpaceParams(1.6f/2.0f, 1.0f/2.0f, 0.5f, supplementary_images)
        : GaussianScaleSpaceParams(1.6f, 1.0f, 0.5f, supplementary_images));
      octave_gen.SetImage( If );

      std::vector<Keypoint> keypoints;
      keypoints.reserve(5000);
      Octave octave;
      while ( octave_gen.NextOctave( octave ) )
      {
        std::vector< Keypoint > keys;
        // Find Keypoints
        SIFT_KeypointExtractor keypointDetector(
          params_.peak_threshold_ / octave_gen.NbSlice(),
          params_.edge_threshold_);
        keypointDetector(octave, keys);
        // Find Keypoints orientation and compute their description
        Sift_DescriptorExtractor descriptorExtractor;
        descriptorExtractor(octave, keys);

        // Concatenate the found keypoints
        std::move(keys.begin(), keys.end(), std::back_inserter(keypoints));
      }
      for (const auto & k : keypoints)
      {
        // Feature masking
        if (mask)
        {
          const image::Image<unsigned char> & maskIma = *mask;
          if (maskIma(k.y, k.x) == 0)
            continue;
        }

        Descriptor<unsigned char, 128> descriptor;
        descriptor << (k.descr.cast<unsigned char>());
        {
          regionsCasted->Descriptors().emplace_back(descriptor);
          regionsCasted->Features().emplace_back(k.x, k.y, k.sigma, k.theta);
        }
      }
    }
    return true;
  };
Example #19
 /**
  * Put masks to white, all image is considered as valid pixel selection
  *
  * \param[out] maskLeft Mask of the left image (initialized to corresponding image size).
  * \param[out] maskRight  Mask of the right image (initialized to corresponding image size).
  *
  * \return True.
  */
 bool computeMask( image::Image< unsigned char > & maskLeft, image::Image< unsigned char > & maskRight ) override 
 {
   maskLeft.fill( image::WHITE );
   maskRight.fill( image::WHITE );
   return true;
 }
Example #20
    /**
    @brief Detect regions on the image and compute their attributes (description)
    @param image Image.
    @param regions The detected regions and attributes (the caller must delete the allocated data)
    @param mask 8-bit gray image for keypoint filtering (optional).
       Non-zero values depict the region of interest.
    */
    bool Describe(const image::Image<unsigned char>& image,
                  std::unique_ptr<Regions> &regions,
                  const image::Image<unsigned char> * mask = NULL)
    {
        const int w = image.Width(), h = image.Height();
        //Convert to float
        const image::Image<float> If(image.GetMat().cast<float>());

        VlSiftFilt *filt = vl_sift_new(w, h,
                                       _params._num_octaves, _params._num_scales, _params._first_octave);
        if (_params._edge_threshold >= 0)
            vl_sift_set_edge_thresh(filt, _params._edge_threshold);
        if (_params._peak_threshold >= 0)
            vl_sift_set_peak_thresh(filt, 255*_params._peak_threshold/_params._num_scales);

        Descriptor<vl_sift_pix, 128> descr;
        Descriptor<unsigned char, 128> descriptor;

        // Process SIFT computation
        vl_sift_process_first_octave(filt, If.data());

        Allocate(regions);

        // Build alias to cached data
        SIFT_Regions * regionsCasted = dynamic_cast<SIFT_Regions*>(regions.get());
        // reserve some memory for faster keypoint saving
        regionsCasted->Features().reserve(2000);
        regionsCasted->Descriptors().reserve(2000);

        while (true) {
            vl_sift_detect(filt);

            VlSiftKeypoint const *keys  = vl_sift_get_keypoints(filt);
            const int nkeys = vl_sift_get_nkeypoints(filt);

            // Update gradient before launching parallel extraction
            vl_sift_update_gradient(filt);

#ifdef OPENMVG_USE_OPENMP
            #pragma omp parallel for private(descr, descriptor)
#endif
            for (int i = 0; i < nkeys; ++i) {

                // Feature masking
                if (mask)
                {
                    const image::Image<unsigned char> & maskIma = *mask;
                    if (maskIma(keys[i].y, keys[i].x) == 0)
                        continue;
                }

                double angles [4] = {0.0, 0.0, 0.0, 0.0};
                int nangles = 1; // by default (1 upright feature)
                if (_bOrientation)
                {   // compute from 1 to 4 orientations
                    nangles = vl_sift_calc_keypoint_orientations(filt, angles, keys+i);
                }

                for (int q=0 ; q < nangles ; ++q) {
                    vl_sift_calc_keypoint_descriptor(filt, &descr[0], keys+i, angles[q]);
                    const SIOPointFeature fp(keys[i].x, keys[i].y,
                                             keys[i].sigma, static_cast<float>(angles[q]));

                    siftDescToUChar(&descr[0], descriptor, _params._root_sift);
#ifdef OPENMVG_USE_OPENMP
                    #pragma omp critical
#endif
                    {
                        regionsCasted->Descriptors().push_back(descriptor);
                        regionsCasted->Features().push_back(fp);
                    }
                }
            }
            if (vl_sift_process_next_octave(filt))
                break; // Last octave
        }
        vl_sift_delete(filt);

        return true;
    };