Example #1
void buildPyramid_templ(
	CImagePyramid &obj,
	mrpt::utils::CImage &img,
	const size_t nOctaves,
	const bool smooth_halves,
	const bool convert_grayscale)
{
	ASSERT_ABOVE_(nOctaves,0)

	//TImageSize  img_size = img.getSize();
	obj.images.resize(nOctaves);

	// First octave: Just copy the image:
	if (convert_grayscale && img.isColor())
	{
		// In this case we have to convert to grayscale, so FASTLOAD doesn't really matter:
		img.grayscale(obj.images[0]);
	}
	else
	{
		// No need to convert to grayscale OR image already is grayscale:
		if (FASTLOAD)
		     obj.images[0].copyFastFrom(img);  // Fast copy -> "move", destroying source.
		else obj.images[0] = img;  // Normal copy
	}

	// Rest of octaves, if any:
	for (size_t o=1;o<nOctaves;o++)
	{
		if (smooth_halves)
		     obj.images[o-1].scaleHalfSmooth(obj.images[o]);
		else obj.images[o-1].scaleHalf(obj.images[o]);
	}
}
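A minimal usage sketch for the pyramid builder above. It assumes the public CImagePyramid::buildPyramid() wrapper forwards to this template; the input file name is hypothetical:

#include <cstdio>
#include <mrpt/utils/CImage.h>
#include <mrpt/vision/CImagePyramid.h>

void pyramid_demo()
{
	mrpt::utils::CImage img;
	if (!img.loadFromFile("input.jpg")) return;  // hypothetical input file

	mrpt::vision::CImagePyramid pyr;
	// 4 octaves, smooth before halving, convert to grayscale:
	pyr.buildPyramid(img, 4, true /*smooth_halves*/, true /*convert_grayscale*/);

	// images[0] is full resolution, images[1] half size, and so on:
	for (size_t o = 0; o < pyr.images.size(); o++)
		printf("octave %u: %ux%u\n", (unsigned)o,
			(unsigned)pyr.images[o].getWidth(),
			(unsigned)pyr.images[o].getHeight());
}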
Example #2
void CFormPlayVideo::drawHorzRules(mrpt::utils::CImage& img)
{
	if (!cbDrawStereoRules->IsChecked()) return;

	img.forceLoad();
	const size_t Ay = edHorzRuleSpace->GetValue();
	const size_t h = img.getHeight();
	const size_t w = img.getWidth();

	for (size_t y = Ay; y < h; y += Ay)
		img.line(0, y, w - 1, y, mrpt::utils::TColor::white());
}
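The same rule-drawing loop without the wxWidgets controls, as a self-contained sketch (the zero-spacing guard is added here; the original relies on the spin control never returning 0):

#include <mrpt/utils/CImage.h>

void drawHorzRules(mrpt::utils::CImage& img, size_t Ay)
{
	if (Ay == 0) return;  // guard: y += 0 would never terminate
	const size_t h = img.getHeight();
	const size_t w = img.getWidth();
	for (size_t y = Ay; y < h; y += Ay)
		img.line(0, y, w - 1, y, mrpt::utils::TColor::white());
}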
Example #3
/* ----------------------------------------------------------
						writeImage
   ---------------------------------------------------------- */
bool CVideoFileWriter::writeImage(const mrpt::utils::CImage& img) const
{
	if (!m_video.get())
		return false;

	if ((size_t)m_img_size.x!=img.getWidth() || (size_t)m_img_size.y!=img.getHeight())
	{
		std::cout << format(
			"[CVideoFileWriter::writeImage] Error: video frame size is %ix%i "
			"but image is %ux%u",
			m_img_size.x, m_img_size.y,
			(unsigned)img.getWidth(), (unsigned)img.getHeight())
			<< std::endl;
		return false;
	}

#if MRPT_HAS_OPENCV
	return 0!= cvWriteFrame( M_WRITER, img.getAs<IplImage>() );
#else
	return false;
#endif
}
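A hedged sketch of the loop that typically drives writeImage(). It assumes CVideoFileWriter::open(out_file, fps, frameSize, fourcc) as declared in the MRPT 1.x header; the codec string and file name are placeholders:

#include <vector>
#include <mrpt/utils/CImage.h>
#include <mrpt/vision/CVideoFileWriter.h>

bool record_demo(const std::vector<mrpt::utils::CImage>& frames)
{
	if (frames.empty()) return false;

	mrpt::vision::CVideoFileWriter writer;
	const mrpt::utils::TImageSize size(
		frames[0].getWidth(), frames[0].getHeight());

	if (!writer.open("out.avi", 30.0 /*fps*/, size, "XVID"))
		return false;

	// writeImage() rejects frames whose size differs from the one passed
	// to open(), so every frame must share the same resolution:
	for (size_t i = 0; i < frames.size(); i++)
		if (!writer.writeImage(frames[i])) return false;

	return true;  // the destructor closes the video file
}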
Example #4
/************************************************************************************************
*							extractFeaturesKLT
************************************************************************************************/
void CFeatureExtraction::extractFeaturesKLT(
		const mrpt::utils::CImage			&inImg,
		CFeatureList			&feats,
		unsigned int			init_ID,
		unsigned int			nDesiredFeatures,
		const TImageROI			&ROI) const
{
//#define VERBOSE_TIMING

#ifdef VERBOSE_TIMING
	CTicTac tictac;
#endif
		MRPT_START

		#if MRPT_HAS_OPENCV
        const unsigned int MAX_COUNT = 300;

		// -----------------------------------------------------------------
		// Create OpenCV Local Variables
		// -----------------------------------------------------------------
		int				count = 0;
		int				nPts;

#ifdef VERBOSE_TIMING
		tictac.Tic();
#endif
		const cv::Mat img( cv::cvarrToMat( inImg.getAs<IplImage>() ) );

#ifdef VERBOSE_TIMING
		cout << "[KLT] Attach: " << tictac.Tac()*1000.0f << endl;
#endif
		const CImage inImg_gray( inImg, FAST_REF_OR_CONVERT_TO_GRAY );
		const cv::Mat cGrey( cv::cvarrToMat( inImg_gray.getAs<IplImage>() ) );

		nPts = (nDesiredFeatures == 0) ? MAX_COUNT : nDesiredFeatures;		// Number of points to find

		// -----------------------------------------------------------------
		// Select good features with subpixel accuracy (USING HARRIS OR KLT)
		// -----------------------------------------------------------------
		const bool use_harris = ( options.featsType == featHarris );

#ifdef VERBOSE_TIMING
		tictac.Tic();
#endif
		std::vector<cv::Point2f> points;
		cv::goodFeaturesToTrack(
			cGrey,points, nPts, 
			(double)options.harrisOptions.threshold,    // for rejecting weak local maxima ( with min_eig < threshold*max(eig_image) )
			(double)options.harrisOptions.min_distance, // minimum distance between features
			cv::noArray(), // mask
			3, // blocksize
			use_harris, /* harris */
			options.harrisOptions.k 
			);
		count = (int)points.size();							// Actual number of features found
#ifdef VERBOSE_TIMING
		cout << "[KLT] Find feats: " << tictac.Tac()*1000.0f << endl;
#endif

		if( nDesiredFeatures > 0 && count < nPts )
			cout << "\n[WARNING][extractFeaturesKLT]: Only " << count << " of " << nDesiredFeatures << " points could be extracted in the image." << endl;

		if( options.FIND_SUBPIXEL )
		{
#ifdef VERBOSE_TIMING
			tictac.Tic();
#endif
			// Subpixel interpolation
			cv::cornerSubPix(cGrey,points,
				cv::Size(3,3), cv::Size(-1,-1),
				cv::TermCriteria( CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 10, 0.05 ));

#ifdef VERBOSE_TIMING
			cout << "[KLT] subpixel: " << tictac.Tac()*1000.0f << endl;
#endif
		}

		// -----------------------------------------------------------------
		// Fill output structure
		// -----------------------------------------------------------------
#ifdef VERBOSE_TIMING
		tictac.Tic();
#endif

		feats.clear();
		unsigned int	borderFeats = 0;
		unsigned int	nCFeats		= init_ID;
		int				i			= 0;
		const int		limit		= min( nPts, count );
		int				offset		= (int)this->options.patchSize/2 + 1;
		unsigned int	imgH		= inImg.getHeight();
		unsigned int	imgW		= inImg.getWidth();

		while( i < limit )
		{
			const int xBorderInf = (int)floor( points[i].x - options.patchSize/2 );
			const int xBorderSup = (int)floor( points[i].x + options.patchSize/2 );
			const int yBorderInf = (int)floor( points[i].y - options.patchSize/2 );
			const int yBorderSup = (int)floor( points[i].y + options.patchSize/2 );

			if( options.patchSize==0 || ( (xBorderSup < (int)imgW) && (xBorderInf > 0) && (yBorderSup < (int)imgH) && (yBorderInf > 0) ) )
			{
				CFeaturePtr ft = CFeature::Create();

				ft->type		= featKLT;
				ft->x			= points[i].x;				// X position
				ft->y			= points[i].y;				// Y position
				ft->track_status = status_TRACKED;		    // Feature Status
				ft->response	= 0.0;						// A value proportional to the quality of the feature (unused yet)
				ft->ID			= nCFeats++;				// Feature ID into extraction
				ft->patchSize	= options.patchSize;		// The size of the feature patch

				if( options.patchSize > 0 )
				{
					inImg.extract_patch(
						ft->patch,
						round( ft->x ) - offset,
						round( ft->y ) - offset,
						options.patchSize,
						options.patchSize );				// Image patch surrounding the feature
				}

				feats.push_back( ft );

			} // end if
			else
				borderFeats++;

			i++;
		} // end while

#ifdef VERBOSE_TIMING
		cout << "[KLT] Create output: " << tictac.Tac()*1000.0f << endl;
#endif


		#else
			THROW_EXCEPTION("MRPT has been compiled without OpenCV (MRPT_HAS_OPENCV=0)!");
		#endif

		MRPT_END

} // end of function
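A short usage sketch for the extractor above, assuming the standard CFeatureExtraction::detectFeatures() entry point (which dispatches on options.featsType); the option values are illustrative only:

#include <cstdio>
#include <mrpt/vision/CFeatureExtraction.h>

void klt_demo(const mrpt::utils::CImage& img)
{
	using namespace mrpt::vision;

	CFeatureExtraction fe;
	fe.options.featsType = featKLT;
	fe.options.harrisOptions.threshold    = 0.005;  // reject weak local maxima
	fe.options.harrisOptions.min_distance = 10;     // pixels between features
	fe.options.patchSize = 11;                      // 0 disables patch extraction

	CFeatureList feats;
	fe.detectFeatures(img, feats, 0 /*init_ID*/, 200 /*nDesiredFeatures*/);

	printf("Extracted %u KLT features\n", (unsigned)feats.size());
}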
Example #5
// N_fast = 9, 10, 12
void  CFeatureExtraction::extractFeaturesFASTER_N(
	const int					N_fast,
	const mrpt::utils::CImage	& inImg,
	CFeatureList			    & feats,
	unsigned int			    init_ID,
	unsigned int			    nDesiredFeatures,
	const TImageROI			    & ROI )  const
{
	MRPT_START

#if MRPT_HAS_OPENCV
	// Make sure we operate on a gray-scale version of the image:
	const CImage inImg_gray( inImg, FAST_REF_OR_CONVERT_TO_GRAY );

	const IplImage *IPL = inImg_gray.getAs<IplImage>();

	TSimpleFeatureList corners;
	TFeatureType type_of_this_feature;

	switch (N_fast)
	{
	case 9:  fast_corner_detect_9 (IPL,corners, options.FASTOptions.threshold, 0, NULL); type_of_this_feature=featFASTER9; break;
	case 10: fast_corner_detect_10(IPL,corners, options.FASTOptions.threshold, 0, NULL); type_of_this_feature=featFASTER10; break;
	case 12: fast_corner_detect_12(IPL,corners, options.FASTOptions.threshold, 0, NULL); type_of_this_feature=featFASTER12; break;
	default:
		THROW_EXCEPTION("Only the 9,10,12 FASTER detectors are implemented.")
		break;
	};

	// *All* the features have been extracted.
	const size_t N = corners.size();

	// Now:
	//  1) Sort them by "response": It's ~100 times faster to sort a list of
	//      indices "sorted_indices" than sorting directly the actual list of features "corners"
	std::vector<size_t> sorted_indices(N);
	for (size_t i=0;i<N;i++)  sorted_indices[i]=i;

	// Use KLT response
	if (options.FASTOptions.use_KLT_response ||
		nDesiredFeatures!=0 // If the user wants us to limit the number of features, we need to do it according to some quality measure
		)
	{
		const int KLT_half_win = 4;
		const int max_x = inImg_gray.getWidth() - 1 - KLT_half_win;
		const int max_y = inImg_gray.getHeight() - 1 - KLT_half_win;

		for (size_t i=0;i<N;i++)
		{
			const int x = corners[i].pt.x;
			const int y = corners[i].pt.y;
			if (x>KLT_half_win && y>KLT_half_win && x<=max_x && y<=max_y)
					corners[i].response = inImg_gray.KLT_response(x,y,KLT_half_win);
			else	corners[i].response = -100;
		}

		std::sort( sorted_indices.begin(), sorted_indices.end(), KeypointResponseSorter<TSimpleFeatureList>(corners) );
	}
	else
	{
		for (size_t i=0;i<N;i++)
			corners[i].response = 0;
	}

	//  2) Filter by "min-distance" (in options.FASTOptions.min_distance)
	//  3) Convert to MRPT CFeatureList format.
	// Steps 2 & 3 are done together in the while() below.
	// The "min-distance" filter is done by means of a 2D binary matrix where each cell is marked when one
	// feature falls within it. This is not exactly the same as a pure "min-distance" filter but is pretty close,
	// and for large numbers of features it is much faster than a brute-force search or kd-trees.
	// (An intermediate approach would be a mask image updated for each accepted feature, etc.)

	const bool do_filter_min_dist = options.FASTOptions.min_distance>1;

	// Use half the min-distance, since we'll later mark as occupied the ranges [i-1,i+1] for a feature at "i"
	const unsigned int occupied_grid_cell_size = options.FASTOptions.min_distance/2.0;
	const float occupied_grid_cell_size_inv = 1.0f/occupied_grid_cell_size;

	unsigned int grid_lx = !do_filter_min_dist ? 1 : (unsigned int)(1 + inImg.getWidth() * occupied_grid_cell_size_inv);
	unsigned int grid_ly = !do_filter_min_dist ? 1 : (unsigned int)(1 + inImg.getHeight() * occupied_grid_cell_size_inv );

	mrpt::math::CMatrixBool  occupied_sections(grid_lx,grid_ly);  // See the comments above for an explanation.
	occupied_sections.fillAll(false);


	unsigned int	nMax		= (nDesiredFeatures!=0 && N > nDesiredFeatures) ? nDesiredFeatures : N;
	const int 		offset		= (int)this->options.patchSize/2 + 1;
	const int		size_2		= options.patchSize/2;
	const size_t 	imgH		= inImg.getHeight();
	const size_t 	imgW		= inImg.getWidth();
	unsigned int	i			= 0;
	unsigned int	cont		= 0;
	TFeatureID		nextID		= init_ID;

    if( !options.addNewFeatures )
        feats.clear();


	while( cont != nMax && i!=N )
	{
		// Take the next feature from the ordered list of good features:
		const TSimpleFeature &feat = corners[ sorted_indices[i] ];
		i++;

		// Patch out of the image??
		const int xBorderInf =  feat.pt.x - size_2;
		const int xBorderSup =  feat.pt.x + size_2;
		const int yBorderInf =  feat.pt.y - size_2;
		const int yBorderSup =  feat.pt.y + size_2;

		if (!( xBorderSup < (int)imgW && xBorderInf > 0 && yBorderSup < (int)imgH && yBorderInf > 0 ))
			continue; // nope, skip.

		if (do_filter_min_dist)
		{
			// Check the min-distance:
			const size_t section_idx_x = size_t(feat.pt.x * occupied_grid_cell_size_inv);
			const size_t section_idx_y = size_t(feat.pt.y * occupied_grid_cell_size_inv);

			if (occupied_sections(section_idx_x,section_idx_y))
				continue; // Already occupied! skip.

			// Mark section as occupied
			occupied_sections.set_unsafe(section_idx_x,section_idx_y, true);
			if (section_idx_x>0)	occupied_sections.set_unsafe(section_idx_x-1,section_idx_y, true);
			if (section_idx_y>0)	occupied_sections.set_unsafe(section_idx_x,section_idx_y-1, true);
			if (section_idx_x<grid_lx-1)	occupied_sections.set_unsafe(section_idx_x+1,section_idx_y, true);
			if (section_idx_y<grid_ly-1)	occupied_sections.set_unsafe(section_idx_x,section_idx_y+1, true);
		}

		// All tests passed: add new feature:
		CFeaturePtr ft		= CFeature::Create();
		ft->type			= type_of_this_feature;
		ft->ID				= nextID++;
		ft->x				= feat.pt.x;
		ft->y				= feat.pt.y;
		ft->response		= feat.response;
		ft->orientation		= 0;
		ft->scale			= 1;
		ft->patchSize		= options.patchSize;		// The size of the feature patch

		if( options.patchSize > 0 )
		{
			inImg.extract_patch(
				ft->patch,
				round( ft->x ) - offset,
				round( ft->y ) - offset,
				options.patchSize,
				options.patchSize );						// Image patch surrounding the feature
		}
		feats.push_back( ft );
		++cont;
	}

#endif
	MRPT_END
}
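Both extractors sort a vector of indices rather than the feature structs themselves (see the comment in step 1 above). A generic sketch of that trick; MRPT's KeypointResponseSorter does essentially this:

#include <algorithm>
#include <vector>

// Sorting N size_t indices moves far less memory than sorting the
// feature structs; the original list is then visited via idx[i].
template <typename FeatList>
std::vector<size_t> sort_indices_by_response(const FeatList& feats)
{
	std::vector<size_t> idx(feats.size());
	for (size_t i = 0; i < idx.size(); i++) idx[i] = i;

	struct ByResponse {
		const FeatList& f;
		ByResponse(const FeatList& f_) : f(f_) {}
		bool operator()(size_t a, size_t b) const
		{ return f[a].response > f[b].response; }  // descending
	};
	std::sort(idx.begin(), idx.end(), ByResponse(feats));
	return idx;
}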
Example #6
/* --------------------------------------------------------
					retrieveFrame
   -------------------------------------------------------- */
bool CFFMPEG_InputStream::retrieveFrame( mrpt::utils::CImage &out_img )
{
#if MRPT_HAS_FFMPEG
	if (!this->isOpen()) return false;

	TFFMPEGContext *ctx = MY_FFMPEG_STATE;

    AVPacket        packet;
    int             frameFinished;

    while(av_read_frame(ctx->pFormatCtx, &packet)>=0)
    {
        // Is this a packet from the video stream?
        if(packet.stream_index==ctx->videoStream)
        {
            // Decode video frame
#if LIBAVCODEC_VERSION_MAJOR>52 || (LIBAVCODEC_VERSION_MAJOR==52 && LIBAVCODEC_VERSION_MINOR>=72)
            avcodec_decode_video2(
				ctx->pCodecCtx,
				ctx->pFrame,
				&frameFinished,
                &packet);
#else
            avcodec_decode_video(
				ctx->pCodecCtx,
				ctx->pFrame,
				&frameFinished,
                packet.data,
                packet.size);
#endif
            // Did we get a video frame?
            if(frameFinished)
            {
                // Convert the image from its native format to RGB:
				ctx->img_convert_ctx = sws_getCachedContext(
					ctx->img_convert_ctx,
					ctx->pCodecCtx->width,
					ctx->pCodecCtx->height,
					ctx->pCodecCtx->pix_fmt,
					ctx->pCodecCtx->width,
					ctx->pCodecCtx->height,
					m_grab_as_grayscale ? PIX_FMT_GRAY8 : PIX_FMT_BGR24,   // BGR vs. RGB for OpenCV
					SWS_BICUBIC,
					NULL, NULL, NULL);

				sws_scale(
					ctx->img_convert_ctx,
					ctx->pFrame->data,
					ctx->pFrame->linesize,0,
					ctx->pCodecCtx->height,
					ctx->pFrameRGB->data,
					ctx->pFrameRGB->linesize);

				/*	JL: Old code (deprecated)
				img_convert(
					(AVPicture *)ctx->pFrameRGB,
					m_grab_as_grayscale ? PIX_FMT_GRAY8 : PIX_FMT_BGR24,   // BGR vs. RGB for OpenCV
                    (AVPicture*)ctx->pFrame,
                    ctx->pCodecCtx->pix_fmt,
                    ctx->pCodecCtx->width,
                    ctx->pCodecCtx->height
                    ); */

				//std::cout << "[retrieveFrame] Generating image: " << ctx->pCodecCtx->width << "x" << ctx->pCodecCtx->height << std::endl;
				//std::cout << "  linsize: " << ctx->pFrameRGB->linesize[0] << std::endl;

				if( ctx->pFrameRGB->linesize[0]!= ((m_grab_as_grayscale ? 1:3)*ctx->pCodecCtx->width) )
					THROW_EXCEPTION("FIXME: linesize!=width case not handled yet.")

				out_img.loadFromMemoryBuffer(
					ctx->pCodecCtx->width,
					ctx->pCodecCtx->height,
					!m_grab_as_grayscale, // Color
					ctx->pFrameRGB->data[0]
					);

				// Free the packet that was allocated by av_read_frame
				av_free_packet(&packet);
				return true;
            }
        }

        // Free the packet that was allocated by av_read_frame
        av_free_packet(&packet);
    }

    return false; // Read error or end of file
#else
	return false;
#endif
}
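A hedged sketch of a full decode loop around retrieveFrame(), assuming CFFMPEG_InputStream::openURL() as declared in the MRPT 1.x header (the input path is a placeholder; a URL or camera stream works the same way):

#include <cstdio>
#include <mrpt/hwdrivers/CFFMPEG_InputStream.h>
#include <mrpt/utils/CImage.h>

void playback_demo()
{
	mrpt::hwdrivers::CFFMPEG_InputStream video;
	if (!video.openURL("input.avi", false /*grab_as_grayscale*/))
		return;

	mrpt::utils::CImage frame;
	size_t n = 0;
	// retrieveFrame() returns false on read error or end of stream:
	while (video.retrieveFrame(frame))
		n++;

	printf("Decoded %u frames\n", (unsigned)n);
	video.close();
}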
Example #7
/**  Stage2 operations:
  *   - Detect features on each image and on each scale.
  */
void CStereoOdometryEstimator::stage2_detect_features(
		CStereoOdometryEstimator::TImagePairData::img_data_t	& img_data,
		mrpt::utils::CImage										& gui_image,
		bool													update_dyn_thresholds )
{
	using namespace mrpt::vision;

	m_profiler.enter("_stg2");

	// :: Resize output containers:
	const size_t nOctaves = img_data.pyr.images.size();
	ASSERTDEB_(nOctaves>0)

	vector<size_t> nFeatsPassingKLTPerOctave(nOctaves);
    img_data.pyr_feats.resize(nOctaves);
    img_data.pyr_feats_index.resize(nOctaves);
    img_data.pyr_feats_kps.resize(nOctaves);
    img_data.pyr_feats_desc.resize(nOctaves);

	vector<size_t> kps_to_detect(nOctaves);			// number of kps to detect in each octave
	kps_to_detect[0] = size_t(params_detect.orb_nfeats*(2*nOctaves)/(std::pow(2,nOctaves)-1));
	for( size_t octave = 1; octave < nOctaves; ++octave )
		kps_to_detect[octave] = size_t(round(kps_to_detect[0]/std::pow(2,octave)));

	// :: For the GUI thread
	m_next_gui_info->stats_feats_per_octave.resize(nOctaves); // Reserve size for stats
    m_next_gui_info->stats_FAST_thresholds_per_octave.resize(nOctaves);

	// :: Detection parameters
	// FASTER METHOD --------------------
	// - Evaluate the KLT response of all features to discard those in texture-less zones
    const unsigned int KLT_win	= params_detect.KLT_win;
    const double minimum_KLT_response	= params_detect.minimum_KLT_response;
	// ----------------------------------

	// size_t num_feats_this_octave; 

	// :: Main loop
	for( size_t octave = 0; octave < nOctaves; ++octave )
	{
		// - Image information
        Mat input_im = cv::cvarrToMat(img_data.pyr.images[octave].getAs<IplImage>());
		const mrpt::utils::TImageSize img_size = img_data.pyr.images[octave].getSize();

		// - Profile section name
		const std::string sProfileName = mrpt::format("stg2.detect.oct=%u",static_cast<unsigned int>(octave));

		// - Auxiliary variables that store the preliminary detections (before NMS)
		TKeyPointList	feats_vector;
		Mat				desc_aux;

		// ***********************************
		// KLT method (use ORB feature vector, no descriptor)
		// ***********************************
		if( params_detect.detect_method == TDetectParams::dmKLT )
		{
			m_profiler.enter(sProfileName.c_str());

			// detect Shi&Tomasi keypoints
			goodFeaturesToTrack(
				input_im,					// image
				feats_vector,				// output feature vector
				kps_to_detect[octave],		// params_detect.orb_nfeats,	// number of features to detect
				0.01,						// quality level
				20);						// minimum distance
			
			desc_aux = Mat();				// no descriptor

			m_profiler.leave(sProfileName.c_str());
		}
		// ***********************************
		// ORB method
		// ***********************************
		else if( params_detect.detect_method == TDetectParams::dmORB )
		{
			// ** NOTE ** in this case, nOctaves should be 1 (set in stage1)
			const size_t n_feats_to_extract = 
				params_detect.non_maximal_suppression ? 
					1.5*params_detect.orb_nfeats : 
					params_detect.orb_nfeats; // if non-max-sup is ON extract more features to get approx the number of desired output feats.

			m_profiler.enter(sProfileName.c_str());
			
#if CV_MAJOR_VERSION < 3  // OpenCV < 3.0.0
			ORB orbDetector( 
				n_feats_to_extract,			// number of ORB features to extract
				1.2,						// scale difference
				params_detect.orb_nlevels,  // number of levels
				31,							// edgeThreshold
				0,							// firstLevel
				2,							// WTA_K
				ORB::HARRIS_SCORE,			// scoreType
                31);						// patchSize

			// detect keypoints and descriptors
			orbDetector( input_im, Mat(), feats_vector, desc_aux );  // all the scales in the same call
#else
			Ptr<cv::ORB> orbDetector = cv::ORB::create(
				n_feats_to_extract,			// number of ORB features to extract
				1.2,						// scale difference
				params_detect.orb_nlevels,  // number of levels
				31,							// edgeThreshold
				0,							// firstLevel
				2,							// WTA_K
				ORB::HARRIS_SCORE,			// scoreType
                31,							// patchSize
                m_current_fast_th );		// fast threshold

			orbDetector->detectAndCompute( input_im, Mat(), feats_vector, desc_aux );	// all the scales in the same call
#endif

			m_profiler.leave(sProfileName.c_str());
		}

		// ***********************************
		// FAST+ORB method
		// ***********************************
		else if( params_detect.detect_method == TDetectParams::dmFAST_ORB )
		{
			m_profiler.enter(sProfileName.c_str());
#if CV_MAJOR_VERSION < 3  // OpenCV < 3.0.0
			cv::FastFeatureDetector(m_current_fast_th).detect( input_im, feats_vector );	// detect keypoints
			MRPT_TODO("Perform non-maximal suppression here -- avoids computing ORB descriptors which are going to be rejected")
			ORB().operator()(input_im, Mat(), feats_vector, desc_aux, true );				// extract descriptors
#else
			Ptr<cv::FastFeatureDetector> fastDetector = cv::FastFeatureDetector::create( m_current_fast_th );
			fastDetector->detect( input_im, feats_vector );
			cv::ORB::create()->compute( input_im, feats_vector, desc_aux );
#endif
			m_profiler.leave(sProfileName.c_str());
		}
		// ***********************************
		// FASTER method (no descriptor unless specified otherwise)
		// ***********************************
		else if( params_detect.detect_method == TDetectParams::dmFASTER )
		{
			// Use a dynamic threshold to maintain a target number of features per square pixel.
			if( m_threshold.size() != nOctaves ) 
				m_threshold.assign(nOctaves, params_detect.initial_FAST_threshold);

			m_profiler.enter(sProfileName.c_str());

            CFeatureExtraction::detectFeatures_SSE2_FASTER12(
                img_data.pyr.images[octave],
                img_data.pyr_feats[octave],
                m_threshold[octave],
                false,										// don't append to list, overwrite it
                octave,
                & img_data.pyr_feats_index[octave] );		// row-indexed list of features

            const size_t nFeats = img_data.pyr_feats[octave].size();

			if( update_dyn_thresholds )
            {
                // Compute feature density & adjust dynamic threshold:
                const double feats_density = nFeats / static_cast<double>(img_size.x * img_size.y);

                if( feats_density < 0.8*params_detect.target_feats_per_pixel )
                    m_threshold[octave] = std::max(1, m_threshold[octave]-1);
                else if( feats_density > 1.2*params_detect.target_feats_per_pixel )
                    m_threshold[octave] = m_threshold[octave]+1;

                // Save stats for the GUI:
                m_next_gui_info->stats_feats_per_octave[octave] = nFeats;
                m_next_gui_info->stats_FAST_thresholds_per_octave[octave] = m_threshold[octave];
            }

            // compute KLT response
            const std::string subSectionName = mrpt::format("stg2.detect.klt.oct=%u",static_cast<unsigned int>(octave));
            m_profiler.enter(subSectionName.c_str());

            const TImageSize img_size_min( KLT_win+1, KLT_win+1 );
            const TImageSize img_size_max( img_size.x-KLT_win-1, img_size.y-KLT_win-1 );

            size_t nPassed = 0; // Number of feats in this octave that pass the KLT threshold (for stats only)

            for (size_t i=0;i<img_data.pyr_feats[octave].size();i++)
            {
                TSimpleFeature &f = img_data.pyr_feats[octave][i];
                const TPixelCoord pt = f.pt;
                if (pt.x>=img_size_min.x && pt.y>=img_size_min.y && pt.x<img_size_max.x && pt.y<img_size_max.y) {
                     f.response = img_data.pyr.images[octave].KLT_response(pt.x,pt.y,KLT_win);
                     if (f.response>=minimum_KLT_response) nPassed++;
                }
                else f.response = 0;
            } // end-for

			// convert to TKeyPointList (opencv compatible)
			m_convert_featureList_to_keypointList( img_data.pyr_feats[octave], feats_vector );

			m_profiler.leave(sProfileName.c_str()); // end detect
		}
		else
			THROW_EXCEPTION("[sVO -- Stg2: Detect] ERROR: Unknown detection method")

		// ***********************************
		// Non-maximal suppression
		// ***********************************
		if( params_detect.non_maximal_suppression )
		{
			if( params_detect.nmsMethod == TDetectParams::nmsmStandard )
			{
				const size_t imgH = input_im.rows;
				const size_t imgW = input_im.cols;
				vector<bool> dummy;
				m_non_max_sup( 
					kps_to_detect[octave], // params_detect.orb_nfeats
					feats_vector, 
					desc_aux, 
					img_data.pyr_feats_kps[octave], 
					img_data.pyr_feats_desc[octave], 
					imgH, imgW,
					dummy );
			}
			else if( params_detect.nmsMethod == TDetectParams::nmsmAdaptive )
			{
				m_adaptive_non_max_sup( 
					kps_to_detect[octave], // params_detect.orb_nfeats*/
					feats_vector, 
					desc_aux, 
					img_data.pyr_feats_kps[octave], 
					img_data.pyr_feats_desc[octave] );
			}
			else
				THROW_EXCEPTION("[sVO -- Stg2: Detect] Invalid non-maximal-suppression method." );
		} // end-if-non-max-sup
		else
		{
			feats_vector.swap(img_data.pyr_feats_kps[octave]);
			img_data.pyr_feats_desc[octave] = desc_aux;					// this should be fast (just copy the header)
		}

		// update indexes here
		m_update_indexes( img_data, octave, true );

        // gui info
		m_next_gui_info->stats_feats_per_octave[octave] = 
			nFeatsPassingKLTPerOctave[octave] = img_data.pyr_feats_kps[octave].size();

	 } // end-for-octaves

	if( params_gui.show_gui && params_gui.draw_all_raw_feats )
	{
		// (It's almost as efficient to directly draw these small feature marks at this point
		// rather than send all the info to the gui thread and then draw there. A quick test shows
		// a gain of 75us -> 50us only, so don't optimize unless efficiency pushes really hard).
		m_profiler.enter("stg2.draw_feats");

        for (size_t octave=0;octave<nOctaves;octave++)
        {
			const TKeyPointList & f1 = img_data.pyr_feats_kps[octave];
            const size_t n1 = f1.size();

            const bool org_img_color	= gui_image.isColor();
            unsigned char* ptr1			= gui_image.get_unsafe(0,0);
            const size_t img1_stride	= gui_image.getRowStride();
            for(size_t i=0;i<n1;++i)
            {
                const int x=f1[i].pt.x; const int y=f1[i].pt.y;
                unsigned char* ptr = ptr1 + img1_stride*y + (org_img_color ? 3*x:x);
                if (org_img_color) {
                    *ptr++ = 0x00;
                    *ptr++ = 0x00;
                    *ptr++ = 0xFF;
                }
                else {
                    *ptr = 0xFF;
                }
            } // end-for
        } // end-for

		m_profiler.leave("stg2.draw_feats");
	} // end-if

    // for the GUI thread
    string sPassKLT = "", sDetect = "";
    for( size_t i=0;i<nOctaves;i++ )
	{
        sPassKLT += mrpt::format( "%u/",static_cast<unsigned int>(nFeatsPassingKLTPerOctave[i]) );
        sDetect  += mrpt::format( "%u/",static_cast<unsigned int>(img_data.pyr_feats_kps[i].size()) );
	}

    string aux = mrpt::format( "\n%s feats (%s passed KLT)", sDetect.c_str(), sPassKLT.c_str() );
    m_next_gui_info->text_msg_from_detect += aux;

	m_profiler.leave("_stg2");
}
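The dynamic FAST threshold in the dmFASTER branch is a small bang-bang controller on feature density. The same rule, isolated as a sketch (the 0.8/1.2 dead band matches the code above):

#include <algorithm>
#include <cstddef>

// Nudge the FAST threshold so the detector tracks a target density of
// features per pixel; the +/-20% dead band avoids oscillating every frame.
int update_fast_threshold(
	int threshold, size_t n_feats, size_t img_w, size_t img_h,
	double target_feats_per_pixel)
{
	const double density = n_feats / static_cast<double>(img_w * img_h);

	if (density < 0.8 * target_feats_per_pixel)
		return std::max(1, threshold - 1);  // too few feats: lower the bar
	if (density > 1.2 * target_feats_per_pixel)
		return threshold + 1;               // too many: raise it
	return threshold;                       // inside the dead band: keep it
}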
Example #8
/************************************************************************************************
*								extractFeaturesFAST												*
************************************************************************************************/
void CFeatureExtraction::extractFeaturesFAST(
	const mrpt::utils::CImage& inImg, CFeatureList& feats, unsigned int init_ID,
	unsigned int nDesiredFeatures, const TImageROI& ROI,
	const CMatrixBool* mask) const
{
	MRPT_UNUSED_PARAM(ROI);
	MRPT_START

#if MRPT_HAS_OPENCV
#if MRPT_OPENCV_VERSION_NUM < 0x210
	THROW_EXCEPTION("This function requires OpenCV >= 2.1.0")
#else

	using namespace cv;

	vector<KeyPoint> cv_feats;  // The opencv keypoint output vector

	// Make sure we operate on a gray-scale version of the image:
	const CImage inImg_gray(inImg, FAST_REF_OR_CONVERT_TO_GRAY);

// JL: Instead of
//	int aux = options.FASTOptions.threshold; ....
//  It's better to use an adaptive threshold, controlled from our caller
//  outside.

#if MRPT_OPENCV_VERSION_NUM >= 0x211

	//    cv::Mat *mask ;
	//    if( _mask )
	//       mask = static_cast<cv::Mat*>(_mask);

	const Mat theImg = cvarrToMat(inImg_gray.getAs<IplImage>());

	cv::Mat cvMask;
	if (options.useMask)
	{
		cout << "using mask" << endl;
		size_t maskW = mask->getColCount(), maskH = mask->getRowCount();
		ASSERT_(
			maskW == inImg_gray.getWidth() && maskH == inImg_gray.getHeight());

		// Convert Mask into CV type
		cvMask = cv::Mat::ones(maskH, maskW, CV_8UC1);
		for (int ii = 0; ii < int(maskW); ++ii)
			for (int jj = 0; jj < int(maskH); ++jj)
			{
				if (!mask->get_unsafe(jj, ii))
				{
					// cv::Mat::at(row, col): row = jj, col = ii
					cvMask.at<uchar>(jj, ii) = 0;
				}
			}
	}

#if MRPT_OPENCV_VERSION_NUM < 0x300
	FastFeatureDetector fastDetector(
		options.FASTOptions.threshold, options.FASTOptions.nonmax_suppression);
	fastDetector.detect(theImg, cv_feats);
#else
	Ptr<cv::FastFeatureDetector> fastDetector = cv::FastFeatureDetector::create(
		options.FASTOptions.threshold, options.FASTOptions.nonmax_suppression);
	fastDetector->detect(theImg, cv_feats);
#endif

#elif MRPT_OPENCV_VERSION_NUM >= 0x210
	FAST(
		inImg_gray.getAs<IplImage>(), cv_feats, options.FASTOptions.threshold,
		options.FASTOptions.nonmax_suppression);
#endif

	// *All* the features have been extracted.
	const size_t N = cv_feats.size();

	// Use KLT response instead of the OpenCV's original "response" field:
	if (options.FASTOptions.use_KLT_response)
	{
		const unsigned int KLT_half_win = 4;
		const unsigned int max_x = inImg_gray.getWidth() - 1 - KLT_half_win;
		const unsigned int max_y = inImg_gray.getHeight() - 1 - KLT_half_win;
		for (size_t i = 0; i < N; i++)
		{
			const unsigned int x = cv_feats[i].pt.x;
			const unsigned int y = cv_feats[i].pt.y;
			if (x > KLT_half_win && y > KLT_half_win && x <= max_x &&
				y <= max_y)
				cv_feats[i].response =
					inImg_gray.KLT_response(x, y, KLT_half_win);
			else
				cv_feats[i].response = -100;
		}
	}

	// Now:
	//  1) Sort them by "response": It's ~100 times faster to sort a list of
	//      indices "sorted_indices" than sorting directly the actual list of
	//      features "cv_feats"
	std::vector<size_t> sorted_indices(N);
	for (size_t i = 0; i < N; i++) sorted_indices[i] = i;
	std::sort(
		sorted_indices.begin(), sorted_indices.end(),
		KeypointResponseSorter<vector<KeyPoint>>(cv_feats));

	//  2) Filter by "min-distance" (in options.FASTOptions.min_distance)
	//  3) Convert to MRPT CFeatureList format.
	// Steps 2 & 3 are done together in the while() below.
	// The "min-distance" filter is done by means of a 2D binary matrix where
	// each cell is marked when one feature falls within it. This is not
	// exactly the same as a pure "min-distance" filter but is pretty close,
	// and for large numbers of features it is much faster than a brute-force
	// search or kd-trees.
	// (An intermediate approach would be a mask image updated for each
	// accepted feature, etc.)

	const bool do_filter_min_dist = options.FASTOptions.min_distance > 1;

	// Use half the min-distance, since we'll later mark as occupied the
	// ranges [i-1,i+1] for a feature at "i"
	const unsigned int occupied_grid_cell_size =
		options.FASTOptions.min_distance / 2.0;
	const float occupied_grid_cell_size_inv = 1.0f / occupied_grid_cell_size;

	unsigned int grid_lx =
		!do_filter_min_dist
			? 1
			: (unsigned int)(1 + inImg.getWidth() * occupied_grid_cell_size_inv);
	unsigned int grid_ly =
		!do_filter_min_dist
			? 1
			: (unsigned int)(1 + inImg.getHeight() * occupied_grid_cell_size_inv);

	mrpt::math::CMatrixBool occupied_sections(
		grid_lx, grid_ly);  // See the comments above for an explanation.
	occupied_sections.fillAll(false);

	unsigned int nMax =
		(nDesiredFeatures != 0 && N > nDesiredFeatures) ? nDesiredFeatures : N;
	const int offset = (int)this->options.patchSize / 2 + 1;
	const size_t size_2 = options.patchSize / 2;
	const size_t imgH = inImg.getHeight();
	const size_t imgW = inImg.getWidth();
	unsigned int i = 0;
	unsigned int cont = 0;
	TFeatureID nextID = init_ID;

	if (!options.addNewFeatures) feats.clear();

	while (cont != nMax && i != N)
	{
		// Take the next feature from the ordered list of good features:
		const KeyPoint& kp = cv_feats[sorted_indices[i]];
		i++;

		// Patch out of the image??
		const int xBorderInf = (int)floor(kp.pt.x - size_2);
		const int xBorderSup = (int)floor(kp.pt.x + size_2);
		const int yBorderInf = (int)floor(kp.pt.y - size_2);
		const int yBorderSup = (int)floor(kp.pt.y + size_2);

		if (!(xBorderSup < (int)imgW && xBorderInf > 0 &&
			  yBorderSup < (int)imgH && yBorderInf > 0))
			continue;  // nope, skip.

		if (do_filter_min_dist)
		{
			// Check the min-distance:
			const size_t section_idx_x =
				size_t(kp.pt.x * occupied_grid_cell_size_inv);
			const size_t section_idx_y =
				size_t(kp.pt.y * occupied_grid_cell_size_inv);

			if (occupied_sections(section_idx_x, section_idx_y))
				continue;  // Already occupied! skip.

			// Mark section as occupied
			occupied_sections.set_unsafe(section_idx_x, section_idx_y, true);
			if (section_idx_x > 0)
				occupied_sections.set_unsafe(
					section_idx_x - 1, section_idx_y, true);
			if (section_idx_y > 0)
				occupied_sections.set_unsafe(
					section_idx_x, section_idx_y - 1, true);
			if (section_idx_x < grid_lx - 1)
				occupied_sections.set_unsafe(
					section_idx_x + 1, section_idx_y, true);
			if (section_idx_y < grid_ly - 1)
				occupied_sections.set_unsafe(
					section_idx_x, section_idx_y + 1, true);
		}

		// All tests passed: add new feature:
		CFeature::Ptr ft = mrpt::make_aligned_shared<CFeature>();
		ft->type = featFAST;
		ft->ID = nextID++;
		ft->x = kp.pt.x;
		ft->y = kp.pt.y;
		ft->response = kp.response;
		ft->orientation = kp.angle;
		ft->scale = kp.octave;
		ft->patchSize = options.patchSize;  // The size of the feature patch

		if (options.patchSize > 0)
		{
			inImg.extract_patch(
				ft->patch, round(ft->x) - offset, round(ft->y) - offset,
				options.patchSize,
				options.patchSize);  // Image patch surrounding the feature
		}
		feats.push_back(ft);
		++cont;
	}
// feats.resize( cont );  // JL: really needed???

#endif
#endif
	MRPT_END
}
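The grid-based min-distance filter shared by the FAST/FASTER extractors, isolated as a sketch (same idea as above, with a plain std::vector<bool> standing in for mrpt::math::CMatrixBool):

#include <cstddef>
#include <vector>

// Approximate min-distance filtering: cells are ~min_distance/2 wide, and
// accepting a point claims its cell plus the 4 neighbours, so any later
// point closer than roughly min_distance lands in a claimed cell.
struct OccupancyGrid
{
	size_t lx, ly;
	float inv_cell;
	std::vector<bool> cells;

	OccupancyGrid(size_t img_w, size_t img_h, float min_distance)
		: inv_cell(2.0f / min_distance)  // assumes min_distance > 1, as above
	{
		lx = 1 + size_t(img_w * inv_cell);
		ly = 1 + size_t(img_h * inv_cell);
		cells.assign(lx * ly, false);
	}

	// Returns false if the point's cell is taken; otherwise claims it.
	bool tryClaim(float x, float y)
	{
		const size_t cx = size_t(x * inv_cell), cy = size_t(y * inv_cell);
		if (cells[cx + cy * lx]) return false;
		cells[cx + cy * lx] = true;
		if (cx > 0)      cells[(cx - 1) + cy * lx] = true;
		if (cy > 0)      cells[cx + (cy - 1) * lx] = true;
		if (cx < lx - 1) cells[(cx + 1) + cy * lx] = true;
		if (cy < ly - 1) cells[cx + (cy + 1) * lx] = true;
		return true;
	}
};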
/************************************************************************************************
*								extractFeaturesSURF  									        *
************************************************************************************************/
void  CFeatureExtraction::extractFeaturesSURF(
		const mrpt::utils::CImage		&inImg,
		CFeatureList			&feats,
		unsigned int			init_ID,
		unsigned int			nDesiredFeatures,
		const TImageROI			&ROI) const
{
	MRPT_UNUSED_PARAM(ROI);
#if MRPT_HAS_OPENCV && MRPT_OPENCV_VERSION_NUM >= 0x111

	const CImage img_grayscale(inImg, FAST_REF_OR_CONVERT_TO_GRAY);
	const IplImage* cGrey = img_grayscale.getAs<IplImage>();

	CvSeq *kp	=	NULL;
	CvSeq *desc	=	NULL;
	CvMemStorage *storage = cvCreateMemStorage(0);

	// Extract the SURF points:
	CvSURFParams surf_params = cvSURFParams(options.SURFOptions.hessianThreshold, options.SURFOptions.rotation_invariant ? 1:0);
	surf_params.nOctaves = options.SURFOptions.nOctaves;
	surf_params.nOctaveLayers = options.SURFOptions.nLayersPerOctave;

	cvExtractSURF( cGrey, NULL, &kp, &desc, storage, surf_params);

	// -----------------------------------------------------------------
	// MRPT Wrapping
	// -----------------------------------------------------------------
	feats.clear();
	unsigned int	nCFeats		= init_ID;
	int				limit;
	int				offset		= (int)this->options.patchSize/2 + 1;
	unsigned int	imgH		= inImg.getHeight();
	unsigned int	imgW		= inImg.getWidth();

	if( nDesiredFeatures == 0 )
		limit = kp->total;
	else
		limit = (int)nDesiredFeatures < kp->total ? (int)nDesiredFeatures : kp->total;

	for( int i = 0; i < limit; i++ )
	{
		// Get the OpenCV SURF point
		CvSURFPoint *point;
		CFeaturePtr ft = CFeature::Create();
		point = (CvSURFPoint*)cvGetSeqElem( kp, i );

		const int xBorderInf = (int)floor( point->pt.x - options.patchSize/2 );
		const int xBorderSup = (int)floor( point->pt.x + options.patchSize/2 );
		const int yBorderInf = (int)floor( point->pt.y - options.patchSize/2 );
		const int yBorderSup = (int)floor( point->pt.y + options.patchSize/2 );

		if( options.patchSize == 0 || ( (xBorderSup < (int)imgW) && (xBorderInf > 0) && (yBorderSup < (int)imgH) && (yBorderInf > 0) ) )
		{
			ft->type		= featSURF;
			ft->x			= point->pt.x;				// X position
			ft->y			= point->pt.y;				// Y position
			ft->orientation = point->dir;				// Orientation
			ft->scale		= point->size*1.2/9;		// Scale
			ft->ID			= nCFeats++;				// Feature ID into extraction
			ft->patchSize	= options.patchSize;		// The size of the feature patch

			if( options.patchSize > 0 )
			{
				inImg.extract_patch(
					ft->patch,
					round( ft->x ) - offset,
					round( ft->y ) - offset,
					options.patchSize,
					options.patchSize );				// Image patch surrounding the feature
			}

			// Get the SURF descriptor
			float* d = (float*)cvGetSeqElem( desc, i );
			ft->descriptors.SURF.resize( options.SURFOptions.rotation_invariant ? 128 : 64 );
			std::vector<float>::iterator itDesc;
			unsigned int k;

			for( k = 0, itDesc = ft->descriptors.SURF.begin(); k < ft->descriptors.SURF.size(); k++, itDesc++ )
				*itDesc = d[k];

			feats.push_back( ft );

		} // end if
	} // end for

	cvReleaseMemStorage(&storage); // Free memory

#else
	THROW_EXCEPTION("Method not available: MRPT was compiled without OpenCV, or the OpenCV version is too old (>= 1.1.1 required by the check above)")
#endif //MRPT_HAS_OPENCV
} // end extractFeaturesSURF
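The cvExtractSURF C API above was removed in OpenCV 3. A hedged sketch of the equivalent call through cv::xfeatures2d::SURF (requires the opencv_contrib modules; mapping rotation_invariant to the 128-float extended descriptor follows the 128/64 resize above):

#include <vector>
#include <opencv2/xfeatures2d.hpp>

void surf_demo(const cv::Mat& grey)
{
	using namespace cv;

	// extended=true -> 128-float descriptors (the "128" branch above):
	Ptr<xfeatures2d::SURF> surf = xfeatures2d::SURF::create(
		600.0 /*hessianThreshold*/, 4 /*nOctaves*/, 3 /*nOctaveLayers*/,
		true /*extended*/, false /*upright*/);

	std::vector<KeyPoint> kps;
	Mat descs;  // one descriptor row per keypoint
	surf->detectAndCompute(grey, noArray(), kps, descs);
}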
/************************************************************************************************
*								selectGoodFeaturesKLT  									        *
************************************************************************************************/
void CFeatureExtraction::selectGoodFeaturesKLT(
		const mrpt::utils::CImage		&inImg,
		CFeatureList		&feats,
		unsigned int		init_ID,
		unsigned int		nDesiredFeatures,
		void				*mask_ ) const
{
//#define VERBOSE_TIMING

#ifdef VERBOSE_TIMING
	CTicTac tictac;
#endif
		MRPT_START

		#if MRPT_HAS_OPENCV
        const unsigned int MAX_COUNT = 300;

		// Reinterpret opencv formal arguments
		CvMatrix *mask = reinterpret_cast<CvMatrix*>(mask_);

		// -----------------------------------------------------------------
		// Create OpenCV Local Variables
		// -----------------------------------------------------------------
		int				count = 0;
		int				nPts;

		CvImage img, cGrey;

#ifdef VERBOSE_TIMING
		tictac.Tic();
#endif
		img.attach( const_cast<IplImage*>(inImg.getAs<IplImage>()), false );	// Attach Image as IplImage and do not use ref counter
#ifdef VERBOSE_TIMING
		cout << "[KLT] Attach: " << tictac.Tac()*1000.0f << endl;
#endif
		if( img.channels() == 1 )
			cGrey = img;										// Input image is already 'grayscale'
		else
		{
			cGrey.create( cvGetSize( img ), 8, 1);
			cvCvtColor( img, cGrey, CV_BGR2GRAY );				// Convert input image into 'grayscale'
		}

		nPts = (nDesiredFeatures == 0) ? MAX_COUNT : nDesiredFeatures;

		std::vector<CvPoint2D32f> points(nPts);

		CvImage eig, temp;									// temporary and auxiliary images

#ifdef VERBOSE_TIMING
		tictac.Tic();
#endif
		eig.create( cvGetSize( cGrey ), 32, 1 );
		temp.create( cvGetSize( cGrey ), 32, 1 );
#ifdef VERBOSE_TIMING
		cout << "[KLT] Create: " << tictac.Tac()*1000.0f << endl;
#endif
		count = nPts;										// Number of points to find


#if 0	// Temporary debug
		{
			static int i=0;
			cvSaveImage( format("debug_map_%05i.bmp",++i).c_str(), cGrey);
		}
#endif
		// -----------------------------------------------------------------
		// Select good features with subpixel accuracy (USING HARRIS OR KLT)
		// -----------------------------------------------------------------
		if( options.featsType == featHarris )
		{
#ifdef VERBOSE_TIMING
			tictac.Tic();
#endif
			cvGoodFeaturesToTrack( cGrey, eig, temp, &points[0], &count,	// input and output data
				(double)options.harrisOptions.threshold,					// for rejecting weak local maxima ( with min_eig < threshold*max(eig_image) )
				(double)options.harrisOptions.min_distance,					// minimum distance between features
				mask ? (*mask) : static_cast<const CvMat*>(NULL),			// ROI
				(double)options.harrisOptions.radius,						// size of the block of pixels used
				1,															// use Harris
				options.harrisOptions.k );									// k factor for the Harris algorithm
#ifdef VERBOSE_TIMING
			cout << "[KLT] Find feats: " << tictac.Tac()*1000.0f << endl;
#endif
		}
		else
		{
#ifdef VERBOSE_TIMING
			tictac.Tic();
#endif
			cvGoodFeaturesToTrack( cGrey, eig, temp, &points[0], &count,	// input and output data
				(double)options.KLTOptions.threshold,						// for rejecting weak local maxima ( with min_eig < threshold*max(eig_image) )
				(double)options.KLTOptions.min_distance,					// minimum distance between features
				mask ? (*mask) : static_cast<const CvMat*>(NULL),			// ROI
				options.KLTOptions.radius,									// size of the block of pixels used
				0,															// use Kanade Lucas Tomasi
				0.04 );														// unused parameter (Harris k)
#ifdef VERBOSE_TIMING
			cout << "[KLT] Find feats: " << tictac.Tac()*1000.0f << endl;
#endif
		}

		if( nDesiredFeatures > 0 && count < nPts )
			cout << "\n[WARNING][selectGoodFeaturesKLT]: Only " << count << " of " << nDesiredFeatures << " points could be extracted in the image." << endl;

		if( options.FIND_SUBPIXEL )
		{
#ifdef VERBOSE_TIMING
			tictac.Tic();
#endif
			// Subpixel interpolation
			cvFindCornerSubPix( cGrey, &points[0], count,
				cvSize(3,3), cvSize(-1,-1),
				cvTermCriteria( CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 10, 0.05 ));

#ifdef VERBOSE_TIMING
			cout << "[KLT] subpixel: " << tictac.Tac()*1000.0f << endl;
#endif
		}

		// -----------------------------------------------------------------
		// Fill output structure
		// -----------------------------------------------------------------
#ifdef VERBOSE_TIMING
		tictac.Tic();
#endif

		feats.clear();
		unsigned int	borderFeats = 0;
		unsigned int	nCFeats		= init_ID;
		int				i			= 0;
		const int		limit		= min( nPts, count );
		int				offset		= (int)this->options.patchSize/2 + 1;
		unsigned int	imgH		= inImg.getHeight();
		unsigned int	imgW		= inImg.getWidth();

		while( i < limit )
		{
			const int xBorderInf = (int)floor( points[i].x - options.patchSize/2 );
			const int xBorderSup = (int)floor( points[i].x + options.patchSize/2 );
			const int yBorderInf = (int)floor( points[i].y - options.patchSize/2 );
			const int yBorderSup = (int)floor( points[i].y + options.patchSize/2 );

			if( options.patchSize==0 || ( (xBorderSup < (int)imgW) && (xBorderInf > 0) && (yBorderSup < (int)imgH) && (yBorderInf > 0) ) )
			{
				CFeaturePtr ft = CFeature::Create();

				ft->type		= featKLT;
				ft->x			= points[i].x;				// X position
				ft->y			= points[i].y;				// Y position
				ft->track_status = status_TRACKED;		    // Feature Status
				ft->response	= 0.0;						// A value proportional to the quality of the feature (unused yet)
				ft->ID			= nCFeats++;				// Feature ID into extraction
				ft->patchSize	= options.patchSize;		// The size of the feature patch

				if( options.patchSize > 0 )
				{
					inImg.extract_patch(
						ft->patch,
						round( ft->x ) - offset,
						round( ft->y ) - offset,
						options.patchSize,
						options.patchSize );				// Image patch surrounding the feature
				}

				feats.push_back( ft );

			} // end if
			else
				borderFeats++;

			i++;
		} // end while

#ifdef VERBOSE_TIMING
		cout << "[KLT] Create output: " << tictac.Tac()*1000.0f << endl;
#endif


		#else
			THROW_EXCEPTION("MRPT has been compiled without OpenCV (MRPT_HAS_OPENCV=0)!");
		#endif

		MRPT_END

} // end of function
Example #11
inline void setPixel(const openni::RGB888Pixel& src, mrpt::utils::CImage& rgb, int x, int y)
{
	rgb.setPixel(x, y, (src.r << 16) + (src.g << 8) + src.b);
}
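The helper above packs the OpenNI pixel into the 0x00RRGGBB integer that CImage::setPixel() takes. The packing and its inverse, spelled out:

#include <cstdint>

inline unsigned int packRGB(uint8_t r, uint8_t g, uint8_t b)
{
	return (r << 16) + (g << 8) + b;  // 0x00RRGGBB
}

inline void unpackRGB(unsigned int c, uint8_t& r, uint8_t& g, uint8_t& b)
{
	r = (c >> 16) & 0xFF;
	g = (c >> 8) & 0xFF;
	b = c & 0xFF;
}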
Example #12
inline void resize(mrpt::utils::CImage& rgb, int w, int h)
{
	rgb.resize(w, h, CH_RGB, true);
}