Code example #1
void PatternDetector::buildPatternFromImage(const cv::Mat& image, Pattern& pattern) const
{

    // Store original image in pattern structure
    pattern.size = cv::Size(image.cols, image.rows);
    pattern.frame = image.clone();
    getGray(image, pattern.grayImg);
    
    // Build 2D and 3D contours (the 3D contour lies in the XY plane, since the pattern is planar)
    pattern.points2d.resize(4);
    pattern.points3d.resize(4);

    // Image dimensions
    const float w = image.cols;
    const float h = image.rows;

    // Normalized dimensions:
    const float maxSize = std::max(w,h);
    const float unitW = w / maxSize;
    const float unitH = h / maxSize;

    pattern.points2d[0] = cv::Point2f(0,0);
    pattern.points2d[1] = cv::Point2f(w,0);
    pattern.points2d[2] = cv::Point2f(w,h);
    pattern.points2d[3] = cv::Point2f(0,h);

    pattern.points3d[0] = cv::Point3f(-unitW, -unitH, 0);
    pattern.points3d[1] = cv::Point3f( unitW, -unitH, 0);
    pattern.points3d[2] = cv::Point3f( unitW,  unitH, 0);
    pattern.points3d[3] = cv::Point3f(-unitW,  unitH, 0);

    extractFeatures(pattern.grayImg, pattern.keypoints, pattern.descriptors);
}
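
A minimal driver for the snippet above, sketched under assumptions: findPattern() is shown in example #8, train() is assumed to exist on the same class as a setter for the current pattern, and all file names are placeholders.

cv::Mat patternImage = cv::imread("pattern.png");      // placeholder file name
Pattern pattern;
PatternDetector detector;
detector.buildPatternFromImage(patternImage, pattern);
detector.train(pattern);                               // train() is an assumed setter for the current pattern
PatternTrackingInfo info;
cv::Mat frame = cv::imread("frame.png");               // placeholder query image
if (detector.findPattern(frame, info)) {
    // info.homography now maps the pattern corners into the frame (see example #8)
}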
Code example #2
File: AsyncPolicy.hpp  Project: jpdoyle/OmegaGo
    void eval(MonteCarloNode& n) {
        auto& b = n.board;
        auto sym = n.orientation;

        feats.fill(Features());
        oriented.fill(Features());
        extractFeatures(b,feats);
        dihedralTranspose(feats,oriented,sym);

        inVec.clear();
        convertToTCNNInput(oriented,inVec);

        result = &nn.fprop(inVec);
        std::vector<size_t> inds;
        inds.assign(n.moves.size(),0);

        double total = 0;
        for(size_t i = 0; i < n.moves.size(); ++i) {
            // Index the move through the same dihedral element as the board features.
            inds[i] = IX(dihedral(n.moves[i], sym));
            total += (*result)[inds[i]];
        }

        for(size_t i = 0; i < n.moves.size(); ++i) {
            n.probabilities[i].store((*result)[inds[i]]/total);
        }
    }
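
For context (inferred from the snippet itself, not from project documentation): Go positions have an 8-fold dihedral symmetry, and eval() appears to exploit it. The extracted features are rotated/reflected into the node's stored orientation before the network forward pass, each candidate move is pushed through the same dihedral element (dihedral(n.moves[i], sym)) so that its index lines up with the oriented output, and the raw outputs over the legal moves are renormalized into a probability distribution.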
Code example #3
void DescriptorExtractor::extractDescriptors() {
    models_descriptors.clear();
    for (size_t i = 0; i < models_imgs.size(); ++i) {
        cv::Mat model_descriptors;
        extractFeatures(models_imgs[i], models_keypoints[i], model_descriptors);
        models_descriptors.push_back(model_descriptors);
    }
}
Code example #4
int classifyMulticlassSvm(CvSVM& classifier, Mat image)
{
    /* Extract features */
    Mat featureVector(FEAT_SIZE, 1, CV_32FC1);
    extractFeatures(image, featureVector);

    /* Classify */
    float classLabel = classifier.predict(featureVector, true);

    /* Integer class label */
    return (int)classLabel;
}
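
A hedged usage sketch for the function above, using the old OpenCV 2.x CvSVM API; the model file and image path are assumptions.

CvSVM svm;
svm.load("multiclass_svm.xml");                        // hypothetical pre-trained model file
cv::Mat sample = cv::imread("sample.png", CV_LOAD_IMAGE_GRAYSCALE);
int label = classifyMulticlassSvm(svm, sample);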
Code example #5
void TORecognize::loadSingleModel(std::string filename_, std::string name_){
	CLOG(LTRACE) << "loadSingleModel";
	cv::Mat model_img;

	if ( loadImage(filename_, model_img) ) {
		std::vector<KeyPoint> model_keypoints;
		cv::Mat model_descriptors;
		extractFeatures(model_img, model_keypoints, model_descriptors);

		// Add to database.
		models_imgs.push_back(model_img);
		models_keypoints.push_back(model_keypoints);
		models_descriptors.push_back(model_descriptors);
		models_names.push_back(name_);
		CLOG(LNOTICE) << "Successfully loaded model (" << models_names.size()-1 << "): " << models_names.back();
	}//: if
}
Code example #6
void DescriptorExtractor::onNewImage() {

    CLOG(LTRACE) << "onNewImage";
    try {

        // Change keypoint detector type (if required).
        setDescriptorExtractor();

        models_imgs = in_models_imgs.read();
        models_names = in_models_names.read();
        models_keypoints = in_models_keypoints.read();
        scene_keypoints = in_scene_keypoints.read();

        cv::Mat scene_descriptors;

        // Load image containing the scene.
        cv::Mat scene_img = in_img.read().clone();

        extractDescriptors();

        // Extract features from scene.
        extractFeatures(scene_img, scene_keypoints, scene_descriptors);
        CLOG(LINFO) << "Scene features: " << scene_keypoints.size();

        std::vector<cv::Mat> out_descriptors;
        // Copy model descriptors to the output vector.
        for (size_t i = 0; i < models_descriptors.size(); ++i) {
            out_descriptors.push_back(models_descriptors[i]);
        }
        out_models_descriptors.write(out_descriptors);
        out_scene_descriptors.write(scene_descriptors);

    } catch (...) {
        CLOG(LERROR) << "onNewImage failed";
    }

}
Code example #7
int classifyOneAgainstAllSvm(const vector<CvSVM>& classifiers, Mat image)
{
    /* Extract features */
    Mat featureVector(FEAT_SIZE, 1, CV_32FC1);
    extractFeatures(image, featureVector);

    /* Responses from the classifiers */
    vector<float> responses(classifiers.size());
    for (size_t i = 0; i < classifiers.size(); i++) {
        responses[i] = classifiers[i].predict(featureVector, true);
    }

    /* Argmax of responses */
    size_t argmax = 0;
    for (size_t i = 1; i < responses.size(); i++) {
        if (responses[i] > responses[argmax]) {
            argmax = i;
        }
    }

    /* Positive best response: class label (argmax+1); otherwise rejection (0) */
    return (responses[argmax] > 0) ? (int)(argmax + 1) : 0;
}
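
A sketch of how the classifier bank might be prepared, assuming one pre-trained one-vs-all SVM per class saved to disk; the class count and file-name scheme are hypothetical.

const int numClasses = 10;                             // assumption
std::vector<CvSVM> classifiers(numClasses);
for (int i = 0; i < numClasses; i++) {
    std::ostringstream name;                           // needs <sstream>
    name << "svm_" << i << ".xml";                     // hypothetical naming scheme
    classifiers[i].load(name.str().c_str());
}
int label = classifyOneAgainstAllSvm(classifiers, image);  // 0 means "rejected"

The rejection rule works because predict(featureVector, true) returns the signed decision-function value for a two-class SVM, so a best response that is still non-positive means no classifier claimed the sample.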
Code example #8
bool PatternDetector::findPattern(const cv::Mat& image, PatternTrackingInfo& info)
{
    // Convert input image to gray
    getGray(image, m_grayImg);
    
    // Extract feature points from input gray image
    extractFeatures(m_grayImg, m_queryKeypoints, m_queryDescriptors);
    
    // Get matches with current pattern
    getMatches(m_queryDescriptors, m_matches);

#if _DEBUG
    cv::showAndSave("Raw matches", getMatchesImage(image, m_pattern.frame, m_queryKeypoints, m_pattern.keypoints, m_matches, 100));
    cv::Mat tmp = image.clone();
#endif

    // Find homography transformation and detect good matches
    bool homographyFound = refineMatchesWithHomography(
        m_queryKeypoints, 
        m_pattern.keypoints, 
        homographyReprojectionThreshold, 
        m_matches, 
        m_roughHomography);

    if (homographyFound)
    {
#if _DEBUG
        cv::showAndSave("Refined matches using RANSAC", getMatchesImage(image, m_pattern.frame, m_queryKeypoints, m_pattern.keypoints, m_matches, 100));
#endif
        // If homography refinement is enabled, improve the found transformation
        if (enableHomographyRefinement)
        {
            // Warp image using found homography
            cv::warpPerspective(m_grayImg, m_warpedImg, m_roughHomography, m_pattern.size, cv::WARP_INVERSE_MAP | cv::INTER_CUBIC);
#if _DEBUG
            cv::showAndSave("Warped image",m_warpedImg);
#endif
            // Get refined matches:
            std::vector<cv::KeyPoint> warpedKeypoints;
            std::vector<cv::DMatch> refinedMatches;

            // Detect features on warped image
            extractFeatures(m_warpedImg, warpedKeypoints, m_queryDescriptors);

            // Match with pattern
            getMatches(m_queryDescriptors, refinedMatches);

            // Estimate new refinement homography
            homographyFound = refineMatchesWithHomography(
                warpedKeypoints, 
                m_pattern.keypoints, 
                homographyReprojectionThreshold, 
                refinedMatches, 
                m_refinedHomography);
#if _DEBUG
            cv::showAndSave("MatchesWithRefinedPose", getMatchesImage(m_warpedImg, m_pattern.grayImg, warpedKeypoints, m_pattern.keypoints, refinedMatches, 100));
#endif
            // The final homography is the matrix product of the rough and refined homographies:
            info.homography = m_roughHomography * m_refinedHomography;

            // Transform contour with rough homography
#if _DEBUG
            cv::perspectiveTransform(m_pattern.points2d, info.points2d, m_roughHomography);
            info.draw2dContour(tmp, CV_RGB(0,200,0));
#endif

            // Transform contour with precise homography
            cv::perspectiveTransform(m_pattern.points2d, info.points2d, info.homography);
#if _DEBUG
            info.draw2dContour(tmp, CV_RGB(200,0,0));
#endif
        }
        else
        {
            info.homography = m_roughHomography;

            // Transform contour with rough homography
            cv::perspectiveTransform(m_pattern.points2d, info.points2d, m_roughHomography);
#if _DEBUG
            info.draw2dContour(tmp, CV_RGB(0,200,0));
#endif
        }
    }

#if _DEBUG
    cv::showAndSave("Final matches", getMatchesImage(tmp, m_pattern.frame, m_queryKeypoints, m_pattern.keypoints, m_matches, 100));
    std::cout << "Features:" << std::setw(4) << m_queryKeypoints.size() << " Matches: " << std::setw(4) << m_matches.size() << std::endl;
#endif

    return homographyFound;
}
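
A note on the composition info.homography = m_roughHomography * m_refinedHomography, judging from how the matrices are used here: the warp is performed with WARP_INVERSE_MAP, i.e. the camera image is mapped back into the pattern's coordinate frame, so m_refinedHomography is estimated between the pattern and the almost-aligned warped image. Chaining the two therefore takes pattern points through the small refinement first and then through the rough pattern-to-image transform, which is what the final perspectiveTransform call applies.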
Code example #9
File: PatternExtractor.cpp  Project: matoilic/ardoor
void PatternExtractor::extract(const cv::Mat &img, Pattern &pattern)
{
    initializePattern(img, pattern);
    extractFeatures(img, pattern.keypoints, pattern.descriptors);
}
Code example #10
File: qt_gui.cpp  Project: ZhuangYanDLUT/dlut-ros-pkg
void Graphical_UI::extractFeaturesCmd ()
{
  QString message = tr ("Extract global spatial features and local SURF features, then save to database");
  statusBar ()->showMessage (message);
  Q_EMIT extractFeatures();
}
Code example #11
void TORecognize::onNewImage()
{
	CLOG(LTRACE) << "onNewImage";
	try {
		// Change keypoint detector and descriptor extractor types (if required).
		setKeypointDetector();
		setDescriptorExtractor();

		// Re-load the model - extract features from model.
		loadModels();

		std::vector<KeyPoint> scene_keypoints;
		cv::Mat scene_descriptors;
		std::vector< DMatch > matches;

		// Clear vectors holding the previous recognition results.
		recognized_names.clear();
		recognized_centers.clear();
		recognized_corners.clear();
		recognized_scores.clear();

		// Load image containing the scene.
		cv::Mat scene_img = in_img.read();

		// Extract features from scene.
		extractFeatures(scene_img, scene_keypoints, scene_descriptors);
		CLOG(LINFO) << "Scene features: " << scene_keypoints.size();

		// Check model.
		for (unsigned int m=0; m < models_imgs.size(); m++) {
			CLOG(LDEBUG) << "Trying to recognize model (" << m <<"): " << models_names[m];

			if (models_keypoints[m].size() == 0) {
				CLOG(LWARNING) << "Model not valid. Please load a model that contains texture";
				continue;	// Skip this model instead of aborting recognition of the remaining ones.
			}//: if

			CLOG(LDEBUG) << "Model features: " << models_keypoints[m].size();

			// Change matcher type (if required).
			setDescriptorMatcher();

			// Find matches.
			matcher->match( models_descriptors[m], scene_descriptors, matches );

			CLOG(LDEBUG) << "Matches found: " << matches.size();

			if (m == prop_returned_model_number) {
				// Draw all found matches.
				Mat img_matches1;
				drawMatches( models_imgs[m], models_keypoints[m], scene_img, scene_keypoints,
					     matches, img_matches1, Scalar::all(-1), Scalar::all(-1),
					     vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
				out_img_all_correspondences.write(img_matches1);
			}//: if

			// Filtering.
			double max_dist = 0;
			double min_dist = 100;

			//-- Quick calculation of max and min distances between keypoints
			for (size_t i = 0; i < matches.size(); i++) {
				double dist = matches[i].distance;
				if( dist < min_dist ) min_dist = dist;
				if( dist > max_dist ) max_dist = dist;
			}//: for

			CLOG(LDEBUG) << "Max dist : " << max_dist;
			CLOG(LDEBUG) << "Min dist : " << min_dist;

			//-- Keep only "good" matches (i.e. those whose distance is less than 3*min_dist)
			std::vector< DMatch > good_matches;

			for (size_t i = 0; i < matches.size(); i++) {
				if( matches[i].distance < 3*min_dist )
					good_matches.push_back( matches[i]);
			}//: for

			CLOG(LDEBUG) << "Good matches: " << good_matches.size();

			// Localize the object
			std::vector<Point2f> obj;
			std::vector<Point2f> scene;

			// Get the keypoints from the good matches.
			for (size_t i = 0; i < good_matches.size(); i++) {
				obj.push_back(models_keypoints[m][good_matches[i].queryIdx].pt);
				scene.push_back(scene_keypoints[good_matches[i].trainIdx].pt);
			}//: for

			// Find homography between corresponding points.
			Mat H = findHomography( obj, scene, CV_RANSAC );

			// Get the corners from the detected "object hypothesis".
			std::vector<Point2f> obj_corners(4);
			obj_corners[0] = cv::Point2f(0,0);
			obj_corners[1] = cv::Point2f( models_imgs[m].cols, 0 );
			obj_corners[2] = cv::Point2f( models_imgs[m].cols, models_imgs[m].rows );
			obj_corners[3] = cv::Point2f( 0, models_imgs[m].rows );
			std::vector<Point2f> hypobj_corners(4);

			// Transform corners with found homography.
			perspectiveTransform( obj_corners, hypobj_corners, H);

			// Verification: check resulting shape of object hypothesis.
			// Compute "center of mass".
			cv::Point2f center = (hypobj_corners[0] + hypobj_corners[1] + hypobj_corners[2] + hypobj_corners[3])*.25;
			std::vector<double> angles(4);
			cv::Point2f tmp;
			// Compute angles.
			for (int i=0; i<4; i++) {
				tmp = (hypobj_corners[i] - center);
				angles[i] = atan2(tmp.y, tmp.x);
				CLOG(LDEBUG) << tmp << " angle[" << i << "] = " << angles[i];
			}//: for

			// Find smallest element.
			int imin = -1;
			double amin = 1000;
			for (int i=0; i<4; i++)
				if (amin > angles[i]) {
					amin = angles[i];
					imin = i;
				}//: if

			// Rotate the table so that the smallest angle comes first.
			for (int i=0; i<imin; i++) {
				angles.push_back(angles[0]);
				angles.erase(angles.begin());
			}//: for

			for (int i=0; i<4; i++) {
				CLOG(LDEBUG) << "reordered angle[" << i << "] = " << angles[i];
			}//: for

			cv::Scalar colour;
			double score = (double)good_matches.size() / models_keypoints[m].size();
			// Check dependency between corners.
			if ((angles[0] < angles[1]) && (angles[1] < angles[2]) && (angles[2] < angles[3])) {
				// Order is ok.
				colour = Scalar(0, 255, 0);
				CLOG(LINFO)<< "Model ("<<m<<"): keypoints "<< models_keypoints [m].size()<<" corrs = "<< good_matches.size() <<" score "<< score << " VALID";
				// Store the model in a list in proper order.
				storeObjectHypothesis(models_names[m], center, hypobj_corners, score);

			} else {
				// Hypothesis not valid.
				colour = Scalar(0, 0, 255);
				CLOG(LINFO)<< "Model ("<<m<<"): keypoints "<< models_keypoints [m].size()<<" corrs = "<< good_matches.size() <<" score "<< score << " REJECTED";
			}//: else

			if (m == prop_returned_model_number) {
				Mat img_matches2;
				// Draw good matches.
				drawMatches( models_imgs[m], models_keypoints[m], scene_img, scene_keypoints,
					     good_matches, img_matches2, Scalar::all(-1), Scalar::all(-1),
					     vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
				// Draw the object as lines, with center and top left corner indicated.
				line( img_matches2, hypobj_corners[0] + Point2f( models_imgs[m].cols, 0), hypobj_corners[1] + Point2f( models_imgs[m].cols, 0), colour, 4 );
				line( img_matches2, hypobj_corners[1] + Point2f( models_imgs[m].cols, 0), hypobj_corners[2] + Point2f( models_imgs[m].cols, 0), colour, 4 );
				line( img_matches2, hypobj_corners[2] + Point2f( models_imgs[m].cols, 0), hypobj_corners[3] + Point2f( models_imgs[m].cols, 0), colour, 4 );
				line( img_matches2, hypobj_corners[3] + Point2f( models_imgs[m].cols, 0), hypobj_corners[0] + Point2f( models_imgs[m].cols, 0), colour, 4 );
				circle( img_matches2, center + Point2f( models_imgs[m].cols, 0), 2, colour, 4);
				circle( img_matches2, hypobj_corners[0] + Point2f( models_imgs[m].cols, 0), 2, Scalar(255, 0, 0), 4);
				out_img_good_correspondences.write(img_matches2);

			}//: if
		}//: for

		Mat img_object = scene_img.clone();
		if (recognized_names.size() == 0) {
			CLOG(LWARNING) << "None of the models was properly recognized in the image";
		} else {

			for (size_t h=0; h<recognized_names.size(); h++) {
				// Draw the final object - as lines, with center and top left corner indicated.
				line( img_object, recognized_corners[h][0], recognized_corners[h][1], Scalar(0, 255, 0), 4 );
				line( img_object, recognized_corners[h][1], recognized_corners[h][2], Scalar(0, 255, 0), 4 );
				line( img_object, recognized_corners[h][2], recognized_corners[h][3], Scalar(0, 255, 0), 4 );
				line( img_object, recognized_corners[h][3], recognized_corners[h][0], Scalar(0, 255, 0), 4 );
				circle( img_object, recognized_centers[h], 2, Scalar(0, 255, 0), 4);
				circle( img_object, recognized_corners[h][0], 2, Scalar(255, 0, 0), 4);
				CLOG(LNOTICE) << "Hypothesis (" << h << "): model: " << recognized_names[h] << " score: " << recognized_scores[h];
			}//: for
		}//: else
		// Write image to port.
		out_img_object.write(img_object);

	} catch (...) {
		CLOG(LERROR) << "onNewImage failed";
	}//: catch
}
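
A note on the verification step in this example: a hypothesis is accepted only if the corner angles around the center of mass, rotated so that the smallest comes first, are strictly increasing. This rejects the self-intersecting or degenerate quadrilaterals that a homography fitted on bad correspondences can produce, and unlike a plain cv::isContourConvex check it also enforces a consistent corner order (winding), not just convexity.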