Example #1
//--------------------------------------------------------------
void testApp::mousePressed(int x, int y, int button)
{
    ofxUIRectangle * guiWindow = gui->getRect();
    if(!guiWindow->inside(x,y)){//mouseX>10 && mouseX<340 && mouseY>260 && mouseY<500){
        if(mapOpen && people.size()){
            int mX=x*kinect.width/ofGetWidth();//2*(mouseX-10);
            int mY=y*kinect.height/ofGetHeight();//2*(mouseY-260);
            mapScreen[mapPoint]=ofPoint(mX,mY);
            map[mapPoint++]=people[0]->position;
            if(mapPoint>=MAP_POINTS){
                mapOpen=false;
                mapPoint=0;
            }
            homography=findHomography(map,mapScreen);
            drawMap();
            drawZones();
        }
    }
}
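Note: the findHomography(map, mapScreen) call above goes through a project-local wrapper. A minimal sketch of the same calibration idea in plain OpenCV, with made-up point values and names (kinectPts, screenPts are illustrative assumptions, not from the original):

#include <opencv2/core.hpp>
#include <opencv2/calib3d.hpp>
#include <vector>

int main() {
    // Four assumed correspondences: Kinect image space -> projection screen space.
    std::vector<cv::Point2f> kinectPts = {{12, 9}, {620, 14}, {615, 470}, {8, 465}};
    std::vector<cv::Point2f> screenPts = {{0, 0}, {1280, 0}, {1280, 720}, {0, 720}};

    // With exactly four pairs this is a direct (non-robust) fit.
    cv::Mat H = cv::findHomography(kinectPts, screenPts);

    // Map a newly tracked sensor point into screen coordinates.
    std::vector<cv::Point2f> tracked = {{320, 240}}, onScreen;
    cv::perspectiveTransform(tracked, onScreen, H);
    return 0;
}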
Example #2
Mat Assignment2::computeRANSAC_opencv(Assignment2 &m2) // uses the OpenCV function to cross-check my hand-calculated H
{
	vector< DMatch > good_matches=this->FindMatchesEuclidian(m2);
	
	vector<Point2f> query;
	vector<Point2f> train;

	for(unsigned int i=0; i<(good_matches.size()); i++)
	{
		query.push_back(this->keypoints[good_matches[i].queryIdx].pt);
		train.push_back(m2.keypoints[good_matches[i].trainIdx].pt);
	}
	
	Mat H = findHomography(query, train, CV_RANSAC, PIXEL_DIST);

	cout<<"Homography"<<std::endl;
	//cout<<H<<std::endl;

	return H;
}
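Since the comment says this H is a cross-check against a hand-computed homography, one natural comparison is the mean reprojection error over the matched points. A minimal sketch; the helper name and the averaging are my own, not part of the original:

#include <opencv2/core.hpp>
#include <vector>
#include <cmath>

// Mean distance between H-projected query points and their matched train points.
static double meanReprojError(const cv::Mat& H,
                              const std::vector<cv::Point2f>& query,
                              const std::vector<cv::Point2f>& train) {
    if (query.empty()) return 0.0;
    std::vector<cv::Point2f> projected;
    cv::perspectiveTransform(query, projected, H);
    double sum = 0.0;
    for (size_t i = 0; i < projected.size(); ++i) {
        cv::Point2f d = projected[i] - train[i];
        sum += std::sqrt((double)d.x * d.x + (double)d.y * d.y);
    }
    return sum / projected.size();
}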
Example #3
void Calibrador::calcHomography() {
	if(ptsSrc.size() >= 4) {
		vector<Point2f> srcPoints, dstPoints;
		for(int i = 0; i < ptsSrc.size(); i++) {
			srcPoints.push_back(Point2f(ptsSrc[i].x, ptsSrc[i].y));
			dstPoints.push_back(Point2f(ptsDst[i].x, ptsDst[i].y));
		}
		
		// generate a homography from the two sets of points
		homography = findHomography(Mat(srcPoints), Mat(dstPoints));
		homographyReady = true;
		cv::invert(homography, homography_inv);
		
		// getPerspectiveTransform
		// this needs exactly 4 points (no more, no less)
		srcPoints.erase(srcPoints.begin()+4,srcPoints.end());
		dstPoints.erase(dstPoints.begin()+4,dstPoints.end());
		
		map_matrix = getPerspectiveTransform(srcPoints, dstPoints);
	}
}
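For context (my gloss, not part of the original): getPerspectiveTransform requires exactly four point pairs, while findHomography accepts four or more; with exactly four well-conditioned pairs the two produce the same mapping. A minimal sketch:

#include <opencv2/core.hpp>
#include <opencv2/calib3d.hpp>
#include <opencv2/imgproc.hpp>
#include <vector>

void demo() {
    std::vector<cv::Point2f> src = {{0, 0}, {100, 0}, {100, 100}, {0, 100}};
    std::vector<cv::Point2f> dst = {{10, 5}, {110, 8}, {105, 115}, {5, 110}};

    cv::Mat P = cv::getPerspectiveTransform(src, dst); // exactly 4 pairs
    cv::Mat H = cv::findHomography(src, dst);          // 4 or more pairs

    // Both are 3x3 CV_64F and describe the same mapping here.
}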
Example #4
	//--------------------------------------------------
	vector<KeyPoint> MatchableImage::updateFeatures( MatchableImage& previous, Mat& H )
	{	
		
		vector<Point2f> prevPts; KeyPoint::convert(previous.features, prevPts);
		int nPrevPts = prevPts.size();
		vector<Point2f> nextPts;
		vector<uchar> status;
		vector<float> err;
		calcOpticalFlowPyrLK(previous.cvImage, cvImage, prevPts, nextPts, status, err);
		
		
		
		features.clear();
		for(int i=0; i<nPrevPts; i++)
		{
			if(status[i]>0)
			{
				KeyPoint feature = previous.features[i];
				feature.pt = nextPts[i];
				features.push_back( feature );
			}
		}
		float pctPtsLost = 1.0f - features.size() / (float)nPrevPts;
		
		
		if(nPrevPts>3)
		{
			Mat prevMat = points2mat(prevPts);
			Mat nextMat = points2mat(nextPts);
			
			H = findHomography(prevMat, nextMat, status, CV_RANSAC, 2);
			int ptsUsedInHomography=0;
			for(int i=0; i<status.size(); i++)
			{
				if(status[i]>0) ptsUsedInHomography++;
			}
			//log(LOG_LEVEL_DEBUG, "%f%% points used in homography", (ptsUsedInHomography/(float)npts)*100);
		}
		return features;
	}
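points2mat() above is not an OpenCV function and is not shown in this listing. A plausible implementation (an assumption on my part) simply packs the vector into a two-channel Mat, which findHomography accepts directly:

#include <opencv2/core.hpp>
#include <vector>

// Assumed helper: deep-copies the points into an Nx1 CV_32FC2 matrix.
static cv::Mat points2mat(const std::vector<cv::Point2f>& pts) {
    return cv::Mat(pts, /*copyData=*/true);
}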
Example #5
Mat calculaMosaico(Mat im1, Mat im2,vector<KeyPoint> keypoints1,vector<KeyPoint> keypoints2,vector<DMatch> coincidencias){
    Mat mosaico;
    Mat homografia1, homografia2;
    vector<Point2f> puntosIm1, puntosIm2;

  //   homografia1=Mat(3,3, CV_32F);
     
    for(int i=0; i<coincidencias.size(); i++){
        puntosIm1.push_back(keypoints1[coincidencias.at(i).queryIdx].pt);
        puntosIm2.push_back(keypoints2[coincidencias.at(i).trainIdx].pt);
    }
    
//    for(int i=0; i<keypoints2.size(); i++){
//        puntosIm2.at(i).x=keypoints2.at(i).pt.x;
//        puntosIm2.at(i).y=keypoints2.at(i).pt.y;        
//    }

   homografia1= cv::Mat::zeros(3,3, CV_32F);
   
	homografia1.at<float>(0, 0) = 1;
	homografia1.at<float>(0, 2) = floor(im2.cols/2); // x offset: half the width (cols)
	homografia1.at<float>(1, 1) = 1;
	homografia1.at<float>(1, 2) = floor(im2.rows/2); // y offset: half the height (rows)
	homografia1.at<float>(2, 2) = 1;
    
    cout<<"HOMOGRAPHY 1 OK"<<homografia1<<endl;
    homografia2=findHomography(puntosIm1, puntosIm2, CV_RANSAC, 1);
    cout<<"HOMOGRAPHY 2 OK"<<homografia2<<endl;
    Size tam_mosaico;
    tam_mosaico.height=im1.rows+im2.rows;
    tam_mosaico.width=im1.cols+im2.cols;
    
    warpPerspective(im2, mosaico, homografia1, tam_mosaico);
    homografia1.convertTo(homografia1,homografia2.type());
    cout << "H1 TYPE "<<homografia1.type()<<endl;
    cout << "H2 TYPE "<<homografia2.type()<<endl;
    homografia2=homografia1*homografia2;
    warpPerspective(im1,mosaico, homografia2,tam_mosaico,  INTER_LINEAR, BORDER_TRANSPARENT);
    return mosaico;
}
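One detail worth spelling out in the composition above (my restatement, with assumed names): a point of im1 is first mapped into im2's frame by the estimated homography, then shifted by the centering translation, so the translation must be the left factor of the product passed to warpPerspective:

#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>

// T: 3x3 centering translation, H12: 3x3 homography im1 -> im2 (same depth).
// mosaic must already contain the warped im2; BORDER_TRANSPARENT keeps its pixels.
static void warpIntoMosaic(const cv::Mat& im1, const cv::Mat& T,
                           const cv::Mat& H12, cv::Mat& mosaic) {
    cv::Mat composed = T * H12; // left-multiply: translate after warping
    cv::warpPerspective(im1, mosaic, composed, mosaic.size(),
                        cv::INTER_LINEAR, cv::BORDER_TRANSPARENT);
}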
Example #6
void computeTransforms(const vector<vector<Point2f>>& srcFeaturesTab, const vector<vector<Point2f>>& refFeaturesTab, vector<Mat>& transforms, vector<int>& isMatched)
{
	int regionum = transforms.size();
	for (int i = 0; i < regionum; ++i)
	{
		const vector<Point2f>& srcReFts = srcFeaturesTab[i];
		const vector<Point2f>& refReFts = refFeaturesTab[i];

		Mat perspective ;

		if (refReFts.size() == 0)
		{
			perspective = Mat::zeros(3,3,CV_64FC1);
			isMatched[i] = 0;
		}
		else if (refReFts.size() <= 4)
		{
			double deltax = 0;
			int cnt = (int)refReFts.size();
			for (int j = 0; j < cnt; ++j)
			{
				deltax += (refReFts[j].x - srcReFts[j].x);
			}
			deltax /= cnt;

			// translation-only fallback: start from the identity, not a zero matrix
			perspective = Mat::eye(3,3,CV_64FC1);
			perspective.at<double>(0,2) = deltax;
			isMatched[i] = 1;
		}
		else
		{
			perspective = findHomography(srcReFts, refReFts, RANSAC);
			isMatched[i] = 2;
		}

		transforms[i] = perspective.clone();
	}	
}
Example #7
dhMat ddARToolkitPlus::getHomography(int markerIndex, vector<dhVector> &src) {
	vector<dhVector> corners;
	getDetectedMarkerOrderedBorderCorners(markerIndex, corners);
	return findHomography(src, corners);
}
Example #8
bool CustomPattern::findPatternPass(const Mat& image, vector<Point2f>& matched_features, vector<Point3f>& pattern_points,
                                    Mat& H, vector<Point2f>& scene_corners, const double pratio, const double proj_error,
                                    const bool refine_position, const Mat& mask, OutputArray output)
{
    if (!initialized) {return false; }
    matched_features.clear();
    pattern_points.clear();

    vector<vector<DMatch> > matches;
    vector<KeyPoint> f_keypoints;
    Mat f_descriptor;

    detector->detect(image, f_keypoints, mask);
    if (refine_position) refineKeypointsPos(image, f_keypoints);

    descriptorExtractor->compute(image, f_keypoints, f_descriptor);
    descriptorMatcher->knnMatch(f_descriptor, descriptor, matches, 2); // k = 2;
    vector<DMatch> good_matches;
    vector<Point2f> obj_points;

    for(int i = 0; i < f_descriptor.rows; ++i)
    {
        if(matches[i][0].distance < pratio * matches[i][1].distance)
        {
            const DMatch& dm = matches[i][0];
            good_matches.push_back(dm);
            // "keypoints1[matches[i].queryIdx] has a corresponding point in keypoints2[matches[i].trainIdx]"
            matched_features.push_back(f_keypoints[dm.queryIdx].pt);
            pattern_points.push_back(points3d[dm.trainIdx]);
            obj_points.push_back(keypoints[dm.trainIdx].pt);
        }
    }

    if (good_matches.size() < MIN_POINTS_FOR_H) return false;

    Mat h_mask;
    H = findHomography(obj_points, matched_features, RANSAC, proj_error, h_mask);
    if (H.empty())
    {
        // cout << "findHomography() returned empty Mat." << endl;
        return false;
    }

    for(unsigned int i = 0; i < good_matches.size(); ++i)
    {
        if(!h_mask.data[i])
        {
            deleteStdVecElem(good_matches, i);
            deleteStdVecElem(matched_features, i);
            deleteStdVecElem(pattern_points, i);
        }
    }

    if (good_matches.empty()) return false;

    uint numb_elem = good_matches.size();
    check_matches(matched_features, obj_points, good_matches, pattern_points, H);
    if (good_matches.empty() || numb_elem < good_matches.size()) return false;

    // Get the corners from the image
    scene_corners = vector<Point2f>(4);
    perspectiveTransform(obj_corners, scene_corners, H);

    // Check correctness of H
    // Is it a convex hull?
    bool cConvex = isContourConvex(scene_corners);
    if (!cConvex) return false;

    // Is the hull too large or small?
    double scene_area = contourArea(scene_corners);
    if (scene_area < MIN_CONTOUR_AREA_PX) return false;
    double ratio = scene_area/img_roi.size().area();
    if ((ratio < MIN_CONTOUR_AREA_RATIO) ||
        (ratio > MAX_CONTOUR_AREA_RATIO)) return false;

    // Is any of the projected points outside the hull?
    for(unsigned int i = 0; i < good_matches.size(); ++i)
    {
        if(pointPolygonTest(scene_corners, f_keypoints[good_matches[i].queryIdx].pt, false) < 0)
        {
            deleteStdVecElem(good_matches, i);
            deleteStdVecElem(matched_features, i);
            deleteStdVecElem(pattern_points, i);
        }
    }

    if (output.needed())
    {
        Mat out;
        drawMatches(image, f_keypoints, img_roi, keypoints, good_matches, out);
        // Draw lines between the corners (the mapped object in the scene - image_2 )
        line(out, scene_corners[0], scene_corners[1], Scalar(0, 255, 0), 2);
        line(out, scene_corners[1], scene_corners[2], Scalar(0, 255, 0), 2);
        line(out, scene_corners[2], scene_corners[3], Scalar(0, 255, 0), 2);
        line(out, scene_corners[3], scene_corners[0], Scalar(0, 255, 0), 2);
        out.copyTo(output);
    }

    return (!good_matches.empty()); // return true if there are enough good matches
}
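deleteStdVecElem() is external to this listing. A plausible O(1) swap-and-pop sketch (an assumption; note it does not preserve element order):

#include <vector>
#include <utility>

template <typename T>
static void deleteStdVecElem(std::vector<T>& v, size_t idx) {
    std::swap(v[idx], v.back()); // move the victim to the end...
    v.pop_back();                // ...and drop it in O(1)
}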
Example #9
void EarlyDetection(int gid){
    
    vector<int> mos; ldLabel(gid, mos);

    char cmd[512], vname[512]; FILE *_fp = NULL;

    sprintf(cmd,"mkdir -p FlowFeature/"); exec(cmd);
    sprintf(cmd,"FlowFeature/%d.txt",gid);
    
    _fp = fopen(cmd,"w+"); 
     
    for(int vid = 1; vid <300; ++vid){

        sprintf(vname, "/home/fengzy/Projects/XProject/dataset/Set%.02d/video/video%.03d.mp4", gid, vid);
        CvCapture *cap = cvCaptureFromFile(vname);
        if(!cap) return;

        printf("[gid:%d; vid:%d]\n", gid, vid);
        int width = cvGetCaptureProperty(cap, CV_CAP_PROP_FRAME_WIDTH);
        int height= cvGetCaptureProperty(cap, CV_CAP_PROP_FRAME_HEIGHT);
        int vlen  = cvGetCaptureProperty(cap, CV_CAP_PROP_FRAME_COUNT);
        double qualityLevel = 0.01,minDistance = 10,k = 0.04;
        int blockSize = 3, maxCorners = 300;
        bool useHarrisDetector = false;

        IplImage *pre = cvCreateImage(cvSize(width,height),8,3);
        IplImage *cur = cvCreateImage(cvSize(width,height),8,3);
        IplImage *frame = cvQueryFrame(cap);
        cvCopy(frame,cur);

        vector<float> tflen, tfratio;
        // mean; var
        float fTol = 0, fMean = 0, fVar = 0, fNum = 0;
        float rTol(0), rMean(0), rMax(-INFINITY);
        for(int idx = 0; idx < 50;++idx){
            frame = cvQueryFrame(cap);
            if(!frame) break;
            cvCopy(cur, pre); cvCopy(frame, cur);
            vector<Point2f> p_pre, p_cur, set1, set2;
            vector<uchar> status, inlier;
            vector<float>err;
            Mat m_pre, m_cur;
            cvtColor(Mat(pre), m_pre, CV_RGB2GRAY);
            cvtColor(Mat(cur), m_cur, CV_RGB2GRAY);
            goodFeaturesToTrack( m_pre,
                     p_pre,
                     maxCorners,
                     qualityLevel,
                     minDistance,
                     Mat(),
                     blockSize,
                     useHarrisDetector,
                     k );
            calcOpticalFlowPyrLK(m_pre, m_cur, p_pre, p_cur, status, err);
            
            for(int flen = 0; flen < status.size(); ++flen){
                if(!status[flen]) continue;
                float d = dist(p_pre[flen], p_cur[flen]);
                if(d > 21) continue; // default search window size
                set1.push_back(p_pre[flen] ); set2.push_back(p_cur[flen]);
            }
            p_pre.clear(), p_cur.clear();
            float tol = 0, tnum = 0;
            if(set1.size() > 4){
                Mat H = findHomography(set1, set2, inlier, CV_RANSAC);
                for(int plen = 0; plen < set1.size(); ++plen)
                    if(!inlier[plen]){ 
                        float d = dist(set1[plen],set2[plen]);
                        tol += d; tnum += 1;
                    }
                fTol += tol/max(1.0f,tnum);
                tflen.push_back(tol);
                float tratio = tnum / float(set1.size());
                rMean += tratio; tfratio.push_back(tratio); rMax = max(rMax, tratio);
            }
        }
        fMean = fTol / float(tflen.size()); 
        rTol = rMean; rMean /= float(tfratio.size()); 
        for(int flen = 0; flen < tflen.size(); ++flen){
            fVar += pow(tflen[flen]-fMean,2);
        }
        fVar /= float(tflen.size()-1);
        
        float rVar = 0;
        for(int flen = 0; flen < tfratio.size(); ++flen){
            rVar += pow(tfratio[flen]-rMean,2);
        }
        rVar /= float(tfratio.size()-1);
        
       // total: zoom in/out;mean: same; variance: a little
        float fPre(0), fNex(0), rPre(0), rNex(0);
        for(int flen =0 ; flen < 10; ++flen){
            fPre+= tflen[flen], fNex += tflen[tflen.size()-1-flen];
            rPre+= tfratio[flen], rNex += tfratio[tfratio.size()-1-flen];
        }
         
       fprintf(_fp,"%lf\t%lf\t%lf\t%lf\t%lf\t%lf\n", 
               float(fNex-fPre)/10.0f, fMean , fVar, float(rNex-rPre)/10.0f, rMean, rVar);
       cvReleaseCapture(&cap);
    }
    fclose(_fp);

}
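dist() above is also not shown in this listing; presumably the Euclidean distance between two tracked points, e.g. (assumption):

#include <opencv2/core.hpp>
#include <cmath>

static float dist(const cv::Point2f& a, const cv::Point2f& b) {
    float dx = a.x - b.x, dy = a.y - b.y;
    return std::sqrt(dx * dx + dy * dy);
}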
Example #10
// planar object detection
void Feature::objectDetect( Mat& objectImage, Mat& sceneImage , Mat&outImage,
							Mat& objectDescriptor,Mat& sceneDescriptor, vector<DMatch>& matches,
							vector<KeyPoint>& objectKeypoints, vector<KeyPoint>& sceneKeypoints)
{
	double max_dist = 0; double min_dist = 100;

	// max and min distances between matched feature points
	for( int i = 0; i < objectDescriptor.rows; i++ )
	{ 
		double dist = matches[i].distance;
		if( dist < min_dist ) 
			min_dist = dist;
		if( dist > max_dist ) 
			max_dist = dist;
	}


	// pick out the strongest matches (a radius criterion could be used instead)
	std::vector< DMatch > good_matches;
	double acceptedDist = 2*min_dist;

	for( int i = 0; i < objectDescriptor.rows; i++ )
	{
		if( matches[i].distance < acceptedDist )
		{ 
			good_matches.push_back( matches[i]); 
		}
	}
	
	// draw the matching result
	drawMatches( objectImage, objectKeypoints, sceneImage, sceneKeypoints,
				 good_matches, outImage, Scalar::all(-1), Scalar::all(-1),
				 vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

	// collect the locations of the good feature points
	std::vector<Point2f> object;// points in the object image
	std::vector<Point2f> scene;// points in the scene image
	for( int i = 0; i < good_matches.size(); i++ )
	{
		object.push_back( objectKeypoints[ good_matches[i].queryIdx ].pt );
		scene.push_back( sceneKeypoints[ good_matches[i].trainIdx ].pt );
	}

	// perspective relation between the object image and the scene image
	Mat H = findHomography( object, scene, CV_RANSAC );

	// coordinates of the four corners of the object image
	std::vector<Point2f> object_corners(4);

	object_corners[0] = cvPoint(0,0); 
	object_corners[1] = cvPoint( objectImage.cols, 0 );
	object_corners[2] = cvPoint( objectImage.cols, objectImage.rows ); 
	object_corners[3] = cvPoint( 0, objectImage.rows );

	std::vector<Point2f> scene_corners(4);

	perspectiveTransform( object_corners, scene_corners, H);// perspective transform

	// draw the bounding frame on the scene part of the output image
	line( outImage, scene_corners[0] + Point2f( objectImage.cols, 0), scene_corners[1] + Point2f( objectImage.cols, 0), Scalar(0, 255, 0), 4 );
	line( outImage, scene_corners[1] + Point2f( objectImage.cols, 0), scene_corners[2] + Point2f( objectImage.cols, 0), Scalar( 0, 255, 0), 4 );
	line( outImage, scene_corners[2] + Point2f( objectImage.cols, 0), scene_corners[3] + Point2f( objectImage.cols, 0), Scalar( 0, 255, 0), 4 );
	line( outImage, scene_corners[3] + Point2f( objectImage.cols, 0), scene_corners[0] + Point2f( objectImage.cols, 0), Scalar( 0, 255, 0), 4 );

}
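A hedged refinement of the findHomography step above (not in the original): requesting the RANSAC inlier mask lets the caller drop outliers before drawing. cv::RANSAC is the newer spelling of the CV_RANSAC constant used above.

#include <opencv2/core.hpp>
#include <opencv2/calib3d.hpp>
#include <vector>

// Estimates H and reports which correspondences RANSAC kept as inliers.
static cv::Mat homographyWithInliers(const std::vector<cv::Point2f>& object,
                                     const std::vector<cv::Point2f>& scene,
                                     std::vector<int>& inlierIdx) {
    std::vector<uchar> mask;
    cv::Mat H = cv::findHomography(object, scene, cv::RANSAC, 3.0, mask);
    for (size_t i = 0; i < mask.size(); ++i)
        if (mask[i]) inlierIdx.push_back((int)i);
    return H;
}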
Example #11
void TORecognize::onNewImage()
{
	CLOG(LTRACE) << "onNewImage";
	try {


		// Change keypoint detector and descriptor extractor types (if required).
		setKeypointDetector();

		setDescriptorExtractor();

		// Re-load the model - extract features from model.
		loadModels();

		std::vector<KeyPoint> scene_keypoints;
		cv::Mat scene_descriptors;
		std::vector< DMatch > matches;

		// Clear vectors! ;)
		recognized_names.clear();
		recognized_centers.clear();
		recognized_corners.clear();
		recognized_scores.clear();


		// Load image containing the scene.
		cv::Mat scene_img = in_img.read();



		// Extract features from scene.
		extractFeatures(scene_img, scene_keypoints, scene_descriptors);
		CLOG(LINFO) << "Scene features: " << scene_keypoints.size();

		// Check model.
		for (unsigned int m=0; m < models_imgs.size(); m++) {
			CLOG(LDEBUG) << "Trying to recognize model (" << m <<"): " << models_names[m];

			if ((models_keypoints[m]).size() == 0) {
				CLOG(LWARNING) << "Model not valid. Please load a model that contains texture";
				return;
			}//: if

			CLOG(LDEBUG) << "Model features: " << models_keypoints[m].size();

			// Change matcher type (if required).
			setDescriptorMatcher();

			// Find matches.
			matcher->match( models_descriptors[m], scene_descriptors, matches );

			CLOG(LDEBUG) << "Matches found: " << matches.size();

			if (m == prop_returned_model_number) {
				// Draw all found matches.
				Mat img_matches1;
				drawMatches( models_imgs[m], models_keypoints[m], scene_img, scene_keypoints,
					     matches, img_matches1, Scalar::all(-1), Scalar::all(-1),
					     vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
				out_img_all_correspondences.write(img_matches1);
			}//: if


			// Filtering.
			double max_dist = 0;
			double min_dist = 100;

			//-- Quick calculation of max and min distances between keypoints
			for( int i = 0; i < matches.size(); i++ ) {
				double dist = matches[i].distance;
				if( dist < min_dist ) min_dist = dist;
				if( dist > max_dist ) max_dist = dist;
			}//: for

			CLOG(LDEBUG) << "Max dist : " << max_dist;
			CLOG(LDEBUG) << "Min dist : " << min_dist;

			//-- Draw only "good" matches (i.e. whose distance is less than 3*min_dist )
			std::vector< DMatch > good_matches;

			for( int i = 0; i < matches.size(); i++ ) {
				if( matches[i].distance < 3*min_dist )
					good_matches.push_back( matches[i]);
			}//: for

			CLOG(LDEBUG) << "Good matches: " << good_matches.size();

			// Localize the object
			std::vector<Point2f> obj;
			std::vector<Point2f> scene;

			// Get the keypoints from the good matches.
			for( int i = 0; i < good_matches.size(); i++ ) {
			  obj.push_back( models_keypoints [m] [ good_matches[i].queryIdx ].pt );
			  scene.push_back( scene_keypoints [ good_matches[i].trainIdx ].pt );
			}//: for

			// Find homography between corresponding points.
			Mat H = findHomography( obj, scene, CV_RANSAC );

			// Get the corners from the detected "object hypothesis".
			std::vector<Point2f> obj_corners(4);
			obj_corners[0] = cv::Point2f(0,0);
			obj_corners[1] = cv::Point2f( models_imgs[m].cols, 0 );
			obj_corners[2] = cv::Point2f( models_imgs[m].cols, models_imgs[m].rows );
			obj_corners[3] = cv::Point2f( 0, models_imgs[m].rows );
			std::vector<Point2f> hypobj_corners(4);

			// Transform corners with found homography.
			perspectiveTransform( obj_corners, hypobj_corners, H);

			// Verification: check resulting shape of object hypothesis.
			// Compute "center of mass".
			cv::Point2f center = (hypobj_corners[0] + hypobj_corners[1] + hypobj_corners[2] + hypobj_corners[3])*.25;
			std::vector<double> angles(4);
			cv::Point2f tmp ;
			// Compute angles.
			for (int i=0; i<4; i++) {
				tmp = (hypobj_corners[i] - center);
				angles[i] = atan2(tmp.y,tmp.x);
				CLOG(LDEBUG)<< tmp << " angle["<<i<<"] = "<< angles[i];
			}//: for


			// Find smallest element.
			int imin = -1;
			double amin = 1000;
			for (int i=0; i<4; i++)
				if (amin > angles[i]) {
					amin = angles[i];
					imin = i;
				}//: if

			// Reorder table.
			for (int i=0; i<imin; i++) {
				angles.push_back (angles[0]);
				angles.erase(angles.begin());
			}//: for

			for (int i=0; i<4; i++) {
				CLOG(LDEBUG)<< "reordered angle["<<i<<"] = "<< angles[i];
			}//: for

			cv::Scalar colour;
			double score = (double)good_matches.size()/models_keypoints [m].size();
			// Check dependency between corners.
			if ((angles[0] < angles[1]) && (angles[1] < angles[2]) && (angles[2] < angles[3])) {
				// Order is ok.
				colour = Scalar(0, 255, 0);
				CLOG(LINFO)<< "Model ("<<m<<"): keypoints "<< models_keypoints [m].size()<<" corrs = "<< good_matches.size() <<" score "<< score << " VALID";
				// Store the model in a list in proper order.
				storeObjectHypothesis(models_names[m], center, hypobj_corners, score);

			} else {
				// Hypothesis not valid.
				colour = Scalar(0, 0, 255);
				CLOG(LINFO)<< "Model ("<<m<<"): keypoints "<< models_keypoints [m].size()<<" corrs = "<< good_matches.size() <<" score "<< score << " REJECTED";
			}//: else


			if (m == prop_returned_model_number) {
				Mat img_matches2;
				// Draw good matches.
				drawMatches( models_imgs[m], models_keypoints[m], scene_img, scene_keypoints,
					     good_matches, img_matches2, Scalar::all(-1), Scalar::all(-1),
					     vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
				// Draw the object as lines, with center and top left corner indicated.
				line( img_matches2, hypobj_corners[0] + Point2f( models_imgs[m].cols, 0), hypobj_corners[1] + Point2f( models_imgs[m].cols, 0), colour, 4 );
				line( img_matches2, hypobj_corners[1] + Point2f( models_imgs[m].cols, 0), hypobj_corners[2] + Point2f( models_imgs[m].cols, 0), colour, 4 );
				line( img_matches2, hypobj_corners[2] + Point2f( models_imgs[m].cols, 0), hypobj_corners[3] + Point2f( models_imgs[m].cols, 0), colour, 4 );
				line( img_matches2, hypobj_corners[3] + Point2f( models_imgs[m].cols, 0), hypobj_corners[0] + Point2f( models_imgs[m].cols, 0), colour, 4 );
				circle( img_matches2, center + Point2f( models_imgs[m].cols, 0), 2, colour, 4);
				circle( img_matches2, hypobj_corners[0] + Point2f( models_imgs[m].cols, 0), 2, Scalar(255, 0, 0), 4);
				out_img_good_correspondences.write(img_matches2);

			}//: if
		}//: for

		Mat img_object = scene_img.clone();
		if (recognized_names.size() == 0) {
			CLOG(LWARNING)<< "None of the models was properly recognized in the image";
		} else {

			for (int h=0; h<recognized_names.size(); h++) {
				// Draw the final object - as lines, with center and top left corner indicated.
				line( img_object, recognized_corners[h][0], recognized_corners[h][1], Scalar(0, 255, 0), 4 );
				line( img_object, recognized_corners[h][1], recognized_corners[h][2], Scalar(0, 255, 0), 4 );
				line( img_object, recognized_corners[h][2], recognized_corners[h][3], Scalar(0, 255, 0), 4 );
				line( img_object, recognized_corners[h][3], recognized_corners[h][0], Scalar(0, 255, 0), 4 );
				circle( img_object, recognized_centers[h], 2, Scalar(0, 255, 0), 4);
				circle( img_object, recognized_corners[h][0], 2, Scalar(255, 0, 0), 4);
				CLOG(LNOTICE)<< "Hypothesis (): model: "<< recognized_names[h]<< " score: "<< recognized_scores[h];
			}//: for
		}//: else
		// Write image to port.
		out_img_object.write(img_object);

	} catch (...) {
		CLOG(LERROR) << "onNewImage failed";
	}//: catch
}
Example #12
void BestOf2NearestMatcher::match(const ImageFeatures &features1, const ImageFeatures &features2,
                                  MatchesInfo &matches_info)
{
    (*impl_)(features1, features2, matches_info);

    // Check if it makes sense to find homography
    if (matches_info.matches.size() < static_cast<size_t>(num_matches_thresh1_))
        return;

    // Construct point-point correspondences for homography estimation
    Mat src_points(1, static_cast<int>(matches_info.matches.size()), CV_32FC2);
    Mat dst_points(1, static_cast<int>(matches_info.matches.size()), CV_32FC2);
    for (size_t i = 0; i < matches_info.matches.size(); ++i)
    {
        const DMatch& m = matches_info.matches[i];

        Point2f p = features1.keypoints[m.queryIdx].pt;
        p.x -= features1.img_size.width * 0.5f;
        p.y -= features1.img_size.height * 0.5f;
        src_points.at<Point2f>(0, static_cast<int>(i)) = p;

        p = features2.keypoints[m.trainIdx].pt;
        p.x -= features2.img_size.width * 0.5f;
        p.y -= features2.img_size.height * 0.5f;
        dst_points.at<Point2f>(0, static_cast<int>(i)) = p;
    }

    // Find pair-wise motion
    matches_info.H = findHomography(src_points, dst_points, matches_info.inliers_mask, CV_RANSAC);
    if (std::abs(determinant(matches_info.H)) < numeric_limits<double>::epsilon())
        return;

    // Find number of inliers
    matches_info.num_inliers = 0;
    for (size_t i = 0; i < matches_info.inliers_mask.size(); ++i)
        if (matches_info.inliers_mask[i])
            matches_info.num_inliers++;

    // These coeffs are from paper M. Brown and D. Lowe. "Automatic Panoramic Image Stitching
    // using Invariant Features"
    matches_info.confidence = matches_info.num_inliers / (8 + 0.3 * matches_info.matches.size());

    // Set zero confidence to remove matches between too close images, as they don't provide
    // additional information anyway. The threshold was set experimentally.
    matches_info.confidence = matches_info.confidence > 3. ? 0. : matches_info.confidence;

    // Check if we should try to refine motion
    if (matches_info.num_inliers < num_matches_thresh2_)
        return;

    // Construct point-point correspondences for inliers only
    src_points.create(1, matches_info.num_inliers, CV_32FC2);
    dst_points.create(1, matches_info.num_inliers, CV_32FC2);
    int inlier_idx = 0;
    for (size_t i = 0; i < matches_info.matches.size(); ++i)
    {
        if (!matches_info.inliers_mask[i])
            continue;

        const DMatch& m = matches_info.matches[i];

        Point2f p = features1.keypoints[m.queryIdx].pt;
        p.x -= features1.img_size.width * 0.5f;
        p.y -= features1.img_size.height * 0.5f;
        src_points.at<Point2f>(0, inlier_idx) = p;

        p = features2.keypoints[m.trainIdx].pt;
        p.x -= features2.img_size.width * 0.5f;
        p.y -= features2.img_size.height * 0.5f;
        dst_points.at<Point2f>(0, inlier_idx) = p;

        inlier_idx++;
    }

    // Rerun motion estimation on inliers only
    matches_info.H = findHomography(src_points, dst_points, CV_RANSAC);
}
Example #13
//-----------------------------------------------------
void ofxSURFTracker::detect(unsigned char *pix, int inputWidth, int inputHeight) {

    /***
    code adapted from http://docs.opencv.org/doc/tutorials/features2d/feature_homography/feature_homography.html
     ***/

    if( inputWidth != inputImg.getWidth() || inputHeight != inputImg.getHeight()) {
        // this should only happen once
        inputImg.clear();
        inputImg.allocate(inputWidth, inputHeight);
        cout << "ofxSURFTracker : re-allocated the input image."<<endl;
    }

    // create the cvImage from the ofImage
    inputImg.setFromPixels(pix, inputWidth, inputHeight);
    inputImg.setROI( ofRectangle((inputWidth-width)/2,
                                 (inputHeight-height)/2,
                                 width,
                                 height
                                )
                   );

    // take out the piece that we want to use.
    croppedImg.setFromPixels(inputImg.getRoiPixels(), width, height);

    // make it into a trackable grayscale image
    trackImg = croppedImg;

    // do some fancy contrast stuff
    if(bContrast) {
        trackImg.contrastStretch();
    }

    // set up the feature detector
    detector =  SurfFeatureDetector(hessianThreshold,
                                    octaves,
                                    octaveLayers,
                                    bUpright);

    // clear existing keypoints from previous frame
    keypoints_scene.clear();

    // get the Mat to do the feature detection on
    Mat trackMat = cvarrToMat(trackImg.getCvImage());
    detector.detect( trackMat, keypoints_scene);

    // Calculate descriptors (feature vectors)
    extractor.compute( trackMat, keypoints_scene, descriptors_scene );

    // Matching descriptor vectors using FLANN matcher
    vector< DMatch > matches;
    if(!descriptors_object.empty() && !descriptors_scene.empty() ) {
        flannMatcher.match( descriptors_object, descriptors_scene, matches);
    }

    // Quick calculation of max and min distances between keypoints
    double max_dist = 0;
    double min_dist = 100;
    for( int i = 0; i < matches.size(); i++ ) {
        double dist = matches[i].distance;
        if( dist < min_dist ) min_dist = dist;
        if( dist > max_dist ) max_dist = dist;
    }

    // Filter matches upon quality : distance is between 0 and 1, lower is better
    good_matches.clear();
    for( int i = 0; i < matches.size(); i++ ) {
        if(matches[i].distance < 3 * min_dist && matches[i].distance < distanceThreshold) {
            good_matches.push_back( matches[i]);
        }
    }

    // find the homography
    // transform the bounding box for this scene
    vector <Point2f> scene_pts;
    vector <Point2f> object_pts;
    object_transformed.clear();
    if(good_matches.size() > minMatches) {
        for( int i = 0; i < good_matches.size(); i++ )
        {
            //-- Get the keypoints from the good matches
            object_pts.push_back( keypoints_object[ good_matches[i].queryIdx ].pt );
            scene_pts.push_back( keypoints_scene[ good_matches[i].trainIdx ].pt );
        }
        if( scene_pts.size() >5 && object_pts.size() > 5) {
            homography = findHomography( object_pts, scene_pts, CV_RANSAC);
            perspectiveTransform( object, object_transformed, homography);
        }
        // being here means we have found a decent match
        objectLifeTime += 0.05;

    } else {
        // we haven't found a decent match
        objectLifeTime -= 0.05;
    }
    if(objectLifeTime > 1) {
        objectLifeTime = 1;
    } else if( objectLifeTime < 0) {
        objectLifeTime = 0;
    }
}
Example #14
ofMatrix4x4 ofxARToolkitPlus::getHomography(int markerIndex, vector<ofPoint> &src) {
	vector<ofPoint> corners;
	getDetectedMarkerOrderedBorderCorners(markerIndex, corners);
	return findHomography(src, corners);
}
Example #15
// get ROI + edge detection
void FindContour::cellDetection(const Mat &img, vector<Point> &cir_org,
                                Mat &dispImg1, Mat &dispImg2,
                                vector<Point2f> &points1, vector<Point2f> &points2,
                                int &area,
                                int &perimeter,
                                Point2f &ctroid,
                                float &shape,
//                                vector<int> &blebs,
                                int &frameNum){
    frame = &img;
    //rect = boundingRect(Mat(cir));


    Mat frameGray;
    cv::cvtColor(*frame, frameGray, CV_RGB2GRAY);
/*
    QString cellFileName0 = "frame" + QString::number(frameNum) + ".png";
    imwrite(cellFileName0.toStdString(), frameGray);*/

    vector<Point> cir; //***global coordinates of circle***
    for(unsigned int i = 0; i < cir_org.size(); i++){
        cir.push_back(Point(cir_org[i].x / scale, cir_org[i].y / scale));
    }
    //cout << "original circle: " << cir_org << "\n" << " scaled circle: " << cir << endl;

    //enlarge the bounding rect by adding a margin (e) to it
    rect = enlargeRect(boundingRect(Mat(cir)), 5, img.cols, img.rows);

    //global circle mask
    Mat mask_cir_org = Mat::zeros(frame->size(), CV_8UC1);
    fillConvexPoly(mask_cir_org, cir, Scalar(255));

    // flow points
    vector<unsigned int> cell_pts_global;
    vector<Point2f> longOptflow_pt1, longOptflow_pt2;
    Point2f avrg_vec = Point2f(0,0);
    for(unsigned int i = 0; i < points1.size(); i++){
        Point p1 = Point(points1[i].x, points1[i].y);
        Point p2 = Point(points2[i].x, points2[i].y);
        if(mask_cir_org.at<uchar>(p1.y,p1.x) == 255 ){
            cell_pts_global.push_back(i);
            if(dist_square(p1, p2) > 2.0){
                longOptflow_pt1.push_back(Point2f(p1.x, p1.y));
                longOptflow_pt2.push_back(Point2f(p2.x, p2.y));
                avrg_vec.x += (p2.x-p1.x);
                avrg_vec.y += (p2.y-p1.y);
            }
        }
    }

//    if(longOptflow_pt1.size()!= 0){
//        avrg_vec.x = avrg_vec.x / longOptflow_pt1.size();
//        avrg_vec.y = avrg_vec.y / longOptflow_pt1.size();
//    }
    Rect trans_rect = translateRect(rect, avrg_vec);


    // ***
    // if (the homography is a good one) use the homography to update the rectangle
    // otherwise use the same rectangle
    // ***
    if (longOptflow_pt1.size() >= 4){
        Mat H = findHomography(Mat(longOptflow_pt1), Mat(longOptflow_pt2), CV_RANSAC, 2);
        //cout << "H: " << H << endl;

        if(determinant(H) >= 0){
            vector<Point> rect_corners;
            rect_corners.push_back(Point(rect.x, rect.y));
            rect_corners.push_back(Point(rect.x+rect.width, rect.y));
            rect_corners.push_back(Point(rect.x, rect.y+rect.height));
            rect_corners.push_back(Point(rect.x+rect.width, rect.y+rect.height));

            vector<Point> rect_update_corners = pointTransform(rect_corners, H);
            trans_rect = boundingRect(rect_update_corners);
        }
    }


    rectangle(frameGray, trans_rect, Scalar(255), 2);
    imshow("frameGray", frameGray);









    dispImg1 = (*frame)(rect).clone();
    //dispImg2 = Mat(dispImg1.rows, dispImg1.cols, CV_8UC3);

    Mat sub; //*** the rectangle region of ROI (Gray) ***
    cv::cvtColor(dispImg1, sub, CV_RGB2GRAY);
    int width = sub.cols;
    int height = sub.rows;

    vector<Point> circle_ROI; //***local coordinates of circle***
    for (unsigned int i = 0; i < cir.size(); i++){
        Point p = Point(cir[i].x - rect.x, cir[i].y - rect.y);
        circle_ROI.push_back(p);
    }

    Mat adapThreshImg1 = Mat::zeros(height, width, sub.type());
    //image edge detection for the sub region (roi rect)
    adaptiveThreshold(sub, adapThreshImg1, 255.0, ADAPTIVE_THRESH_GAUSSIAN_C,
                          CV_THRESH_BINARY_INV, blockSize, constValue);
    //imshow("adapThreshImg1", adapThreshImg1);

    // dilation and erosion
    Mat dilerod;
    dilErod(adapThreshImg1, dilerod, dilSize);

    //display image 2 -- dilerod of adaptive threshold image
    GaussianBlur(dilerod, dilerod, Size(3, 3), 2, 2 );

    //mask for filtering out the cell of interest
    Mat mask_conv = Mat::zeros(height, width, CV_8UC1);
    fillConvexPoly(mask_conv, circle_ROI, Scalar(255));
    //imshow("mask_before", mask_conv);

    //dilate the mask -> region grows
    Mat mask_conv_dil;
    Mat element = getStructuringElement( MORPH_ELLIPSE, Size( 2*2+2, 2*2+1 ), Point(2,2) );
    dilate(mask_conv, mask_conv_dil, element);
    //imshow("mask_dil", mask_conv_dil);



    /*
    Mat mask_conv_ero;
    erode(mask_conv, mask_conv_ero, element);
    Mat ring_dil, ring_ero;
    bitwise_xor(mask_conv, mask_conv_dil, ring_dil);
    bitwise_xor(mask_conv, mask_conv_ero, ring_ero);
    Mat ring;
    bitwise_or(ring_dil, ring_ero, ring);
    imshow("ring", ring);

    vector<unsigned int> opt_onRing_index;
    // use optflow info set rectangle
    for(unsigned int i = 0; i < points2.size(); i++){
        Point p2 = Point(points2[i].x, points2[i].y);
        Point p1 = Point(points1[i].x, points1[i].y);
        if(ring.at<uchar>(p1.y,p1.x) != 255 &&
                ring.at<uchar>(p2.y,p2.x) != 255)
            continue;
        else{
            opt_onRing_index.push_back(i);
        }
    }*/

    /*
    // draw the optflow on dispImg1
    unsigned int size = opt_inside_cl_index.size();
    for(unsigned int i = 0; i < size; i++ ){
        Point p0( ceil( points1[i].x - rect.x ), ceil( points1[i].y - rect.y ) );
        Point p1( ceil( points2[i].x - rect.x ), ceil( points2[i].y - rect.y) );
        //std::cout << "(" << p0.x << "," << p0.y << ")" << "\n";
        //std::cout << "(" << p1.x << "," << p1.y << ")" << std::endl;

        //draw lines to visualize the flow
        double angle = atan2((double) p0.y - p1.y, (double) p0.x - p1.x);
        double arrowLen = 0.01 * (double) (width);
        line(dispImg1, p0, p1, CV_RGB(255,255,255), 1 );
        Point p;
        p.x = (int) (p1.x + arrowLen * cos(angle + 3.14/4));
        p.y = (int) (p1.y + arrowLen * sin(angle + 3.14/4));
        line(dispImg1, p, p1, CV_RGB(255,255,255), 1 );
        p.x = (int) (p1.x + arrowLen * cos(angle - 3.14/4));
        p.y = (int) (p1.y + arrowLen * sin(angle - 3.14/4));

        line(dispImg1, p, Point(2*p1.x - p0.x, 2*p1.y - p0.y), CV_RGB(255,255,255), 1 );
        //line(dispImg1, p, p1, CV_RGB(255,255,255), 1 );
    }*/

/*
    //stop growing when meeting with canny edges that outside the circle

    Mat canny;
    CannyWithBlur(sub, canny);
    Mat cannyColor;
    cvtColor(canny, cannyColor, CV_GRAY2RGB);
    imwrite("canny.png", canny);
    vector<Point> outsideCircle;
    vector<Point> onRing;
    for(int j = 0; j < height; j++){
        for(int i = 0; i < width; i++){
            if(canny.at<uchar>(j,i) != 0 && mask_conv.at<uchar>(j,i) == 0){
                cannyColor.data[cannyColor.channels()*(cannyColor.cols*j + i)+0] = 81;
                cannyColor.data[cannyColor.channels()*(cannyColor.cols*j + i)+1] = 172;
                cannyColor.data[cannyColor.channels()*(cannyColor.cols*j + i)+2] = 251;
                outsideCircle.push_back(Point(i, j));
                if(ring.at<uchar>(j,i) != 0){
                    cannyColor.data[cannyColor.channels()*(cannyColor.cols*j + i)+0] = 255;
                    cannyColor.data[cannyColor.channels()*(cannyColor.cols*j + i)+1] = 255;
                    cannyColor.data[cannyColor.channels()*(cannyColor.cols*j + i)+2] = 0;
                    onRing.push_back(Point(i,j));
                }
            }
        }
    } */

//    QString cannyFileName = "canny" + QString::number(frameNum) + ".png";
//    imwrite(cannyFileName.toStdString(), cannyColor);



    //bitwise AND on mask and dilerod
    bitwise_and(mask_conv/*_dil*/, dilerod, dispImg2);

    // findcontours
    vector<vector<Point> > contours;
    vector<Vec4i> hierarchy;
    unsigned int largest_contour_index;
    dilErodContours(dispImg2, contours, hierarchy, largest_contour_index, perimeter, dispImg1);

    // find the area of the cell by counting the white area inside the largest contour
    Mat cellArea = Mat::zeros(height, width, CV_8UC1);
    drawContours(cellArea, contours, largest_contour_index, Scalar(255), -1, 8, hierarchy, 0, Point() );
    //imshow("cell", cell);
    area = countNonZero(cellArea);

    //cout << "frame " << frameNum << "\n";
    //cout << contours[largest_contour_index] << endl;


    //change dispImg2 from gray to rgb for displaying
    cvtColor(dispImg2, dispImg2, CV_GRAY2RGB);

    //renew circle points as the convex hull
    vector<Point> convHull;
    convexHull(contours[largest_contour_index], convHull);


    // find the centroid of the contour
    Moments mu = moments(contours[largest_contour_index]);
    ctroid = Point2f(mu.m10/mu.m00 + rect.x, mu.m01/mu.m00 + rect.y);

    // find the shape of the cell by the largest contour and centroid
    shape = findShape(ctroid, contours[largest_contour_index]);

    ////---- draw largest contour start ----
    //draw the largest contour
    Mat borderImg = Mat::zeros(height, width, CV_8UC1);
    drawContours(borderImg, contours, largest_contour_index, Scalar(255), 1, 8, hierarchy, 0, Point());
    //QString cellFileName0 = "border" + QString::number(frameNum) + ".png";
    //imwrite(cellFileName0.toStdString(), borderImg);
    ////---- draw largest contour end ----

    // find the number and the sizes of blebs of the cell
    Mat smooth;
    vector<Point> smoothCurve;
    int WIN = 25;
    vector< vector<Point> > tmp;
    smooth = curveSmooth(borderImg, WIN, contours[largest_contour_index], smoothCurve, convHull/*ctroid*/);
    tmp.push_back(smoothCurve);
    drawContours(dispImg1, tmp, 0, Scalar(255, 0, 0));

    bitwise_not(smooth, smooth);
    Mat blebsImg;
    bitwise_and(smooth, cellArea, blebsImg);
    //imshow("blebs", blebs);
    //QString cellFileName2 = "blebs" + QString::number(frameNum) + ".png";
    //imwrite(cellFileName2.toStdString(), blebs);

//    vector<Point> blebCtrs;
//    recursive_connected_components(blebsImg, blebs, blebCtrs);
//    for(unsigned int i = 0; i < blebCtrs.size(); i++){
//        circle(dispImg1, blebCtrs[i], 2, Scalar(255, 255, 0));
//    }


    cir_org.clear();
    for(unsigned int i = 0; i < convHull.size(); i++)
        cir_org.push_back(Point((convHull[i].x + rect.x)*scale, (convHull[i].y + rect.y)*scale));

}
Example #16
/**
 * @function main
 * @brief Main function
 */
int SURF_main(Mat img_scene, Mat img_object)
{
  if( !img_object.data || !img_scene.data )
  { std::cout<< " --(!) Error reading images " << std::endl; return -1; }
  printf("Coming to SURF\n");
  //-- Step 1: Detect the keypoints using SURF Detector
  int minHessian = 400;

  SurfFeatureDetector detector( minHessian );

  std::vector<KeyPoint> keypoints_object, keypoints_scene;

  detector.detect( img_object, keypoints_object );
  detector.detect( img_scene, keypoints_scene );

  //-- Step 2: Calculate descriptors (feature vectors)
  SurfDescriptorExtractor extractor;

  Mat descriptors_object, descriptors_scene;

  extractor.compute( img_object, keypoints_object, descriptors_object );
  extractor.compute( img_scene, keypoints_scene, descriptors_scene );

  //-- Step 3: Matching descriptor vectors using FLANN matcher
  FlannBasedMatcher matcher;
  std::vector< DMatch > matches;
  matcher.match( descriptors_object, descriptors_scene, matches );

  double max_dist = 0; double min_dist = 100;

  //-- Quick calculation of max and min distances between keypoints
  for( int i = 0; i < descriptors_object.rows; i++ )
  { double dist = matches[i].distance;
    if( dist < min_dist ) min_dist = dist;
    if( dist > max_dist ) max_dist = dist;
  }

  printf("-- Max dist : %f \n", max_dist );
  printf("-- Min dist : %f \n", min_dist );
  
  //-- Draw only "good" matches (i.e. whose distance is less than 3*min_dist )
  std::vector< DMatch > good_matches;

  for( int i = 0; i < descriptors_object.rows; i++ )
  { if( matches[i].distance < 3*min_dist )
    { good_matches.push_back( matches[i]); }
  }  

  Mat img_matches;
  drawMatches( img_object, keypoints_object, img_scene, keypoints_scene, 
               good_matches, img_matches, Scalar::all(-1), Scalar::all(-1), 
               vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS ); 


  //-- Localize the object from img_1 in img_2 
  std::vector<Point2f> obj;
  std::vector<Point2f> scene;

  for( int i = 0; i < good_matches.size(); i++ )
  {
    //-- Get the keypoints from the good matches
    obj.push_back( keypoints_object[ good_matches[i].queryIdx ].pt );
    scene.push_back( keypoints_scene[ good_matches[i].trainIdx ].pt ); 
  }

  Mat H = findHomography( obj, scene, CV_RANSAC );

  //-- Get the corners from the image_1 ( the object to be "detected" )
  std::vector<Point2f> obj_corners(4);
  obj_corners[0] = cvPoint(0,0); obj_corners[1] = cvPoint( img_object.cols, 0 );
  obj_corners[2] = cvPoint( img_object.cols, img_object.rows ); obj_corners[3] = cvPoint( 0, img_object.rows );
  std::vector<Point2f> scene_corners(4);

  perspectiveTransform( obj_corners, scene_corners, H);

   
  //-- Draw lines between the corners (the mapped object in the scene - image_2 )
  line( img_matches, scene_corners[0] + Point2f( img_object.cols, 0), scene_corners[1] + Point2f( img_object.cols, 0), Scalar(0, 255, 0), 4 );
  line( img_matches, scene_corners[1] + Point2f( img_object.cols, 0), scene_corners[2] + Point2f( img_object.cols, 0), Scalar( 0, 255, 0), 4 );
  line( img_matches, scene_corners[2] + Point2f( img_object.cols, 0), scene_corners[3] + Point2f( img_object.cols, 0), Scalar( 0, 255, 0), 4 );
  line( img_matches, scene_corners[3] + Point2f( img_object.cols, 0), scene_corners[0] + Point2f( img_object.cols, 0), Scalar( 0, 255, 0), 4 );

  //-- Show detected matches
  imshow( "Good Matches & Object detection", img_matches );

  waitKey(1);

  return 0;
}
Example #17
void pkmDetector::update()
{
	if (bSetImageSearch && bSetImageTemplate) 
	{
		if (!bInitialized) 
		{
			
			
			// do matching between the template and image search
			// without tracking previous features since none initialized
			filteredMatches.clear();
			switch( matcherFilterType )
			{
				case CROSS_CHECK_FILTER :
					crossCheckMatching( descriptorMatcher, img_template_descriptors, img_search_descriptors, filteredMatches, 1 );
					break;
				default :
					simpleMatching( descriptorMatcher, img_template_descriptors, img_search_descriptors, filteredMatches );
			}
			
			// reindex based on found matches
			vector<int> queryIdxs( filteredMatches.size() ), trainIdxs( filteredMatches.size() );
			for( size_t i = 0; i < filteredMatches.size(); i++ )
			{
				queryIdxs[i] = filteredMatches[i].queryIdx;
				trainIdxs[i] = filteredMatches[i].trainIdx;
			}
			
			// build homography w/ RANSAC
			vector<cv::Point2f> points1; cv::KeyPoint::convert(img_template_keypoints, points1, queryIdxs);
			vector<cv::Point2f> points2; cv::KeyPoint::convert(img_search_keypoints, points2, trainIdxs);
			H12 = findHomography( cv::Mat(points1), cv::Mat(points2), CV_RANSAC, ransacReprojThreshold );
			
			// create a mask of the current inliers based on transform distance
			vector<char> matchesMask( filteredMatches.size(), 0 );
			
			printf("Matched %d features.\n", (int)filteredMatches.size());
			
			// convert previous image points to current image points via homography 
			// although this is a transformation from image to world coordinates
			// it should estimate the current image points
			cv::Mat points1t; perspectiveTransform(cv::Mat(points1), points1t, H12);
			for( size_t i1 = 0; i1 < points1.size(); i1++ )
			{
				if( norm(points2[i1] - points1t.at<cv::Point2f>((int)i1,0)) < 4 ) // inlier
				{
					matchesMask[i1] = 1;
					img_search_points_inliers[1].push_back(points2[i1]);
				}
				else {
					img_search_points_outliers[1].push_back(points2[i1]);
				}
			}
			
			
			// update bounding box
			cv::Mat bb;
			perspectiveTransform(cv::Mat(img_template_boundingbox), bb, H12);
			for( int i = 0; i < 4; i++ )
			{
				dst_corners[i] = bb.at<cv::Point2f>(i,0);
				//img_template_boundingbox[i] = bb.at<cv::Point2f>(i,0);
			}
			
			/*
			// draw inliers
			drawMatches( img_search, img_template_keypoints, 
						 img_template, img_search_keypoints, 
						filteredMatches, drawImg, 
						CV_RGB(0, 255, 0), CV_RGB(0, 0, 255), matchesMask
	#if DRAW_RICH_KEYPOINTS_MODE
						, DrawMatchesFlags::DRAW_RICH_KEYPOINTS
	#endif
						);
			
	#if DRAW_OUTLIERS_MODE
			// draw outliers
			for( size_t i1 = 0; i1 < matchesMask.size(); i1++ )
				matchesMask[i1] = !matchesMask[i1];
			drawMatches( img_template, img_template_keypoints, 
						img_search, img_search_keypoints, filteredMatches, drawImg, CV_RGB(0, 0, 255), CV_RGB(255, 0, 0), matchesMask,
						DrawMatchesFlags::DRAW_OVER_OUTIMG | DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
			
	#endif
			
			imshow( winName, drawImg );
			*/
			
		}
		else {
			// track features from the previous frame into the current frame and see which
			// features are inliers, and which are outliers.  among the features that
			// are outliers, see if any were marked as inliers in the previous frame and
			// re-mark them as outliers
			
			// mark descriptors on new features marked as outliers once the number of
			// inliers drops to a certain threshold, and perform matching on the template.
			
			// patch-based tracking seems powerful as well.  creating a manifold or detecting
			// planes in the set of inliers and tracking them as a whole may be more powerful.
			//<#statements#>
		}
		
		//std::swap(current_template_points, previous_template_points);
		std::swap(img_search_points_inliers[1], img_search_points_inliers[0]);
		std::swap(img_search_points_outliers[1], img_search_points_outliers[0]);
		swap(prev_img_search, img_search);
	} // end bSetImageSearch && bSetImageTemplate
	
}
Example #18
void FieldLineDetector::findFieldMatch(std::vector<FieldCorner*> fieldCrossings,
		cv::Mat& H)
{
	if (fieldCrossings.size() < 4)
	{
		return;
	}
	vector<FieldCorner*> bestcorners;
	double smallestError = INFINITY;
	for (int i = 0; i < fieldModel->corners.size(); i++)
	{
		clock_t start = clock();
		vector<FieldCorner*> corners;
		vector<FieldCorner*> newBestCorners;
		corners.push_back(this->fieldModel->corners.at(i));
		double error = findFieldMatchRec(fieldCrossings, corners,
				newBestCorners);
		double time = (double) (clock() - start) / CLOCKS_PER_SEC;
		cout << i << ") error=" << error << " time=" << time << "s" << endl;
		if (error < 100)
		{
			bestcorners = newBestCorners;
			break;
		}
		if (error < smallestError)
		{
			smallestError = error;
			bestcorners = newBestCorners;
		}
	}
	if (bestcorners.size() > 3)
	{
		vector<Point2f> cornersPoints;
		vector<Point2f> fieldPoints;
		for (int i = 0; i < bestcorners.size(); i++)
		{
			cornersPoints.push_back(bestcorners.at(i)->point);
			fieldPoints.push_back(fieldCrossings.at(i)->point);
		}
		H = findHomography(cornersPoints, fieldPoints, RANSAC);

		if (botPosField.size() > 3)
		{
			// compare with robots on field and mirrored H
			float error = calcMatchErrorBots(botPosField, H);

			H.at<double>(0,0) *= -1;
			H.at<double>(0,1) *= -1;
			H.at<double>(1,0) *= -1;
			H.at<double>(1,1) *= -1;
			H.at<double>(2,0) *= -1;
			H.at<double>(2,1) *= -1;

			float error2 = calcMatchErrorBots(botPosField, H);

			if(error2 > error)
			{
				H.at<double>(0,0) *= -1;
				H.at<double>(0,1) *= -1;
				H.at<double>(1,0) *= -1;
				H.at<double>(1,1) *= -1;
				H.at<double>(2,0) *= -1;
				H.at<double>(2,1) *= -1;
			}
		}
	}
}
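The six sign flips above negate the first two columns of H, which is the same as right-multiplying by diag(-1, -1, 1), i.e. mirroring both source axes. A compact equivalent (my restatement, not the original code):

#include <opencv2/core.hpp>

// Negates the first two columns of a 3x3 CV_64F homography in place,
// equivalent to H = H * diag(-1, -1, 1).
static void flipSourceAxes(cv::Mat& H) {
    for (int r = 0; r < 3; ++r) {
        H.at<double>(r, 0) *= -1.0;
        H.at<double>(r, 1) *= -1.0;
    }
}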
Example #19
int main(int argc, const char * argv[])
{
    
    ft_data ftdata;
    if (argc<3) {
        cout<<argv[0]<<" user_profile_dir camera_profile.yaml";
        return 0;
    }

    fs::path baseDirPath(argv[1]);
    ASM_Gaze_Tracker poseTracker(baseDirPath / "trackermodel.yaml", fs::path(argv[2]));
    
    
    vector<Point3f> faceCrdRefVecs;
    faceCrdRefVecs.push_back(Point3f(0,0,0));
    faceCrdRefVecs.push_back(Point3f(50,0,0));
    faceCrdRefVecs.push_back(Point3f(0,50,0));
    faceCrdRefVecs.push_back(Point3f(0,0,50));
    
    VideoCapture cam;
    cam.open(0);
    if(!cam.isOpened()){
        return 0;
    }
    Mat rvec, tvec;
    Mat im;
    captureImage(cam,im);
    

    while(true){
        bool success = captureImage(cam, im, true);
        if (success == false) {
            break;
        }
        
        bool succeeded = poseTracker.featureTracking(im);
        if (succeeded)
            poseTracker.estimateFacePose();

        
        
        
        Mat frontim,flipback;
        flip(im,flipback,1);
        
        vector<Point2f> reprjCrdRefPts;
        vector<Point2f> reprjFeaturePts;
        poseTracker.projectPoints(poseTracker.facialPointsIn3D, reprjFeaturePts);
        poseTracker.projectPoints(faceCrdRefVecs, reprjCrdRefPts);
        line(im, reprjCrdRefPts[0], reprjCrdRefPts[1], Scalar(255,0,0),2);
        line(im, reprjCrdRefPts[0], reprjCrdRefPts[2], Scalar(0,255,0),2);
        line(im, reprjCrdRefPts[0], reprjCrdRefPts[3], Scalar(0,0,255),2);
        drawPoints(im, reprjFeaturePts);
        drawStringAtTopLeftCorner(im, "distance to camera:" + boost::lexical_cast<string>(poseTracker.distanceToCamera()));
        imshow("head pose",im);
        
        vector<Point2f> transformedPoints = poseTracker.tracker.points;
        fliplr(transformedPoints, im.size());
        Mat part;
        
        Mat hM = findHomography(poseTracker.facialPointsIn2D ,transformedPoints, 0);
        warpPerspective(flipback(boundingRect(transformedPoints)), frontim, hM, im.size());
        imshow("front", frontim);

        
        int c = waitKey(1)%256;
        if(c == 'q')break;
        
    }
    
}
Example #20
void CModelQuery2D::computePoseEstimation()
{
    std::vector<cv::DMatch> & validCorr = matchingResults_;

    if ( best_k_sorted_2D_ > 0 )
    {
        std::sort (validCorr.begin (), validCorr.end (),
                   sort2DCorrespondencesByDistance ());

        validCorr.resize(best_k_sorted_2D_);
    }

    vector<cv::KeyPoint>	modelKeyPoints = modelFeatures_.getKeypoints();
    vector<cv::KeyPoint>	queryKeyPoints = queryFeatures_.getKeypoints();

    for ( unsigned i = 0; i < validCorr.size(); i++ )
    {
        modelPoints_.push_back(modelKeyPoints[ validCorr[i].queryIdx].pt);
        queryPoints_.push_back(queryKeyPoints[ validCorr[i].trainIdx].pt);
    }

    vector<unsigned char> inliersMask(modelPoints_.size());

    H_ = findHomography(modelPoints_ , queryPoints_,  CV_RANSAC, inputParams_.ransac_inlier_threshold_2D, inliersMask );
    
    vector<cv::DMatch> inliers;
    unsigned inliersCount = 0;

    for ( unsigned i = 0; i < inliersMask.size(); i++ )
    {
        if ( inliersMask[i] )
        {
            inliersCount++;
            inliers.push_back(validCorr[i]);
        }
    }

    // fill our performance data structure

    perfData  & pd = pGlobalPerf_->pd[descriptorType_];

    pd.descID = descriptorType_;
    pd.actualRotation = inputParams_.rot_deg;
    pd.inlierCount = inliersCount; 
    pd.inlierRate  = (float) inliersCount / validCorr.size(); 

    if ( inputParams_.live_sensor )	// assumes horizontal rotation
    {
        pd.computedRotation = pcl::rad2deg ((atan(H_.at<double>(6)/H_.at<double>(0))));
        // if live_sensor, perfData_.actualRotation is ignored.
    }
    else  // simulated rotation
    {
        pd.computedRotation = pcl::rad2deg ((atan(H_.at<double>(3)/H_.at<double>(0))));
    }

    pd.rotEstError = abs(abs(pd.computedRotation) - abs(pd.actualRotation));

    pd.averageDistance = -1.0;	//## not in use

    if ( inputParams_.debug_level > OUTPUT_TRANSFORMATIONS )
    {
        // print out our H matrix
        pcl::console::print_info ("----------------------------------------------------------------------------\n");
        pcl::console::print_info ("2D DescID: %d,  Computed Rotation: %8.3f, actual Rotation: %8.3f\n",
                                   pd.descID,pd.computedRotation, pd.actualRotation);
        pcl::console::print_info ("inlierCount: %d, inlierRate: %8.3f\n", pd.inlierCount, pd.inlierRate);
        pcl::console::print_info ("\n");
        pcl::console::print_info ("    | %8.3f %8.3f %8.3f | \n", H_.at<double>(0), H_.at<double>(1), H_.at<double>(2) );
        pcl::console::print_info ("R = | %8.3f %8.3f %8.3f | \n", H_.at<double>(3), H_.at<double>(4), H_.at<double>(5) );
        pcl::console::print_info ("    | %8.3f %8.3f %8.3f | \n", H_.at<double>(6), H_.at<double>(7), H_.at<double>(8) );
        pcl::console::print_info ("\n");
    }
}
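sort2DCorrespondencesByDistance is not shown in this listing; presumably a comparator functor over cv::DMatch::distance, along the lines of (assumption):

#include <opencv2/core.hpp>

struct sort2DCorrespondencesByDistance {
    bool operator()(const cv::DMatch& a, const cv::DMatch& b) const {
        return a.distance < b.distance; // ascending: best matches first
    }
};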
Example #21
vector<unsigned int> objRecognition::rankedQueryDB(vector<KeyPoint> &kpts,
												   Mat &features,
													std::vector< vector<float> > &distances,
													bool do_bin_NNDR,
													int NND_bin_thr,
													float NNDR_ratio,
													bool do_ransac,
													int ransac_min_matches,
													float ransac_threshold)
{

	distances.clear();

	std::vector<float> aux_dist;
	aux_dist.clear();

	std::vector<std::vector<cv::DMatch> >::iterator it_all_matches;
	std::vector<std::vector<cv::DMatch> > all_matches;
	std::vector<DMatch>::iterator it_fmatches;
	std::vector<DMatch> filt_matches1;
	std::vector<cv::Point2f> q_pts, db_pts;
	std::vector<uchar>::iterator it_mask;
	std::vector<uchar> mask;
	unsigned int inliers, outliers;
	std::vector<unsigned int> score;
	Mat q_img, H;
	int nDeletions;

	q_keyp = kpts;
	q_descr = features;


	// allocate memory
	all_matches.reserve(q_keyp.size());
	filt_matches1.reserve(q_keyp.size());
	q_pts.reserve(q_keyp.size());
	db_pts.reserve(q_keyp.size());
	score.reserve(m_nDBImages);

	// look for all the images ...
	for ( unsigned int nImg = 0; nImg < m_nDBImages; nImg++)
	{

		// clean data
		all_matches.clear();
		filt_matches1.clear();
		q_pts.clear();
		db_pts.clear();

		m_pMatcher->knnMatch(q_descr, m_DbDescr[nImg], all_matches, 2);


		// find correspondences
		for ( it_all_matches = all_matches.begin() ; it_all_matches < all_matches.end(); it_all_matches++ )
		{
			if ( it_all_matches->size() != 2 ) continue;

			if ( feature_type == 1 && do_bin_NNDR==0 ){
				if ( it_all_matches->at(0).distance <= NND_bin_thr )
				{
					filt_matches1.push_back(it_all_matches->at(0));
				}
			}
			else{	// Lowe's nearest-neighbor distance ratio (NNDR) test
				if ( it_all_matches->at(0).distance <= NNDR_ratio * it_all_matches->at(1).distance )
				{
					filt_matches1.push_back(it_all_matches->at(0));
				}
			}
		}


		if( do_ransac ){
			// get the 'good' points from the filtered matches
			for(it_fmatches = filt_matches1.begin(); it_fmatches < filt_matches1.end(); it_fmatches++ )
			{
				q_pts.push_back(q_keyp.at(it_fmatches->queryIdx).pt);
				db_pts.push_back(m_DbKeyp[nImg].at(it_fmatches->trainIdx).pt);
			}
			if ( filt_matches1.size() >= (size_t)ransac_min_matches )
			{
				// find homography with RANSAC
				H = findHomography(q_pts, db_pts, RANSAC, ransac_threshold, mask);
				// count inliers and outliers in the RANSAC mask
				for(inliers = 0, outliers = 0, it_mask = mask.begin(); it_mask < mask.end(); it_mask++)
				{
					*it_mask ? ++inliers : ++outliers;
				}
			}
			else
			{
				inliers = 0;
			}
		}
		else{
			inliers = filt_matches1.size();
		}


		score.push_back(inliers);

		aux_dist.clear();
		// record one distance per counted inlier (note: these are simply the first
		// 'inliers' NNDR-filtered matches, not specifically the RANSAC inliers)
		for(unsigned int j=0;j<inliers;j++){
			aux_dist.push_back(filt_matches1[j].distance);
		}

		distances.push_back(aux_dist);

	}



	return score;
}
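// Usage sketch for rankedQueryDB (assumptions: 'recognizer' is an initialized
// objRecognition instance whose database was built with the same detector and
// descriptor that produced 'kpts' and 'features'). The returned vector holds
// one inlier score per database image, so picking the best match is a simple
// max_element over the scores:
size_t bestDatabaseMatch(objRecognition &recognizer, vector<KeyPoint> &kpts, Mat &features)
{
	std::vector< std::vector<float> > distances;
	std::vector<unsigned int> score = recognizer.rankedQueryDB(kpts, features, distances,
			false,   // do_bin_NNDR
			80,      // NND_bin_thr (ignored when do_bin_NNDR is false)
			0.8f,    // NNDR_ratio
			true,    // do_ransac
			8,       // ransac_min_matches
			3.0f);   // ransac_threshold in pixels

	// index of the database image with the highest inlier count
	return std::distance(score.begin(), std::max_element(score.begin(), score.end()));
}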
/*
 * @function surf_feature_detect_bruteforce_RANSAC_Homography  SURF feature extraction and
 *           matching, RANSAC outlier removal, and object localization
 * @return null
 * @method SURF feature detector
 * @method SURF descriptor
 * @method findFundamentalMat  RANSAC outlier rejection
 * @method findHomography      perspective transform estimation
 */
void surf_feature_detect_bruteforce_RANSAC_Homography(Mat SourceImg, Mat SceneImg, Mat imageMatches, char* string)
{
	vector<KeyPoint> keyPoints1, keyPoints2;
	SurfFeatureDetector detector(400);
	detector.detect(SourceImg, keyPoints1); //detect keypoints in the source image
	detector.detect(SceneImg, keyPoints2); //detect keypoints in the scene image

	SurfDescriptorExtractor surfDesc;
	Mat SourceImgDescriptor, SceneImgDescriptor;
	surfDesc.compute(SourceImg, keyPoints1, SourceImgDescriptor); //describe the SURF keypoints of the source image
	surfDesc.compute(SceneImg, keyPoints2, SceneImgDescriptor); //describe the SURF keypoints of the scene image

	//match the feature points of the two images, keeping only the 30 best matches
	BruteForceMatcher< L2<float> > matcher;
	vector<DMatch> matches;
	matcher.match(SourceImgDescriptor, SceneImgDescriptor, matches);
	if (matches.size() > 30)
	{
		std::nth_element(matches.begin(), matches.begin() + 29, matches.end());
		matches.erase(matches.begin() + 30, matches.end());
	}

	//FLANN-based matching (alternative)
	//vector<DMatch> matches;
	//DescriptorMatcher *pMatcher = new FlannBasedMatcher;
	//pMatcher->match(SourceImgDescriptor, SceneImgDescriptor, matches);
	//delete pMatcher;

	//keyPoints1: keypoints extracted from image 1
	//keyPoints2: keypoints extracted from image 2
	//matches:    the matches between the two keypoint sets
	int ptCount = (int)matches.size();
	Mat p1(ptCount, 2, CV_32F);
	Mat p2(ptCount, 2, CV_32F);
	Point2f pt;
	for(int i = 0; i < ptCount; i++)
	{
		pt = keyPoints1[matches[i].queryIdx].pt;
		p1.at<float>(i, 0) = pt.x;
		p1.at<float>(i, 1) = pt.y;

		pt = keyPoints2[matches[i].trainIdx].pt;
		p2.at<float>(i, 0) = pt.x;
		p2.at<float>(i, 1) = pt.y;
	}
	Mat m_Fundamental;
	vector<uchar> m_RANSACStatus;
	m_Fundamental = findFundamentalMat(p1, p2, m_RANSACStatus, FM_RANSAC);
	int OutlierCount = 0;
	for(int i = 0; i < ptCount; i++)
	{
		if(m_RANSACStatus[i] == 0)
		{
			OutlierCount++;
		}
	}

	// collect the inliers
	vector<Point2f> m_LeftInlier;
	vector<Point2f> m_RightInlier;
	vector<DMatch> m_InlierMatches;

	// the three containers above hold the inliers and their match relations
	int InlierCount = ptCount - OutlierCount;
	m_InlierMatches.resize(InlierCount);
	m_LeftInlier.resize(InlierCount);
	m_RightInlier.resize(InlierCount);
	InlierCount = 0;
	for (int i=0; i<ptCount; i++)
	{
		if (m_RANSACStatus[i] != 0)
		{
			m_LeftInlier[InlierCount].x = p1.at<float>(i, 0);
			m_LeftInlier[InlierCount].y = p1.at<float>(i, 1);
			m_RightInlier[InlierCount].x = p2.at<float>(i, 0);
			m_RightInlier[InlierCount].y = p2.at<float>(i, 1);
			m_InlierMatches[InlierCount].queryIdx = InlierCount;
			m_InlierMatches[InlierCount].trainIdx = InlierCount;
			InlierCount++;
		}
	}

	// convert the inliers to the format drawMatches expects
	vector<KeyPoint> key1(InlierCount);
	vector<KeyPoint> key2(InlierCount);
	KeyPoint::convert(m_LeftInlier, key1);
	KeyPoint::convert(m_RightInlier, key2);

	//display the inlier matches that survive the fundamental-matrix check
	drawMatches(SourceImg, key1, SceneImg, key2, m_InlierMatches, imageMatches);
	//drawKeypoints(SourceImg, key1, SceneImg, Scalar(255, 0, 0), DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
	
	vector<Point2f> obj;
	vector<Point2f> scene;
	for(unsigned int i = 0; i < m_InlierMatches.size(); i++)
	{
		obj.push_back(key1[m_InlierMatches[i].queryIdx].pt); //query image, i.e. the object to detect
		scene.push_back(key2[m_InlierMatches[i].trainIdx].pt); //train image, i.e. the scene
	}
	//estimate the transformation matrix
	//like getPerspectiveTransform, this builds a mapping between corresponding points
	//in the two images, but findHomography estimates the perspective transform from
	//all correspondences (with RANSAC) rather than exactly four points
	Mat H = findHomography(obj, scene, CV_RANSAC);
	vector<Point2f> obj_corners(4);
	obj_corners[0] = cvPoint(0, 0);
	obj_corners[1] = cvPoint(SourceImg.cols, 0);
	obj_corners[2] = cvPoint(SourceImg.cols, SourceImg.rows);
	obj_corners[3] = cvPoint(0, SourceImg.rows);
	vector<Point2f> scene_corners(4);
	//perspective-project the object corners into the scene
	//using the estimated transformation matrix
	perspectiveTransform(obj_corners, scene_corners, H);

	line(imageMatches, scene_corners[0] + Point2f(SourceImg.cols, 0), scene_corners[1] + Point2f(SourceImg.cols, 0), Scalar(0, 0, 255), 4);
	line(imageMatches, scene_corners[1] + Point2f(SourceImg.cols, 0), scene_corners[2] + Point2f(SourceImg.cols, 0), Scalar(0, 0, 255), 4);
	line(imageMatches, scene_corners[2] + Point2f(SourceImg.cols, 0), scene_corners[3] + Point2f(SourceImg.cols, 0), Scalar(0, 0, 255), 4);
	line(imageMatches, scene_corners[3] + Point2f(SourceImg.cols, 0), scene_corners[0] + Point2f(SourceImg.cols, 0), Scalar(0, 0, 255), 4);
	imshow(string, imageMatches);

	imwrite("feature_detect.jpg", imageMatches);
}
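// A sketch of an alternative to the fixed "keep the best 30" truncation above
// (assumptions: float descriptors such as SURF, OpenCV 2.x API as used in this
// file). Lowe's ratio test keeps a match only when the best neighbor is clearly
// better than the second best, so the number of survivors adapts to the scene
// instead of being a hard-coded count:
vector<DMatch> ratioTestMatches(const Mat &queryDescr, const Mat &trainDescr, float ratio = 0.7f)
{
	BruteForceMatcher< L2<float> > matcher;
	vector< vector<DMatch> > knnMatches;
	matcher.knnMatch(queryDescr, trainDescr, knnMatches, 2);

	vector<DMatch> filtered;
	for (size_t i = 0; i < knnMatches.size(); i++)
	{
		// keep the best neighbor only if it is clearly better than the runner-up
		if (knnMatches[i].size() == 2 &&
		    knnMatches[i][0].distance < ratio * knnMatches[i][1].distance)
			filtered.push_back(knnMatches[i][0]);
	}
	return filtered;
}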
int main(int argc, char * argv[])
{
    system("bash dataprep.sh");

    #pragma omp parallel for
    for(int n=1; n<argc; n++)
    {
        Mat srcImg=imread(argv[n],CV_LOAD_IMAGE_GRAYSCALE);
        Mat img=imread(argv[n],CV_LOAD_IMAGE_COLOR);
        string str=argv[n];
        cout<<"\n\n processing the image: "<<str<<endl;
        if (!srcImg.data)
        {
            cout<<"failed to load the image!\n";
            // return 0;
            continue;
        }

        for(int i=0; i< srcImg.rows; i++)
        {
            for(int j=0; j<5; j++)
            {
                srcImg.at<uchar>(i,j)=uchar(0);
                srcImg.at<uchar>(i,srcImg.cols-j-1)=uchar(0);
            }
        }

        for(int i=0; i<srcImg.cols; i++)
        {
            for(int j=0; j<5; j++)
            {
                srcImg.at<uchar>(j,i)=uchar(0);
                srcImg.at<uchar>(srcImg.rows-j-1,i)=uchar(0);
            }
        }

        //detect lsd lines in the input image;
        //note: width and height are intentionally swapped here (the image is
        //handed to lsd() transposed), which is undone when the endpoints are
        //read back below with x and y exchanged
        int cols=srcImg.rows;
        int rows=srcImg.cols;
        double*  lsd_srcImg=new double[cols*rows];
        for (int i=0; i<cols; i++)
        {
            for (int j=0; j<rows; j++)
            {
                lsd_srcImg[i+j*cols]=static_cast<double>(srcImg.at<uchar>(i,j));
            }
        }
        double* lsd_dstImg;
        int nLines=0;   // number of detected segments (a plain 'n' would shadow the OpenMP loop counter)
        lsd_dstImg=lsd(&nLines,lsd_srcImg,cols,rows);

        cout<<"finished the lsd detection!\n";
        vector<LSDline> lsdLine;
        for (int i=0; i<nLines; i++)
        {
            LSDline lsdLine_tmp;
            lsdLine_tmp.lineBegin.y=lsd_dstImg[i*7+0];
            lsdLine_tmp.lineBegin.x=lsd_dstImg[i*7+1];
            lsdLine_tmp.lineEnd.y=lsd_dstImg[i*7+2];
            lsdLine_tmp.lineEnd.x=lsd_dstImg[i*7+3];
            lsdLine_tmp.width=lsd_dstImg[i*7+4];
            lsdLine_tmp.p=lsd_dstImg[i*7+5];
            lsdLine_tmp.log_nfa=lsd_dstImg[i*7+6];
            lsdLine_tmp.tagBegin=1;
            lsdLine_tmp.tagEnd=1;
            cout<<lsdLine_tmp.lineBegin.x<<" "<<lsdLine_tmp.lineBegin.y<<" "<<lsdLine_tmp.lineEnd.x<<"  "<<lsdLine_tmp.lineEnd.y<<endl;
            float distThreshold=12;
            if(sqrt((lsdLine_tmp.lineBegin.x-lsdLine_tmp.lineEnd.x)*(lsdLine_tmp.lineBegin.x-lsdLine_tmp.lineEnd.x)+
                    (lsdLine_tmp.lineBegin.y-lsdLine_tmp.lineEnd.y)*(lsdLine_tmp.lineBegin.y-lsdLine_tmp.lineEnd.y))>distThreshold)
            {
                lsdLine.push_back(lsdLine_tmp);
            }
        }
        free(lsd_dstImg);   // the lsd() output buffer is malloc-allocated by the library

        cout<<"the detected lsd lines' number is: "<<lsdLine.size()<<endl;
        //define the img1 to display the detected LSD lines and junctions;
        Mat img1(img.size(),CV_8UC3,Scalar::all(0));
        delete[] lsd_srcImg;


        displayLSDline(lsdLine,img1);
        //imwrite("img1.bmp",img1);

        vector<Ljunct> Jlist;
        vector<LsdJunction> lsdJunction;



        if(LSD2Junct(lsdLine,Jlist,lsdJunction,search_distance,img))
        {
            cout<<"transform successfully!\n";
        }
        else
        {
            cout<<"cannot form L-junctions from LSD lines!\n";
            //for post-processing we still need to write out the detection result;
            char c='_';
            int name_end=str.find(c,0);
            string ori_name_tmp1=str.substr(7,name_end-7);
            // char* ch2=".bmp";
            // int location=str.find(ch2,0);
            // string ori_name_tmp1 = str.substr(7,location-7);
            string dst_tmp = "./DetectResultOri/"+ori_name_tmp1;
            string ori_name_tmp="./OrigImg/"+ori_name_tmp1;

            // Mat oriImg_tmp = imread(ori_name_tmp.c_str(),CV_LOAD_IMAGE_COLOR);
            // imwrite(dst_tmp,oriImg_tmp);
            string filestring_tmp="./dstFile/"+str.substr(srcImgDir.size(),str.size()-4)+".jpg.txt";
            ofstream file_out(filestring_tmp.c_str());
            if(!file_out.is_open())
            {
                cout<<"cannot open the txt file!\n";
            }
            string imageName=str.substr(srcImgDir.size(),str.size()-4)+".jpg";
            file_out<<imageName<<"\t"<<img.cols<<"\t"<<img.rows<<endl;

            continue;
        }
        //vector<string> code_string1;
        vector<Ljunct> Jlist_coding;
        vector<codeStringBoundingBox> code_string;
        code_string=encodingFromLsdJunction(lsdJunction, Jlist_coding,srcImg);
        classifyRoadMarking(code_string,srcImg);

        string str_tmp=str.substr(srcImgDir.size(),str.size());
        cout<<"!!!!!the Jlist_coding size is: "<<Jlist_coding.size()<<endl<<endl;
        
        displayLjunct(Jlist_coding,img1,str_tmp);

        DrawBoundingBox(code_string,img,str_tmp);

        //drawing the bounding box in original image;
        const char* ch=".bmp";
        int location=str.find(ch,0);
        cout<<"the find .bmp in "<<str<<" is in "<<location<<endl;
        string ori_name=str.substr(7,location-7);

        cout<<ori_name<<endl;
        string ori_img="./OrigImg/"+ori_name+".JPG";

        Mat oriImg=imread(ori_img.c_str(),CV_LOAD_IMAGE_COLOR);
        if(!oriImg.data)
        {
            cout<<"cannot load the original image!\n";
            //return 0;
            char pause_ch;
            cin.get(pause_ch);  // pause so the error message can be read
            continue;
        }
         
        /*
        Point2f imgP1=Point2f(219,668);
        Point2f imgP2=Point2f(452,469);
        Point2f imgP3=Point2f(622,472);
        Point2f imgP4=Point2f(882,681);
        Point2f imgP5=Point2f(388,520);
        Point2f imgP6=Point2f(688,523);
        Point2f imgP7=Point2f(454,538);
        Point2f imgP8=Point2f(645,539);
        Point2f imgP9=Point2f(508,486);
        Point2f imgP10=Point2f(573,509);

        Point2f imgP[10]= {imgP1,imgP2,imgP3,imgP4,imgP5,imgP6,imgP7,imgP8,imgP9,imgP10};
        Point2f objP1=Point2f(250,900);
        Point2f objP2=Point2f(250,100);
        Point2f objP3=Point2f(800,100);
        Point2f objP4=Point2f(800,900);
        Point2f objP5=Point2f(250,550);
        Point2f objP6=Point2f(800,550);
        Point2f objP7=Point2f(400,625);
        Point2f objP8=Point2f(650,625);
        Point2f objP9=Point2f(450,300);
        Point2f objP10=Point2f(600,475);



        Point2f objP[10]= {objP1,objP2,objP3,objP4,objP5,objP6,objP7,objP8,objP9,objP10};
        */
        vector<Point2f> imgP;
        imgP.push_back(Point2f(300,450));
        imgP.push_back(Point2f(700,450));
        imgP.push_back(Point2f(465,450));
        imgP.push_back(Point2f(535,450));
        imgP.push_back(Point2f(260,820));
        imgP.push_back(Point2f(740,820));

        vector<Point2f> objP;
        objP.push_back(Point2f(0,0));
        objP.push_back(Point2f(1000,0));
        objP.push_back(Point2f(400,0));
        objP.push_back(Point2f(600,0));
        objP.push_back(Point2f(400,1000));
        objP.push_back(Point2f(600,1000));

        //Mat H=getPerspectiveTransform(objP,imgP);
        Mat H=findHomography(objP,imgP,CV_RANSAC);
        DrawBoundingBox_Ori(code_string,oriImg,ori_name,H,str_tmp);
    }
    return 0;
}
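// A minimal sketch of how the ground-plane homography computed above can be
// reused to render a bird's-eye ("inverse perspective") view of the road.
// Assumptions: H maps metric ground coordinates (objP) to image points (imgP),
// and the 1000x1000 output size matches the objP coordinate range used above.
Mat birdsEyeView(const Mat &frame, const Mat &H)
{
    Mat topDown;
    // H maps ground -> image, so warp with WARP_INVERSE_MAP to resample the
    // image back into ground coordinates.
    warpPerspective(frame, topDown, H, Size(1000, 1000),
                    INTER_LINEAR | WARP_INVERSE_MAP);
    return topDown;
}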
Exemplo n.º 24
0
	void computeHomography(vector<Point2f> &pointSet1, vector<Point2f> &pointSet2, Mat &homography){
		homography = findHomography(Mat(pointSet1), Mat(pointSet2), CV_RANSAC, 10);
	}
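	// Usage sketch (assumptions: the point sets are corresponding pairs in matching
	// order and computeHomography is callable in scope; the 10-pixel RANSAC
	// reprojection threshold above is fairly permissive, so coarse outliers can
	// survive). Four hand-picked correspondences are enough for a demonstration:
	void demoComputeHomography(){
		vector<Point2f> src, dst;
		src.push_back(Point2f(0, 0));     dst.push_back(Point2f(12, 8));
		src.push_back(Point2f(640, 0));   dst.push_back(Point2f(630, 14));
		src.push_back(Point2f(640, 480)); dst.push_back(Point2f(650, 470));
		src.push_back(Point2f(0, 480));   dst.push_back(Point2f(5, 475));

		Mat homography;
		computeHomography(src, dst, homography);

		vector<Point2f> projected;
		perspectiveTransform(src, projected, homography);   // map the source points into the destination frame
	}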
Exemplo n.º 25
0
  void imageCb(const sensor_msgs::ImageConstPtr& msg)
  {
    //get image cv::Pointer
    cv_bridge::CvImagePtr cv_ptr;

    //acquire image frame
    try
    {
      cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::BGR8);
    }
    catch (cv_bridge::Exception& e)
    {
      ROS_ERROR("cv_bridge exception: %s", e.what());
      return;
    }

    const std::string filename =  
    "/home/cam/Documents/catkin_ws/src/object_detection/positive_images/wrench.png";

    //read in calibration image
    cv::Mat object = cv::imread(filename, 
      CV_LOAD_IMAGE_GRAYSCALE);

    cv::namedWindow("Good Matches", CV_WINDOW_AUTOSIZE);
    //SURF Detector, and descriptor parameters
    int minHess=2000;
    std::vector<cv::KeyPoint> kpObject, kpImage;
    cv::Mat desObject, desImage;

    //Display keypoints on training image
    cv::Mat interestPointObject=object;

    //SURF Detector, and descriptor parameters, match object initialization
    cv::SurfFeatureDetector detector(minHess);
    detector.detect(object, kpObject);
    cv::SurfDescriptorExtractor extractor;
    extractor.compute(object, kpObject, desObject);
    cv::FlannBasedMatcher matcher;

    //Object corner cv::Points for plotting box
    std::vector<cv::Point2f> obj_corners(4);
    obj_corners[0] = cvPoint(0,0);
    obj_corners[1] = cvPoint( object.cols, 0 );
    obj_corners[2] = cvPoint( object.cols, object.rows );
    obj_corners[3] = cvPoint( 0, object.rows );

    double frameCount = 0;
    float thresholdMatchingNN=0.7;
    unsigned int thresholdGoodMatches=4;
    unsigned int thresholdGoodMatchesV[]={4,5,6,7,8,9,10};

    char escapeKey = 'k';

    for (int j=0; j<7;j++)
    {
      thresholdGoodMatches = thresholdGoodMatchesV[j];
      
      while (escapeKey != 'q')
      {
        frameCount++;
        cv::Mat image;
        cvtColor(cv_ptr->image, image, CV_RGB2GRAY);

        cv::Mat des_image, img_matches, H;
        std::vector<cv::KeyPoint> kp_image;
        std::vector<std::vector<cv::DMatch > > matches;
        std::vector<cv::DMatch> good_matches;
        std::vector<cv::Point2f> obj;
        std::vector<cv::Point2f> scene;
        std::vector<cv::Point2f> scene_corners(4);

        detector.detect( image, kp_image );
        extractor.compute( image, kp_image, des_image );
        matcher.knnMatch(desObject, des_image, matches, 2);

        for(int i = 0; i < std::min(des_image.rows-1, (int) matches.size()); i++) 
        //THIS LOOP IS SENSITIVE TO SEGFAULTS
        {
          if((matches[i][0].distance < thresholdMatchingNN*(matches[i][1].distance)) 
            && ((int) matches[i].size()<=2 && (int) matches[i].size()>0))
            {
                good_matches.push_back(matches[i][0]);
            }
        }

        //Draw only "good" matches
        cv::drawMatches(object, kpObject, image, kp_image, good_matches, img_matches, 
          cv::Scalar::all(-1), cv::Scalar::all(-1), std::vector<char>(), 
          cv::DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
        
        if (good_matches.size() >= thresholdGoodMatches)
        {

          //Display that the object is found
          cv::putText(img_matches, "Object Found", cvPoint(10,50), 0, 2, 
            cvScalar(0,0,250), 1, CV_AA);
            for(unsigned int i = 0; i < good_matches.size(); i++ )
            {
              //Get the keypoints from the good matches
              obj.push_back( kpObject[ good_matches[i].queryIdx ].pt );
              scene.push_back( kp_image[ good_matches[i].trainIdx ].pt );
            }

            H = findHomography( obj, scene, CV_RANSAC );

            perspectiveTransform( obj_corners, scene_corners, H);

            //Draw lines between the corners (the mapped object in the scene image )
            cv::line( img_matches, scene_corners[0] + cv::Point2f( object.cols, 0), 
              scene_corners[1] + cv::Point2f( object.cols, 0), cv::Scalar(0, 255, 0), 4 );
            cv::line( img_matches, scene_corners[1] + cv::Point2f( object.cols, 0), 
              scene_corners[2] + cv::Point2f( object.cols, 0), cv::Scalar( 0, 255, 0), 4 );
            cv::line( img_matches, scene_corners[2] + cv::Point2f( object.cols, 0), 
              scene_corners[3] + cv::Point2f( object.cols, 0), cv::Scalar( 0, 255, 0), 4 );
            cv::line( img_matches, scene_corners[3] + cv::Point2f( object.cols, 0), 
              scene_corners[0] + cv::Point2f( object.cols, 0), cv::Scalar( 0, 255, 0), 4 );
        }
        else
        {
          putText(img_matches, "", cvPoint(10,50), 0, 3, cvScalar(0,0,250), 1, CV_AA);
        }

        //Show detected matches
        imshow("Good Matches", img_matches);
        
        escapeKey=cvWaitKey(10);

        if(frameCount>10)
        {
          escapeKey='q';
        }


      }

      frameCount=0;
      escapeKey='a';
    }

    // Update GUI Window
    //cv::namedWindow(OPENCV_WINDOW);
    //cv::imshow(OPENCV_WINDOW, cv_ptr->image);
    //cv::waitKey(3);
    
    // Output modified video stream
    image_pub_.publish(cv_ptr->toImageMsg());
  }
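  // A sketch of hoisting the per-frame-invariant work out of imageCb: the
  // template image, its SURF keypoints/descriptors, and its corners do not
  // change between frames, so computing them once (e.g. at node construction)
  // avoids redoing the extraction on every callback. The class and member
  // names below are illustrative, not part of the original node:
  class TemplateCache
  {
  public:
    explicit TemplateCache(const std::string &file)
      : detector_(2000)
    {
      object_ = cv::imread(file, CV_LOAD_IMAGE_GRAYSCALE);
      detector_.detect(object_, kpObject_);
      extractor_.compute(object_, kpObject_, desObject_);
    }

    cv::Mat object_, desObject_;
    std::vector<cv::KeyPoint> kpObject_;

  private:
    cv::SurfFeatureDetector detector_;
    cv::SurfDescriptorExtractor extractor_;
  };
  // imageCb would then only detect/describe the incoming frame and run the matcher.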
Exemplo n.º 26
0
int main()
{
	cv::Mat imCalibColor;	
	cv::Mat imCalibGray;	
	cv::vector<cv::vector<cv::Point> > contours;
	cv::vector<cv::Vec4i> hierarchy;
	cv::vector<cv::Point2f> pointQR;
	cv::Mat imCalibNext;
	cv::Mat imQR;
	cv::vector<cv::Mat> tabQR;
	/*cv::vector<cv::Point2f> corners1;
	cv::vector<cv::Point2f> corners2;
	cv::vector<cv::Point2f> corners3;
	cv::vector<cv::Point2f> corners4;
	cv::vector<cv::Point2f> corners5;*/

	double qualityLevel = 0.01;
	double minDistance = 10;
	int blockSize = 3;
	bool useHarrisDetector = false;
	double k = 0.04;
	int maxCorners = 600;

	int A = 0, B= 0, C= 0;
	char key;
	int mark;
	bool patternFound = false;
	
	cv::VideoCapture vcap("../rsc/capture2.avi");

	for (int i = 1; i < 5; i++)
	{
		std::ostringstream oss;
		oss << "../rsc/QrCodes/QR" << i << ".jpg";
		imQR = cv::imread(oss.str());
		cv::cvtColor(imQR, imQR, CV_BGR2GRAY);
		std::cout<< "Bouh!!!!!!" << std::endl;
		tabQR.push_back(imQR);
	}

	do
	{
		while(imCalibColor.empty())
		{
			vcap >> imCalibColor;
		}
		vcap >> imCalibColor;

		cv::Mat edges(imCalibColor.size(),CV_MAKETYPE(imCalibColor.depth(), 1));
		cv::cvtColor(imCalibColor, imCalibGray, CV_BGR2GRAY);
		Canny(imCalibGray, edges, 100 , 200, 3);

		cv::findContours( edges, contours, hierarchy, cv::RETR_TREE, cv::CHAIN_APPROX_SIMPLE);
		
		cv::imshow("pointInteret", imCalibColor);

		mark = 0;

		cv::vector<cv::Moments> mu(contours.size());
  		cv::vector<cv::Point2f> mc(contours.size());

		for( int i = 0; i < contours.size(); i++ )
		{	
			mu[i] = moments( contours[i], false ); 
			mc[i] = cv::Point2f( mu[i].m10/mu[i].m00 , mu[i].m01/mu[i].m00 );
		}

		for( int i = 0; i < contours.size(); i++ )
		{
			int k=i;
			int c=0;

			while(hierarchy[k][2] != -1)	// descend through first children to measure nesting depth
			{
				k = hierarchy[k][2] ;
				c = c+1;
			}

			if (c >= 5)
			{	
				if (mark == 0)		A = i;
				else if  (mark == 1)	B = i;		// i.e., A is already found, assign current contour to B
				else if  (mark == 2)	C = i;		// i.e., A and B are already found, assign current contour to C
				mark = mark + 1 ;
			}
		} 

		if (A !=0 && B !=0 && C!=0)
		{

			cv::Mat imagecropped = imCalibColor;
			cv::Rect ROI(280/*pointQR[0].x*/, 260/*pointQR[0].y*/, 253, 218);
			cv::Mat croppedRef(imagecropped, ROI);
			cv::cvtColor(croppedRef, imagecropped, CV_BGR2GRAY);
			cv::threshold(imagecropped, imagecropped, 180, 255, CV_THRESH_BINARY);

			pointQR.push_back(mc[A]);
			cv::circle(imCalibColor, cv::Point(pointQR[0].x, pointQR[0].y), 3, cv::Scalar(0, 0, 255), 1, 8, 0);
			pointQR.push_back(mc[B]);
			cv::circle(imCalibColor, cv::Point(pointQR[1].x, pointQR[1].y), 3, cv::Scalar(0, 0, 255), 1, 8, 0);
			pointQR.push_back(mc[C]);
			cv::circle(imCalibColor, cv::Point(pointQR[2].x, pointQR[2].y), 3, cv::Scalar(0, 0, 255), 1, 8, 0);

			cv::Point2f D(0.0f,0.0f);
			cv::Point2f E(0.0f,0.0f);
			cv::Point2f F(0.0f,0.0f);

			D.x = (mc[A].x + mc[B].x)/2;
			E.x = (mc[B].x + mc[C].x)/2;
			F.x = (mc[C].x + mc[A].x)/2;

			D.y = (mc[A].y + mc[B].y)/2;
			E.y = (mc[B].y + mc[C].y)/2;
			F.y = (mc[C].y + mc[A].y)/2;

			pointQR.push_back(D);
			cv::circle(imCalibColor, cv::Point(pointQR[3].x, pointQR[3].y), 3, cv::Scalar(0, 0, 255), 1, 8, 0);
			pointQR.push_back(E);
			cv::circle(imCalibColor, cv::Point(pointQR[4].x, pointQR[4].y), 3, cv::Scalar(0, 0, 255), 1, 8, 0);
			pointQR.push_back(F);
			cv::circle(imCalibColor, cv::Point(pointQR[5].x, pointQR[5].y), 3, cv::Scalar(0, 0, 255), 1, 8, 0);

			patternFound = true;
			std::cout << "patternfound" << std::endl;
			
			cv::SiftFeatureDetector detector;
			cv::vector<cv::KeyPoint> keypoints1, keypoints2;
			detector.detect(tabQR[3], keypoints1);
			detector.detect(imagecropped, keypoints2);

			cv::Ptr<cv::DescriptorExtractor> descriptor = cv::DescriptorExtractor::create("SIFT");
			cv::Mat descriptors1, descriptors2;
			descriptor->compute(tabQR[3], keypoints1, descriptors1 );
			descriptor->compute(imagecropped, keypoints2, descriptors2 );

			cv::FlannBasedMatcher matcher; 
			std::vector< cv::DMatch > matches; 
			matcher.match( descriptors1, descriptors2, matches ); 
			double max_dist = 0; double min_dist = 100;

			for( int i = 0; i < descriptors1.rows; i++ ) 
			{ 
				double dist = matches[i].distance; 
				if( dist < min_dist ) min_dist = dist; 
				if( dist > max_dist ) max_dist = dist; 
			}

			std::vector< cv::DMatch > good_matches;
			for( int i = 0; i < descriptors1.rows; i++ ) 
				if( matches[i].distance <= 2*min_dist ) 
					good_matches.push_back( matches[i]); 
			cv::Mat imgout; 
			drawMatches(tabQR[3], keypoints1, imagecropped, keypoints2, good_matches, imgout); 

			std::vector<cv::Point2f> pt_img1; 
			std::vector<cv::Point2f> pt_img2; 
			for( int i = 0; i < (int)good_matches.size(); i++ ) 
			{ 
				pt_img1.push_back(keypoints1[ good_matches[i].queryIdx ].pt ); 
				pt_img2.push_back(keypoints2[ good_matches[i].trainIdx ].pt ); 
			}
			cv::Mat H = findHomography( pt_img1, pt_img2, CV_RANSAC );

			cv::Mat result; 
			warpPerspective(tabQR[3],result,H,cv::Size(tabQR[3].cols+imagecropped.cols,tabQR[3].rows)); 
			cv::Mat half(result,cv::Rect(0,0,imagecropped.cols,imagecropped.rows)); 
			imagecropped.copyTo(half); 
			imshow( "Result", result );

			break;
		}

		key = (char)cv::waitKey(67);
	}while(!patternFound && key != 27);

	if(patternFound)
		imCalibNext = imCalibColor;
	
	return patternFound;

}
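// Helper sketch for the finder-pattern test above: a QR finder pattern is a
// black square ring nested inside white inside black, so on a Canny edge map
// its contour tree is unusually deep (each printed ring contributes two edge
// contours, hence the c >= 5 threshold). Assumes the hierarchy comes from
// cv::findContours with RETR_TREE, as above:
static int nestingDepth(const cv::vector<cv::Vec4i> &hierarchy, int idx)
{
	int depth = 0;
	while (hierarchy[idx][2] != -1)   // hierarchy[idx][2] is the first child
	{
		idx = hierarchy[idx][2];
		++depth;
	}
	return depth;
}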
Exemplo n.º 27
0
bool ofxFeatureFinder::detectObject(ofxFeatureFinderObject object, cv::Mat &homography) {
    ////////////////////////////
    // NEAREST NEIGHBOR MATCHING USING FLANN LIBRARY (included in OpenCV)
    //////////////////////////
    cv::Mat results;
    cv::Mat dists;
    int k=2; // find the 2 nearest neighbors

    // assume it is CV_32F
    // Create Flann KDTree index
    cv::flann::Index flannIndex(imageDescriptors, cv::flann::KDTreeIndexParams(), cvflann::FLANN_DIST_EUCLIDEAN);
    results = cv::Mat(object.descriptors.rows, k, CV_32SC1); // Results index
    dists = cv::Mat(object.descriptors.rows, k, CV_32FC1); // Distance results are CV_32FC1
        
    // search (nearest neighbor)
    flannIndex.knnSearch(object.descriptors, results, dists, k, cv::flann::SearchParams() );

    
    // Find correspondences by NNDR (Nearest Neighbor Distance Ratio)
    float nndrRatio = 0.6;
    std::vector<cv::Point2f> mpts_1, mpts_2; // Used for homography
    std::vector<int> indexes_1, indexes_2; // Used for homography
    std::vector<uchar> outlier_mask;  // Used for homography
    for(int i=0; i<object.descriptors.rows; ++i)
    {
        // Check if this descriptor matches with those of the objects
        // Apply NNDR
        if(dists.at<float>(i,0) <= nndrRatio * dists.at<float>(i,1))
        {
            mpts_1.push_back(object.keypoints.at(i).pt);
            indexes_1.push_back(i);
            
            mpts_2.push_back(imageKeypoints.at(results.at<int>(i,0)).pt);
            indexes_2.push_back(results.at<int>(i,0));
        }
    }
    
    // FIND HOMOGRAPHY

    if(mpts_1.size() >= minMatchCount)
    {
        homography = findHomography(mpts_1, mpts_2, cv::RANSAC, 1.0, outlier_mask);

        uint inliers=0, outliers=0;
        for(unsigned int k=0; k<mpts_1.size(); ++k)
        {
            if(outlier_mask.at(k))
            {
                ++inliers;
            }
            else
            {
                ++outliers;
            }
        }

        cout << inliers << " in / " << outliers << " out" << endl;
        
        return true;
    }
    
    return false;
}
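// Usage sketch (assumptions: 'finder' has already processed the current frame,
// 'object' is a trained ofxFeatureFinderObject, and objWidth/objHeight are the
// pixel dimensions of the image the object was trained on -- all hypothetical
// names standing in for the caller's own state):
void drawDetectedOutline(ofxFeatureFinder &finder, ofxFeatureFinderObject &object,
                         float objWidth, float objHeight)
{
    cv::Mat homography;
    if (finder.detectObject(object, homography)) {
        std::vector<cv::Point2f> corners(4), projected;
        corners[0] = cv::Point2f(0, 0);
        corners[1] = cv::Point2f(objWidth, 0);
        corners[2] = cv::Point2f(objWidth, objHeight);
        corners[3] = cv::Point2f(0, objHeight);
        cv::perspectiveTransform(corners, projected, homography);
        // 'projected' now outlines the detected object in the current frame.
    }
}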
Exemplo n.º 28
0
pcl::PointCloud<briskDepth> BDMatch(pcl::PointCloud<briskDepth> a, pcl::PointCloud<briskDepth> b)
{
    std::cout << "The count is: " << count << std::endl;
    pcl::PointCloud<briskDepth> pclMatch;
    try
    {
        cv::Mat descriptorsA;
        cv::Mat descriptorsB;
        for(int i =0; i < a.size(); i++)
        {
            descriptorsA.push_back(a[i].descriptor);
        }

        for(int i =0; i < b.size(); i++)
        {
            descriptorsB.push_back(b[i].descriptor);
        }

        cv::BFMatcher matcher(cv::NORM_HAMMING);
        std::vector< cv::DMatch > matches;

        matcher.match( descriptorsA, descriptorsB, matches );

        double max_dist = 0; double min_dist = 1000;

        StdDeviation sd;
        std::vector<double> temp(descriptorsA.rows);    // avoid a variable-length array (non-standard C++)

        for (int i =0; i < descriptorsA.rows;i++)
        {
            double dist = matches[i].distance;
            if(max_dist<dist) max_dist = dist;
            if(min_dist>dist) min_dist = dist;
            //std::cout << dist << "\t";
            temp[i] = dist;
        }

        //std::cout << std::endl;
       // std::cout << " Brisk max dist " << max_dist << std::endl;
       // std::cout << " Brisk mins dist " << min_dist << std::endl;

        sd.SetValues(&temp[0], descriptorsA.rows);

        double mean = sd.CalculateMean();
        double variance = sd.CalculateVariane();
        double samplevariance = sd.CalculateSampleVariane();
        double sampledevi = sd.GetSampleStandardDeviation();
        double devi = sd.GetStandardDeviation();

        std::cout << "Brisk\t" << descriptorsA.rows << "\t"
                << mean << "\t"
                << variance << "\t"
                << samplevariance << "\t"
                << devi << "\t"
                << sampledevi << "\n";

        std::vector< cv::DMatch > good_matches;

        for (int i=0;i<descriptorsA.rows;i++)
        {
            //if( matches[i].distance<10)
            if( matches[i].distance<max_dist/2)
            {
                good_matches.push_back(matches[i]);
                pclMatch.push_back(a[i]);
            }
        }

        cv::Mat img_matches;
        cv::drawMatches( brisk_lastImg, brisk_lastKeypoints, brisk_currentImg, brisk_currentKeypoints,
                           good_matches, img_matches, cv::Scalar::all(-1), cv::Scalar::all(-1),
                           std::vector<char>(), cv::DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

       // cv::imshow("Brisk Matches", img_matches);
        std::vector<cv::Point2f> obj;
        std::vector<cv::Point2f> scene;

        for( int i = 0; i < good_matches.size(); i++ )
        {
          //-- Get the keypoints from the good matches
          obj.push_back( brisk_lastKeypoints[ good_matches[i].queryIdx ].pt );
          scene.push_back( brisk_currentKeypoints[ good_matches[i].trainIdx ].pt );
        }

        cv::Mat H = findHomography( obj, scene, CV_RANSAC );

        //-- Get the corners from the image_1 ( the object to be "detected" )
        std::vector<cv::Point2f> obj_corners(4);
        obj_corners[0] = cvPoint(0,0); obj_corners[1] = cvPoint( brisk_lastImg.cols, 0 );
        obj_corners[2] = cvPoint( brisk_lastImg.cols, brisk_lastImg.rows ); obj_corners[3] = cvPoint( 0, brisk_lastImg.rows );
        std::vector<cv::Point2f> scene_corners(4);

        perspectiveTransform( obj_corners, scene_corners, H);

        //-- Draw lines between the corners (the mapped object in the scene - image_2 )
        line( img_matches, scene_corners[0] + cv::Point2f( brisk_lastImg.cols, 0), scene_corners[1] + cv::Point2f( brisk_lastImg.cols, 0), cv::Scalar(0, 255, 0), 4 );
        line( img_matches, scene_corners[1] + cv::Point2f( brisk_lastImg.cols, 0), scene_corners[2] + cv::Point2f( brisk_lastImg.cols, 0), cv::Scalar( 0, 255, 0), 4 );
        line( img_matches, scene_corners[2] + cv::Point2f( brisk_lastImg.cols, 0), scene_corners[3] + cv::Point2f( brisk_lastImg.cols, 0), cv::Scalar( 0, 255, 0), 4 );
        line( img_matches, scene_corners[3] + cv::Point2f( brisk_lastImg.cols, 0), scene_corners[0] + cv::Point2f( brisk_lastImg.cols, 0), cv::Scalar( 0, 255, 0), 4 );

        //-- Show detected matches
        cv::imshow( "Good brisk Matches & Object detection", img_matches );
        cv::waitKey(50);
       // std::cout << good_matches.size() << " Brisk features matched from, " << a.size() << ", " << b.size() << " sets." << std::endl;
    }
    catch (const std::exception &exc)
    {
        // catch anything thrown within try block that derives from std::exception
        std::cerr << exc.what();
    }
    return pclMatch;
}
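// Sketch of an alternative match filter for binary descriptors (assumption:
// BRISK's 512-bit descriptors matched with NORM_HAMMING, so distances fall in
// [0, 512]). A fixed absolute cutoff is often more stable than the max_dist/2
// rule above, which depends on the single worst match in the frame:
std::vector<cv::DMatch> filterByHammingDistance(const std::vector<cv::DMatch> &matches,
                                                double threshold /* e.g. 90, tune per application */)
{
    std::vector<cv::DMatch> kept;
    for (size_t i = 0; i < matches.size(); i++)
        if (matches[i].distance < threshold)
            kept.push_back(matches[i]);
    return kept;
}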
Exemplo n.º 29
0
bool orbMatch(cv::Mat& inImageScene, cv::Mat& inImageObj, cv::Rect& outBoundingBox, unsigned int inMinMatches=2, float inKnnRatio=0.7)
{
	//vector of keypoints
	std::vector< cv::KeyPoint > keypointsO;
	std::vector< cv::KeyPoint > keypointsS;

	cv::Mat descriptors_object, descriptors_scene;

	cv::Mat outImg;
	inImageScene.copyTo(outImg);

	//-- Step 1: Extract keypoints
	cv::OrbFeatureDetector orb(ORB_NUM_FEATURES);
	orb.detect(inImageScene, keypointsS);
	if (keypointsS.size() < ORB_MIN_MATCHES)
	{
		//cout << "Not enough keypoints S, object not found>" << keypointsS.size() << endl;
		return false;
	}
	orb.detect(inImageObj, keypointsO);
	if (keypointsO.size() < ORB_MIN_MATCHES)
	{
		//cout << "Not enough keypoints O, object not found>" << keypointsO.size() << endl;
		return false;
	}

	//Calculate descriptors (feature vectors)
	cv::OrbDescriptorExtractor extractor;
	extractor.compute(inImageScene, keypointsS, descriptors_scene);
	extractor.compute(inImageObj, keypointsO, descriptors_object);

	//Match descriptor vectors with a brute-force matcher
	//(NORM_HAMMING is the appropriate norm for ORB's binary descriptors)
	cv::BFMatcher matcher(cv::NORM_HAMMING);
	std::vector< std::vector< cv::DMatch >  > matches;
	matcher.knnMatch(descriptors_object, descriptors_scene, matches, 2);
	std::vector< cv::DMatch > good_matches;
	good_matches.reserve(matches.size());

	for (size_t i = 0; i < matches.size(); ++i)
	{
		if (matches[i].size() < 2)	// knnMatch with k=2 yields at most two neighbors
			continue;

		const cv::DMatch &m1 = matches[i][0];
		const cv::DMatch &m2 = matches[i][1];

		if (m1.distance <= inKnnRatio * m2.distance)
			good_matches.push_back(m1);
	}

	if ((good_matches.size() >= inMinMatches))
	{
		std::vector< cv::Point2f > obj;
		std::vector< cv::Point2f > scene;

		for (unsigned int i = 0; i < good_matches.size(); i++)
		{
			// Get the keypoints from the good matches
			obj.push_back(keypointsO[good_matches[i].queryIdx].pt);
			scene.push_back(keypointsS[good_matches[i].trainIdx].pt);
		}

		cv::Mat H = findHomography(obj, scene, CV_RANSAC);

		// Get the corners from the image_1 ( the object to be "detected" )
		std::vector< cv::Point2f > obj_corners(4);
		obj_corners[0] = cvPoint(0, 0); obj_corners[1] = cvPoint(inImageObj.cols, 0);
		obj_corners[2] = cvPoint(inImageObj.cols, inImageObj.rows); obj_corners[3] = cvPoint(0, inImageObj.rows);
		std::vector< cv::Point2f > scene_corners(4);

		perspectiveTransform(obj_corners, scene_corners, H);
		outBoundingBox = cv::boundingRect(scene_corners);	// report the detected region through the out-parameter

		// Draw lines between the corners (the mapped object in the scene - image_2 )
		line(outImg, scene_corners[0], scene_corners[1], cv::Scalar(255, 0, 0), 2); //TOP line
		line(outImg, scene_corners[1], scene_corners[2], cv::Scalar(255, 0, 0), 2);
		line(outImg, scene_corners[2], scene_corners[3], cv::Scalar(255, 0, 0), 2);
		line(outImg, scene_corners[3], scene_corners[0], cv::Scalar(255, 0, 0), 2);

		//imshow("Scene", outImg);
		//imshow("Obj", inImageObj);
		//cvWaitKey(5);

		return true;
	}

	return false;
}
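// Usage sketch for orbMatch (assumptions: grayscale scene and template images
// supplied by the caller; the file names are placeholders):
int demoOrbMatch()
{
	cv::Mat scene = cv::imread("scene.png", CV_LOAD_IMAGE_GRAYSCALE);
	cv::Mat obj = cv::imread("template.png", CV_LOAD_IMAGE_GRAYSCALE);
	cv::Rect box;
	if (orbMatch(scene, obj, box))
		std::cout << "object found at " << box.x << "," << box.y
		          << " (" << box.width << "x" << box.height << ")" << std::endl;
	return 0;
}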
std::pair<Mat, Mat> 
ImageRegistrator::registerImages(ImageList inputImages, int resizeFactor, int cornersAmount)
{
    Scaller                     scaller;
    MatrixList                  homographies;
    ImageList::const_iterator   selectedImage = inputImages.begin();
    ImageList::iterator         nthImage = inputImages.begin();
    PointVector                 selectedImageCorners(cornersAmount);
    PointVector                 nthImageCorners(cornersAmount);
    std::vector<uchar>          status(cornersAmount);
    std::vector<float>          error(cornersAmount);

    goodFeaturesToTrack(
            *selectedImage,
            selectedImageCorners,
            cornersAmount,
            0.01,
            1);

    cv::cornerSubPix(
            *selectedImage,
            selectedImageCorners,
            Size(5, 5),
            Size(-1, -1),
            TermCriteria(
                TermCriteria::COUNT + TermCriteria::EPS,
                6000,
                0.001)
            );


    for (; nthImage != inputImages.end(); ++nthImage) {
        if (nthImage != selectedImage) {
            calcOpticalFlowPyrLK(
                    *selectedImage,
                    *nthImage,
                    selectedImageCorners,
                    nthImageCorners,
                    status,
                    error);

            PointVector selImgCor = removeBadPoints(selectedImageCorners, status);
            PointVector nthImgCor = removeBadPoints(nthImageCorners, status);

            Mat H = findHomography(
                    selImgCor,
                    nthImgCor,
                    CV_RANSAC,
                    0.1);

            if (cv::norm(Point2f(H.at<double>(0,2), H.at<double>(1,2))) > 2) {
                nthImage->release();
                continue;
            }

            roundMatrixCoefficients(H, resizeFactor);
            homographies.push_back(H);
        }
    }  
    
    inputImages.erase(std::remove_if(inputImages.begin(),
                inputImages.end(),
                ImageRegistrator::ImageRemPred()),
            inputImages.end());
        
    inputImages = scaller.upscaleImages(inputImages, resizeFactor);
    selectedImage = inputImages.begin();    // re-anchor: the old iterator was invalidated by erase()/upscaleImages()
    MatrixList::iterator h = homographies.begin();

    for (nthImage = inputImages.begin(); nthImage != inputImages.end(); ++nthImage) {
        if (nthImage != selectedImage) {
            util::printMatrix(*h, 12);
            warpPerspective(
                    nthImage->clone(),
                    *nthImage,
                    *h,
                    nthImage->size(),
                    cv::INTER_NEAREST | cv::WARP_INVERSE_MAP);
            ++h;
        }
    }

    Mat                 output = Mat::zeros(selectedImage->size(), selectedImage->type());
    std::list<uchar>    pixelValues;
    Mat                 medianWeights = Mat::zeros(output.size(), output.type());
    
    for (int i = 0; i < selectedImage->rows ; ++i) {
        for (int j = 0; j < selectedImage->cols; ++j) {

            for (nthImage = inputImages.begin(); nthImage != inputImages.end(); ++nthImage) {
                uchar value = (*nthImage).at<uchar>(i,j);

                if (value != 0)  {
                    pixelValues.push_back(value);
                }
            }

            if ( !pixelValues.empty() ) {
                output.at<uchar>(i,j) = produceMedian(pixelValues);
                medianWeights.at<uchar>(i,j) = 
                    static_cast<uchar>(std::sqrt(pixelValues.size()));
            }

            pixelValues.clear();
        }
    }

    std::cout << "pixel covreage : " << pixelCovreage(output) << std::endl;

    Mat fullMedian(output.size(), output.type());
    cv::medianBlur(output, fullMedian, 3);  // a ksize of 1 is just a copy and would fill no holes

    for (int i = 0; i < output.rows ; ++i) {
        for (int j = 0; j < output.cols; ++j) {
            if (output.at<uchar>(i,j) == 0) {
                output.at<uchar>(i,j) = fullMedian.at<uchar>(i,j);
            }
        }
    }
    util::printImage(output, std::string("tada"));

    return std::pair<Mat, Mat>(output, medianWeights);
}
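// Usage sketch for registerImages (assumptions: ImageList is a sequence
// container of same-sized 8-bit grayscale cv::Mat frames; the resize factor
// and corner count below are illustrative values):
void demoRegisterImages(ImageList &frames)
{
    ImageRegistrator registrator;
    std::pair<Mat, Mat> fused = registrator.registerImages(frames, 2, 400);
    cv::imwrite("registered_median.png", fused.first);   // fused (median) image
    cv::imwrite("median_weights.png", fused.second);     // per-pixel sqrt(sample count)
}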