Code Example #1
	// prevFrame: grayscale image of the previous frame
	// grayFrame: grayscale image of the current frame
	// points: input and output points to track
	// homography: estimated homography matrix from the current frame to the previous frame
	void estimateHomography(const SoccerPitchData &data, Mat &prevFrame, Mat &grayFrame, vector<Point2f> &points, Mat &currToKeyTrans, Mat &keyToTopTrans, bool &initialized) {
		Mat topToCurrTrans;
		invert(keyToTopTrans * currToKeyTrans, topToCurrTrans);
		vector<Point2f> trackedPitchPoints;
		perspectiveTransform(data.pitchPoints, trackedPitchPoints, topToCurrTrans);		

		// delete points that are outside of the image
		vector<Point2f> pitchPoints;
		removeOutsiders(data, trackedPitchPoints, pitchPoints);
		
		if (!prevFrame.empty()) {
			// relocate corners
			if (frameCounter >= 60) {
				if (frameCounter == 60) {
					relocatedCorners.clear();
					relocatedPitchPoints.clear();
					reprojErr = vector<double>(28, DBL_MAX);
				}
				cout<<"relocating corners"<<endl;
				findGoodCorners2(grayFrame, data, currToKeyTrans, keyToTopTrans);
				if (!relocatedCorners.empty())
					perspectiveTransform(relocatedCorners, relocatedCorners, currToPrevTrans);
				if (relocatedCorners.size() >= 6) {
				// recalibrate the homography
					if (relocatedCorners.size() == 12)
						cout<<"stop"<<endl;
					keyToTopTrans = findHomography(relocatedCorners, relocatedPitchPoints, RANSAC, 1);
					currToKeyTrans = Mat::eye(Size(3, 3), CV_64FC1);
					trackedPitchPoints = relocatedCorners;
					pitchPoints = relocatedPitchPoints;
					frameCounter = 0;
					relocateCounter = 0;
				}
				else{
					frameCounter++;
					relocateCounter++;
				}
			}
			else 
				frameCounter++;

			cout<<"doing optical flow tracking"<<endl; 
			calcOpticalFlowPyrLK(prevFrame, grayFrame, trackedPitchPoints, currPts, status, err, Size(21, 21), 3);
			
			currToPrevTrans = findHomography(currPts, trackedPitchPoints, RANSAC, 1);
			currToKeyTrans = currToPrevTrans * currToKeyTrans;

			vector<Point2f> keyPts;
			perspectiveTransform(currPts, keyPts, currToKeyTrans);
			Mat temp = findHomography(keyPts, pitchPoints, RANSAC, 1);
			if (!temp.empty())
				keyToTopTrans = temp.clone();
			else
				initialized = false;
			points = currPts;		
		}
		prevFrame = grayFrame;
	}
Code Example #2
File: training.cpp Project: asolis/detection3D
void Train::projectKeypoints(const vector<KeyPoint> &original, const MatExpr M, vector<Point> &transformedPoints)
{
    Mat keypointMatIn = keyPoint2Mat(original);
    Mat keypointMatOut;
    perspectiveTransform(keypointMatIn, keypointMatOut, M);
    transformedPoints = mat2Points(keypointMatOut);
}
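
All of these snippets share the same call pattern: obtain a 3x3 homography (from findHomography, getPerspectiveTransform, or a product/inverse of existing transforms) and push a vector of 2D points through cv::perspectiveTransform. The following minimal, self-contained sketch is not taken from any of the projects listed here; the corner coordinates are made-up illustration values.

#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <iostream>

int main() {
	// four made-up point correspondences define an exact homography
	std::vector<cv::Point2f> src = { {0.f, 0.f}, {100.f, 0.f}, {100.f, 100.f}, {0.f, 100.f} };
	std::vector<cv::Point2f> dst = { {10.f, 5.f}, {110.f, 10.f}, {105.f, 115.f}, {5.f, 110.f} };
	cv::Mat H = cv::getPerspectiveTransform(src, dst); // 3x3, CV_64F

	// map arbitrary query points through the same homography
	std::vector<cv::Point2f> query = { {50.f, 50.f}, {25.f, 75.f} };
	std::vector<cv::Point2f> mapped;
	cv::perspectiveTransform(query, mapped, H);

	for (size_t i = 0; i < mapped.size(); ++i)
		std::cout << query[i] << " -> " << mapped[i] << std::endl;
	return 0;
}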
Code Example #3
void CustomPattern::check_matches(vector<Point2f>& matched, const vector<Point2f>& pattern, vector<DMatch>& good,
                                  vector<Point3f>& pattern_3d, const Mat& H)
{
    vector<Point2f> proj;
    perspectiveTransform(pattern, proj, H);

    int deleted = 0;
    double error_sum = 0;
    double error_sum_filtered = 0;
    for (uint i = 0; i < proj.size(); ++i)
    {
        double error = norm(matched[i] - proj[i]);
        error_sum += error;
        if (error >= MAX_PROJ_ERROR_PX)
        {
            deleteStdVecElem(good, i);
            deleteStdVecElem(matched, i);
            deleteStdVecElem(pattern_3d, i);
            ++deleted;
        }
        else
        {
            error_sum_filtered += error;
        }
    }
}
Code Example #4
bool CameraProjections::GetOnRealCordinate(const vector<Point> contour,
		vector<Point2f> &resCountour)
{
	if (contour.size() < 1)
	{
		ROS_ERROR("Error In Programming");
		return false;
	}

	vector<Point> resC;
	if (!_distorionModel.UndistortP(contour, resC))
	{
		return false;
	}

	std::vector<Point2f> resCD;
	for (uint32_t i = 0; i < resC.size(); i++)
	{
		resCD.push_back(Point2f(resC[i].x, resC[i].y));
	}
	perspectiveTransform(resCD, resCD, realHomoFor);
	for (uint32_t i = 0; i < resCD.size(); i++)
	{
		double y = (resCD[i].y * params.topView.scale->get())
				- ((params.topView.width->get() / 2.));
		double x = (resCD[i].x * params.topView.scale->get())
				- ((params.topView.width->get() / 2.));

		resCountour.push_back(Point2f(x / 100., y / 100.));
	}
	return true;
}
Code Example #5
File: transformation.cpp Project: 0x6e3078/openalpr
  vector<Point2f> Transformation::remapSmallPointstoCrop(vector<Point2f> smallPoints, cv::Mat transformationMatrix)
  {
    vector<Point2f> remappedPoints;
    perspectiveTransform(smallPoints, remappedPoints, transformationMatrix);

    return remappedPoints;
  }
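
For context, here is a hedged usage sketch for the helper above; the 2x scale factor, the point values, and the `transformation` instance are illustrative assumptions rather than openalpr code. If the small image were produced by downscaling the crop by half, the matrix passed in would simply scale the detected points back up:

	// assumed scale-only homography mapping small-image coordinates back to crop coordinates
	cv::Mat smallToCrop = (cv::Mat_<double>(3, 3) <<
		2.0, 0.0, 0.0,
		0.0, 2.0, 0.0,
		0.0, 0.0, 1.0);

	std::vector<cv::Point2f> smallPoints = { {40.f, 30.f}, {120.f, 90.f} };
	// `transformation` is assumed to be an existing Transformation instance
	std::vector<cv::Point2f> cropPoints =
		transformation.remapSmallPointstoCrop(smallPoints, smallToCrop);
	// cropPoints now holds (80, 60) and (240, 180)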
Code Example #6
File: main.cpp Project: T-Jin/SoccerVideoProcessing
void mouse(int button, int state, int x, int y) {
	if (initialized == false){
		if ((button == GLUT_LEFT_BUTTON) && (state == GLUT_DOWN)) {
			userAnnotations.push_back(Point2f(x, y));
		} 
		else if ((button == GLUT_LEFT_BUTTON) && (state == GLUT_UP)) {
			userPitchPoints.push_back(Point2f(x, y));
			cout<<userAnnotations.size()<<" point pair(s) entered."<<endl;
		}
	} 
	else{
		if ( button == GLUT_LEFT_BUTTON && state == GLUT_DOWN ){
			//cout<<x<<"--"<<y<<endl;

			Mat topToCurrTrans;
			invert(keyToTopTrans * currToKeyTrans, topToCurrTrans);

			vector<Point2f> augmentedPoint;
			perspectiveTransform(vector<Point2f>(1, Point2f((float) x, (float) y)),
				augmentedPoint, topToCurrTrans);

			playerAnnotation = augmentedPoint[0];
			//vector<KeyPoint> filteredPoints;

			Mat binaryImage;
			threshold(grayFrame, binaryImage, thres, 255,THRESH_BINARY);

			prevRect = closeRect(binaryImage, playerAnnotation, x_range, y_range);
			Mat watch = rawFrame;
			rectangle(rawFrame, prevRect[0], prevRect[1], Scalar(255, 255, 0));
			rectangle(rawFrame, Point(playerAnnotation.x - x_range,playerAnnotation.y - y_range), Point(playerAnnotation.x+x_range,playerAnnotation.y+y_range), Scalar(255, 0, 255));
		}
	}
}
Code Example #7
/* Check the calculated homography:
 * transform the frame corners with the homography,
 * compute the aligned rows and cols,
 * and if the difference exceeds 0.1, mark the homography as invalid.
 */
bool AlignmentMatrixCalc::isHomographyValid()
{
    std::vector<cv::Point2f> inputCorners(4);
    inputCorners[0] = cvPoint(0,0);
    inputCorners[1] = cvPoint( prevFrame.cols, 0 );
    inputCorners[2] = cvPoint( prevFrame.cols, prevFrame.rows );
    inputCorners[3] = cvPoint( 0, prevFrame.rows );
    std::vector<cv::Point2f> alignedCorners(4);

    perspectiveTransform( inputCorners, alignedCorners, homography);

    float upDeltaX = fabs(alignedCorners[0].x-alignedCorners[1].x);
    float downDeltaX = fabs(alignedCorners[2].x-alignedCorners[3].x);
    float upDeltaY = fabs(alignedCorners[0].y-alignedCorners[3].y);
    float downDeltaY = fabs(alignedCorners[1].y-alignedCorners[2].y);
    float alignedCols=(upDeltaX+downDeltaX)/2;
    float alignedRows=(upDeltaY+downDeltaY)/2;
    float colsDifference=fabs(alignedCols - prevFrame.cols) / prevFrame.cols;
    float rowsDifference=fabs(alignedRows - prevFrame.rows) / prevFrame.rows;

    if( colsDifference < 0.1 && rowsDifference < 0.1 )
    {
       isHomographyCalc = true;
    }
    else
    {
       isHomographyCalc = false;
       qDebug()<<"Homography Matrix is Invalid : "<<colsDifference<<" "<<rowsDifference ;

    }

    return isHomographyCalc;

}
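
As a hedged, standalone restatement of the check above (the helper name and signature are ours; the corner-drift metric and the 0.1 threshold mirror the code), the same test can be written for an arbitrary homography:

// needs <cmath>, <vector> and opencv2/core.hpp
static bool roughlyShapePreserving(const cv::Mat &homography, const cv::Size &frameSize)
{
	std::vector<cv::Point2f> corners = {
		{0.f, 0.f},
		{(float)frameSize.width, 0.f},
		{(float)frameSize.width, (float)frameSize.height},
		{0.f, (float)frameSize.height}
	};
	std::vector<cv::Point2f> aligned;
	cv::perspectiveTransform(corners, aligned, homography);

	// average width of the top/bottom edges and height of the left/right edges after warping
	float alignedCols = (std::fabs(aligned[0].x - aligned[1].x) + std::fabs(aligned[2].x - aligned[3].x)) / 2.f;
	float alignedRows = (std::fabs(aligned[0].y - aligned[3].y) + std::fabs(aligned[1].y - aligned[2].y)) / 2.f;

	// accept only if both dimensions stay within 10% of the original frame
	return std::fabs(alignedCols - frameSize.width) / frameSize.width < 0.1f
		&& std::fabs(alignedRows - frameSize.height) / frameSize.height < 0.1f;
}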
Code Example #8
File: ARmsk.cpp Project: annie2008hi/armsk
	void transformPoints(Mat &HMatrix, vector<Point2f> &srcPoints, vector<Point2f> &dstPoints ){
		Mat transformedPoints;
		perspectiveTransform(Mat(srcPoints), transformedPoints, HMatrix);
		
		for (size_t i = 0; i < srcPoints.size(); i++)
			dstPoints.push_back(transformedPoints.at<Point2f> (i, 0));
	}
Code Example #9
bool CameraProjections::GetOnImageCordinate(const vector<Point2f> contour,
		vector<Point> &resCountour)
{
	if (contour.size() < 1)
	{
		ROS_ERROR("Error In Programming");
		return false;
	}
	vector<Point2f> resC, resCountourd, contourShiftAndScale;
	vector<Point> resCI;

	for (uint32_t i = 0; i < contour.size(); i++)
	{
		Point2f f = Point2d(contour[i].x * 100., contour[i].y * 100.);
		f = Point2d(
				f.x / params.topView.scale->get()
						+ ((params.topView.width->get()
								/ params.topView.scale->get()) / 2.),
				(f.y / params.topView.scale->get())
						+ ((params.topView.width->get()
								/ params.topView.scale->get()) / 2.));
		contourShiftAndScale.push_back(f);
	}

	perspectiveTransform(contourShiftAndScale, resC, realHomoBack);

	for (uint32_t i = 0; i < resC.size(); i++)
	{
		resCI.push_back(Point((int) resC[i].x, (int) resC[i].y));
	}
	return _distorionModel.DistortP(resCI, resCountour);

}
Code Example #10
	void findGoodCorners2(const Mat &grayFrame, const SoccerPitchData &data, Mat &currToKeyTrans, Mat &keyToTopTrans) {
		Mat topToCurrTrans;
		invert(keyToTopTrans * currToKeyTrans, topToCurrTrans);
		vector<Point2f> imagePitchOuterContour;
		perspectiveTransform(data.pitchOuterPoints, imagePitchOuterContour, topToCurrTrans);

		vector<Point2f> hull;
		convexHull(imagePitchOuterContour, hull);

		Mat mask = Mat::zeros(frameSize, CV_8UC1);
		fillConvexPoly(mask, vector<Point>(hull.begin(), hull.end()), Scalar(1, 0, 0));

		dilate(mask, mask, getStructuringElement(MORPH_ELLIPSE, Size(3, 3)));

		Mat bin;
		adaptiveThreshold(grayFrame, bin, 255, ADAPTIVE_THRESH_MEAN_C , THRESH_BINARY, 5, -10);
		
		vector<Point2f> candidateCorners;
		goodFeaturesToTrack(bin, candidateCorners, 100, 0.01, 24, mask);

		cornerSubPix(bin, candidateCorners, Size(5, 5), Size(-1, -1), TermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 40, 0.001));

		vector<Point2f> goodPoints;
		for (Point2f corner : candidateCorners) {
			if (goodCornerCheck(corner, bin) && !closeToBoundary(corner))
				goodPoints.push_back(corner);
		}

		if (goodPoints.size() > 0) {
			vector<Point2f> reprojGoodPoints;
			perspectiveTransform(goodPoints, reprojGoodPoints, keyToTopTrans * currToKeyTrans);
			// try to add these new corners into the relocatedCorners
			for (int i = 0; i < reprojGoodPoints.size(); i++) {
				// if it does not already exist and coincides with the reprojection of one of the 28 pitch points
				bool exists = hasSimilarPoint(relocatedPitchPoints, reprojGoodPoints[i], 10) ;
				int minId = findClosestPoint(data.pitchPoints, reprojGoodPoints[i]);
				double minDist = norm(reprojGoodPoints[i] - data.pitchPoints[minId]);
				if ((!exists ) && (minDist < 16) && (minDist < reprojErr[minId])) {
					relocatedCorners.push_back(goodPoints[i]);
					relocatedPitchPoints.push_back(data.pitchPoints[minId]);
					reprojErr[minId] = minDist;
				}
			}
		}

		cout<<relocatedCorners.size()<<" points relocated"<<endl;
	}
Code Example #11
File: screendetector.cpp Project: DrawTable/GUI
Point ScreenDetector::transformPoint(Point point, Mat transformMatrix)
{
    vector<Point2f> src;
    vector<Point2f> dst;
    src.push_back(point);
    perspectiveTransform(src, dst, transformMatrix);
    return dst.at(0);
}
Code Example #12
// Draw the current bounding box for the painting by transforming
// the reference points using the provided homography.
// The reference_bounds_ variable contains the locations of the four
// corners in the reference coordinate frame.
// Make sure you update the current_bounds_ variable!
void Augmentor::render_bounds(ColorImage& frame, const Homography& H) {
    perspectiveTransform(reference_bounds_, current_bounds_, H);
    cv::line(frame, current_bounds_[0], current_bounds_[1], cv::Scalar(0, 255, 0), 2);
    cv::line(frame, current_bounds_[1], current_bounds_[2], cv::Scalar(0, 255, 0), 2);
    cv::line(frame, current_bounds_[2], current_bounds_[3], cv::Scalar(0, 255, 0), 2);
    cv::line(frame, current_bounds_[3], current_bounds_[0], cv::Scalar(0, 255, 0), 2);
    
}
Code Example #13
void CameraPoseOptimization::computeCorrespondenceBetweenTwoFrames(
	vector<cv::KeyPoint>& keypointsInCurrFrame,
	cv::Mat& descriptorsInCurrFrame,
	vector<DMatch>& filteredMatches,
	vector<char>& matchesMask)
{
	FastFeatureDetector detector;
	BriefDescriptorExtractor extractor;

	// Match the descriptors of the two images through cross-checking
	BFMatcher matcher(NORM_L2);
	crossCheckMatching(matcher, m_descriptorsInLastFrame, descriptorsInCurrFrame, filteredMatches);

	// Given the corresponding keypoint pairs of two images, compute the homography matrix,
	// which can be treated as a 3D transformation matrix between two images.
	vector<int> queryIdxs(filteredMatches.size()), trainIdxs(filteredMatches.size());
	for (size_t i = 0; i < filteredMatches.size(); i++)
	{
		queryIdxs[i] = filteredMatches[i].queryIdx;
		trainIdxs[i] = filteredMatches[i].trainIdx;
	}
	vector<Point2f> points1, points2;
	KeyPoint::convert(m_keypointsInLastFrame, points1, queryIdxs);
	KeyPoint::convert(keypointsInCurrFrame, points2, trainIdxs);
	Mat H12 = findHomography(Mat(points1), Mat(points2), CV_RANSAC, g_thresholdRansacReproj);

	//std::cout << H12.cols << "," << H12.rows << std::endl;
	//for (int i = 0; i != H12.rows; ++i)
	//{
	//	for (int j = 0; j != H12.cols; ++j)
	//	{
	//		std::cout << H12.at<double>(i,j) << " ";
	//	}
	//	std::cout << std::endl;
	//}
	//

	// For each keypoint A in the first image, apply the homography to obtain its transformed point B
	// in the second image. If B lies close enough to A's corresponding keypoint computed by the
	// cross-checking method, treat the pair as reasonable by setting its flag in the variable
	// "matchesMask" to 1. Otherwise, treat the pair as unreasonable by setting its flag to 0.
	matchesMask.clear();
	matchesMask.resize(filteredMatches.size(), 1);

	if (!H12.empty())
	{
		Mat points1t;
		perspectiveTransform(Mat(points1), points1t, H12);
		for (size_t i1 = 0; i1 < points1.size(); i1++)
		{
			// If the distance between A's transformed point A1 and its corresponding point B is too large, 
			// then the correspondence pair (A,B) is unreasonable.
			if (norm(points2[i1] - points1t.at<Point2f>((int)i1, 0)) > g_thresholdRansacReproj)
				matchesMask[i1] = 0; // mask 0 for unreasonable pair
		}
	}
}
Code Example #14
File: Recognition.cpp Project: GiulioGx/ChessMate
vector<Point2f> Recognition::projectPointsBack(vector<Point2f> mesh, Mat& originalImage, Mat homography){
	vector<Point2f> reprojectedPoints;
	Mat homoInverse=homography.inv();
	perspectiveTransform(mesh, reprojectedPoints, homoInverse);
	/*	for(unsigned int i=0; i< mesh.size(); i++){
		circle(originalImage, reprojectedPoints[i], 1, Scalar(0,0,255), 2);
	}*/
	return reprojectedPoints;
}
Code Example #15
File: RelicScn.cpp Project: ping28198/TMTVision
bool RelicScn::Match_an_Obj(RelicObj obj)
{
	string message;

	FlannBasedMatcher matcher;
	vector<DMatch> matches;

	matcher.match(obj.descriptors, this->descriptors, matches);
	vector<DMatch> good_matches = Get_Good_Matches(matches);

	//-- Localize the object
	std::vector<Point2f> obj_points;
	std::vector<Point2f> scn_points;
	for (size_t i = 0; i < good_matches.size(); i++)
	{
		//-- Get the keypoints from the good matches
		obj_points.push_back(obj.keypoints[good_matches[i].queryIdx].pt);
		scn_points.push_back(this->keypoints[good_matches[i].trainIdx].pt);
	}
	Mat H = cv::findHomography(obj_points, scn_points, RANSAC);

	std::vector<Point2f> obj_corners(4);
	
	obj_corners[0] = cvPoint(0, 0);
	obj_corners[1] = cvPoint(obj.img_width-1, 0);
	obj_corners[2] = cvPoint(obj.img_width-1, obj.img_height-1);
	obj_corners[3] = cvPoint(0, obj.img_height-1);

	std::vector<Point2f> possible_obj_corners(4);
	perspectiveTransform(obj_corners, possible_obj_corners, H);
	BOOST_LOG_TRIVIAL(info) << "原始目标物体大小(像素): " << contourArea(obj_corners);
	BOOST_LOG_TRIVIAL(info) << "检测到的物体大小(像素): " << contourArea(possible_obj_corners);
	this->corners = possible_obj_corners;
	double possible_target_area = contourArea(possible_obj_corners);
	double whole_scene_area = this->img_gray.rows*this->img_gray.cols;
	BOOST_LOG_TRIVIAL(info) << "环境图像大小(像素): " << whole_scene_area;
	double ratio = possible_target_area / whole_scene_area;
	BOOST_LOG_TRIVIAL(info) << "检测到的目标占全图比例: " << ratio;
	if (ratio>0.03 && ratio<1)
	{
		for (int i = 0; i < possible_obj_corners.size(); i++)
		{
			if (possible_obj_corners[i].x < 0 || possible_obj_corners[i].y < 0)
			{
				BOOST_LOG_TRIVIAL(info) << "Failed to detect the target object!";
				return false;
			}
		}
		BOOST_LOG_TRIVIAL(info) << "Target object detected successfully!";
		return true;
	} 
	else
	{
		BOOST_LOG_TRIVIAL(info) << "未能检测到目标物体!";
		return false;
	}
}
Code Example #16
 Transform TransformationBuilder::getTransform(const Mat& homography, const vector<Point2f>& objectCorners, const Size2f& imageSize)
 {
     vector<Point2f> objectCornersTransformed;
     perspectiveTransform(objectCorners, objectCornersTransformed, homography);
     
     Transform result;
     result.translation = getTranslation(objectCornersTransformed, imageSize);
     result.scale = getScale(objectCornersTransformed, objectCorners);
     result.rotation = getRotation(homography);
     return result;
 }
Code Example #17
double FieldLineDetector::calcMatchError(vector<FieldCorner*> &fieldCrossings,
		vector<Point2f> &cornersPoints, Mat &H)
{
	assert(fieldCrossings.size() == cornersPoints.size());
	vector<Point2f> cornersPointsTrans;
	perspectiveTransform(cornersPoints, cornersPointsTrans, H);
	double errorSum = 0;
	for (int i = 0; i < fieldCrossings.size(); i++)
	{
		double error = norm(
				fieldCrossings.at(i)->point - cornersPointsTrans.at(i));
		error *= error;
		errorSum += error;
	}

	return errorSum;
}
Code Example #18
void ofxSURFTracker::transFormPoints(vector<ofPoint> & points) {

    if(objectLifeTime >=1 && points.size() > 0) {
        if(homography.empty()) return;
        vector<Point2f > inputs;
        vector<Point2f > results;
        for(int i = 0; i < points.size(); i++) {
            inputs.push_back(Point2f( points[i].x,  points[i].y));
        }
        perspectiveTransform(inputs, results, homography);
        // back to the points array
        points.clear();
        for(int i = 0; i < results.size(); i++) {
            points.push_back(ofPoint( results[i].x,  results[i].y));
        }
    }
}
Code Example #19
float FieldLineDetector::calcMatchErrorBots(vector<Point2f>& botPosField, Mat& H)
{
	vector<Point2f> botPosFieldTrans;
	perspectiveTransform(botPosField, botPosFieldTrans, H);

//	Mat img;
//	cvtColor(imgThres, img, CV_GRAY2BGR);

	int offset = 30;
	float sumRatio = 0;
	for (int j = 0; j < botPosFieldTrans.size(); j++)
	{
		Point2f p(botPosFieldTrans.at(j));
		if (p.x - offset < 0 || p.y - offset < 0
				|| p.x + offset > imgThres.cols
				|| p.y + offset > imgThres.rows)
		{
			continue;
		}
		if (!fieldModel->field.contains(botPosField.at(j)))
		{
			continue;
		}
		Rect rect(Point2f(p.x - offset, p.y - offset),
				Point2f(p.x + offset, p.y + offset));
		Mat sub = imgThres(rect);
		int numNonBlack = countNonZero(sub);
		int total = sub.rows * sub.cols;
		float ratio = (float) numNonBlack / total;
		sumRatio += ratio;
//		rectangle(img, rect, Scalar(250, 0, 250));
//		stringstream ss;
//		ss << ratio;
//		putText(img, ss.str(), cv::Point(rect.x, rect.y - 15),
//				CV_FONT_HERSHEY_PLAIN, 1, Scalar(0, 0, 250), 1);
	}
	return sumRatio / botPosFieldTrans.size();
}
Code Example #20
void ofxFeatureFinder::drawDetected() {
    vector<ofxFeatureFinderObject>::iterator it = detectedObjects.begin();
    
    for(int i=0; i < detectedObjects.size(); i++){
        
        ofxFeatureFinderObject object = detectedObjects.at(i);
        
        cv::Mat H = detectedHomographies.at(i);
        
        ofSetLineWidth(2);
        ofSetColor(0, 255, 255);
        
        vector<ofPolyline>::iterator outline = object.outlines.begin();
        for(; outline != object.outlines.end(); ++outline){

            vector<cv::Point2f> objectPoints((*outline).size());
            vector<cv::Point2f> scenePoints((*outline).size());
        
            for (int i=0, l=(*outline).size(); i<l; i++) {
                ofPoint p = (*outline)[i];
                objectPoints[i] = cv::Point2f(p.x, p.y);
            }

            perspectiveTransform( objectPoints, scenePoints, H);

            ofPolyline sceneOutlines;
            for (int i=0, l=(*outline).size(); i<l; i++) {
                cv::Point2f p = scenePoints[i];
                sceneOutlines.addVertex(p.x, p.y);
            }
            sceneOutlines.close();
            sceneOutlines.draw();
        }

    }
    
}
Code Example #21
File: kf.cpp Project: CSL-KU/Autoware
bool orbMatch(cv::Mat& inImageScene, cv::Mat& inImageObj, cv::Rect& outBoundingBox, unsigned int inMinMatches=2, float inKnnRatio=0.7)
{
	//vector of keypoints
	std::vector< cv::KeyPoint > keypointsO;
	std::vector< cv::KeyPoint > keypointsS;

	cv::Mat descriptors_object, descriptors_scene;

	cv::Mat outImg;
	inImageScene.copyTo(outImg);

	//-- Step 1: Extract keypoints
	cv::OrbFeatureDetector orb(ORB_NUM_FEATURES);
	orb.detect(inImageScene, keypointsS);
	if (keypointsS.size() < ORB_MIN_MATCHES)
	{
		//cout << "Not enough keypoints S, object not found>" << keypointsS.size() << endl;
		return false;
	}
	orb.detect(inImageObj, keypointsO);
	if (keypointsO.size() < ORB_MIN_MATCHES)
	{
		//cout << "Not enough keypoints O, object not found>" << keypointsO.size() << endl;
		return false;
	}

	//Calculate descriptors (feature vectors)
	cv::OrbDescriptorExtractor extractor;
	extractor.compute(inImageScene, keypointsS, descriptors_scene);
	extractor.compute(inImageObj, keypointsO, descriptors_object);

	//Matching descriptor vectors using a brute-force matcher
	cv::BFMatcher matcher;
	//descriptors_scene.size(), keypointsO.size(), keypointsS.size();
	std::vector< std::vector< cv::DMatch >  > matches;
	matcher.knnMatch(descriptors_object, descriptors_scene, matches, 2);
	std::vector< cv::DMatch > good_matches;
	good_matches.reserve(matches.size());

	for (size_t i = 0; i < matches.size(); ++i)
	{
		if (matches[i].size() < 2)
			continue;

		const cv::DMatch &m1 = matches[i][0];
		const cv::DMatch &m2 = matches[i][1];

		if (m1.distance <= inKnnRatio * m2.distance)
			good_matches.push_back(m1);
	}

	if ((good_matches.size() >= inMinMatches))
	{
		std::vector< cv::Point2f > obj;
		std::vector< cv::Point2f > scene;

		for (unsigned int i = 0; i < good_matches.size(); i++)
		{
			// Get the keypoints from the good matches
			obj.push_back(keypointsO[good_matches[i].queryIdx].pt);
			scene.push_back(keypointsS[good_matches[i].trainIdx].pt);
		}

		cv::Mat H = findHomography(obj, scene, CV_RANSAC);

		// Get the corners from the image_1 ( the object to be "detected" )
		std::vector< cv::Point2f > obj_corners(4);
		obj_corners[0] = cvPoint(0, 0); obj_corners[1] = cvPoint(inImageObj.cols, 0);
		obj_corners[2] = cvPoint(inImageObj.cols, inImageObj.rows); obj_corners[3] = cvPoint(0, inImageObj.rows);
		std::vector< cv::Point2f > scene_corners(4);

		perspectiveTransform(obj_corners, scene_corners, H);

		// Draw lines between the corners (the mapped object in the scene - image_2 )
		line(outImg, scene_corners[0], scene_corners[1], cv::Scalar(255, 0, 0), 2); //TOP line
		line(outImg, scene_corners[1], scene_corners[2], cv::Scalar(255, 0, 0), 2);
		line(outImg, scene_corners[2], scene_corners[3], cv::Scalar(255, 0, 0), 2);
		line(outImg, scene_corners[3], scene_corners[0], cv::Scalar(255, 0, 0), 2);

		//imshow("Scene", outImg);
		//imshow("Obj", inImageObj);
		//cvWaitKey(5);

		return true;
	}

	return false;
}
Code Example #22
File: RelicDetect.cpp Project: ping28198/TMTVision
bool RelicDetect::Match(RelicDetect obj,RelicDetect scn)
{
	FlannBasedMatcher matcher;
	std::vector< DMatch > matches;
	
	matcher.match(obj.descriptors, scn.descriptors, matches);
	double max_dist = 0; double min_dist = 100;
	//-- Quick calculation of max and min distances between keypoints
	for (int i = 0; i < obj.descriptors.rows; i++)
	{
		double dist = matches[i].distance;
		if (dist < min_dist) min_dist = dist;
		if (dist > max_dist) max_dist = dist;
	}
	printf("-- Max dist : %f \n", max_dist);
	printf("-- Min dist : %f \n", min_dist);
	//-- Draw only "good" matches (i.e. whose distance is less than 3*min_dist )
	std::vector< DMatch > good_matches;
	for (int i = 0; i < obj.descriptors.rows; i++)
	{
		if (matches[i].distance <= 3 * min_dist)
		{
			good_matches.push_back(matches[i]);
		}
	}
	max_dist = 0;min_dist = 100;double total_min_dist = 0;
	for (int i = 0; i < good_matches.size(); i++)
	{
		double dist = good_matches[i].distance;
		total_min_dist += dist;
		if (dist < min_dist) min_dist = dist;
		if (dist > max_dist) max_dist = dist;

	}
	printf("-- good matches Max dist : %f \n", max_dist);
	printf("-- good matches Min dist : %f \n", min_dist);
	printf("-- good matches total Min dist : %f \n", total_min_dist);
	cout << "-- good matches size " << good_matches.size() << endl;
	cout << "-- dist per match" << total_min_dist / (double)good_matches.size() << endl;
	Mat img_matches;
	drawMatches(obj.img_color, obj.keypoints, scn.img_color, scn.keypoints,
		good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
		std::vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
	//imshow("matches", img_matches);
	//-- Localize the object
	std::vector<Point2f> obj_points;
	std::vector<Point2f> scn_points;
	for (size_t i = 0; i < good_matches.size(); i++)
	{
		//-- Get the keypoints from the good matches
		obj_points.push_back(obj.keypoints[good_matches[i].queryIdx].pt);
		scn_points.push_back(scn.keypoints[good_matches[i].trainIdx].pt);
	}
	Mat H = cv::findHomography(obj_points, scn_points, RANSAC);
	cout << "H:" << endl;
	for (int i = 0;i < H.rows;i++)
	{
		for (int j = 0;j < H.cols;j++)
		{
			cout << H.at<double>(i, j) << " ";
		}
		cout << endl;
	}
	//-- Get the corners from the image_1 ( the object to be "detected" )
	std::vector<Point2f> obj_corners(4);
	obj_corners[0] = cvPoint(0, 0);
	obj_corners[1] = cvPoint(obj.img_color.cols, 0);
	obj_corners[2] = cvPoint(obj.img_color.cols, obj.img_color.rows);
	obj_corners[3] = cvPoint(0, obj.img_color.rows);
	std::vector<Point2f> scene_corners(4);
	perspectiveTransform(obj_corners, scene_corners, H);
	cout << "object area" << contourArea(obj_corners) << endl;
	cout << "scene detected area" << contourArea(scene_corners) << endl;
	auto scene_area = contourArea(scene_corners);
	//-- Draw lines between the corners (the mapped object in the scene - image_2 )
	line(img_matches, scene_corners[0] + Point2f(obj.img_color.cols, 0), scene_corners[1] + Point2f(obj.img_color.cols, 0), Scalar(0, 255, 0), 4);
	line(img_matches, scene_corners[1] + Point2f(obj.img_color.cols, 0), scene_corners[2] + Point2f(obj.img_color.cols, 0), Scalar(0, 255, 0), 4);
	line(img_matches, scene_corners[2] + Point2f(obj.img_color.cols, 0), scene_corners[3] + Point2f(obj.img_color.cols, 0), Scalar(0, 255, 0), 4);
	line(img_matches, scene_corners[3] + Point2f(obj.img_color.cols, 0), scene_corners[0] + Point2f(obj.img_color.cols, 0), Scalar(0, 255, 0), 4);
	//-- Show detected matches
	imshow("Good Matches & Object detection", img_matches);
	waitKey(0);
	if (scene_area>1000)
	{
		return true;
	} 
	else
	{
		return false;
	}
}
/*
 * @function surf_feature_detect_RANSAC SURF feature extraction and matching, with RANSAC outlier removal and object marking
 * @return null
 * @method SURF feature detector
 * @method SURF descriptor
 * @method findFundamentalMat RANSAC outlier removal
 * @method findHomography find the perspective transformation matrix
 */
void surf_feature_detect_bruteforce_RANSAC_Homography(Mat SourceImg, Mat SceneImg, Mat imageMatches, char* string)
{
	vector<KeyPoint> keyPoints1, keyPoints2;
	SurfFeatureDetector detector(400);
	detector.detect(SourceImg, keyPoints1); // detect keypoints in the source image
	detector.detect(SceneImg, keyPoints2); // detect keypoints in the scene image

	SurfDescriptorExtractor surfDesc;
	Mat SourceImgDescriptor, SceneImgDescriptor;
	surfDesc.compute(SourceImg, keyPoints1, SourceImgDescriptor); // compute SURF descriptors for the source image
	surfDesc.compute(SceneImg, keyPoints2, SceneImgDescriptor); // compute SURF descriptors for the scene image

	// match the feature points of the two images
	BruteForceMatcher<L2<float>>matcher;
	vector<DMatch> matches;
	matcher.match(SourceImgDescriptor, SceneImgDescriptor, matches);
	std::nth_element(matches.begin(), matches.begin() + 29 ,matches.end());
	matches.erase(matches.begin() + 30, matches.end());

	//FLANN-based matching (alternative, commented out)
	//vector<DMatch> matches;
	//DescriptorMatcher *pMatcher = new FlannBasedMatcher;
	//pMatcher->match(SourceImgDescriptor, SceneImgDescriptor, matches);
	//delete pMatcher;

	//keyPoints1: keypoints extracted from image 1
	//keyPoints2: keypoints extracted from image 2
	//matches: matches between the keypoints
	int ptCount = (int)matches.size();
	Mat p1(ptCount, 2, CV_32F);
	Mat p2(ptCount, 2, CV_32F);
	Point2f pt;
	for(int i = 0; i < ptCount; i++)
	{
		pt = keyPoints1[matches[i].queryIdx].pt;
		p1.at<float>(i, 0) = pt.x;
		p1.at<float>(i, 1) = pt.y;

		pt = keyPoints2[matches[i].trainIdx].pt;
		p2.at<float>(i, 0) = pt.x;
		p2.at<float>(i, 1) = pt.y;
	}
	Mat m_Fundamental;
	vector<uchar> m_RANSACStatus;
	m_Fundamental = findFundamentalMat(p1, p2, m_RANSACStatus, FM_RANSAC);
	int OutlinerCount = 0;
	for(int i = 0; i < ptCount; i++)
	{
		if(m_RANSACStatus[i] == 0)
		{
			OutlinerCount++;
		}
	}

	// collect the inliers
	vector<Point2f> m_LeftInlier;
	vector<Point2f> m_RightInlier;
	vector<DMatch> m_InlierMatches;

	// the three variables above store the inliers and their match relations
	int InlinerCount = ptCount - OutlinerCount;
	m_InlierMatches.resize(InlinerCount);
	m_LeftInlier.resize(InlinerCount);
	m_RightInlier.resize(InlinerCount);
	InlinerCount = 0;
	for (int i=0; i<ptCount; i++)
	{
		if (m_RANSACStatus[i] != 0)
		{
			m_LeftInlier[InlinerCount].x = p1.at<float>(i, 0);
			m_LeftInlier[InlinerCount].y = p1.at<float>(i, 1);
			m_RightInlier[InlinerCount].x = p2.at<float>(i, 0);
			m_RightInlier[InlinerCount].y = p2.at<float>(i, 1);
			m_InlierMatches[InlinerCount].queryIdx = InlinerCount;
			m_InlierMatches[InlinerCount].trainIdx = InlinerCount;
			InlinerCount++;
		}
	}

	// convert the inliers into a format drawMatches can use
	vector<KeyPoint> key1(InlinerCount);
	vector<KeyPoint> key2(InlinerCount);
	KeyPoint::convert(m_LeftInlier, key1);
	KeyPoint::convert(m_RightInlier, key2);

	// show the inlier matches after computing F
	drawMatches(SourceImg, key1, SceneImg, key2, m_InlierMatches, imageMatches);
	//drawKeypoints(SourceImg, key1, SceneImg, Scalar(255, 0, 0), DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
	
	vector<Point2f> obj;
	vector<Point2f> scene;
	for(unsigned int i = 0; i < m_InlierMatches.size(); i++)
	{
		obj.push_back(key1[m_InlierMatches[i].queryIdx].pt); // query image, i.e. feature locations in the object image
		scene.push_back(key2[m_InlierMatches[i].trainIdx].pt); // train image, i.e. feature locations in the scene image
	}
	// solve for the transformation matrix
	// like getPerspectiveTransform, it takes corresponding points in the original and transformed images and builds the mapping between them, i.e. the transformation matrix
	// findHomography finds the transform directly on the perspective plane
	Mat H = findHomography(obj, scene, CV_RANSAC);
	vector<Point2f> obj_corners(4);
	obj_corners[0] = cvPoint(0, 0);
	obj_corners[1] = cvPoint(SourceImg.cols, 0);
	obj_corners[2] = cvPoint(SourceImg.cols, SourceImg.rows);
	obj_corners[3] = cvPoint(0, SourceImg.rows);
	vector<Point2f> scene_corners(4);
	// perspective transform: project the points onto the new view plane
	// using the homography estimated above
	perspectiveTransform(obj_corners, scene_corners, H);

	line(imageMatches, scene_corners[0] + Point2f(SourceImg.cols, 0), scene_corners[1] + Point2f(SourceImg.cols, 0), Scalar(0, 0, 255), 4);
	line(imageMatches, scene_corners[1] + Point2f(SourceImg.cols, 0), scene_corners[2] + Point2f(SourceImg.cols, 0), Scalar(0, 0, 255), 4);
	line(imageMatches, scene_corners[2] + Point2f(SourceImg.cols, 0), scene_corners[3] + Point2f(SourceImg.cols, 0), Scalar(0, 0, 255), 4);
	line(imageMatches, scene_corners[3] + Point2f(SourceImg.cols, 0), scene_corners[0] + Point2f(SourceImg.cols, 0), Scalar(0, 0, 255), 4);
	imshow(string, imageMatches);

	imwrite("feature_detect.jpg", imageMatches);
}
Code Example #24
  void imageCb(const sensor_msgs::ImageConstPtr& msg)
  {
    //get image cv::Pointer
    cv_bridge::CvImagePtr cv_ptr;

    //acquire image frame
    try
    {
      cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::BGR8);
    }
    catch (cv_bridge::Exception& e)
    {
      ROS_ERROR("cv_bridge exception: %s", e.what());
      return;
    }

    const std::string filename =  
    "/home/cam/Documents/catkin_ws/src/object_detection/positive_images/wrench.png";

    //read in calibration image
    cv::Mat object = cv::imread(filename, 
      CV_LOAD_IMAGE_GRAYSCALE);

    cv::namedWindow("Good Matches", CV_WINDOW_AUTOSIZE);
    //SURF Detector, and descriptor parameters
    int minHess=2000;
    std::vector<cv::KeyPoint> kpObject, kpImage;
    cv::Mat desObject, desImage;

    //Display keypoints on training image
    cv::Mat interestPointObject=object;

    //SURF Detector, and descriptor parameters, match object initialization
    cv::SurfFeatureDetector detector(minHess);
    detector.detect(object, kpObject);
    cv::SurfDescriptorExtractor extractor;
    extractor.compute(object, kpObject, desObject);
    cv::FlannBasedMatcher matcher;

    //Object corner cv::Points for plotting box
    std::vector<cv::Point2f> obj_corners(4);
    obj_corners[0] = cvPoint(0,0);
    obj_corners[1] = cvPoint( object.cols, 0 );
    obj_corners[2] = cvPoint( object.cols, object.rows );
    obj_corners[3] = cvPoint( 0, object.rows );

    double frameCount = 0;
    float thresholdMatchingNN=0.7;
    unsigned int thresholdGoodMatches=4;
    unsigned int thresholdGoodMatchesV[]={4,5,6,7,8,9,10};

    char escapeKey = 'k';

    for (int j=0; j<7;j++)
    {
      thresholdGoodMatches = thresholdGoodMatchesV[j];
      
      while (escapeKey != 'q')
      {
        frameCount++;
        cv::Mat image;
        cvtColor(cv_ptr->image, image, CV_RGB2GRAY);

        cv::Mat des_image, img_matches, H;
        std::vector<cv::KeyPoint> kp_image;
        std::vector<std::vector<cv::DMatch > > matches;
        std::vector<cv::DMatch> good_matches;
        std::vector<cv::Point2f> obj;
        std::vector<cv::Point2f> scene;
        std::vector<cv::Point2f> scene_corners(4);

        detector.detect( image, kp_image );
        extractor.compute( image, kp_image, des_image );
        matcher.knnMatch(desObject, des_image, matches, 2);

        for(int i = 0; i < std::min(des_image.rows-1, (int) matches.size()); i++) 
        //THIS LOOP IS SENSITIVE TO SEGFAULTS
        {
          if((matches[i][0].distance < thresholdMatchingNN*(matches[i][1].distance)) 
            && ((int) matches[i].size()<=2 && (int) matches[i].size()>0))
            {
                good_matches.push_back(matches[i][0]);
            }
        }

        //Draw only "good" matches
        cv::drawMatches(object, kpObject, image, kp_image, good_matches, img_matches, 
          cv::Scalar::all(-1), cv::Scalar::all(-1), std::vector<char>(), 
          cv::DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
        
        if (good_matches.size() >= thresholdGoodMatches)
        {

          //Display that the object is found
          cv::putText(img_matches, "Object Found", cvPoint(10,50), 0, 2, 
            cvScalar(0,0,250), 1, CV_AA);
            for(unsigned int i = 0; i < good_matches.size(); i++ )
            {
              //Get the keypoints from the good matches
              obj.push_back( kpObject[ good_matches[i].queryIdx ].pt );
              scene.push_back( kp_image[ good_matches[i].trainIdx ].pt );
            }

            H = findHomography( obj, scene, CV_RANSAC );

            perspectiveTransform( obj_corners, scene_corners, H);

            //Draw lines between the corners (the mapped object in the scene image )
            cv::line( img_matches, scene_corners[0] + cv::Point2f( object.cols, 0), 
              scene_corners[1] + cv::Point2f( object.cols, 0), cv::Scalar(0, 255, 0), 4 );
            cv::line( img_matches, scene_corners[1] + cv::Point2f( object.cols, 0), 
              scene_corners[2] + cv::Point2f( object.cols, 0), cv::Scalar( 0, 255, 0), 4 );
            cv::line( img_matches, scene_corners[2] + cv::Point2f( object.cols, 0), 
              scene_corners[3] + cv::Point2f( object.cols, 0), cv::Scalar( 0, 255, 0), 4 );
            cv::line( img_matches, scene_corners[3] + cv::Point2f( object.cols, 0), 
              scene_corners[0] + cv::Point2f( object.cols, 0), cv::Scalar( 0, 255, 0), 4 );
        }
        else
        {
          putText(img_matches, "", cvPoint(10,50), 0, 3, cvScalar(0,0,250), 1, CV_AA);
        }

        //Show detected matches
        imshow("Good Matches", img_matches);
        
        escapeKey=cvWaitKey(10);

        if(frameCount>10)
        {
          escapeKey='q';
        }


      }

      frameCount=0;
      escapeKey='a';
    }

    // Update GUI Window
    //cv::namedWindow(OPENCV_WINDOW);
    //cv::imshow(OPENCV_WINDOW, cv_ptr->image);
    //cv::waitKey(3);
    
    // Output modified video stream
    image_pub_.publish(cv_ptr->toImageMsg());
  }
void PushbroomStereo::RunStereoPushbroomStereo2(Mat leftImage, Mat rightImage,Mat laplacian_left,Mat laplacian_right,	std::vector<Point3f> *pointVector3d,std::vector<Point3i> *pointVector2d,std::vector<uchar> *pointColors)
{

    int row_start						= 0;//statet->row_start;
	int row_end							= leftImage.rows;//statet->row_end;

    //PushbroomStereoState state			= statet->state;

    // we will do this by looping through every block in the left image
    // (defined by blockSize) and checking for a matching value on
    // the right image

    std::vector<Point3f> localHitPoints;

	// to be confirmed
    int startJ = 0;
    int stopJ = leftImage.cols - (m_iDisparity + m_iBlockSize);
    if (m_iDisparity < 0)
    {
        startJ = -m_iDisparity;
        stopJ = leftImage.cols - m_iBlockSize;
    }

    //printf("row_start: %d, row_end: %d, startJ: %d, stopJ: %d, rows: %d, cols: %d\n", row_start, row_end, startJ, stopJ, leftImage.rows, leftImage.cols);
    int hitCounter = 0;
    //if (state.random_results < 0) 
	//{
        for (int i=row_start; i < row_end;i+=m_iBlockSize)
        {
            for (int j=startJ; j < stopJ; j+=m_iBlockSize)
            {
                // get the sum of absolute differences for this location  on both images
                int sad = GetSAD(leftImage, rightImage, laplacian_left, laplacian_right, j, i);
                // check to see if the SAD is below the threshold,
                // indicating a hit
				
                if (sad < m_iSadThreshold && sad >= 0)
                {
                    // got a hit
                    // now check for horizontal invariance (ie check for parts of the image that look the same as this which would indicate that this might be a false-positive)
                    if (!m_bCheck_horizontal_invariance || (CheckHorizontalInvariance(leftImage, rightImage, laplacian_left, laplacian_right, j, i)== false)) 
					{

                        // add it to the vector of matches
                        // don't forget to offset it by the blockSize,so we match the center of the block instead of the top left corner
                        localHitPoints.push_back(Point3f(j+m_iBlockSize/2.0, i+m_iBlockSize/2.0, -m_iDisparity));
                        //localHitPoints.push_back(Point3f(state.debugJ, state.debugI, -disparity));


                        uchar pxL = leftImage.at<uchar>(i,j);
                        pointColors->push_back(pxL); // this is the corner of the box, not the center

                        hitCounter ++;

                        if (m_bShow_display)
                            pointVector2d->push_back(Point3i(j, i, sad));
                     } // check horizontal invariance
                }
            }
        }


    // now we have an array of hits -- transform them to 3d points
    if (hitCounter > 0) 
		perspectiveTransform(localHitPoints, *pointVector3d, m_matQ);

}
Code Example #26
bool Homography::extract(cv::Mat &H, irr::core::vector2di *corners, irr::core::vector3df *position, irr::core::vector3df *angles, int refine) {
    
    if ( matches.size() > 3 && objectKeyPoints.size() < sceneKeyPoints.size() )
    {
        std::vector<cv::Point2f> objectPoints;
        std::vector<cv::Point2f> scenePoints;
        
        // get the keypoints from the goodmatches
        for( int i = 0; i < matches.size(); i++ )
        {
            objectPoints.push_back( objectKeyPoints[ matches[i].queryIdx ].pt );
            scenePoints.push_back( sceneKeyPoints[ matches[i].trainIdx ].pt );
        }
        
        // find the homography of the keypoints.
        H = cv::findHomography( objectPoints, scenePoints, CV_RANSAC );
        
        std::vector<cv::Point2f> obj_corners(4);
        std::vector<cv::Point2f> scene_corners(4);
        obj_corners[0] = cvPoint( 0,          0 );
        obj_corners[1] = cvPoint( objectSize.width, 0 );
        obj_corners[2] = cvPoint( objectSize.width, objectSize.height );
        obj_corners[3] = cvPoint( 0,          objectSize.height );
        
        // get the 2D points for the homography corners
        perspectiveTransform( obj_corners, scene_corners, H);
        
        if (refine > 0) {
            cv::Mat sceneCopyCopy = sceneCopy.clone();
            cv::warpPerspective(sceneCopy, sceneCopy, H, objectSize, cv::WARP_INVERSE_MAP | cv::INTER_CUBIC);
            cv::Mat H2;
            analyze(sceneCopy);
            if (extract(H2, NULL, NULL, NULL, refine - 1)) {
                H *= H2;
                perspectiveTransform( obj_corners, scene_corners, H);
            }
        }
        
        // give the caller the corners of the 2D plane
        if (corners != NULL)
            for (int i = 0; i < 4; i++) {
                corners[i] = irr::core::vector2di(scene_corners[i].x, scene_corners[i].y);
            }
        
        // init the rotation and translation vectors
        cv::Mat raux(3, 1, CV_64F), taux(3, 1, CV_64F);
        
        // calculating 3D points
        float maxSize = std::max(objectSize.width, objectSize.height);
        float unitW = objectSize.width / maxSize;
        float unitH = objectSize.height / maxSize;
        
        // get the rotation and translation vectors
        std::vector<cv::Point3f> scene_3d_corners(4);
        scene_3d_corners[0] = cv::Point3f(-unitW, -unitH, 0);
        scene_3d_corners[1] = cv::Point3f( unitW, -unitH, 0);
        scene_3d_corners[2] = cv::Point3f( unitW,  unitH, 0);
        scene_3d_corners[3] = cv::Point3f(-unitW,  unitH, 0);
        cv::solvePnP(scene_3d_corners, scene_corners, getCamIntrinsic(), cv::Mat(), raux, taux);
        
        // give the caller the 3D plane position and angle
        if (position != NULL)
            position->set(taux.at<double>(0, 0), -taux.at<double>(1, 0), taux.at<double>(2, 0));
        if (angles != NULL)
            angles->set(-raux.at<double>(0, 0) * irr::core::RADTODEG, raux.at<double>(1, 0) * irr::core::RADTODEG, -raux.at<double>(2, 0) * irr::core::RADTODEG);
        
        return true;
    }
    
    return false;
}
Code Example #27
//-----------------------------------------------------
void ofxSURFTracker::detect(unsigned char *pix, int inputWidth, int inputHeight) {

    /***
    code adapted from http://docs.opencv.org/doc/tutorials/features2d/feature_homography/feature_homography.html
     ***/

    if( inputWidth != inputImg.getWidth() || inputHeight != inputImg.getHeight()) {
        // this should only happen once
        inputImg.clear();
        inputImg.allocate(inputWidth, inputHeight);
        cout << "ofxSURFTracker : re-allocated the input image."<<endl;
    }

    // create the cvImage from the ofImage
    inputImg.setFromPixels(pix, inputWidth, inputHeight);
    inputImg.setROI( ofRectangle((inputWidth-width)/2,
                                 (inputHeight-height)/2,
                                 width,
                                 height
                                )
                   );

    // take out the piece that we want to use.
    croppedImg.setFromPixels(inputImg.getRoiPixels(), width, height);

    // make it into a trackable grayscale image
    trackImg = croppedImg;

    // do some fancy contrast stuff
    if(bContrast) {
        trackImg.contrastStretch();
    }

    // set up the feature detector
    detector =  SurfFeatureDetector(hessianThreshold,
                                    octaves,
                                    octaveLayers,
                                    bUpright);

    // clear existing keypoints from previous frame
    keypoints_scene.clear();

    // get the Mat to do the feature detection on
    Mat trackMat = cvarrToMat(trackImg.getCvImage());
    detector.detect( trackMat, keypoints_scene);

    // Calculate descriptors (feature vectors)
    extractor.compute( trackMat, keypoints_scene, descriptors_scene );

    // Matching descriptor vectors using FLANN matcher
    vector< DMatch > matches;
    if(!descriptors_object.empty() && !descriptors_scene.empty() ) {
        flannMatcher.match( descriptors_object, descriptors_scene, matches);
    }

    // Quick calculation of max and min distances between keypoints
    double max_dist = 0;
    double min_dist = 100;
    for( int i = 0; i < matches.size(); i++ ) {
        double dist = matches[i].distance;
        if( dist < min_dist ) min_dist = dist;
        if( dist > max_dist ) max_dist = dist;
    }

    // Filter matches upon quality : distance is between 0 and 1, lower is better
    good_matches.clear();
    for( int i = 0; i < matches.size(); i++ ) {
        if(matches[i].distance < 3 * min_dist && matches[i].distance < distanceThreshold) {
            good_matches.push_back( matches[i]);
        }
    }

    // find the homography
    // transform the bounding box for this scene
    vector <Point2f> scene_pts;
    vector <Point2f> object_pts;
    object_transformed.clear();
    if(good_matches.size() > minMatches) {
        for( int i = 0; i < good_matches.size(); i++ )
        {
            //-- Get the keypoints from the good matches
            object_pts.push_back( keypoints_object[ good_matches[i].queryIdx ].pt );
            scene_pts.push_back( keypoints_scene[ good_matches[i].trainIdx ].pt );
        }
        if( scene_pts.size() >5 && object_pts.size() > 5) {
            homography = findHomography( object_pts, scene_pts, CV_RANSAC);
            perspectiveTransform( object, object_transformed, homography);
        }
        // being here means we have found a decent match
        objectLifeTime += 0.05;

    } else {
        // we haven't found a decent match
        objectLifeTime -= 0.05;
    }
    if(objectLifeTime > 1) {
        objectLifeTime = 1;
    } else if( objectLifeTime < 0) {
        objectLifeTime = 0;
    }
}
Code Example #28
File: pushbroom-stereo.cpp Project: soulsheng/flight
void PushbroomStereo::RunStereoPushbroomStereo( Mat leftImage, Mat rightImage, Mat laplacian_left, Mat laplacian_right,
	cv::vector<Point3f> *pointVector3d, cv::vector<Point3i> *pointVector2d, cv::vector<uchar> *pointColors,
	int row_start,  int row_end, PushbroomStereoState state )
{
    // we will do this by looping through every block in the left image
    // (defined by blockSize) and checking for a matching value on
    // the right image

    cv::vector<Point3f> localHitPoints;

    int blockSize = state.blockSize;
    int disparity = state.disparity;
    int sadThreshold = state.sadThreshold;

    int startJ = 0;
    int stopJ = leftImage.cols - (disparity + blockSize);
    if (disparity < 0)
    {
        startJ = -disparity;
        stopJ = leftImage.cols - blockSize;
    }

    //printf("row_start: %d, row_end: %d, startJ: %d, stopJ: %d, rows: %d, cols: %d\n", row_start, row_end, startJ, stopJ, leftImage.rows, leftImage.cols);

    int hitCounter = 0;


    if (state.random_results < 0) {
		int *sadArray = new int[ leftImage.rows * leftImage.step ];
		int iStep, jStep;
#ifdef USE_GPU
		StopWatchInterface	*timer;
		sdkCreateTimer( &timer );
		sdkResetTimer( &timer );
		sdkStartTimer( &timer );

		//GetSADBlock(row_start, row_end, blockSize, startJ, stopJ, sadArray, leftImage, rightImage, laplacian_left, laplacian_right, state);
		m_sadCalculator.runGetSAD( row_start, row_end, startJ, stopJ, sadArray, leftImage.data, rightImage.data, laplacian_left.data, laplacian_right.data, leftImage.step,
			state.blockSize, state.disparity, state.sobelLimit );

		sdkStopTimer( &timer );
		//printf("RunStereo bottleneck timer: %.2f ms \n", sdkGetTimerValue( &timer) );
		sdkDeleteTimer( &timer );

#endif

		int gridY = (row_end - row_start)/blockSize;
		int gridX = (stopJ - startJ)/blockSize;

		for (int y=0; y< gridY; y++)
		{
			for (int x=0; x< gridX; x++)
			{               
                // check to see if the SAD is below the threshold,
                // indicating a hit
				int i = row_start + y * blockSize;
				int j = startJ + x * blockSize;
#ifdef USE_GPU
				int sad = sadArray[ y * gridX + x];
#else
				int sad= GetSAD(leftImage, rightImage, laplacian_left, laplacian_right, j, i, state);
#endif
                if (sad < sadThreshold && sad >= 0)
                {
                    // got a hit

                    // now check for horizontal invariance
                    // (ie check for parts of the image that look the same as this
                    // which would indicate that this might be a false-positive)

                    if (!state.check_horizontal_invariance || CheckHorizontalInvariance(leftImage, rightImage, laplacian_left, laplacian_right, j, i, state) == false) {

                        // add it to the vector of matches
                        // don't forget to offset it by the blockSize,
                        // so we match the center of the block instead
                        // of the top left corner
                        localHitPoints.push_back(Point3f(j+blockSize/2.0, i+blockSize/2.0, -disparity));

                        //localHitPoints.push_back(Point3f(state.debugJ, state.debugI, -disparity));


                        uchar pxL = leftImage.at<uchar>(i,j);
                        pointColors->push_back(pxL); // TODO: this is the corner of the box, not the center

                        hitCounter ++;

                        if (state.show_display)
                        {
                            pointVector2d->push_back(Point3i(j, i, sad));
                        }
                    } // check horizontal invariance
                }
            }
        }
    } else {

        double intpart;

        float fractpart = modf(state.random_results , &intpart);
        hitCounter = int(intpart);

        // determine if this is a time we'll use that last point
        std::random_device rd;
        std::default_random_engine generator(rd()); // rd() provides a random seed
        std::uniform_real_distribution<float> distribution(0, 1);

        if (fractpart > distribution(generator)) {
            hitCounter ++;
        }

        for (int i = 0; i < hitCounter; i++) {

            int randx = rand() % (stopJ - startJ) + startJ;
            int randy = rand() % (row_end - row_start) + row_start;

            localHitPoints.push_back(Point3f(randx, randy, -disparity));
        }
    }

    // now we have an array of hits -- transform them to 3d points
    if (hitCounter > 0) {

        perspectiveTransform(localHitPoints, *pointVector3d, state.Q);
    }

}
Code Example #29
File: Feature.cpp Project: xyhGit/picPro
// Planar object detection
void Feature::objectDetect( Mat& objectImage, Mat& sceneImage , Mat&outImage,
							Mat& objectDescriptor,Mat& sceneDescriptor, vector<DMatch>& matches,
							vector<KeyPoint>& objectKeypoints, vector<KeyPoint>& sceneKeypoints)
{
	double max_dist = 0; double min_dist = 100;

	// max and min distances between matched feature points
	for( int i = 0; i < objectDescriptor.rows; i++ )
	{ 
		double dist = matches[i].distance;
		if( dist < min_dist ) 
			min_dist = dist;
		if( dist > max_dist ) 
			max_dist = dist;
	}


	// pick out the stronger matches (a distance radius could also be used)
	std::vector< DMatch > good_matches;
	double acceptedDist = 2*min_dist;

	for( int i = 0; i < objectDescriptor.rows; i++ )
	{
		if( matches[i].distance < acceptedDist )
		{ 
			good_matches.push_back( matches[i]); 
		}
	}
	
	// draw the matching result
	drawMatches( objectImage, objectKeypoints, sceneImage, sceneKeypoints,
				 good_matches, outImage, Scalar::all(-1), Scalar::all(-1),
				 vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

	// get the positions of the good feature points
	std::vector<Point2f> object; // points in the object image
	std::vector<Point2f> scene; // points in the scene image
	for( int i = 0; i < good_matches.size(); i++ )
	{
		object.push_back( objectKeypoints[ good_matches[i].queryIdx ].pt );
		scene.push_back( sceneKeypoints[ good_matches[i].trainIdx ].pt );
	}

	// perspective relation between the object image and the scene image
	Mat H = findHomography( object, scene, CV_RANSAC );

	// coordinates of the four corners of the object image
	std::vector<Point2f> object_corners(4);

	object_corners[0] = cvPoint(0,0); 
	object_corners[1] = cvPoint( objectImage.cols, 0 );
	object_corners[2] = cvPoint( objectImage.cols, objectImage.rows ); 
	object_corners[3] = cvPoint( 0, objectImage.rows );

	std::vector<Point2f> scene_corners(4);

	perspectiveTransform( object_corners, scene_corners, H); // perspective transform

	// draw the bounding box on the scene part of the output image
	line( outImage, scene_corners[0] + Point2f( objectImage.cols, 0), scene_corners[1] + Point2f( objectImage.cols, 0), Scalar(0, 255, 0), 4 );
	line( outImage, scene_corners[1] + Point2f( objectImage.cols, 0), scene_corners[2] + Point2f( objectImage.cols, 0), Scalar( 0, 255, 0), 4 );
	line( outImage, scene_corners[2] + Point2f( objectImage.cols, 0), scene_corners[3] + Point2f( objectImage.cols, 0), Scalar( 0, 255, 0), 4 );
	line( outImage, scene_corners[3] + Point2f( objectImage.cols, 0), scene_corners[0] + Point2f( objectImage.cols, 0), Scalar( 0, 255, 0), 4 );

}
Code Example #30
bool CustomPattern::findPatternPass(const Mat& image, vector<Point2f>& matched_features, vector<Point3f>& pattern_points,
                                    Mat& H, vector<Point2f>& scene_corners, const double pratio, const double proj_error,
                                    const bool refine_position, const Mat& mask, OutputArray output)
{
    if (!initialized) {return false; }
    matched_features.clear();
    pattern_points.clear();

    vector<vector<DMatch> > matches;
    vector<KeyPoint> f_keypoints;
    Mat f_descriptor;

    detector->detect(image, f_keypoints, mask);
    if (refine_position) refineKeypointsPos(image, f_keypoints);

    descriptorExtractor->compute(image, f_keypoints, f_descriptor);
    descriptorMatcher->knnMatch(f_descriptor, descriptor, matches, 2); // k = 2;
    vector<DMatch> good_matches;
    vector<Point2f> obj_points;

    for(int i = 0; i < f_descriptor.rows; ++i)
    {
        if(matches[i][0].distance < pratio * matches[i][1].distance)
        {
            const DMatch& dm = matches[i][0];
            good_matches.push_back(dm);
            // "keypoints1[matches[i].queryIdx] has a corresponding point in keypoints2[matches[i].trainIdx]"
            matched_features.push_back(f_keypoints[dm.queryIdx].pt);
            pattern_points.push_back(points3d[dm.trainIdx]);
            obj_points.push_back(keypoints[dm.trainIdx].pt);
        }
    }

    if (good_matches.size() < MIN_POINTS_FOR_H) return false;

    Mat h_mask;
    H = findHomography(obj_points, matched_features, RANSAC, proj_error, h_mask);
    if (H.empty())
    {
        // cout << "findHomography() returned empty Mat." << endl;
        return false;
    }

    for(unsigned int i = 0; i < good_matches.size(); ++i)
    {
        if(!h_mask.data[i])
        {
            deleteStdVecElem(good_matches, i);
            deleteStdVecElem(matched_features, i);
            deleteStdVecElem(pattern_points, i);
        }
    }

    if (good_matches.empty()) return false;

    uint numb_elem = good_matches.size();
    check_matches(matched_features, obj_points, good_matches, pattern_points, H);
    if (good_matches.empty() || numb_elem < good_matches.size()) return false;

    // Get the corners from the image
    scene_corners = vector<Point2f>(4);
    perspectiveTransform(obj_corners, scene_corners, H);

    // Check correctness of H
    // Is it a convex hull?
    bool cConvex = isContourConvex(scene_corners);
    if (!cConvex) return false;

    // Is the hull too large or small?
    double scene_area = contourArea(scene_corners);
    if (scene_area < MIN_CONTOUR_AREA_PX) return false;
    double ratio = scene_area/img_roi.size().area();
    if ((ratio < MIN_CONTOUR_AREA_RATIO) ||
        (ratio > MAX_CONTOUR_AREA_RATIO)) return false;

    // Is any of the projected points outside the hull?
    for(unsigned int i = 0; i < good_matches.size(); ++i)
    {
        if(pointPolygonTest(scene_corners, f_keypoints[good_matches[i].queryIdx].pt, false) < 0)
        {
            deleteStdVecElem(good_matches, i);
            deleteStdVecElem(matched_features, i);
            deleteStdVecElem(pattern_points, i);
        }
    }

    if (output.needed())
    {
        Mat out;
        drawMatches(image, f_keypoints, img_roi, keypoints, good_matches, out);
        // Draw lines between the corners (the mapped object in the scene - image_2 )
        line(out, scene_corners[0], scene_corners[1], Scalar(0, 255, 0), 2);
        line(out, scene_corners[1], scene_corners[2], Scalar(0, 255, 0), 2);
        line(out, scene_corners[2], scene_corners[3], Scalar(0, 255, 0), 2);
        line(out, scene_corners[3], scene_corners[0], Scalar(0, 255, 0), 2);
        out.copyTo(output);
    }

    return (!good_matches.empty()); // return true if there are enough good matches
}