Example #1
Mat
StitchedMap::get_debug()
{
  Mat out;
  drawKeypoints(image1, kpv1, image1, Scalar(255,0,0));
  drawKeypoints(image2, kpv2, image2, Scalar(255,0,0));
  drawMatches(image1,fil1, image2,fil2, matches,out,Scalar::all(-1),Scalar::all(-1));
  return out;
}
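For reference, cv::drawKeypoints takes (image, keypoints, outImage, color, flags). A minimal self-contained sketch of the call, assuming OpenCV 3.x and a hypothetical input path:

#include <opencv2/opencv.hpp>
using namespace cv;

int main()
{
    Mat img = imread("input.jpg");   // hypothetical input path
    if (img.empty()) return 1;
    std::vector<KeyPoint> kps;
    Ptr<ORB> orb = ORB::create();    // any Feature2D detector works here
    orb->detect(img, kps);
    Mat out;
    drawKeypoints(img, kps, out, Scalar::all(-1), DrawMatchesFlags::DEFAULT);
    imwrite("keypoints.jpg", out);
    return 0;
}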
Example #2
JNIEXPORT void JNICALL Java_edu_stanford_cvgl_artsy_CameraActivity_HandleFrame
  (JNIEnv *, jobject, jlong addr_native_controller, jlong addr_rgba)
{
	// Obtain SLAM object and current camera frame
	vslam::VSlam* slam = (vslam::VSlam*)(addr_native_controller);
	cv::Mat* frame = (cv::Mat*)(addr_rgba);
	cvtColor(*frame, *frame, CV_RGBA2BGR);

	// Update SLAM with the current frame
	// clock_t start = clock();
	slam->ProcessFrame(*frame);
	// clock_t end = clock();

	// double processFrameDuration = (end - start) / (double) CLOCKS_PER_SEC;
	// LOG_ERROR("NativeCore", "processFrameDuration: %f", processFrameDuration);

	// Render XYZ and YPR values of the current keyframe
	Augmentor augmentor;
	vslam::KeyFrame currKeyFrame = slam->GetCurrKeyFrame();
	Mat translationMatrix = currKeyFrame.GetTranslation();
	augmentor.DisplayTranslation(*frame, translationMatrix);
	Mat rotationMatrix = currKeyFrame.GetRotation();
	augmentor.DisplayRotation(*frame, rotationMatrix);

	// Render keypoints of the current key frame
	vslam::KeypointArray keypoints = currKeyFrame.GetTrackedKeypoints();
	Scalar kpColor = Scalar(255, 0, 0);
	drawKeypoints(*frame, keypoints, *frame, kpColor);
}
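The exported name above follows the JNI convention Java_<package>_<class>_<method>. A sketch of the usual declaration boilerplate around such a bridge (the exact file layout is an assumption):

#include <jni.h>
#include <opencv2/opencv.hpp>

// Declared extern "C" so the C++ compiler does not mangle the symbol
// that the JVM looks up for edu.stanford.cvgl.artsy.CameraActivity.HandleFrame.
extern "C" {
JNIEXPORT void JNICALL Java_edu_stanford_cvgl_artsy_CameraActivity_HandleFrame
    (JNIEnv *, jobject, jlong addr_native_controller, jlong addr_rgba);
}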
Example #3
Mat RoadWatcher::Detect_And_Draw_Traffic_Signs(Mat frame)
{
	Mat returnerFrame, imgHSV, imgThresholded; 
	returnerFrame = frame.clone();
	cvtColor(returnerFrame, imgHSV, COLOR_BGR2HSV);

	inRange(imgHSV, lowestThreshold, highestThreshold, imgThresholded); //Threshold the image

	//morphological opening (remove small objects from the foreground)
	erode(imgThresholded, imgThresholded, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)) );
	dilate( imgThresholded, imgThresholded, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)) ); 

	//morphological closing (fill small holes in the foreground)
	dilate( imgThresholded, imgThresholded, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)) ); 
	erode(imgThresholded, imgThresholded, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)) );

	//detect!
	vector<cv::KeyPoint> keypoints;
	detector.detect(imgThresholded, keypoints);
	Mat temp;
	drawKeypoints(imgThresholded, keypoints, temp, Scalar(0,0,255), 0);
	imshow("blobs", temp);

	for (size_t i = 0; i < keypoints.size(); i++)
	{
		float X = keypoints[i].pt.x;
		float Y = keypoints[i].pt.y;
		circle(returnerFrame, Point(X, Y), 10, Scalar(0,255,0), -1, CV_AA, 0);
	}
	return returnerFrame;
}
Example #4
/////////////////////////////////////////////////////////////////////
// Panel::GetKeyPoints()
// Description: this function is a helper function for DetectBlob().
//  It uses a SimpleBlobDetector to detect keypoints based on the
//  parameters specified. The parameters can currently be debugged
//  by uncommenting the following line at the top of this file:
//  #define DEBUG_BLOB_DETECTION 1
/////////////////////////////////////////////////////////////////////
void Panel::GetKeyPoints(Mat grayImage, std::vector<KeyPoint> &keypoints, bool debug)
{
	Mat dilatedEroded, dilated, blurred;
	Mat im_with_keypoints, thresh;
	Ptr<SimpleBlobDetector> detector;
	SimpleBlobDetector::Params params;
	dilate(grayImage, dilated, Mat());
	GaussianBlur(dilated, blurred, Size(7, 7), 0, 0);
	threshold(blurred, thresh, m_lowTagThreshold, 255, THRESH_BINARY);
	// Filter by Area.
	params.filterByArea = true;
	params.filterByColor = false;
	params.filterByConvexity = false;
	params.filterByCircularity = false;
	params.filterByInertia = false;
	params.minArea = (float)m_blobArea;
	// Set up the detector with default parameters.
	detector = SimpleBlobDetector::create(params);
	// Detect blobs.
	detector->detect(thresh, keypoints);
	detector.release();
	// Draw detected blobs as red circles.
	// DrawMatchesFlags::DRAW_RICH_KEYPOINTS flag ensures the size of the circle corresponds to the size of blob
	drawKeypoints(thresh, keypoints, im_with_keypoints, Scalar(0, 0, 255), DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
	// Show blobs
	imshow("keypoints", im_with_keypoints);
	if (debug)
	{
		// namedWindow("Dilated and Blurred", CV_WINDOW_KEEPRATIO);
		// imshow("Dilated and Blurred", blurred);
		namedWindow("Threshold", CV_WINDOW_AUTOSIZE);
		imshow("Threshold", thresh);
	}
}
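The DEBUG_BLOB_DETECTION macro mentioned in the header comment implies a conditional-compilation switch; a minimal sketch of that pattern (illustrative only, not the original file's guards):

#include <cstdio>

#define DEBUG_BLOB_DETECTION 1   // comment out to compile the debug path away

int main()
{
#ifdef DEBUG_BLOB_DETECTION
	std::printf("blob-detection debug output enabled\n");
#endif
	return 0;
}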
Example #5
u_int32_t ORBFeatureExtractor::processNewImage(unsigned i_imageId, unsigned i_imgSize,
                                                 char *p_imgData)
{
    Mat img;
    u_int32_t i_ret = ImageLoader::loadImage(i_imgSize, p_imgData, img);
    if (i_ret != OK)
        return i_ret;

    vector<KeyPoint> keypoints;
    Mat descriptors;

    ORB(1000, 1.02, 100)(img, noArray(), keypoints, descriptors);

    unsigned i_nbKeyPoints = 0;
    list<HitForward> imageHits;
    unordered_set<u_int32_t> matchedWords;
    for (unsigned i = 0; i < keypoints.size(); ++i)
    {
        i_nbKeyPoints++;

        // Recording the angle on 16 bits.
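        // e.g. an angle of 90 degrees maps to 90 / 360 * 65536 = 16384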
        u_int16_t angle = keypoints[i].angle / 360 * (1 << 16);
        u_int16_t x = keypoints[i].pt.x;
        u_int16_t y = keypoints[i].pt.y;

        vector<int> indices(1);
        vector<int> dists(1);
        wordIndex->knnSearch(descriptors.row(i), indices, dists, 1);

        for (unsigned j = 0; j < indices.size(); ++j)
        {
            const unsigned i_wordId = indices[j];
            if (matchedWords.find(i_wordId) == matchedWords.end())
            {
                HitForward newHit;
                newHit.i_wordId = i_wordId;
                newHit.i_imageId = i_imageId;
                newHit.i_angle = angle;
                newHit.x = x;
                newHit.y = y;
                imageHits.push_back(newHit);
                matchedWords.insert(i_wordId);
            }
        }
    }

#if 0
    // Draw keypoints.
    Mat img_res;
    drawKeypoints(img, keypoints, img_res, Scalar::all(-1), DrawMatchesFlags::DEFAULT);

    // Show the image.
    imshow("Keypoints 1", img_res);
    waitKey();
#endif

    // Record the hits.
    return index->addImage(i_imageId, imageHits);
}
Example #6
File: Main.cpp  Project: LeeLe01/Homography
void Assignment2::displayFeatures(Assignment2 &m2)
{
	Size sz1 = this->image.size(); // size of image 1
	Size sz2 = m2.image.size();    // size of image 2
	Mat output((sz1.height > sz2.height) ? sz1.height : sz2.height, sz1.width + sz2.width, CV_8UC3); // output image holding both images side by side
	
	drawKeypoints(this->image, this->keypoints, this->sift_output, Scalar(0,0,255));  // draw features
	drawKeypoints(m2.image, m2.keypoints, m2.sift_output, Scalar(0,0,255));  // draw features

	// set left and right ROIs on the output image
	Mat left(output, Rect(0, 0, sz1.width, sz1.height));
	this->sift_output.copyTo(left);
	Mat right(output, Rect(sz1.width, 0, sz2.width, sz2.height));
	m2.sift_output.copyTo(right);

	// display the output image with the features drawn
	imshow("SIFT Feature Matching", output);
}
Example #7
static void _prepareImgAndDrawKeypoints( const Mat& img1, const vector<KeyPoint>& keypoints1,
                                         const Mat& img2, const vector<KeyPoint>& keypoints2,
                                         Mat& outImg, Mat& outImg1, Mat& outImg2,
                                         const Scalar& singlePointColor, int flags )
{
    Size size( img1.cols + img2.cols, MAX(img1.rows, img2.rows) );
    if( flags & DrawMatchesFlags::DRAW_OVER_OUTIMG )
    {
        if( size.width > outImg.cols || size.height > outImg.rows )
            CV_Error( CV_StsBadSize, "outImg has size less than need to draw img1 and img2 together" );
        outImg1 = outImg( Rect(0, 0, img1.cols, img1.rows) );
        outImg2 = outImg( Rect(img1.cols, 0, img2.cols, img2.rows) );
    }
    else
    {
        outImg.create( size, CV_MAKETYPE(img1.depth(), 3) );
        outImg = Scalar::all(0);
        outImg1 = outImg( Rect(0, 0, img1.cols, img1.rows) );
        outImg2 = outImg( Rect(img1.cols, 0, img2.cols, img2.rows) );

        if( img1.type() == CV_8U )
            cvtColor( img1, outImg1, CV_GRAY2BGR );
        else
            img1.copyTo( outImg1 );

        if( img2.type() == CV_8U )
            cvtColor( img2, outImg2, CV_GRAY2BGR );
        else
            img2.copyTo( outImg2 );
    }

    // draw keypoints
    if( !(flags & DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS) )
    {
        Mat _outImg1 = outImg( Rect(0, 0, img1.cols, img1.rows) );
        drawKeypoints( _outImg1, keypoints1, _outImg1, singlePointColor, flags | DrawMatchesFlags::DRAW_OVER_OUTIMG );

        Mat _outImg2 = outImg( Rect(img1.cols, 0, img2.cols, img2.rows) );
        drawKeypoints( _outImg2, keypoints2, _outImg2, singlePointColor, flags | DrawMatchesFlags::DRAW_OVER_OUTIMG );
    }
}
Example #8
cv::Mat CvFeature::debugImage()
{
	if (empty()) return cv::Mat();

	cv::Mat canvas;
	// Build a 3-channel canvas before drawing; creating/reallocating it after
	// the copy would discard the copied pixels.
	if (image_.channels() == 1)
		cv::cvtColor(image_, canvas, cv::COLOR_GRAY2BGR);
	else
		image_.copyTo(canvas);
	drawKeypoints(canvas, keypoints_);

	return canvas;
}
Example #9
File: draw.cpp  Project: JoeHowse/opencv
static void _prepareImgAndDrawKeypoints( InputArray img1, const std::vector<KeyPoint>& keypoints1,
                                         InputArray img2, const std::vector<KeyPoint>& keypoints2,
                                         InputOutputArray _outImg, Mat& outImg1, Mat& outImg2,
                                         const Scalar& singlePointColor, DrawMatchesFlags flags )
{
    Mat outImg;
    Size img1size = img1.size(), img2size = img2.size();
    Size size( img1size.width + img2size.width, MAX(img1size.height, img2size.height) );
    if( !!(flags & DrawMatchesFlags::DRAW_OVER_OUTIMG) )
    {
        outImg = _outImg.getMat();
        if( size.width > outImg.cols || size.height > outImg.rows )
            CV_Error( Error::StsBadSize, "outImg has size less than need to draw img1 and img2 together" );
        outImg1 = outImg( Rect(0, 0, img1size.width, img1size.height) );
        outImg2 = outImg( Rect(img1size.width, 0, img2size.width, img2size.height) );
    }
    else
    {
        const int cn1 = img1.channels(), cn2 = img2.channels();
        const int out_cn = std::max(3, std::max(cn1, cn2));
        _outImg.create(size, CV_MAKETYPE(img1.depth(), out_cn));
        outImg = _outImg.getMat();
        outImg = Scalar::all(0);
        outImg1 = outImg( Rect(0, 0, img1size.width, img1size.height) );
        outImg2 = outImg( Rect(img1size.width, 0, img2size.width, img2size.height) );

        _prepareImage(img1, outImg1);
        _prepareImage(img2, outImg2);
    }

    // draw keypoints
    if( !(flags & DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS) )
    {
        Mat _outImg1 = outImg( Rect(0, 0, img1size.width, img1size.height) );
        drawKeypoints( _outImg1, keypoints1, _outImg1, singlePointColor, flags | DrawMatchesFlags::DRAW_OVER_OUTIMG );

        Mat _outImg2 = outImg( Rect(img1size.width, 0, img2size.width, img2size.height) );
        drawKeypoints( _outImg2, keypoints2, _outImg2, singlePointColor, flags | DrawMatchesFlags::DRAW_OVER_OUTIMG );
    }
}
Example #10
void surf_detection(Mat &image, int minHessian)
{
	Ptr<SURF> detector = SURF::create(minHessian);
	
	vector<KeyPoint>keypoints;
	Mat descriptors;

	detector->detectAndCompute( image, Mat(), keypoints, descriptors );
	drawKeypoints(image, keypoints, image, Scalar(255,0,0), DrawMatchesFlags::DEFAULT);
	imwrite("outimage/sift_image.png", image); // note: despite the file name, these are SURF keypoints

/*	
    SiftFeatureDetector  siftdtc;
    vector<KeyPoint>kp1,kp2;
 
    siftdtc.detect(image,kp1);
    Mat outimg1;
    drawKeypoints(image,kp1,outimg1);
    imshow("image1 keypoints",outimg1);
	
    KeyPoint kp;
 
    vector<KeyPoint>::iterator itvc;
    for(itvc=kp1.begin();itvc!=kp1.end();itvc++)
    {
        cout<<"angle:"<<itvc->angle<<"\t"<<itvc->class_id<<"\t"<<itvc->octave<<"\t"<<itvc->pt<<"\t"<<itvc->response<<endl;
    }
 
    siftdtc.detect(img2,kp2);
    Mat outimg2;
    drawKeypoints(img2,kp2,outimg2);
    imshow("image2 keypoints",outimg2);
 
 
    SiftDescriptorExtractor extractor;
    Mat descriptor1,descriptor2;
    BruteForceMatcher<L2<float>> matcher;
    vector<DMatch> matches;
    Mat img_matches;
    extractor.compute(img,kp1,descriptor1);
    extractor.compute(img2,kp2,descriptor2);
 
 
    imshow("desc",descriptor1);
    cout<<endl<<descriptor1<<endl;
    matcher.match(descriptor1,descriptor2,matches);
 
    drawMatches(img,kp1,img2,kp2,matches,img_matches);

*/
}
Example #11
 void VSlam::ProcessFrame(cv::Mat &img)
 {
     cvtColor(img, img, CV_BGRA2BGR);
     
     Mat frame;
     // the image is BGR at this point, so CV_BGR2GRAY would be the strictly
     // matching code; CV_RGB2GRAY only swaps the red/blue weighting
     cvtColor(img, frame, CV_RGB2GRAY);
     
     if (curr_state == NOT_INITIALIZED)
     {
         initial_frame = frame.clone();
         curr_state = INITIALIZING;
     }
     
     if (curr_state == INITIALIZING)
     {
         if(initializer.InitializeMap(orb_handler, initial_frame, frame, keyframes))
         {
             AppendCameraPose(keyframes.back().GetRotation(), keyframes.back().GetTranslation());
             curr_state = TRACKING;
         }
     }
     
     if (curr_state == TRACKING)
     {
         Mat R_vec = world_camera_rot.back().clone();
         Mat t_vec = world_camera_pos.back().clone();
         
         bool new_kf_added = false;
         KeypointArray new_kps;
         bool is_lost = !Tracking::TrackMap(frame, keyframes, R_vec, t_vec,
                                            new_kf_added, new_kps);
         
         // Render extracted keypoints to contrast with matched keypoints
         Scalar kpColor = Scalar(255, 255, 0);
         drawKeypoints(img, new_kps, img, kpColor);
         
         if (!is_lost)
         {
             AppendCameraPose(R_vec, t_vec);
         }
         else
         {
             curr_state = LOST;
         }
     }
     
     if (curr_state == LOST)
     {
         // TODO: handle relocalization
     }
 }
Example #12
Mat aplicaBRISK(Mat original, vector<KeyPoint> &keypoints,Mat &descriptor, Mat salida){
    int thresh = 65; // it computes the one above, not the others
    int octaves = 5;
    float patternScales = 1.5f;
    
 
    Ptr<BRISK> detector =BRISK::create(thresh,octaves,patternScales);
    
    detector->detect(original,keypoints);
    detector->compute(original,keypoints,descriptor);

    drawKeypoints(original,keypoints,salida);
    return salida;
}
Example #13
void akaze_detection(Mat &image){

    vector<KeyPoint> kpts1, kpts2;
    Mat desc1, desc2;

    Ptr<AKAZE> akaze = AKAZE::create();
	
    akaze->detectAndCompute(image, noArray(), kpts1, desc1);
    
    Mat outimg;
    drawKeypoints(image, kpts1, outimg);
    imwrite("outimage/akaze_img.png", outimg);

}
Example #14
//-----------------------------------[ main() function ]------------------------------------------
//   Description: entry point of the console application; execution starts here
//-------------------------------------------------------------------------------------------------
int main( int argc, char** argv )
{
	//[0] Change the console text color
	system("color 2F");

	//[0] Show the help text
	ShowHelpText( );

	//[1] Load the source images and display them
	Mat srcImage1 = imread("C:\\Users\\Public\\Pictures\\Sample Pictures\\Koala.jpg", 1 );
	Mat srcImage2 = imread("C:\\Users\\Public\\Pictures\\Sample Pictures\\Koala.jpg", 1 );
	if( !srcImage1.data || !srcImage2.data ) // check that both images loaded
	{ printf("Error reading the images; make sure files with the names passed to imread exist in the directory!\n"); return false; }
	imshow("Source image 1", srcImage1);
	imshow("Source image 2", srcImage2);

	//[2] Define the variables and classes we need
	int minHessian = 400; // Hessian threshold for the SURF keypoint detector
	SurfFeatureDetector detector( minHessian ); // a SurfFeatureDetector (SURF) feature-detector object
	std::vector<KeyPoint> keypoints_1, keypoints_2; // vector is a dynamic-array template that can hold elements of any type

	//[3] Call detect to find the SURF keypoints and store them in the vectors
	detector.detect( srcImage1, keypoints_1 );
	detector.detect( srcImage2, keypoints_2 );

	//[4] Draw the keypoints
	Mat img_keypoints_1; Mat img_keypoints_2;
	drawKeypoints( srcImage1, keypoints_1, img_keypoints_1, Scalar::all(-1), DrawMatchesFlags::DEFAULT );
	drawKeypoints( srcImage2, keypoints_2, img_keypoints_2, Scalar::all(-1), DrawMatchesFlags::DEFAULT );

	//[5] Show the result images
	imshow("Keypoint detection result 1", img_keypoints_1 );
	imshow("Keypoint detection result 2", img_keypoints_2 );

	waitKey(0);
	return 0;
}
Example #15
void TrackingBlobs(const Mat &img)
{
	Mat cimg = img.clone();

	//binary threshold, val = 235
	threshold(img, cimg, 235, 255, 0);
	medianBlur(cimg, cimg, 5);

	//blob detection
	// set up the parameters (check the defaults in opencv's code in blobdetector.cpp)
	SimpleBlobDetector::Params params;
	params.minDistBetweenBlobs = 150.0f;
	params.filterByInertia = false;
	params.filterByConvexity = false;
	params.filterByColor = false;
	params.filterByCircularity = false;
	params.filterByArea = false;
	params.minArea = 10.0f;
	params.maxArea = 500.0f;

	// set up and create the detector using the parameters
	Ptr<SimpleBlobDetector> blob_detector = SimpleBlobDetector::create(params);

	// detect!
	vector<KeyPoint> keypoints;
	blob_detector->detect(cimg, keypoints);

	// Draw detected blobs as red circles.
	// DrawMatchesFlags::DRAW_RICH_KEYPOINTS flag ensures the size of the circle corresponds to the size of blob
	Mat im_with_keypoints;
	drawKeypoints(cimg, keypoints, im_with_keypoints, Scalar(0, 0, 255), DrawMatchesFlags::DRAW_RICH_KEYPOINTS);

	//blob center
	// extract the x y coordinates of the keypoints: 
	for (size_t i = 0; i < keypoints.size(); i++) {
		int X = keypoints[i].pt.x;
		int Y = keypoints[i].pt.y;

		//cout << "Center: " << X << "; " << Y << "\n";
		circle(im_with_keypoints, Point(X, Y), 1, Scalar(255, 0, 0), 2);
	}

	// Show blobs
	imshow("keypoints", im_with_keypoints);
}
Example #16
bool CustomPattern::init(Mat& image, const float pixel_size, OutputArray output)
{
    image.copyTo(img_roi);
    //Setup object corners
    obj_corners = std::vector<Point2f>(4);
    obj_corners[0] = Point2f(0, 0); obj_corners[1] = Point2f(img_roi.cols, 0);
    obj_corners[2] = Point2f(img_roi.cols, img_roi.rows); obj_corners[3] = Point2f(0, img_roi.rows);

    if (!detector)   // if no detector chosen, use default
    {
        detector = FeatureDetector::create("ORB");
        detector->set("nFeatures", 2000);
        detector->set("scaleFactor", 1.15);
        detector->set("nLevels", 30);
    }

    detector->detect(img_roi, keypoints);
    if (keypoints.empty())
    {
        initialized = false;
        return initialized;
    }
    refineKeypointsPos(img_roi, keypoints);

    if (!descriptorExtractor)   // if no extractor chosen, use default
        descriptorExtractor = DescriptorExtractor::create("ORB");
    descriptorExtractor->compute(img_roi, keypoints, descriptor);

    if (!descriptorMatcher)
        descriptorMatcher = DescriptorMatcher::create("BruteForce-Hamming(2)");

    // Scale found points by pixelSize
    pxSize = pixel_size;
    scaleFoundPoints(pxSize, keypoints, points3d);

    if (output.needed())
    {
        Mat out;
        drawKeypoints(img_roi, keypoints, out, CV_RGB(255, 0, 0));
        out.copyTo(output);
    }

    initialized = !keypoints.empty();
    return initialized; // initialized if any keypoints are found
}
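This snippet uses the OpenCV 2.4 string-based Algorithm interface (FeatureDetector::create plus set). Under OpenCV 3.x/4.x the same defaults would be set through the factory directly; a sketch keeping the snippet's parameter values:

#include <opencv2/features2d.hpp>

cv::Ptr<cv::Feature2D> makeDefaultDetector()
{
    // OpenCV 3.x/4.x equivalent of the string-based 2.4 setup above:
    // nFeatures = 2000, scaleFactor = 1.15, nLevels = 30
    return cv::ORB::create(2000, 1.15f, 30);
}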
Example #17
Mat aplicaORB(Mat original, vector<KeyPoint> &keypoints,Mat &descriptor, Mat salida){
    
    int nfeatures=750;
    float scaleFactor=1.3f;
    int nlevels=9;
    int edgeThreshold=31;
    int firstLevel=0;
    int WTA_K=3;
    int scoreType=ORB::HARRIS_SCORE;
    int patchSize=31;
    
    Ptr<ORB> detector =ORB::create(nfeatures,scaleFactor,nlevels,edgeThreshold,firstLevel,WTA_K);
    
    detector->detect(original,keypoints);
    detector->compute(original,keypoints,descriptor);

    drawKeypoints(original,keypoints,salida);
    return salida;
}
Example #18
/*****************************************************************************
    *  @brief    : orbFeatureDetect
    *  @author   : Zhangle
    *  @date     : 2014/9/8 11:17
    *  @version  : ver 1.0
    *  @inparam  :
    *  @outparam :
*****************************************************************************/
void FeatureDetect::orbFeatureDetect(string inputImageName, string outputImageName,string outputTxtName) {
    Mat image = imread(inputImageName);
    Mat descriptors;
    vector<KeyPoint> keypoints;
    ORB orb;
    time_t beginTime = time(NULL);
    orb.detect(image,keypoints);
    time_t endTime = time(NULL);
    time_t runTime = endTime - beginTime;
    drawKeypoints(image,keypoints,image,Scalar(255,255,255));
    imwrite(outputImageName,image);
    ofstream outTxt(outputTxtName);
    outTxt << "ORB" << endl;
    outTxt << "Image size: " << image.cols << " * " << image.rows << endl;
    outTxt << "Number of keypoints: " << keypoints.size() << endl;
    outTxt << "Feature extraction time: " << runTime << "s" << endl;
    outTxt << "Default parameter settings" << endl;
    outTxt.close();
}
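Note that time(NULL) has one-second resolution, so runTime will often be 0 for a single image. A finer measurement could use OpenCV's tick counter; a sketch:

#include <opencv2/core.hpp>
#include <cstdio>

// Measure an operation with sub-second resolution using OpenCV's tick counter.
double elapsedSeconds(int64 beginTicks)
{
    return (cv::getTickCount() - beginTicks) / cv::getTickFrequency();
}

// usage:
//   int64 t0 = cv::getTickCount();
//   orb.detect(image, keypoints);
//   std::printf("detect took %.3f s\n", elapsedSeconds(t0));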
Example #19
Mat ResultWindow::LoadImage (Mat queryImage, const string& path)
{
	Mat image = imread (path);
	vector<KeyPoint> resultKeypoints;
	if (this->DrawMatches)
	{
		bof::BagOfFeatures& bagOfFeatures = this->configuration.GetBagOfFeatures();
		// resultKeypoints is empty at this point, so the detector always runs
		if (resultKeypoints.size() == 0)
		{
			bagOfFeatures.GetFeatureDetector()->detect (image, resultKeypoints);
		}
		vector<KeyPoint> queryKeypoints;
		bagOfFeatures.GetFeatureDetector()->detect (queryImage, queryKeypoints);

		Mat queryDescriptors, resultDescriptors;
		bagOfFeatures.GetDescriptorExtractor()->compute (queryImage, queryKeypoints, queryDescriptors);
		bagOfFeatures.GetDescriptorExtractor()->compute (image, resultKeypoints, resultDescriptors);

		vector<cv::DMatch> matches;
		bagOfFeatures.GetDescriptorMatcher()->match(queryDescriptors, resultDescriptors, matches);

		if (matches.size() > this->NumMatches)
		{
			// keeping only the strongest matches
			std::nth_element(matches.begin(), matches.begin() + this->NumMatches - 1, matches.end());
			matches.erase(matches.begin() + this->NumMatches, matches.end());
		}

		int flag = this->DrawKeypoints ? DrawMatchesFlags::DEFAULT : DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS;

		cv::Mat imageMatches;
		cv::drawMatches(queryImage, queryKeypoints, image, resultKeypoints, matches, imageMatches, cv::Scalar(0,255,255), cv::Scalar::all(-1), vector<char>(), flag);
		image = imageMatches;
	}
	else if (this->DrawKeypoints)
	{
		this->configuration.GetBagOfFeatures().GetFeatureDetector()->detect (image, resultKeypoints);
		drawKeypoints (image, resultKeypoints, image, cv::Scalar(0,255,255), cv::DrawMatchesFlags::DEFAULT);
	}
	return image;
}
Example #20
File: lk.cpp  Project: azaganidis/rgbdpro
void loadFeaturesRGB(BoWFeatures &features)
{
    features.clear();
    features.reserve(files_list_rgb.size());
    cv::SURF surf(500, 4, 2, true);
    
    double acc_media = 0, media = 0, scarti = 0, varianza = 0; // running mean, total time, squared deviations, std. deviation
    DUtilsCV::GUI::tWinHandler win = "SURF";
    
    for (int i = 0; i < files_list_rgb.size(); ++i) {
        cout << "Estrazione SURF: " << files_list_rgb[i];
        clock_t begin = clock();
        
        cv::Mat image = cv::imread(files_list_rgb[i]);
        cv::Mat mask,outImg;
        vector<cv::KeyPoint> keypoints;
        vector<float> descriptors;
        surf(image, mask, keypoints, descriptors);
        drawKeypoints(image, keypoints, outImg );
        DUtilsCV::GUI::showImage(outImg, true, &win, 10);
        features.push_back(vector<vector<float> >());
        changeStructure(descriptors, features.back(), surf.descriptorSize());
        
     
        clock_t end = clock();
        double elapsed_secs = double(end - begin) / CLOCKS_PER_SEC;
        media = media + elapsed_secs;
        acc_media = media / (i+1);
        cout << "mean: " << acc_media << endl;
        scarti += pow(elapsed_secs-acc_media,2);
        varianza = sqrt(scarti/(i+1)); // square root of the variance, i.e. the standard deviation
        cout << "std. deviation: " << varianza << endl;
        
        cout << ". Extracted " << features[i].size() << " descriptors." << endl;
        descriptors.clear();
        keypoints.clear();
        mask.release();
        image.release();
    }
    cout << "Estrazione terminata." << endl;
}
Example #21
void CImageDoc::OnFeaturesSift()
{
	// TODO: Add your command handler code here
	if (m_image.empty())
	{
		return;
	}

	Mat mask;

	SIFT sift;

	sift(m_image, mask, m_pFeatures->key_points, m_pFeatures->descriptors);

	drawKeypoints(m_image, m_pFeatures->key_points, m_imgHandled);

	m_pFeatures->type = Feature_SIFT;

	m_pImgView->m_pImage = &m_imgHandled;
	m_pImgView->m_bShowHandled = TRUE;
	m_pImgView->Invalidate(TRUE);
}
Example #22
vector <detection> detectorBlob::detect(cv::Mat frame)
{

        vector<cv::KeyPoint> keypoints;
        blobDetector.detect(frame, keypoints);

        vector<struct detection> detections;
        detection temp;
        
        // drawKeypoints
        drawKeypoints( frame, keypoints, mask, cv::Scalar(0,0,255), cv::DrawMatchesFlags::DRAW_RICH_KEYPOINTS );

        // extract centroid and bbox
        // (note: the bbox is anchored at the centroid, i.e. the keypoint is its
        // top-left corner rather than its center)
        for (size_t i = 0; i < keypoints.size(); i++)
        {
                temp.centroid   = keypoints[i].pt;
                temp.bbox       = cv::Rect(temp.centroid.x, temp.centroid.y, keypoints[i].size, keypoints[i].size);
                detections.push_back(temp);
        }


        return detections;
}
Example #23
void ObjWidget::paintEvent(QPaintEvent *event)
{
	if(graphicsViewMode_->isChecked())
	{
		QWidget::paintEvent(event);
	}
	else
	{
		if(!pixmap_.isNull())
		{
			//Scale
			float ratio, offsetX, offsetY;
			this->computeScaleOffsets(ratio, offsetX, offsetY);
			QPainter painter(this);

			if(mirrorView_->isChecked())
			{
				painter.translate(offsetX+pixmap_.width()*ratio, offsetY);
				painter.scale(-ratio, ratio);
			}
			else
			{
				painter.translate(offsetX, offsetY);
				painter.scale(ratio, ratio);
			}

			if(showImage_->isChecked())
			{
				painter.drawPixmap(QPoint(0,0), pixmap_);
			}

			if(showFeatures_->isChecked())
			{
				drawKeypoints(&painter);
			}

			for(int i=0; i<rectItems_.size(); ++i)
			{
				painter.save();
				painter.setTransform(rectItems_.at(i)->transform(), true);
				painter.setPen(rectItems_.at(i)->pen());
				painter.drawRect(rectItems_.at(i)->rect());
				painter.restore();
			}

			if(mouseCurrentPos_ != mousePressedPos_)
			{
				painter.save();
				int left, top, right, bottom;
				left = mousePressedPos_.x() < mouseCurrentPos_.x() ? mousePressedPos_.x():mouseCurrentPos_.x();
				top = mousePressedPos_.y() < mouseCurrentPos_.y() ? mousePressedPos_.y():mouseCurrentPos_.y();
				right = mousePressedPos_.x() > mouseCurrentPos_.x() ? mousePressedPos_.x():mouseCurrentPos_.x();
				bottom = mousePressedPos_.y() > mouseCurrentPos_.y() ? mousePressedPos_.y():mouseCurrentPos_.y();
				if(mirrorView_->isChecked())
				{
					int l = left;
					left = qAbs(right - pixmap_.width());
					right = qAbs(l - pixmap_.width());
				}
				painter.setPen(Qt::NoPen);
				painter.setBrush(QBrush(QColor(0,0,0,100)));
				painter.drawRect(0, 0, pixmap_.width(), top);
				painter.drawRect(0, top, left, bottom-top);
				painter.drawRect(right, top, pixmap_.width()-right, bottom-top);
				painter.drawRect(0, bottom, pixmap_.width(), pixmap_.height()-bottom);
				painter.restore();
			}
		}
	}
}
Example #24
void CvFeature::drawKeypoints(cv::Mat &canvas)
{
	drawKeypoints(canvas, keypoints_);
}
Example #25
//Initialization thread
void *drawingAndParam(void * arg)
{
	// note: the two window-title strings below appear to be swapped
	string winParametrage = "Thresholded";
	string winDetected = "Parametrages";
	char key;
	drawing = false;
	onDrawing = true;
	pthread_mutex_init(&mutexVideo, NULL);
#if output_video == ov_remote_ffmpeg
	int errorcode = avformat_open_input(&pFormatCtx, "tcp://192.168.1.1:5555", NULL, NULL);
	if (errorcode < 0) {
		cout << "DRONE CAMERA ERROR!!! " << errorcode;
		return 0;
	}
	avformat_find_stream_info(pFormatCtx, NULL);
	av_dump_format(pFormatCtx, 0, "tcp://192.168.1.1:5555", 0);
	pCodecCtx = pFormatCtx->streams[0]->codec;
	AVCodec *pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
	if (pCodec == NULL) {
		cout << "ERROR avcodec_find_decoder!!!";
		return 0;
	}
	if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
		cout << "ERROR avcodec_open2!!!";
		return 0;
	}
	//pFrame = av_frame_alloc();
	//pFrameBGR = av_frame_alloc();
	pFrame = avcodec_alloc_frame();
	pFrameBGR = avcodec_alloc_frame();
	bufferBGR = (uint8_t*)av_mallocz(avpicture_get_size(PIX_FMT_BGR24, pCodecCtx->width, pCodecCtx->height) * sizeof(uint8_t));
	avpicture_fill((AVPicture*)pFrameBGR, bufferBGR, PIX_FMT_BGR24, pCodecCtx->width, pCodecCtx->height);
	pConvertCtx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, PIX_FMT_BGR24, SWS_SPLINE, NULL, NULL, NULL);
	img = cvCreateImage(cvSize(pCodecCtx->width, (pCodecCtx->height == 368) ? 360 : pCodecCtx->height), IPL_DEPTH_8U, 3);
	if (!img) {
		cout << "ERROR: NO IMAGE!!!";
		return 0;
	}

	pthread_t ii;
	pthread_create(&ii, NULL, getimg, NULL);

#else	
	VideoCapture cap(0); //capture video from the webcam

#endif
	HH=179;LS=1;HS=255;LV=1;HV=255;LH=1;
	namedWindow(winDetected, CV_WINDOW_NORMAL);
	Mat frame;
	setMouseCallback(winDetected, MouseCallBack, NULL);
	while(true)
	{	
		if(onDrawing) //While the user has not started the selection
		{
			#if output_video != ov_remote_ffmpeg
				bool bSuccess = cap.read(frame); // grab a new frame
			if (!bSuccess) {
				cout << "Cannot read the video stream" << endl;
				break;
			}
			#else
				pthread_mutex_lock(&mutexVideo);
				memcpy(img->imageData, pFrameBGR->data[0], pCodecCtx->width * ((pCodecCtx->height == 368) ? 360 : pCodecCtx->height) * sizeof(uint8_t) * 3);
				pthread_mutex_unlock(&mutexVideo);
				frame = cv::cvarrToMat(img, true);
			#endif
		imshow(winDetected, frame);
		}
		if(!onDrawing && !drawing) //Display the user's selection live
		{
			Mat tmpFrame=frame.clone();
			rectangle(tmpFrame, rec, CV_RGB(51,156,204),1,8,0);
			imshow(winDetected, tmpFrame);
		}
		if(drawing) //The user has finished selecting
		{
			//cible Ball(1);
			namedWindow(winParametrage, CV_WINDOW_NORMAL);
			setMouseCallback(winDetected, NULL, NULL);	
			rectangle(frame, rec, CV_RGB(51,156,204),2,8,0);
			imshow(winDetected, frame);
			Mat selection = frame(rec);
			Ball.setPicture(selection);
			while(key != 'q')
			{
				//Trackbars to pick the color (hue)
				createTrackbar("LowH", winParametrage, &LH, 179); //Hue (0 - 179)
				createTrackbar("HighH", winParametrage, &HH, 179);
				//Trackbars for saturation, relative to white
				createTrackbar("LowS", winParametrage, &LS, 255); //Saturation (0 - 255)
				createTrackbar("HighS", winParametrage, &HS, 255);
				//Trackbars for brightness, relative to black
				createTrackbar("LowV", winParametrage, &LV, 255);//Value (0 - 255)
				createTrackbar("HighV", winParametrage, &HV, 255);
				Mat imgHSV;

				cvtColor(selection, imgHSV, COLOR_BGR2HSV); //Convert from BGR to HSV

				Mat imgDetection;

				inRange(imgHSV, Scalar(LH, LS, LV), Scalar(HH, HS, HV), imgDetection); //Black out everything outside the user-chosen color range

				//Remove noise
				erode(imgDetection, imgDetection, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));
				dilate(imgDetection, imgDetection, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));

				dilate(imgDetection, imgDetection, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));
				erode(imgDetection, imgDetection, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));

				imshow(winParametrage, imgDetection);

				//Compute the "distance" to the target; used as a threshold.
				Moments position;
				position = moments(imgDetection);
				Ball.lastdZone = position.m00;

				key = waitKey(10);
			}
			
			//Extract the interest points from the user's selection
			Mat graySelect;
			int minHessian = 800;
			cvtColor(selection, graySelect, COLOR_BGR2GRAY);
			Ptr<SURF> detector = SURF::create(minHessian);
			vector<KeyPoint> KP;
			detector->detect(graySelect, KP);
			Mat KPimg;
			drawKeypoints(graySelect, KP, KPimg, Scalar::all(-1), DrawMatchesFlags::DEFAULT);
			Mat desc;
			Ptr<SURF> extractor = SURF::create();
			extractor->compute(graySelect, KP, desc);
			Ball.setimgGray(graySelect);
			Ball.setKP(KP);
			Ball.setDesc(desc);
			break;
		}
		key = waitKey(10);
	}
	//End of initialization: close all windows and move on to tracking
	destroyAllWindows();
#if output_video != ov_remote_ffmpeg
	cap.release();
#endif
}
Example #26
void TrackFace::on_drawKeypoints_clicked()
{
    int nFeatures=128;
    TrackFace::capture.open(0);

    string windowName="Draw Keypoints";
    cv::namedWindow(windowName.c_str(), cv::WINDOW_AUTOSIZE);
    cv::moveWindow(windowName.c_str(), window_x, window_y);

    featureExtractor_state=SIFT_MODE;

    while (true)
    {
        cv::Mat frame, buffer;
        if (!capture.isOpened()) break;

        capture >> buffer;
        cv::resize(buffer, frame,Size(buffer.cols/2,buffer.rows/2),0,0,INTER_LINEAR);
        setMouseCallback(windowName.c_str(), drawKeypointsCallBack, NULL);

        switch(featureExtractor_state)
        {
        case SIFT_MODE:
        {
            SiftFeatureDetector detector( nFeatures );
            std::vector<KeyPoint> keypoints;

            detector.detect(frame, keypoints);
            cv::Mat img_keypoints;
            drawKeypoints(frame, keypoints, img_keypoints, Scalar::all(-1), DrawMatchesFlags::DEFAULT );
            putText(img_keypoints, "SIFT MODE, right click to SURF MODE", Point(10, 20), FONT_HERSHEY_PLAIN, 1.0, CV_RGB(255,0,0),2.0);

            imshow(windowName.c_str(), img_keypoints);

            break;
        }
        case SURF_MODE:
        {
            SurfFeatureDetector detector( nFeatures );
            std::vector<KeyPoint> keypoints;

            detector.detect(frame, keypoints);
            cv::Mat img_keypoints;
            drawKeypoints(frame, keypoints, img_keypoints, Scalar::all(-1), DrawMatchesFlags::DEFAULT );

            putText(img_keypoints, "SURF MODE, left click to SIFT MODE", Point(10, 20), FONT_HERSHEY_PLAIN, 1.0, CV_RGB(255,0,0),2.0);

            imshow(windowName.c_str(), img_keypoints);

            break;
        }
        default: break;
        }

        // exit on ESC: release the camera and close the window; the outer
        // loop then terminates via the isOpened() check above
        if (cv::waitKey(100) == 27)
        {
            capture.release();
            cv::destroyWindow(windowName.c_str());
        }
    }
}
Example #27
//TODO:FlannIndexEstimate Debug
bool YawAngleEstimator::Estimate(Mat& CurrentFrame,float& CurrentAngle)
{
	printf("YawAngleEstimator:estimate\n");

	Mat TempFrame = CurrentFrame.clone();
	if (TempFrame.channels() == 3)
		cvtColor(TempFrame, TempFrame, CV_BGR2GRAY);

	vector<Mat> MatchingImg(1, TempFrame);
	vector<vector<KeyPoint>> CurrentKp(1, vector<KeyPoint>());
	vector<Mat> CurrentDescriptors(1,Mat());
	float* CurrentVote = new float[AngleNum];
	float maxVote=0;
	//Extract current feature
	featureExtract(MatchingImg, CurrentKp, CurrentDescriptors, Feature);

	//draw Keypoints on frame and record
	drawKeypoints(CurrentFrame, CurrentKp[0], CurrentFrame);
	fss << "\nKeypoints number of CurrentFrame is " << CurrentDescriptors[0].rows << endl;

	printf("Estimate::The number of keypoints is: %d\n", CurrentDescriptors[0].rows);

	if (useIndex)
		Indexmatch(CurrentDescriptors[0], CurrentVote);
	else
		BFmatch(CurrentDescriptors[0], CurrentVote);

	if (FramesVote.size() < FrameNum)
		FramesVote.push_back(CurrentVote);
	else // calculate FinalVote for each angle from the last frames' votes
	{
		float* ptrTemp = FramesVote.front();
		FramesVote.pop_front();
		FramesVote.push_back(CurrentVote);
		delete ptrTemp;

		for (int i = 0; i < FrameNum; i++)
		{
			for (int j = 0; j < AngleNum; j++)
			{
				// NOTE: '=' keeps only the current frame's weighted vote;
				// accumulating with '+=' was most likely intended
				FinalVote[j] = FramesVote[i][j] * VoteWeight[i];
				
				if (FinalVote[j] > maxVote)
				{
					maxVote = FinalVote[j];
					AngleIndex = j;
				}
			}
		}

		for (int i = 0; i < AngleNum; i++)
		{
			cout << "Final Vote for template " << i << " is " << FinalVote[i] << endl;
			
			//record
			fss << "Final Vote for template " << i << " is " << FinalVote[i] << endl;
		}
	}
	if (AngleIndex == -1)
	{
		printf("Cannot estimate right now!\n");
		return false;
	}
	else
	{
		CurrentAngle = Angle[AngleIndex];
		printf("The max vote of angle is: %f\n", maxVote);
		return true;
	}
}
Example #28
  RecognitionResult FeatureMatcher::recognize( const Mat& queryImg, bool drawOnImage, Mat* outputImage,
      bool debug_on, vector<int> debug_matches_array
                                             )
  {
    RecognitionResult result;

    result.haswinner = false;
    result.confidence = 0;

    Mat queryDescriptors;
    vector<KeyPoint> queryKeypoints;

    detector->detect( queryImg, queryKeypoints );
    extractor->compute(queryImg, queryKeypoints, queryDescriptors);

    if (queryKeypoints.size() <= 5)
    {
      // Bail out when there are 5 or fewer keypoints: nothing would ever match anyway, and it could crash the matcher.
      if (drawOnImage)
      {
        drawKeypoints(  queryImg, queryKeypoints, *outputImage, CV_RGB(0, 255, 0), DrawMatchesFlags::DEFAULT );
      }
      return result;
    }

    vector<DMatch> filteredMatches;

    surfStyleMatching( queryDescriptors, queryKeypoints, filteredMatches );

    // Create and initialize the counts to 0
    std::vector<int> bill_match_counts( billMapping.size() );

    for (unsigned int i = 0; i < billMapping.size(); i++)
    {
      bill_match_counts[i] = 0;
    }

    for (unsigned int i = 0; i < filteredMatches.size(); i++)
    {
      bill_match_counts[filteredMatches[i].imgIdx]++;
      //if (filteredMatches[i].imgIdx
    }

    float max_count = 0;	// raw match count of the best bill (the 0-100 percent score is computed below)
    int secondmost_count = 0;
    int maxcount_index = -1;
    for (unsigned int i = 0; i < billMapping.size(); i++)
    {
      if (bill_match_counts[i] > max_count && bill_match_counts[i] >= 4)
      {
        secondmost_count = max_count;
        if (secondmost_count <= 2) 	// A value of 1 or 2 is effectively 0
          secondmost_count = 0;

        max_count = bill_match_counts[i];
        maxcount_index = i;
      }
    }

    float score = ((max_count - secondmost_count - 3) / 10) * 100;
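    // e.g. max_count = 12 and secondmost_count = 0 give ((12 - 0 - 3) / 10) * 100 = 90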
    if (score < 0)
      score = 0;
    else if (score > 100)
      score = 100;

    if (score > 0)
    {
      result.haswinner = true;
      result.winner = billMapping[maxcount_index];
      result.confidence = score;

      if (drawOnImage)
      {
        vector<KeyPoint> positiveMatches;
        for (unsigned int i = 0; i < filteredMatches.size(); i++)
        {
          if (filteredMatches[i].imgIdx == maxcount_index)
          {
            positiveMatches.push_back( queryKeypoints[filteredMatches[i].queryIdx] );
          }
        }

        Mat tmpImg;
        drawKeypoints(  queryImg, queryKeypoints, tmpImg, CV_RGB(185, 0, 0), DrawMatchesFlags::DEFAULT );
        drawKeypoints(  tmpImg, positiveMatches, *outputImage, CV_RGB(0, 255, 0), DrawMatchesFlags::DEFAULT );

        if (result.haswinner)
        {
          std::ostringstream out;
          out << result.winner << " (" << result.confidence << "%)";

          // we detected a bill, let the people know!
          //putText(*outputImage, out.str(), Point(15, 27), FONT_HERSHEY_DUPLEX, 1.1, CV_RGB(0, 0, 0), 2);
        }
      }
    }

    if (this->config->debugStateId)
    {
      for (unsigned int i = 0; i < billMapping.size(); i++)
      {
        cout << billMapping[i] << " : " << bill_match_counts[i] << endl;
      }
    }

    return result;
  }
void blob_main(sample_loc &s_loc)
{
    /*
	OpenCV defines HSV colors by the following ranges: 
	H: 0-180, S: 0-255, V: 0-255
	*/

	// hsvParams hsvWhite = {20,0,0,180,80,255}; // original
	hsvParams hsvWhite = {0, 0, 230, 180, 20, 255}; // edited
    hsvParams hsvPurple = {80,60,0,130,255,255};

    hsvParams hsv = hsvWhite; // s_loc.whiteSample==true? hsvWhite:hsvPurple;

    //Set up blob detection parameters
    SimpleBlobDetector::Params params = setupObjectBlobParams();

    vector<KeyPoint> keypoints;

    // const string filename("/home/buckeye/catkin_ws/src/CapstoneROS/src/vision/samplePics/25ft3.jpg");
    //Initialize camera
/* 
    VideoCapture cap(0);
    if ( !cap.isOpened() ){
        cout << "Cannot open the web cam" << endl;
        return;
    }
*/
    while(true){
      for(int n=1; n<=4; n++)
	{
	  stringstream ss;
	  ss << n;
	  string num = ss.str();
	  Mat img, imgHSV, imgTHRESH, out;
		/* img = imread(filename, CV_LOAD_IMAGE_COLOR); */
		
    	// cap>>img;
		img = imread("samplePics/pic"+num+".jpg", CV_LOAD_IMAGE_COLOR);
        
        if(img.empty()){
            cout << "can not open image" << endl;
	    s_loc.sample_not_found=true;
            return;
        }

        //convert color to HSV, threshold and remove noise
        cvtColor(img, imgHSV, COLOR_BGR2HSV);
        findGrass(img,imgHSV);
        cvtColor(img, imgHSV, COLOR_BGR2HSV);

        inRange(imgHSV, Scalar(hsv.hL, hsv.sL, hsv.vL), Scalar(hsv.hH, hsv.sH, hsv.vH), imgTHRESH);
        removenoise(imgTHRESH);

        namedWindow("Input", WINDOW_AUTOSIZE);
        namedWindow("Detection", WINDOW_AUTOSIZE);
	
        Ptr<SimpleBlobDetector> blobDetect = SimpleBlobDetector::create(params);
        blobDetect->detect( imgTHRESH, keypoints );

        drawKeypoints(imgTHRESH, keypoints, out, CV_RGB(0,0,255), DrawMatchesFlags::DEFAULT);

        /* Circle blobs
        for(int i = 0; i < keypoints.size(); i++)
            circle(out, keypoints[i].pt, 1.5*keypoints[i].size, CV_RGB(0,255,0), 20, 8);
        */

        // Find largest keypoint blob, and use that in determining angle and distance
        if(keypoints.size() >= 1){
			int index = 0;
			for (int i = 0; i < keypoints.size(); i++){
				if( keypoints[i].size > keypoints[index].size ) { 
					index = i;
				}
			}
            cout<<endl<<endl<<"Object Found"<<endl;
            tilt_turn_degrees(imgTHRESH, keypoints[index].pt.y, keypoints[index].pt.x, &s_loc);
	    	robot_angle(&s_loc, imgTHRESH, keypoints[index].pt.x);
        }
        else{
            cout<<"No Object Found"<<endl;
        }

        imshow("Input", img);
        // imwrite("exampleOfFindGrass.jpg", img);
        imshow("Detection", out);
        // imwrite("showingKeypoints.jpg", out);
        waitKey(-1);
	}
    }
  }
int beaconpics_main(struct beacon_loc orientation)
{
    int thresh=140;
    namedWindow("Original 1", WINDOW_NORMAL);
    namedWindow("Original 2", WINDOW_NORMAL);
    namedWindow("Original 3", WINDOW_NORMAL);
    namedWindow("Diff", WINDOW_NORMAL);

    //hsvParams hsv = {76,0,224,97,37,255};
    hsvParams hsv = {20,0,0,97,37,255};

    //Set up blob detection parameters
    SimpleBlobDetector::Params params;
// params.blobColor //can we use this???
// params.minDistBetweenBlobs = 50.0f;
    params.filterByInertia = true;
    params.filterByConvexity = false;
    params.filterByColor = false;
    params.filterByCircularity = false;
    params.filterByArea = true;

    params.minThreshold = 150;
    params.maxThreshold = 255;
    params.thresholdStep = 1;

    params.minArea = 0;
    params.minConvexity = 0.3;
    params.minInertiaRatio = 0.10;

    params.maxArea = 2000;
    params.maxConvexity = 10;


    vector<KeyPoint> keypoints;

    VideoCapture cap(0); //capture the video from web cam

    if ( !cap.isOpened() )  // if not success, exit program
    {
        cout << "Cannot open the web cam" << endl;
        return -1;
    }

    while(true) {

        Mat imgOriginal1 = getPic(cap);
        Mat imgOriginal2 = getPic(cap);
        Mat imgOriginal3 = getPic(cap);

        Mat imgHSV1,imgHSV2, imgHSV3;

        if(imgOriginal1.empty() || imgOriginal2.empty() || imgOriginal3.empty())
        {
            cout << "can not open " << endl;
            return -1;
        }

        Mat diff;
        absdiff(imgOriginal1,imgOriginal2,diff);
        cvtColor(diff, diff, COLOR_BGR2GRAY); // convert the captured difference image to grayscale

        threshold(diff, diff, thresh, 255, cv::THRESH_BINARY);
        dilate(diff, diff, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)) );

        //opencv 3.0 version
        //detect beacon blobs between pictures 1&2
        Ptr<SimpleBlobDetector> blobDetect = SimpleBlobDetector::create(params);
        blobDetect->detect( diff, keypoints );
        cout<<keypoints.size()<<endl;
        //detect blobs between images 2&3
        if(keypoints.size() ==0) {
            absdiff(imgOriginal2,imgOriginal3,diff);
            cvtColor(diff, diff, COLOR_BGR2GRAY); // convert the captured difference image to grayscale

            threshold(diff, diff, thresh, 255, cv::THRESH_BINARY);
            dilate(diff, diff, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)) );

            blobDetect = SimpleBlobDetector::create(params);
            blobDetect->detect( diff, keypoints );
        }
        cout<<keypoints.size()<<endl;

        Mat out;
        drawKeypoints(diff, keypoints, out, CV_RGB(0,0,0), DrawMatchesFlags::DEFAULT);
        /*//finding if things are green or red
        cvtColor(out, out, COLOR_BGR2HSV);
        inRange(out, Scalar(hsv.hL, hsv.sL, hsv.vL),
               Scalar(hsv.hH, hsv.sH, hsv.vH), out);
        blobDetect.detect( out, keypoints );
        drawKeypoints(out, keypoints, out, CV_RGB(0,0,0), DrawMatchesFlags::DEFAULT);

        for(int i=0;i<diff.rows;i++){
           for(int j=0;j<diff.cols;j++){
                  if(out.at<Vec3b>(i,j)[0]==0 && out.at<Vec3b>(i,j)[1]==0 && out.at<Vec3b>(i,j)[2]==0){
                      imgOriginalON.at<Vec3b>(i,j)=(0,0,0);
                  }
             }
          }
          inRange(imgOriginalON, Scalar(hsv.hL, hsv.sL, hsv.vL),
               Scalar(hsv.hH, hsv.sH, hsv.vH), out);
          blobDetect.detect( out, keypoints );
          drawKeypoints(out, keypoints, out, CV_RGB(0,0,0), DrawMatchesFlags::DEFAULT);
          */

        //Circle blobs
        for(int i = 0; i < keypoints.size(); i++)
        {
            if(keypoints[i].size>0)
                circle(out, keypoints[i].pt, 1.5*keypoints[i].size, CV_RGB(0,255,0), 1, 8);
        }
        string text;
        if(keypoints.size() == 4)
        {
            text = "Object Found";
            cout<<endl<<endl<<"Object Found"<<endl;
            Point cent;
            cent=findkeyPoint(keypoints);
//     cout<<"dist: "<<printDistanceFromLights(keypoints)<<endl;
            circle(out, cent, 5, CV_RGB(0,100,0), -1, 8);
            robot_angle(diff, cent.y, cent.x, 1);
        }
        else
        {
            text = "Error";
            cout<<endl<<endl<<"No Object Found"<<endl;
            //	while(keypoints.size() > 2)
            //	   thresh+=5;
        }
        imshow("Original 1", imgOriginal1); //show the original image
        imshow("Original 2", imgOriginal2); //show the original image
        imshow("Original 3", imgOriginal3); //show the original image
        imshow("Diff", out);
        waitKey(-1);
    }
    return 0;
}