void build_model() {
	// Builds the image-matching model: extracts SURF descriptors from every
	// image in IMAGE_DATABASE, then trains the (member) matcher on them.
	// Side effects: fills the member imgNames in lockstep with the
	// descriptor set handed to matcher.
	vector<Mat> trainDesc;
	FeatureDetector *detector = new SurfFeatureDetector();
	DescriptorExtractor *extractor = new SurfDescriptorExtractor();

	// Generate descriptors from the image db
	fs::path p = fs::system_complete(IMAGE_DATABASE);
	fs::directory_iterator end_iter;
	for (fs::directory_iterator dir_itr(p); dir_itr != end_iter; ++dir_itr) {
		string img_name(dir_itr->path().string());
		Mat img = imread(img_name, CV_LOAD_IMAGE_GRAYSCALE);

		// BUG FIX: imread returns an empty Mat for subdirectories and
		// non-image files; detect() on an empty image crashes. Skip them.
		if (img.empty())
			continue;

		// feature extraction
		vector<KeyPoint> keypoints;
		detector->detect(img, keypoints);

		// feature description
		Mat descriptor;
		extractor->compute(img, keypoints, descriptor);

		// An image with no keypoints produces an empty descriptor matrix,
		// which would corrupt the matcher's training data. Skip it (and
		// its name, to keep trainDesc/imgNames aligned).
		if (descriptor.empty())
			continue;

		trainDesc.push_back(descriptor);
		imgNames.push_back(img_name);
	}

	// train the model
	matcher->add(trainDesc);
	matcher->train();

	// Clean up
	delete detector;
	delete extractor;
}
// Example #2
void Processor::detectAndDrawFeatures(int input_idx, image_pool* pool,int feature_type) {
	FeatureDetector* fd = 0;

	switch (feature_type) {
	case DETECT_SURF:
		fd = &surfd;
		break;
	case DETECT_FAST:
		detectAndDrawCircles( input_idx, pool );
		return;
		//fd = &fastd;
		//break;
	case DETECT_STAR:
		fd = &stard;
		break;
	}

	Mat greyimage;
	pool->getGrey(input_idx, greyimage);
	//Mat* grayimage = pool->getYUV(input_idx);

	Mat* img = pool->getImage(input_idx);

	if (!img || greyimage.empty() || fd == 0)
		return; //no image at input_idx!


	keypoints.clear();

	//if(grayimage->step1() > sizeof(uchar)) return;
	//cvtColor(*img,*grayimage,CV_RGB2GRAY);


	fd->detect(greyimage, keypoints);

	for (vector<KeyPoint>::const_iterator it = keypoints.begin(); it
			!= keypoints.end(); ++it) {
		circle(*img, it->pt, 3, cvScalar(255, 0, 255, 0));
	}

	//pool->addImage(output_idx,outimage);

}
// Example #3
int SfM_Frame::detectKeypoints(void)
{
    // Detects keypoints on imgGray (into kpRaw) and computes SURF
    // descriptors for them (into kpDesc). The detector type is chosen by
    // the "kp_detector" parameter; the descriptor is always SURF.
    // Returns 0 on completion.
    string                  fd_name = "SURF";
    double                  SURF_minHessian = 50;

    // load parameters
    fd_name = svar.GetString("kp_detector", "SURF");
    SURF_minHessian = svar.GetInt("kp_SURF_minHessian", 50);

    /////////////////////////////////////////////////
    /// create feature detector
    /////////////////////////////////////////////////
    // BUG FIX: `detector` was uninitialized; selecting "PyramidFAST"
    // (whose branch is commented out) or any unrecognized name left it
    // dangling and detect() dereferenced garbage. Initialize to NULL and
    // fall back to the default SURF detector below.
    FeatureDetector *detector = NULL;

    if( fd_name == "SURF" ) {
        detector = new SurfFeatureDetector( SURF_minHessian );
    } else if ( fd_name == "FAST" ) {
        detector = new FastFeatureDetector();
    } else if ( fd_name == "PyramidFAST" )  {
        //detector = FeatureDetector::create("PyramidFAST");
    } else if ( fd_name == "SIFT" ) {
        detector = new SiftFeatureDetector;
    }

    if( detector == NULL ) {
        detector = new SurfFeatureDetector( SURF_minHessian );
    }

    SurfDescriptorExtractor *extractor = new SurfDescriptorExtractor(48, 12, true);

    detector->detect(imgGray, kpRaw);
    extractor->compute(imgGray, kpRaw, kpDesc);

    delete extractor;
    delete detector;

    return 0;
}
		void match_img(string &query_img) {
			// Matches the query image (a file path) against the trained model
			// and prints the best-matching image name plus the matching time.
			// (An earlier revision saved the raw query bytes to disk first;
			// that code was dead/commented out and has been removed.)
			cout << "image query is " << query_img << endl;
			string image_path = query_img;

			gettimeofday(&tv1, NULL);

			// feature extraction
			Mat imgInput = imread(image_path, CV_LOAD_IMAGE_GRAYSCALE);
			// BUG FIX: imread returns an empty Mat when the path is bad or the
			// file is not an image; detect() on an empty image crashes.
			if (imgInput.empty()) {
				cout << "Failed to load query image: " << image_path << endl;
				return;
			}
			vector<KeyPoint> features;
			detector->detect(imgInput, features);

			// feature description
			Mat descriptors;
			extractor->compute(imgInput, features, descriptors);
			// FLANN-based matchers require CV_32F descriptors.
			descriptors.convertTo(descriptors, CV_32F);

			// image matching
			string response = exec_match(descriptors, MATCHING_METHOD);
			gettimeofday(&tv2, NULL);

			long int runtimematching = (tv2.tv_sec - tv1.tv_sec) * 1000000 + (tv2.tv_usec - tv1.tv_usec);
			cout << "The matching image is " << response << endl;
			cout << "Image Matching Time: " << fixed << setprecision(2) << (double)runtimematching / 1000 << "(ms)" << endl;
		}
// Example #5
int main(){
	// Receives raw grayscale frames over the Communicator, tracks FAST
	// features between consecutive frames with pyramidal Lucas-Kanade,
	// sends the average displacement back, saves an annotated JPEG per
	// frame, and logs all flow vectors plus FPS to opticalFlow.txt.
	// `loop` (guarded by `mtx`) is cleared by the waitForKeyPress thread.
	thread t(waitForKeyPress);

	list<CvPoint> opticalFlowVectors;

	//set up communicator
	Communicator* comm = new Communicator(512, "192.168.2.3", 9002, "*", 9000);

	//receive size of one image
	// BUG FIX: recv() does not NUL-terminate, so atoi() could read past a
	// 3-byte buffer. Use one extra, zero-initialized byte.
	char frameWidthBuf[4] = {0};
	char frameHeightBuf[4] = {0};
	comm->recv(frameWidthBuf, 3, 0);
	comm->recv(frameHeightBuf, 3, 0);
	//extract data
	int frameWidth = atoi(frameWidthBuf);
	int frameHeight = atoi(frameHeightBuf);
	int frameSize = frameWidth*frameHeight;

	cout << frameSize << endl;

	//declare frames
	// BUG FIX: cv::Mat takes (rows, cols) == (height, width); the original
	// passed them swapped, transposing every non-square frame.
	Mat frame1(frameHeight,frameWidth,CV_8UC1);
	Mat frame2(frameHeight,frameWidth,CV_8UC1);

	//second get the image
	comm->recv(frame1.data, frameSize, 0);

	//build pyramid for frame 1
	buildOpticalFlowPyramid(frame1, pyramid1, cvSize(pyrWindowSize,pyrWindowSize), 3);

	//start optical flow algorithm
	cout << "Started optical flow algorithm." << endl;
	high_resolution_clock::time_point t1 = high_resolution_clock::now();
	int iter = 0;
	mtx.lock();
	while(loop)
	{
		mtx.unlock();

		//recv frame 2
		comm->recv(frame2.data, frameSize, 0);

		// Detect on the *previous* frame; adapt the FAST threshold to keep
		// roughly 30 features per frame.
		FeatureDetector* detector = new FastFeatureDetector(FASTThreshold,true);
		detector->detect(frame1, KeyPointVector);
		delete detector;

		if(KeyPointVector.size() > 30)
			FASTThreshold++;
		else
			FASTThreshold--;

		//build pyramid for frame 2
		buildOpticalFlowPyramid(frame2, pyramid2, cvSize(pyrWindowSize,pyrWindowSize), 3);
		KeyPoint::convert(KeyPointVector, pointVector1);

		//run Lucas Kanade optical flow if features have been found
		if(KeyPointVector.size() > 0)
		{
			calcOpticalFlowPyrLK(pyramid1, pyramid2, pointVector1,
			 pointVector2, featuresFound, featuresError,
			 cvSize(pyrWindowSize,pyrWindowSize), 0,
			 cvTermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 10, 0.2),
			 0,0.0001);
		}

		// Draw the successfully-tracked flow vectors on an RGB copy.
		Mat frame3;
		cvtColor(frame2, frame3, CV_GRAY2RGB);

		for(size_t i=0; i < pointVector1.size(); i++){
			// Skip features the tracker lost or tracked with high error.
			if(featuresFound[i]==0 || featuresError[i]>50)
			{
				//printf("Error is: %f\n",featuresError[i]);
			} else {
				CvPoint p0 = cvPoint(
						cvRound(pointVector1[i].x),
						cvRound(pointVector1[i].y));

				CvPoint p1 = cvPoint(
						cvRound(pointVector2[i].x),
						cvRound(pointVector2[i].y));

				line(frame3, p0, p1, CV_RGB(255,0,0), 1, 8, 0);
			}
		}

		ostringstream fileName2;
		fileName2 << "flightphoto/flow" << iter <<".jpg";
		imwrite(fileName2.str(), frame3);

		//store pyramid 2 in pyramid 1
		frame1 = frame2.clone();
		pyramid1.swap(pyramid2);

		//find the average displacement
		displacement = trajectoryCalc(pointVector1, pointVector2, featuresFound,
		 featuresError, KeyPointVector.size());
		//Compensate angle: must be done on RPI

		// BUG FIX: 4 bytes cannot hold e.g. "-123" plus the NUL that
		// sprintf writes; use ample buffers and bounded snprintf.
		char xBuf[16]; char yBuf[16];
		int xBufLen = snprintf(xBuf, sizeof(xBuf), "%d", displacement.x);
		int yBufLen = snprintf(yBuf, sizeof(yBuf), "%d", displacement.y);
		comm->send(xBuf,xBufLen,ZMQ_NOBLOCK);
		comm->send(yBuf,yBufLen,ZMQ_NOBLOCK);

		opticalFlowVectors.push_back(displacement);
		mtx.lock();
		iter ++;
	}
	// BUG FIX: the loop exits with mtx still locked; release it before
	// joining the key-press thread.
	mtx.unlock();
	t.join();

	high_resolution_clock::time_point t2 = high_resolution_clock::now();
	duration<double> time_span = duration_cast<duration<double>>(t2 - t1);
	double fps = ((double)opticalFlowVectors.size())/time_span.count();

	ofstream myFile;
	myFile.open ("opticalFlow.txt");

	myFile << "FPS: \t" << fps << endl;

	for (list<CvPoint>::iterator it=opticalFlowVectors.begin(); it!=opticalFlowVectors.end(); ++it){
		  myFile << "x:\t" << it->x << "\ty:\t" << it->y << endl;
	}

	myFile.close();

	// BUG FIX: the Communicator was never freed.
	delete comm;
}
// Example #6
int main(){
	thread t(waitForKeyPress);

	list<CvPoint> opticalFlowVectors;

	//set up communicator	
	Communicator* comm = new Communicator(512, "192.168.2.3", 9002, "*", 9000);

	//receive size of one image
	char frameWidthBuf[3];
	char frameHeightBuf[3];
	comm->recv(frameWidthBuf, 3, 0);
	comm->recv(frameHeightBuf, 3, 0);
	//extract data
	int frameWidth = atoi(frameWidthBuf);
	int frameHeight = atoi(frameHeightBuf);
	int frameSize = frameWidth*frameHeight;

	//declare frames
	Mat frame1(frameWidth,frameHeight,CV_8UC1);
	Mat frame2(frameWidth,frameHeight,CV_8UC1);

	//second recv the first frame

	//recv the size of the encoded frame
	char encSizeBuf[6];
	comm->recv(encSizeBuf, 6, 0);
	int encSize = atoi(encSizeBuf);

	vector<uchar> enc = vector<uchar>(encSize);

	//recv the encoded frame
	comm->recv(&enc[0], encSize, 0);

	//decode the frame
	qlz_state_decompress *state_decompress = (qlz_state_decompress *)malloc(sizeof(qlz_state_decompress));
	qlz_decompress((const char*) &enc[0], (char*) frame1.data, state_decompress);


	//build pyramid for frame 1
	buildOpticalFlowPyramid(frame1, pyramid1, cvSize(pyrWindowSize,pyrWindowSize), 3);


	//start optical flow algorithm
	cout << "Started optical flow algorithm." << endl;
	high_resolution_clock::time_point t1 = high_resolution_clock::now();
    mtx.lock();
    while(loop)
    {
    	mtx.unlock();

    	//recv frame 2		
		//recv the size of the encoded frame
		comm->recv(encSizeBuf, 6, 0);
		encSize = atoi(encSizeBuf);

		enc = vector<uchar>(encSize);

		//recv the encoded frame
		comm->recv(&enc[0], encSize, 0);

		//uncompress recv data
		qlz_decompress((const char*) &enc[0], (char*) frame2.data, state_decompress);
		

		FeatureDetector* detector = new FastFeatureDetector(FASTThreshold,true);
		detector->detect(frame1, KeyPointVector);
		delete detector;

		if(KeyPointVector.size() > 30)
			FASTThreshold++;
		else 
			FASTThreshold--;

		//build pyramid for frame 2
		buildOpticalFlowPyramid(frame2, pyramid2, cvSize(pyrWindowSize,pyrWindowSize), 3);
		KeyPoint::convert(KeyPointVector, pointVector1);

		//run Lucas Kanade optical flow if features have been found
		if(KeyPointVector.size() > 0)
		{
			calcOpticalFlowPyrLK(pyramid1, pyramid2, pointVector1,
			 pointVector2, featuresFound, featuresError,
			 cvSize(pyrWindowSize,pyrWindowSize), 0,
			 cvTermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 10, 0.2),
			 0,0.0001);
		}

		//store pyramid 2 in pyramid 1
		frame1 = frame2.clone();
		pyramid1.swap(pyramid2);

		//find the average displacement
		displacement = trajectoryCalc(pointVector1, pointVector2, featuresFound,
		 featuresError, KeyPointVector.size());
		//Compensate angle: must be done on RPI

		char xBuf[4]; char yBuf[4];
		int xBufLen = sprintf(xBuf, "%d", displacement.x);
		int yBufLen = sprintf(yBuf, "%d", displacement.y);
		comm->send(xBuf,xBufLen,ZMQ_NOBLOCK);
		comm->send(yBuf,yBufLen,ZMQ_NOBLOCK);

		opticalFlowVectors.push_back(displacement);
		mtx.lock();
	}
	t.join();

	high_resolution_clock::time_point t2 = high_resolution_clock::now();
	duration<double> time_span = duration_cast<duration<double>>(t2 - t1);
	double fps = ((double)opticalFlowVectors.size())/time_span.count();

	ofstream myFile;
	myFile.open ("opticalFlow.txt");	

	myFile << "FPS: \t" << fps << endl;

	for (list<CvPoint>::iterator it=opticalFlowVectors.begin(); it!=opticalFlowVectors.end(); ++it){
		  myFile << "x:\t" << it->x << "\ty:\t" << it->y << endl;
	}

  	myFile.close();		

  	free(state_decompress);
}