static void calcKeyPointProjections( const vector<KeyPoint>& src, const Mat_<double>& H, vector<KeyPoint>& dst )
{
    if(  !src.empty() )
    {
        CV_Assert( !H.empty() && H.cols == 3 && H.rows == 3);
        dst.resize(src.size());
        vector<KeyPoint>::const_iterator srcIt = src.begin();
        vector<KeyPoint>::iterator       dstIt = dst.begin();
        for( ; srcIt != src.end(); ++srcIt, ++dstIt )
        {
            Point2f dstPt = applyHomography(H, srcIt->pt);

            float srcSize2 = srcIt->size * srcIt->size;
            Mat_<double> M(2, 2);
            M(0,0) = M(1,1) = 1./srcSize2;
            M(1,0) = M(0,1) = 0;
            Mat_<double> invM; invert(M, invM);
            Mat_<double> Aff; linearizeHomographyAt(H, srcIt->pt, Aff);
            Mat_<double> dstM; invert(Aff*invM*Aff.t(), dstM);
            Mat_<double> eval; eigen( dstM, eval );
            CV_Assert( eval(0,0) && eval(1,0) );
            float dstSize = (float)pow(1./(eval(0,0)*eval(1,0)), 0.25);

            // TODO: check angle projection
            float srcAngleRad = (float)(srcIt->angle*CV_PI/180);
            Point2f vec1(cos(srcAngleRad), sin(srcAngleRad)), vec2;
            vec2.x = (float)(Aff(0,0)*vec1.x + Aff(0,1)*vec1.y);
            vec2.y = (float)(Aff(1,0)*vec1.x + Aff(1,1)*vec1.y);
            float dstAngleGrad = fastAtan2(vec2.y, vec2.x);

            *dstIt = KeyPoint( dstPt, dstSize, dstAngleGrad, srcIt->response, srcIt->octave, srcIt->class_id );
        }
    }
}
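// A minimal sketch of what the applyHomography helper used above might look
// like (the helper itself is not shown in this example): map the point through
// the 3x3 homography in homogeneous coordinates and divide by the third
// coordinate, guarding against points mapped to infinity.
static Point2f applyHomographySketch( const Mat_<double>& H, const Point2f& pt )
{
    double z = H(2,0)*pt.x + H(2,1)*pt.y + H(2,2);
    if( z )
    {
        double w = 1./z;
        return Point2f( (float)((H(0,0)*pt.x + H(0,1)*pt.y + H(0,2))*w),
                        (float)((H(1,0)*pt.x + H(1,1)*pt.y + H(1,2))*w) );
    }
    return Point2f( std::numeric_limits<float>::max(), std::numeric_limits<float>::max() );
}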
void FaceAnalyser::UpdatePredictionTrack(Mat_<unsigned int>& prediction_corr_histogram, int& prediction_correction_count, vector<double>& correction, const vector<pair<string, double>>& predictions, double ratio, int num_bins, double min_val, double max_val, int min_frames)
{
	double length = max_val - min_val;
	if(length < 0)
		length = -length;

	correction.resize(predictions.size(), 0);

	// The median update
	if(prediction_corr_histogram.empty())
	{
		prediction_corr_histogram = Mat_<unsigned int>(predictions.size(), num_bins, (unsigned int)0);
	}
	
	for(int i = 0; i < prediction_corr_histogram.rows; ++i)
	{
		// Find the bin corresponding to the current prediction value
		int index = (predictions[i].second - min_val)*((double)num_bins)/(length);
		if(index < 0)
		{
			index = 0;
		}
		else if(index > num_bins - 1)
		{
			index = num_bins - 1;
		}
		prediction_corr_histogram.at<unsigned int>(i, index)++;
	}

	// Update the histogram count
	prediction_correction_count++;

	if(prediction_correction_count >= min_frames)
	{
		// Recompute the correction
		int cutoff_point = ratio * prediction_correction_count;

		// For each dimension
		for(int i = 0; i < prediction_corr_histogram.rows; ++i)
		{
			int cumulative_sum = 0;
			for(int j = 0; j < prediction_corr_histogram.cols; ++j)
			{
				cumulative_sum += prediction_corr_histogram.at<unsigned int>(i, j);
				if(cumulative_sum > cutoff_point)
				{
					double corr = min_val + j * (length/num_bins);
					correction[i] = corr;
					break;
				}
			}
		}
	}
}
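// Standalone sketch of the running-percentile idea implemented above: bin the
// observations of one dimension into a fixed-range histogram and read off the
// value below which `ratio` of the observed mass lies. Illustrative only and
// independent of FaceAnalyser; the function name is hypothetical.
static double HistogramPercentile(const std::vector<unsigned int>& hist, int count, double ratio, double min_val, double max_val)
{
	int num_bins = (int)hist.size();
	double bin_width = (max_val - min_val) / num_bins;
	int cutoff_point = (int)(ratio * count);
	int cumulative_sum = 0;
	for(int j = 0; j < num_bins; ++j)
	{
		cumulative_sum += (int)hist[j];
		// Same convention as above: report the lower edge of the cutoff bin
		if(cumulative_sum > cutoff_point)
			return min_val + j * bin_width;
	}
	return max_val;
}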
void EllipticKeyPoint::calcProjection( const vector<EllipticKeyPoint>& src, const Mat_<double>& H, vector<EllipticKeyPoint>& dst )
{
    if( !src.empty() )
    {
        assert( !H.empty() && H.cols == 3 && H.rows == 3);
        dst.resize(src.size());
        vector<EllipticKeyPoint>::const_iterator srcIt = src.begin();
        vector<EllipticKeyPoint>::iterator       dstIt = dst.begin();
        for( ; srcIt != src.end(); ++srcIt, ++dstIt )
            srcIt->calcProjection(H, *dstIt);
    }
}
template<typename _Tp, int chs>
int copyMakeBorder(/*const*/ Mat_<_Tp, chs>& src, Mat_<_Tp, chs>& dst, int top, int bottom, int left, int right, int borderType, const Scalar& value = Scalar())
{
	FBC_Assert(top >= 0 && bottom >= 0 && left >= 0 && right >= 0);

	if (src.isSubmatrix() && (borderType & BORDER_ISOLATED) == 0) {
		Size wholeSize;
		Point ofs;
		src.locateROI(wholeSize, ofs);
		int dtop = std::min(ofs.y, top);
		int dbottom = std::min(wholeSize.height - src.rows - ofs.y, bottom);
		int dleft = std::min(ofs.x, left);
		int dright = std::min(wholeSize.width - src.cols - ofs.x, right);
		src.adjustROI(dtop, dbottom, dleft, dright);
		top -= dtop;
		left -= dleft;
		bottom -= dbottom;
		right -= dright;
	}

	if (dst.empty() || dst.rows != (src.rows + top + bottom) || dst.cols != (src.cols + left + right)) {
		dst.release();
		dst = Mat_<_Tp, chs>(src.rows + top + bottom, src.cols + left + right);
	}

	if (top == 0 && left == 0 && bottom == 0 && right == 0) {
		if (src.data != dst.data || src.step != dst.step)
			src.copyTo(dst);
		return 0;
	}

	borderType &= ~BORDER_ISOLATED;

	if (borderType != BORDER_CONSTANT) {
		copyMakeBorder_8u(src.ptr(), src.step, src.size(), dst.ptr(), dst.step, dst.size(), top, left, src.elemSize(), borderType);
	} else {
		int cn = src.channels;
		AutoBuffer<double> buf(cn);

		scalarToRawData<_Tp, chs>(value, buf, cn);
		copyMakeConstBorder_8u(src.ptr(), src.step, src.size(), dst.ptr(), dst.step, dst.size(), top, left, (int)src.elemSize(), (uchar*)(double*)buf);
	}

	return 0;
}
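// Usage sketch for the copyMakeBorder above. It assumes the surrounding
// Mat_<_Tp, chs> API (rows/cols constructor as used in the function body) and
// OpenCV-style border constants; illustrative only.
static void copyMakeBorderExample(Mat_<uchar, 1>& src)
{
	Mat_<uchar, 1> padded;
	// Pad by two pixels on every side with a constant black border
	copyMakeBorder(src, padded, 2, 2, 2, 2, BORDER_CONSTANT, Scalar::all(0));
	// Or replicate the edge pixels instead (aaaa|abcdefgh|hhhh)
	copyMakeBorder(src, padded, 2, 2, 2, 2, BORDER_REPLICATE);
}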
int main (int argc, char **argv)
{

	vector<string> arguments = get_arguments(argc, argv);

	// Some initial parameters that can be overridden from the command line
	vector<string> files, dDirs, outposes, outvideos, outfeatures;
	
	// By default try webcam
	int device = 0;

	// cx and cy are not always half of dimx and dimy, so allow them to be overridden (start at 0 and initialise from the image if not specified)
    float fx = 500, fy = 500, cx = 0, cy = 0;
	int dimx = 0, dimy = 0;

	bool useCLMTracker = true;
	
	CLMWrapper::CLMParameters clmParams(arguments);
	
	clmParams.wSizeCurrent = clmParams.wSizeInit;

    PoseDetectorHaar::PoseDetectorHaarParameters haarParams;

	haarParams.ClassifierLocation = "classifiers/haarcascade_frontalface_alt.xml";
		
	// Get the input output file parameters
	CLMWrapper::get_video_input_output_params(files, dDirs, outposes, outvideos, outfeatures, arguments);
	// Get camera parameters
	CLMWrapper::get_camera_params(fx, fy, cx, cy, dimx, dimy, arguments);    
	
	// The modules that are being used for tracking
	CLMTracker::TrackerCLM clmModel;	
	
	// Face detector initialisation
	CascadeClassifier classifier(haarParams.ClassifierLocation);
	if(classifier.empty())
	{
		string err = "Could not open a face detector at: " + haarParams.ClassifierLocation;
		FATAL_STREAM( err );
	}

	bool done = false;
	
	int f_n = -1;

	while(!done)
	{
		string file;

		// We might specify multiple video files as arguments
		if(files.size() > 0)
		{
			f_n++;			
			file = files[f_n];
		}

		bool readDepth = !dDirs.empty();	

		// Do some grabbing
		VideoCapture vCap;
		if( file.size() > 0 )
		{
			INFO_STREAM( "Attempting to read from file: " << file );
			vCap = VideoCapture( file );
		}
		else
		{
			INFO_STREAM( "Attempting to capture from device: " << device );
			vCap = VideoCapture( device );

			// Read the first frame, which is often empty when capturing from a camera
			Mat img;
			vCap >> img;
		}

		if( !vCap.isOpened() ) FATAL_STREAM( "Failed to open video source" );
		else INFO_STREAM( "Device or file opened");

		Mat img;
		vCap >> img;

		// If no dimensions defined, do not do any resizing
		if(dimx == 0 || dimy == 0)
		{
			dimx = img.cols;
			dimy = img.rows;
		}
	
		// If optical centers are not defined just use center of image
		if(cx == 0 || cy == 0)
		{
			cx = dimx / 2.0f;
			cy = dimy / 2.0f;
		}
	
		// Creating output files
		std::ofstream posesFile;
		if(!outposes.empty())
		{
			posesFile.open (outposes[f_n]);
		}
	
		std::ofstream featuresFile;		
		if(!outfeatures.empty())
		{
			featuresFile.open(outfeatures[f_n]);
		}
	
		int frameProc = 0;

		// Number of consecutive frames in which a face has been detected
		int facesInRow = 0;

		// saving the videos
		VideoWriter writerFace;
		if(!outvideos.empty())
		{
			writerFace = VideoWriter(outvideos[f_n], CV_FOURCC('D','I','V','X'), 30, img.size(), true);		
		}

		// Variables useful for the tracking itself
		bool success = false;
		bool trackingInitialised = false;
	
		// For measuring the timings
		int64 t1,t0 = cv::getTickCount();
		double fps = 10;

		Mat disp;

		INFO_STREAM( "Starting tracking");
		while(!img.empty())
		{		

			Mat_<float> depth;
			Mat_<uchar> gray;
			cvtColor(img, gray, CV_BGR2GRAY);
		
			// Don't resize if it's unneeded
			Mat_<uchar> img_scaled;		
			if(dimx != gray.cols || dimy != gray.rows)
			{
				resize( gray, img_scaled, Size( dimx, dimy ) );
				resize(img, disp, Size( dimx, dimy));
			}
			else
			{
				img_scaled = gray;
				disp = img.clone();
			}
		
			namedWindow("colour",1);

			// Get depth image
			if(readDepth)
			{
				char dst[100];
				std::stringstream sstream;
				//sstream << dDir << "\\depth%06d.png";
				sstream << dDirs[f_n] << "\\depth%05d.png";
				sprintf(dst, sstream.str().c_str(), frameProc + 1);
				Mat_<short> dImg = imread(string(dst), -1);
				if(!dImg.empty())
				{
					if(dimx != dImg.cols || dimy != dImg.rows)
					{
						Mat_<short> dImgT;
						resize(dImg, dImgT, Size( dimx, dimy));
						dImgT.convertTo(depth, CV_32F);
					}
					else
					{
						dImg.convertTo(depth, CV_32F);
					}
				}
				else
				{
					WARN_STREAM( "Can't find depth image" );
				}
			}

			Vec6d poseEstimateHaar;
			Matx66d poseEstimateHaarUncertainty;

			Rect faceRegion;

			// The region where CLM should start its search (or, if tracking has failed, where to reinitialise from a detection)
			if(!trackingInitialised || (!success && ( frameProc  % 2 == 0)))
			{
				INFO_STREAM( "Attempting to initialise a face");
				// The detector can return multiple head pose observations
				vector<Vec6d> poseEstimatesInitialiser;
				vector<Matx66d> covariancesInitialiser;			
				vector<Rect> regionsInitialiser;

				bool initSuccess = PoseDetectorHaar::InitialisePosesHaar(img_scaled, depth, poseEstimatesInitialiser, covariancesInitialiser, regionsInitialiser, classifier, fx, fy, cx, cy, haarParams);
					
				if(initSuccess)
				{
					INFO_STREAM( "Face(s) detected");
					if(poseEstimatesInitialiser.size() > 1)
					{
						cout << "ambiguous detection: " << endl;
						// keep the closest one (this is a hack for the experiment)
						double best = 10000;
						int bestIndex = -1;
						for( size_t i = 0; i < poseEstimatesInitialiser.size(); ++i)
						{
							cout << regionsInitialiser[i].x << " " << regionsInitialiser[i].y <<  " " << regionsInitialiser[i].width << " " <<  regionsInitialiser[i].height << endl;
							if(poseEstimatesInitialiser[i][2] < best  && poseEstimatesInitialiser[i][2] > 100)
							{
								bestIndex = i;
								best = poseEstimatesInitialiser[i][2];
							}									
						}
						if(bestIndex != -1)
						{
							cout << "Choosing bbox:" << regionsInitialiser[bestIndex].x << " " << regionsInitialiser[bestIndex].y <<  " " << regionsInitialiser[bestIndex].width << " " <<  regionsInitialiser[bestIndex].height << endl;
							faceRegion = regionsInitialiser[bestIndex];
						}
						else
						{
							initSuccess = false;
						}
					}
					else
					{	
						faceRegion = regionsInitialiser[0];
					}				
	
					facesInRow++;
				}
			}

			// If condition for tracking is met initialise the trackers
			if(!trackingInitialised && facesInRow >= 1)
			{			
				INFO_STREAM( "Initialising CLM");
				trackingInitialised = CLMWrapper::InitialiseCLM(img_scaled, depth, clmModel, poseEstimateHaar, faceRegion, fx, fy, cx, cy, clmParams);		
				facesInRow = 0;
			}		

			// If tracking has been initialised, track; when it fails, the OpenCV detector above is used to reinitialise
			if(trackingInitialised)
			{
				success = CLMWrapper::TrackCLM(img_scaled, depth, clmModel, vector<Vec6d>(), vector<Matx66d>(), faceRegion, fx, fy, cx, cy, clmParams);								
			}			
			if(success)
			{			
				clmParams.wSizeCurrent = clmParams.wSizeSmall;
			}
			else
			{
				clmParams.wSizeCurrent = clmParams.wSizeInit;
			}

			// Changes for no reinit version
			//success = true;
			//clmParams.wSizeCurrent = clmParams.wSizeInit;

			Vec6d poseEstimateCLM = CLMWrapper::GetPoseCLM(clmModel, fx, fy, cx, cy, clmParams);

			if(!outfeatures.empty())
			{
				featuresFile << frameProc + 1 << " " << success;
				for (int i = 0; i < 66 * 2; ++ i)
				{
					featuresFile << " " << clmModel._shape.at<double>(i) << endl;
				}
				featuresFile << endl;
			}

			if(!outposes.empty())
			{
				posesFile << frameProc + 1 << " " << (float)frameProc * 1000/30 << " " << 1 << " " << poseEstimateCLM[0] << " " << poseEstimateCLM[1] << " " << poseEstimateCLM[2] << " " << poseEstimateCLM[3] << " " << poseEstimateCLM[4] << " " << poseEstimateCLM[5] << endl;
			}										
	
			if(success)			
			{
				int idx = clmModel._clm.GetViewIdx(); 	

				// drawing the facial features on the face if tracking is successful
				clmModel._clm._pdm.Draw(disp, clmModel._shape, clmModel._clm._triangulations[idx], clmModel._clm._visi[0][idx]);

				DrawBox(disp, poseEstimateCLM, Scalar(255,0,0), 3, fx, fy, cx, cy);			
			}
			else if(!clmModel._clm._pglobl.empty())
			{			
				int idx = clmModel._clm.GetViewIdx(); 	
			
				// draw the facial features
				clmModel._clm._pdm.Draw(disp, clmModel._shape, clmModel._clm._triangulations[idx], clmModel._clm._visi[0][idx]);

				// if tracking fails draw a red outline
				DrawBox(disp, poseEstimateCLM, Scalar(0,0,255), 3, fx, fy, cx, cy);	
			}
			if(frameProc % 10 == 0)
			{      
				t1 = cv::getTickCount();
				fps = 10.0 / (double(t1-t0)/cv::getTickFrequency()); 
				t0 = t1;
			}

			char fpsC[255];
			sprintf(fpsC, "%d", (int)fps);
			string fpsSt("FPS:");
			fpsSt += fpsC;
			cv::putText(disp, fpsSt, cv::Point(10,20), CV_FONT_HERSHEY_SIMPLEX, 0.5, CV_RGB(255,0,0));
		
			frameProc++;
						
			imshow("colour", disp);
			if(!depth.empty())
			{
				imshow("depth", depth/2000.0);
			}

			vCap >> img;
		
			if(!outvideos.empty())
			{		
				writerFace << disp;
			}
		
			// detect key presses
			char c = cv::waitKey(2);

			// key detections

			// restart the tracker
			if(c == 'r')
			{
				trackingInitialised = false;
				facesInRow = 0;
			}
			// quit the application
			else if(c=='q')
			{
				return(0);
			}


		}
		
		trackingInitialised = false;
		facesInRow = 0;

		posesFile.close();
		featuresFile.close();

		// break out of the loop if done with all the files
		if(f_n == (int)files.size() - 1)
		{
			done = true;
		}
	}

	return 0;
}
int main (int argc, char **argv)
{

	//Convert arguments to more convenient vector form
	vector<string> arguments = get_arguments(argc, argv);

	// Some initial parameters that can be overridden from the command line
	vector<string> files, dFiles, outimages, outfeaturess;

	// these can be overridden using the -cx, -cy, -fx, -fy, -dimx and -dimy flags
	float fx = 500, fy = 500, cx = 0, cy = 0;
	int dimx = 0, dimy = 0;
	
	// initial translation, rotation and scale (can be specified by the user)
	vector<Vec6d> initial_poses;

	bool useCLMTracker = true;

	// Get camera parameters
	CLMWrapper::get_camera_params(fx, fy, cx, cy, dimx, dimy, arguments);
	CLMWrapper::get_image_input_output_params(files, dFiles, outfeaturess, outimages, initial_poses, arguments);	
	CLMWrapper::CLMParameters clmParams(arguments);	

	// The modules that are being used for tracking
	CLMTracker::TrackerCLM clmModel;

	cout << "Loading the model" << endl;
	clmModel.Read(clmParams.defaultModelLoc, clmParams.override_pdm_loc);
	cout << "Model loaded" << endl;

	PoseDetectorHaar::PoseDetectorHaarParameters haarParams;

	haarParams.ClassifierLocation = "classifiers/haarcascade_frontalface_alt2.xml";

	CascadeClassifier classifier(haarParams.ClassifierLocation);

	bool visualise = true;

	//clmParams.multi_view = true;

	// Do some image loading
	for(size_t i = 0; i < files.size(); i++)
	{
		string file = files.at(i);

		// Loading image
		Mat img = imread(file, -1);
		
		// Loading depth file if exists
		Mat dTemp;		
		Mat_<float> dImg;

		if(dFiles.size() > 0)
		{
			string dFile = dFiles.at(i);
			dTemp = imread(dFile, -1);
			dTemp.convertTo(dImg, CV_32F);
		}

		if(dimx != 0)
		{
			cv::resize(img.clone(), img, cv::Size(dimx, dimy));
			if(!dImg.empty())
			{
				cv::resize(dImg.clone(), dImg, cv::Size(dimx, dimy));
			}
		}

		bool trackingInitialised = false;
	
		// Making sure the image is in uchar grayscale
		Mat_<uchar> gray;		
		convert_to_grayscale(img, gray);
		
			
		// Face detection initialisation
		Vec6d poseEstimateHaar;
		Matx66d poseEstimateHaarUncertainty;

		vector<Vec6d> poseEstimatesInitialiser;
		vector<Matx66d> covariancesInitialiser;	
		vector<Rect> faceRegions;
	
		bool initSuccess = false;

		// if no initial pose is defined, just use the OpenCV face detector
		if(initial_poses.empty())
		{
			initSuccess = PoseDetectorHaar::InitialisePosesHaar(gray, dImg, poseEstimatesInitialiser, covariancesInitialiser, faceRegions, classifier, fx, fy, cx, cy, haarParams);

			if(initSuccess)
			{
				if(poseEstimatesInitialiser.size() > 1)
				{
					cout << "Multiple faces detected" << endl;
				}
			}
		
			if(initSuccess)
			{
				// perform landmark detection for every face detected
				for(int r=0; r < faceRegions.size(); ++r)
				{
					if(!clmParams.multi_view)
					{
						Vec6d pose(0.0);
						bool success = CLMWrapper::InitialiseCLM(gray, dImg, clmModel, pose, faceRegions[r], fx, fy, cx, cy, clmParams);
					}
					else
					{
						vector<Vec3d> orientations;

						// Have multiple hypotheses, check which one is best and keep it
						orientations.push_back(Vec3d(0,0,0));
						orientations.push_back(Vec3d(0,0.5236,0));
						orientations.push_back(Vec3d(0,-0.5236,0));
						orientations.push_back(Vec3d(0.5236,0,0));
						orientations.push_back(Vec3d(-0.5236,0,0));
				
						double best_lhood;
						Mat best_shape;

						for (size_t p = 0; p<orientations.size(); ++p)
						{

							Vec6d pose;		

							pose(1) = orientations[p][0];
							pose(2) = orientations[p][1];
							pose(3) = orientations[p][2];
					
							CLMWrapper::InitialiseCLM(gray, dImg, clmModel, pose, faceRegions[r], fx, fy, cx, cy, clmParams);

							double curr_lhood = clmModel._clm._likelihood;

							if(p==0 || curr_lhood > best_lhood)
							{
								best_lhood = curr_lhood;
								best_shape = clmModel._shape.clone();
							}
						}

						clmModel._shape = best_shape;
						clmModel._clm._likelihood = best_lhood;
					}

					// Writing out the detected landmarks
					if(!outfeaturess.empty())
					{
						char name[100];
						sprintf(name, "_det_%d", r);

						boost::filesystem::path slash("/");
						std::string preferredSlash = slash.make_preferred().string();

						// append detection number
						boost::filesystem::path out_feat_path(outfeaturess.at(i));
						boost::filesystem::path dir = out_feat_path.parent_path();
						boost::filesystem::path fname = out_feat_path.filename().replace_extension("");
						boost::filesystem::path ext = out_feat_path.extension();
						string outfeatures = dir.string() + preferredSlash + fname.string() + string(name) + ext.string();
						write_out_landmarks(outfeatures, clmModel);
					}

					// displaying detected stuff
					Mat disp;
					create_display_image(img, disp, clmModel);

					if(visualise)
					{
						// For debug purposes show final likelihood
						//std::ostringstream s;
						//s << clmModel._clm._likelihood;
						//string lhoodSt("Lhood:");
						//lhoodSt += s.str();
						//cv::putText(disp, lhoodSt, cv::Point(10,20), CV_FONT_HERSHEY_SIMPLEX, 0.5, CV_RGB(255,0,0));
						imshow("colour", disp);
						cv::waitKey(10);
					}

					if(!outimages.empty())
					{
						string outimage = outimages.at(i);
						if(!outimage.empty())
						{
							char name[100];
							sprintf(name, "_det_%d", r);

							boost::filesystem::path slash("/");
							std::string preferredSlash = slash.make_preferred().string();

							// append detection number
							boost::filesystem::path out_feat_path(outimage);
							boost::filesystem::path dir = out_feat_path.parent_path();
							boost::filesystem::path fname = out_feat_path.filename().replace_extension("");
							boost::filesystem::path ext = out_feat_path.extension();
							outimage = dir.string() + preferredSlash + fname.string() + string(name) + ext.string();

							imwrite(outimage, disp);	
						}
					}


				}
			}
			else
			{
				cout << "No faces detected" << endl;
				continue;
			}
		}
		else
		{
			initSuccess = true;

			// if pose defined we don't need the conventional initialisation			
			if(!clmParams.multi_view)
			{
				clmModel._clm._pglobl = Mat(initial_poses[i]);			

				Vec6d pose = CLMWrapper::GetPoseCLM(clmModel, dImg, fx, fy, cx, cy, clmParams);

				bool success = CLMWrapper::InitialiseCLM(gray, dImg, clmModel, pose, Rect(), fx, fy, cx, cy, clmParams);
			}
			else
			{
				vector<Vec3d> orientations;

				// Have multiple hypotheses, check which one is best and keep it
				orientations.push_back(Vec3d(0,0,0));
				orientations.push_back(Vec3d(0,0.5236,0));
				orientations.push_back(Vec3d(0,-0.5236,0));
				orientations.push_back(Vec3d(0.5236,0,0));
				orientations.push_back(Vec3d(-0.5236,0,0));
				
				double best_lhood;
				Mat best_shape;

				for (size_t p = 0; p<orientations.size(); ++p)
				{

					clmModel._clm._pglobl = Mat(initial_poses[i]);		

					clmModel._clm._pglobl.at<double>(1) = orientations[p][0];
					clmModel._clm._pglobl.at<double>(2) = orientations[p][1];
					clmModel._clm._pglobl.at<double>(3) = orientations[p][2];
					
					Vec6d pose = CLMWrapper::GetPoseCLM(clmModel, dImg, fx, fy, cx, cy, clmParams);

					CLMWrapper::InitialiseCLM(gray, dImg, clmModel, pose, Rect(), fx, fy, cx, cy, clmParams);

					double curr_lhood = clmModel._clm._likelihood;

					if(p==0 || curr_lhood > best_lhood)
					{
						best_lhood = curr_lhood;
						best_shape = clmModel._shape.clone();
					}
				}

				clmModel._shape = best_shape;

			}

			// Writing out the detected landmarks
			if(!outfeaturess.empty())
			{
				string outfeatures = outfeaturess.at(i);
				write_out_landmarks(outfeatures, clmModel);
			}

			// displaying detected stuff
			Mat disp;
			create_display_image(img, disp, clmModel);

			if(visualise)
			{
				imshow("colour", disp);
				cv::waitKey(5);
			}

			if(!outimages.empty())
			{
				string outimage = outimages.at(i);
				if(!outimage.empty())
				{
					imwrite(outimage, disp);	
				}
			}


		}				

		// Reset the parameters if more images are coming
		if(files.size() > 0)
		{
			clmModel._clm._plocal.setTo(0);
		}

	}
	return 0;
}
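// The five-orientation hypothesis search appears twice in the example above
// (once per detected region, once for user-supplied poses). A minimal sketch
// of how it could be factored into a helper, using the same CLMWrapper and
// CLMTracker types; illustrative only.
static void InitialiseWithBestOrientation(const Mat_<uchar>& gray, const Mat_<float>& dImg,
	CLMTracker::TrackerCLM& clmModel, const Rect& region,
	float fx, float fy, float cx, float cy, CLMWrapper::CLMParameters& clmParams)
{
	// Candidate rotations: frontal, then yaw and pitch of +/-30 degrees (0.5236 rad)
	vector<Vec3d> orientations;
	orientations.push_back(Vec3d(0,0,0));
	orientations.push_back(Vec3d(0,0.5236,0));
	orientations.push_back(Vec3d(0,-0.5236,0));
	orientations.push_back(Vec3d(0.5236,0,0));
	orientations.push_back(Vec3d(-0.5236,0,0));

	double best_lhood = 0;
	Mat best_shape;
	for (size_t p = 0; p < orientations.size(); ++p)
	{
		Vec6d pose(0.0);
		pose(1) = orientations[p][0];
		pose(2) = orientations[p][1];
		pose(3) = orientations[p][2];

		CLMWrapper::InitialiseCLM(gray, dImg, clmModel, pose, region, fx, fy, cx, cy, clmParams);

		// Keep the hypothesis with the highest model likelihood
		if (p == 0 || clmModel._clm._likelihood > best_lhood)
		{
			best_lhood = clmModel._clm._likelihood;
			best_shape = clmModel._shape.clone();
		}
	}
	clmModel._shape = best_shape;
	clmModel._clm._likelihood = best_lhood;
}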
int main(int argc, char **argv) {
    try {
        ParseArgs(argc, argv);

        int num_cameras = static_cast<int>(imgs.size());
        if (num_cameras < 1)
            throw runtime_error("Need at least one camera");

        // Find features

        cout << "Finding features...\n";
        Ptr<detail::FeaturesFinder> features_finder = features_finder_creator->Create();

        for (int i = 0; i < num_cameras; ++i) {
            int64 t = getTickCount();
            cout << "Finding features in '" << img_names[i] << "'... ";

            Ptr<detail::ImageFeatures> features = new detail::ImageFeatures();
            (*features_finder)(imgs[i], *features);
            features_collection[i] = features;

            cout << "#features = " << features_collection.find(i)->second->keypoints.size()
                 << ", time = " << (getTickCount() - t) / getTickFrequency() << " sec\n";
        }

        // Match all pairs

        cout << "Matching pairs... ";
        MatchesCollection matches_collection;
        Ptr<detail::FeaturesMatcher> matcher = features_matcher_creator.Create();

        FeaturesCollection::iterator from_iter = features_collection.begin();
        FeaturesCollection::iterator from_next_iter = from_iter; ++from_next_iter;
        FeaturesCollection::iterator to_iter;

        for (; from_next_iter != features_collection.end(); from_iter = from_next_iter++) {
            for (to_iter = from_next_iter; to_iter != features_collection.end(); ++to_iter) {
                cout << "(" << from_iter->first << "->" << to_iter->first << ") ";
                detail::MatchesInfo mi;
                (*matcher)(*(from_iter->second), *(to_iter->second), mi);
                matches_collection[make_pair(from_iter->first, to_iter->first)]
                        = new vector<DMatch>(mi.matches);
            }
        }
        cout << endl;

        // Estimate homographies

        HomographiesP2 Hs;
        HomographiesP2 good_Hs;
        vector<Mat> Hs_from_0;
        RelativeConfidences rel_confs;
        Mat keypoints1, keypoints2;

        cout << "Estimating Hs...\n";
        for (int from = 0; from < num_cameras - 1; ++from) {
            for (int to = from + 1; to < num_cameras; ++to) {
                const vector<DMatch> &matches = *(matches_collection.find(make_pair(from, to))->second);

                cout << "Estimating H between '" << img_names[from] << "' and '" << img_names[to]
                     << "'... #matches = " << matches.size();

                if (static_cast<int>(matches.size()) < min_num_matches) {
                    cout << ", not enough matches\n";
                    continue;
                }

                ExtractMatchedKeypoints(*(features_collection.find(from)->second),
                                        *(features_collection.find(to)->second),
                                        matches, keypoints1, keypoints2);
                vector<uchar> inliers_mask;
                Mat_<double> H = findHomography(keypoints1.reshape(2), keypoints2.reshape(2),
                                                inliers_mask, RANSAC, H_est_thresh);

                if (H.empty()) {
                    cout << ", can't estimate H\n";
                    continue;
                }

                Ptr<vector<DMatch> > inliers = new vector<DMatch>();
                for (size_t i = 0; i < matches.size(); ++i)
                    if (inliers_mask[i])
                        inliers->push_back(matches[i]);
                cout << ", #inliers = " << inliers->size();

                double rms_err = 0;
                for (size_t i = 0; i < matches.size(); ++i) {
                    const Point2d &kp1 = keypoints1.at<Point2d>(0, i);
                    const Point2d &kp2 = keypoints2.at<Point2d>(0, i);
                    double x = H(0, 0) * kp1.x + H(0, 1) * kp1.y + H(0, 2);
                    double y = H(1, 0) * kp1.x + H(1, 1) * kp1.y + H(1, 2);
                    double z = H(2, 0) * kp1.x + H(2, 1) * kp1.y + H(2, 2);
                    x /= z; y /= z;
                    rms_err += (kp2.x - x) * (kp2.x - x) + (kp2.y - y) * (kp2.y - y);
                }
                rms_err = sqrt(rms_err / matches.size());
                cout << ", RMS err = " << rms_err;

                // See "Automatic Panoramic Image Stitching using Invariant Features"
                // by Matthew Brown and David G. Lowe, IJCV 2007 for the explanation
                double confidence = inliers->size() / (8 + 0.3 * matches.size()) - 1;

                rel_confs[make_pair(from, to)] = confidence;
                cout << ", conf = " << confidence;                

                cout << endl;                                

                Hs[make_pair(from, to)] = H;
                matches_collection[make_pair(from, to)] = inliers;

                if (confidence > 0)
                    good_Hs[make_pair(from, to)] = H;
                if (from == 0)
                    Hs_from_0.push_back(H);
            }
        }

        // Linear calibration

        if (K_init.empty()) {
            cout << "Linear calibrating...\n";
            if (lin_est_skew)
                K_init = CalibRotationalCameraLinear(good_Hs);
            else
                K_init = CalibRotationalCameraLinearNoSkew(good_Hs);
            cout << "K_init =\n" << K_init << endl;
        }

        // Non-linear refinement

        cout << "Refining camera...\n";

        if (static_cast<int>(Hs_from_0.size()) != num_cameras - 1) {
            stringstream msg;
            msg << "Refinement requires Hs between first and all other images, "
                << "but only " << Hs_from_0.size() << " were/was found";
            throw runtime_error(msg.str());
        }

        map<int, Mat> Rs;
        Rs[0] = Mat::eye(3, 3, CV_64F);
        for (int i = 1; i < num_cameras; ++i)
            Rs[i] = K_init.inv() * Hs_from_0[i - 1] * K_init;

        Mat_<double> K_refined = K_init.clone();
        if (refine_skew)
            RefineRigidCamera(K_refined, Rs, features_collection, matches_collection);
        else {
            K_refined(0, 1) = 0;
            RefineRigidCamera(K_refined, Rs, features_collection, matches_collection,
                              REFINE_FLAG_K_ALL & ~REFINE_FLAG_K_SKEW);
        }
        cout << "K_refined =\n" << K_refined << endl;

        cout << "SUMMARY\n";
        cout << "K_init =\n" << K_init << endl;
        cout << "K_refined =\n" << K_refined << endl;
    }
    catch (const exception &e) {
        cout << "Error: " << e.what() << endl;
    }
    return 0;
}
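// Background sketch for the rotation recovery used above (Rs[i] = K^-1 * H * K):
// for a purely rotating camera a pixel maps as x' ~ K * R * K^-1 * x, so the
// inter-image homography is H = s * K * R * K^-1 and R = K^-1 * H * K holds only
// up to the scale s. The code above leaves that normalisation implicit; one way
// to make it explicit (illustrative only):
Mat RotationFromHomography(const Mat &H, const Mat &K)
{
    Mat R = K.inv() * H * K;
    double d = determinant(R);
    if (d < 0) { R = -R; d = -d; }  // fix the sign so det(R) > 0
    R /= pow(d, 1.0 / 3);           // a rotation must have unit determinant
    return R;
}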