// Triggered when scrolling with the scroll wheel on the mouse.
// Zooms by adjusting the vertical field of view, then rebuilds the
// projection matrix and schedules a repaint.
void MainWindow::wheelEvent(QWheelEvent * ev)
{
    // NOTE(review): QWheelEvent::delta() is deprecated since Qt 5;
    // consider ev->angleDelta().y() if this project targets Qt 5+.
    qDebug() << "Mouse wheel:" << ev->delta();
    // One standard wheel notch gives delta() == 120, so this changes the
    // FoV by 1 per notch (integer division truncates smaller deltas).
    FoV -= ev->delta()/100;
    // Clamp the field of view to a sane range: a value <= 0 or >= 180
    // degrees would produce a degenerate/inverted projection matrix.
    if (FoV < 1) {
        FoV = 1;
    } else if (FoV > 179) {
        FoV = 179;
    }
    calculateProjection();

    renderLater();
}
// Example #2
    // Returns a pointer to the camera's projection matrix, lazily
    // recomputing it first if the cached copy has been invalidated.
    const D3DXMATRIX* Camera::getProjectionMatrix()
    {
        // Rebuild only when the cache is marked stale.
        if (!m_projectionCacheValid) {
            calculateProjection();
        }
        return &m_projMatrix;
    }
// Runs RANSAC over detected facial feature point candidates to find the
// consensus set(s) of 2D landmarks that best fit a projection of the 3D
// Morphable Model (3DMM). The best consensus sets found are printed to
// stdout; intermediate debug images are written to "out\".
//
// Parameters:
//   img                              - input image (used for debug rendering only)
//   landmarkData                     - per landmark type, the list of detected candidate patches (2D coords)
//   mm                               - the Morphable Model supplying 3D vertices per landmark name
//   thresholdForDatapointFitsModel   - max reprojection error (pixels) for a candidate to count as an inlier
//   numIter                          - number of RANSAC iterations (0 = estimate automatically)
//   numClosePointsRequiredForGoodFit - minimum consensus-set size for a model to be considered good
//   minPointsToFitModel              - number of distinct landmark types used to fit the initial model
//
// NOTE(review): numIter, minPointsToFitModel and thresholdForDatapointFitsModel
// are overwritten with hard-coded values right below, so the caller's arguments
// are currently ignored — presumably leftover debug/tuning settings; confirm
// whether this is intended before relying on the parameters.
void FeaturePointsRANSAC::runRANSAC(Mat img, vector<pair<string, vector<shared_ptr<imageprocessing::Patch>>>> landmarkData, MorphableModel mm, float thresholdForDatapointFitsModel, int numIter/*=0*/, int numClosePointsRequiredForGoodFit/*=4*/, int minPointsToFitModel/*=3*/)
{
	if(numIter==0) {
		// Calculate/estimate how many iterations we should run (see Wikipedia)
		numIter = 2500;
	}
	// NOTE(review): these hard-coded assignments override the parameters (and
	// the estimate above) — see the function comment.
	numIter = 50;
	minPointsToFitModel = 3;
	bool useSVD = false;	// NOTE(review): set but never read in this function
	thresholdForDatapointFitsModel = 30.0f;

	// Note: Doesn't seem to make sense to use more than 3 points for the initial projection. With 4 (and SVD instead of inv()), the projection of the other model-LMs is quite wrong already.
	// So: minPointsToFitModel == 3 unchangeable! (hm, really...?)

	// RANSAC general TODO: For the nose, take into account that the face must not be upside down (otherwise the feature would not be found; it is not symmetric top/bottom)?

	// TODO Use 1/2 of detected face patch = IED, then discard all model proposals that are smaller than 2/3 IED or so? Make use of face-box...
	// Take face-box as probability of a face there... then also PSM output... then take regions where the FD doesn't have a response, e.g. Skin, Circles etc... and also try PSM there.

	// Build an "iterative global face-probability map" with all those detectors?
	// use additional feature spaces (gabor etc)

	// 
	// Another Idea: Use 3 detected points, project the 3DMM, then, around a possible 4th point, use a detector and search this region!
	//					(I guess I need a dynamic detector map for this :-) Attaching to Image not a good idea... (really?)... use a Master-detector or so? Hm I kind of already have that (Casc.Fac.Feat...))

	// Only temporary, to render/print the 3DMM landmarks for debugging.
	vector<string> modelPointsToRender;
	modelPointsToRender.push_back("nose_tip");
	modelPointsToRender.push_back("mouth_rc");
	modelPointsToRender.push_back("mouth_lc");
	modelPointsToRender.push_back("chin");
	modelPointsToRender.push_back("reye_c");
	modelPointsToRender.push_back("leye_c");
	modelPointsToRender.push_back("rear_c");
	modelPointsToRender.push_back("lear_c");
	modelPointsToRender.push_back("mouth_ulb");
	modelPointsToRender.push_back("mouth_llb");
	modelPointsToRender.push_back("rebr_olc");
	modelPointsToRender.push_back("lebr_olc");
	modelPointsToRender.push_back("rear_ic");
	modelPointsToRender.push_back("lear_ic");
	modelPointsToRender.push_back("rear_lobe");
	modelPointsToRender.push_back("lear_lobe");
	modelPointsToRender.push_back("rear_oc");
	modelPointsToRender.push_back("lear_oc");
	modelPointsToRender.push_back("rear_uc");
	modelPointsToRender.push_back("lear_uc");

	unsigned int iterations = 0;

	//boost::random::mt19937 rndGen(time(0));	// Pseudo-random number generators should not be constructed (initialized) frequently during program execution, for two reasons... see boost doc. But this is not a problem as long as we don't call runRANSAC too often (e.g. every frame).
	// Fixed seed of 1 makes every run deterministic (deliberate for debugging).
	boost::random::mt19937 rndGen(1); // TODO: Use C++11

	// Todo: Probably faster if I convert all LMs to Point2i beforehands, so I only do it once.
	// Maybe I want the patch certainty later, then I need to make a pair<Point2i, float> or so. But the rest of the Patch I won't need for certain.

	// indexMap holds one entry (the landmark-type index) per candidate point, so
	// that drawing a uniform index over it gives each detected point the same
	// probability of being chosen (e.g. more eye candidates -> eyes chosen more often).
	vector<int> indexMap;
	for (unsigned int i=0; i<landmarkData.size(); ++i) {
		for (unsigned int j=0; j<landmarkData[i].second.size(); ++j) {
			indexMap.push_back(i);
		}
	}
	boost::random::uniform_int_distribution<> rndLandmarkDistr(0, indexMap.size()-1);

	// threshold around 1/3 IED?

	// some LMs wrong? nose_tip? No

	vector<vector<pair<string, Point2f>>> bestConsensusSets;
	
	// NOTE(review): unsigned `iterations` compared against signed `numIter`
	// (fine while numIter is positive, but a compiler warning magnet).
	while (iterations < numIter) {
		++iterations;
		// maybe_inliers := #minPointsToFitModel randomly selected values from data
		vector<pair<string, shared_ptr<imageprocessing::Patch> > > maybeInliersPatches; // CHANGED TO PATCH
		unsigned int distinctFfpsChosen = 0;	// we don't need this, we could use size of alreadyChosenFfps?
		set<int> alreadyChosenFfps;
		// NOTE(review): if landmarkData contains fewer than minPointsToFitModel
		// distinct landmark types, this loop never terminates — TODO confirm the
		// caller guarantees at least 3 types.
		while(distinctFfpsChosen < minPointsToFitModel) {

			int whichFfp = indexMap[rndLandmarkDistr(rndGen)];	// Find out which ffp to take
			boost::random::uniform_int_distribution<> rndCandidateDistr(0, landmarkData[whichFfp].second.size()-1);	// this could be taken out of the loop and only done once for each landmark-type
			int whichPoint = rndCandidateDistr(rndGen);			// Find out which detected point from the selected ffp to take
			string thePointLMName = landmarkData[whichFfp].first;	// The LM we took (e.g. reye_c)
			shared_ptr<imageprocessing::Patch> thePointPatch = landmarkData[whichFfp].second[whichPoint];	// The patch (with 2D-coords) we took // CHANGED TO PATCH

			// Check if we didn't already choose this FFP-type
			if(alreadyChosenFfps.find(whichFfp)==alreadyChosenFfps.end()) {
				// We didn't already choose this FFP-type:
				maybeInliersPatches.push_back(make_pair(thePointLMName, thePointPatch));
				alreadyChosenFfps.insert(whichFfp);
				++distinctFfpsChosen;
			}
		}
		// We got #minPointsToFitModel distinct maybe_inliers, go on
		// == maybeInliers
		// maybe_model := model parameters fitted to maybeInliers

		// Convert the chosen patches to named 2D points.
		vector<pair<string, Point2f> > maybeInliers;
		/*maybeInliersDbg.push_back(make_pair("reye_c", Point2f(100.0f, 100.0f)));
		maybeInliersDbg.push_back(make_pair("leye_c", Point2f(160.0f, 100.0f)));
		//maybeInliersDbg.push_back(make_pair("mouth_rc", Point2f(110.0f, 180.0f)));
		maybeInliersDbg.push_back(make_pair("mouth_lc", Point2f(150.0f, 180.0f)));*/
		for (unsigned int i=0; i<maybeInliersPatches.size(); ++i) {
			maybeInliers.push_back(make_pair(maybeInliersPatches[i].first, Point2f((float)maybeInliersPatches[i].second->getX(), (float)maybeInliersPatches[i].second->getY())));
		}

		LandmarksVerticesPair lmVertPairAtOrigin = getCorrespondingLandmarkpointsAtOriginFrom3DMM(maybeInliers, mm);
		// Calculate s and t (scale and translation of the model-to-image projection)
		// Todo: Using this method, we don't check if the face is facing us or not. Somehow check this.
		//			E.g. give the LMs color, or text, or some math?
		pair<Mat, Mat> projection = calculateProjection(lmVertPairAtOrigin.landmarks, lmVertPairAtOrigin.vertices);
		Mat s = projection.first;
		Mat t = projection.second;

		Mat outimg = img.clone(); // CHANGED 2013
		//drawFull3DMMProjection(outimg, lmVertPairAtOrigin.verticesMean, lmVertPairAtOrigin.landmarksMean, s, t);
		//vector<pair<string, Point2f> > pointsInImage = projectVerticesToImage(modelPointsToRender, lmVertPairAtOrigin.verticesMean, lmVertPairAtOrigin.landmarksMean, s, t);
		//drawAndPrintLandmarksInImage(outimg, pointsInImage);

		// TODO COMMENTED 2013
		//Logger->drawFfpsSmallSquare(outimg, maybeInliers);		// TODO: Should not call this directly! Make a LogImg... function!
		//drawLandmarksText(outimg, maybeInliers);
		//imwrite("out\\a_ransac_initialPoints.png", outimg);
		// END

		// consensus_set := maybe_inliers
		vector<pair<string, Point2f> > consensusSet = maybeInliers;

		// (TODO? get the IED of the current projection, to calculate the threshold. Hmm - what if I dont have the eye-LMs... Somehow make thresh dynamic (because of different face-sizes! absolute Pixels not good!))
		// But the FaceDetector knows the face-box size!!!

		// for every point in data not in maybe_inliers
		for (unsigned int i=0; i<indexMap.size(); ++i) {	// This could be made faster, by using i<landmarkData.size(), and then skip the whole landmark
			int whichFfp = indexMap[i];	// Find out which ffp to take
			// Check if we didn't already choose this FFP-type
			if(alreadyChosenFfps.find(whichFfp)==alreadyChosenFfps.end()) {
				// We didn't already choose this FFP-type:
				alreadyChosenFfps.insert(whichFfp);
				++distinctFfpsChosen;
				// Loop through all candidates of this FFP-type. If several fit, use the one with the lowest error.
				vector<double> currentFfpDistances; //TODO pre-Alloc with ...
				for (unsigned int j=0; j<landmarkData[whichFfp].second.size(); ++j) {
					// if point fits maybe_model with an error smaller than t
					// Fit the point with current model:
					string thePointLMName = landmarkData[whichFfp].first;	// The LM we took (e.g. reye_c)
					shared_ptr<imageprocessing::Patch> thePointPatch = landmarkData[whichFfp].second[j];	// The patch (with 2D-coords) we took // // CHANGED TO PATCH
					// get "thePointLMName" vertex from 3DMM, project into 2D with s, t
					vector<string> theNewLmToProject;
					theNewLmToProject.push_back(thePointLMName);
					vector<pair<string, Point2f> > theNewLmInImage = projectVerticesToImage(theNewLmToProject, lmVertPairAtOrigin.verticesMean, lmVertPairAtOrigin.landmarksMean, s, t, mm);
					// error of theNewLmInImage[0] - thePointPatch < t ?
					cv::Vec2f theNewLm(theNewLmInImage[0].second.x, theNewLmInImage[0].second.y);
					cv::Vec2f theNewPointPatch((float)thePointPatch->getX(), (float)thePointPatch->getY());
					double distance = cv::norm(theNewLm, theNewPointPatch, cv::NORM_L2);
					currentFfpDistances.push_back(distance);
				}
				// Dereferencing min_element's result is safe here: every index in
				// indexMap corresponds to a landmark type with at least one
				// candidate, so currentFfpDistances is never empty.
				vector<double>::iterator beforeItr = min_element(currentFfpDistances.begin(), currentFfpDistances.end());
				if(*beforeItr > thresholdForDatapointFitsModel) {
					// None of the candidates for the current Ffp fit the model well. Don't add anything to the consensusSet.
				} else {
					// We found a candidate that fits the model, add it to the consensusSet! (and continue with the next Ffp)
					int positionOfMinimumElement = distance(currentFfpDistances.begin(), beforeItr);
					shared_ptr<imageprocessing::Patch> thePointPatch = landmarkData[whichFfp].second[positionOfMinimumElement]; // CHANGED TO PATCH
					Point2f theNewPointPatch((float)thePointPatch->getX(), (float)thePointPatch->getY());
					consensusSet.push_back(make_pair(landmarkData[whichFfp].first, theNewPointPatch));
				}
			}
		}
		// We went through all Ffp's.
		// if the number of elements in consensus_set is > d (this implies that we may have found a good model, now test how good it is)
		// NOTE(review): size_t vs int comparison (warning magnet, harmless for positive d).
		if (consensusSet.size()<numClosePointsRequiredForGoodFit) {
			continue;	// We didn't find a good model, the consensusSet only consists of the points projected (e.g. 3).
		}
		// this_model := model parameters fitted to all points in consensus_set

		LandmarksVerticesPair consensusSetLmVertPairAtOrigin = getCorrespondingLandmarkpointsAtOriginFrom3DMM(consensusSet, mm);	// Todo: It's kind of not clear if this expects centered 2D LMs or not-centered (it's not-centered)
		// Todo: Using this method, we don't check if the face is facing us or not. Somehow check this.
		pair<Mat, Mat> projectionC = calculateProjection(consensusSetLmVertPairAtOrigin.landmarks, consensusSetLmVertPairAtOrigin.vertices);
		Mat sc = projectionC.first;
		Mat tc = projectionC.second;

		// Debug output: render the full projection and the consensus set.
		Mat outimgConsensus = img.clone(); // TODO 2013 changed
		drawFull3DMMProjection(outimgConsensus, consensusSetLmVertPairAtOrigin.verticesMean, consensusSetLmVertPairAtOrigin.landmarksMean, sc, tc, mm);
		vector<pair<string, Point2f> > pointsInImageC = projectVerticesToImage(modelPointsToRender, consensusSetLmVertPairAtOrigin.verticesMean, consensusSetLmVertPairAtOrigin.landmarksMean, sc, tc, mm);
		drawAndPrintLandmarksInImage(outimgConsensus, pointsInImageC);

		// TODO 2013 CHANGED
		drawFfpsSmallSquare(outimgConsensus, consensusSet);		// TODO: Should not call this directly! Make a LogImg... function!
		drawLandmarksText(outimgConsensus, consensusSet);
		imwrite("out\\a_ransac_ConsensusSet.png", outimgConsensus);
		// END

		// this_error := a measure of how well this_model fits these points
		// TODO
		// we could check how many of the 3dmm LMs projected into the model with s and t are inside the actual image. If they are far off, the model is not good. (?)
		// also check this for the three initial points?
		// or better use some kind of matrix/projection/regularisation, that those transformations doesn't exist in the first place...

		// Check for number of points AND some kind of error measure?
		// Keep all consensus sets that tie for the largest size seen so far;
		// a strictly larger set discards all previously kept sets.
		if(bestConsensusSets.empty()) {
			bestConsensusSets.push_back(consensusSet);
		} else {
			if(consensusSet.size()==bestConsensusSets[0].size()) {
				bestConsensusSets.push_back(consensusSet);
			} else if (consensusSet.size()>bestConsensusSets[0].size()) {
				bestConsensusSets.clear();
				bestConsensusSets.push_back(consensusSet);
			}
		}
		
		Loggers->getLogger("shapemodels").debug("Finished one iteration.");
	}

	// Hmm, the bestConsensusSets is not unique? Doubled sets? (print them?) Write a comparator for two landmarks (make a landmark class?), then for two LM-sets, etc.
	// NOTE(review): this loop body is entirely commented out — it is currently a no-op.
	for (unsigned int i=0; i<bestConsensusSets.size(); ++i) {
		// TODO 2013 CHANGED
		//Mat output = img->data_matbgr.clone();
		//drawAndPrintLandmarksInImage(output, bestConsensusSets[i]);
		//Logger->drawFfpsSmallSquare(output, bestConsensusSets[i]);		// TODO: Should not call this directly! Make a LogImg... function! Make these Logger-functions private?
		//drawLandmarksText(output, bestConsensusSets[i]);
		//stringstream sstm;
		//sstm << "out\\a_ransac_ConsensusSet_" << i << ".png";
		//string fn = sstm.str();
		//imwrite(fn, output);
		// END
	}
	// Print every best consensus set to stdout.
	for (unsigned int i=0; i<bestConsensusSets.size(); ++i) {
		for (unsigned int j=0; j<bestConsensusSets[i].size(); ++j) {
			cout << "[" << bestConsensusSets[i][j].first << " " << bestConsensusSets[i][j].second  << "], ";
		}
		cout << endl;
	}

}
// Initialize all your OpenGL objects here
void MainWindow::initialize()
{
    // Initialize important variables and the MVP matrices
    mouseClick = QPoint(0,0);
    eye = QVector3D(0,0,-4);
    center = QVector3D(0,0,0) ;
    up = QVector3D(0,1,0);
    FoV = 60;
    model.setToIdentity();
    view.lookAt(eye, center, up);
    calculateProjection();

    qDebug() << "MainWindow::initialize()";
    QString glVersion;
    glVersion = reinterpret_cast<const char*>(glGetString(GL_VERSION));
    qDebug() << "Using OpenGL" << qPrintable(glVersion);

    // Initialize the shaders
    m_shaderProgram = new QOpenGLShaderProgram(this);
    // Use the ":" to load from the resources files (i.e. from the resources.qrc)
    m_shaderProgram->addShaderFromSourceFile(QOpenGLShader::Vertex, ":/shaders/vertex.glsl");
    m_shaderProgram->addShaderFromSourceFile(QOpenGLShader::Fragment, ":/shaders/fragment.glsl");
    m_shaderProgram->link();


    // Shaders are initialized
    // You can retrieve the locations of the uniforms from here


    // Initialize your objects and buffers
    QVector<QVector3D> objectVectors;
    QVector<QVector3D> generatedColors;

    OBJModel cube = OBJModel(":/models/cube.obj");

    objectVectors = cube.vertices;
    vectorsNumber = objectVectors.length();

    // Calculate the random colours using the color seed
    for(int i = 0; i < vectorsNumber/3; i++){
        float colorArray[3] = {0,0,0};
        for(unsigned int j = 0; j < 3; j++){
            srand(COLOR_SEED*(j+1)*(i+1));
            colorArray[j] = ((double)rand()/RAND_MAX);
        }
        printf("color %d, %f %f %f\n", i, colorArray[0],colorArray[1],colorArray[2] );
        generatedColors.append(QVector3D(colorArray[0], colorArray[1], colorArray[2]));
        generatedColors.append(QVector3D(colorArray[0], colorArray[1], colorArray[2]));
        generatedColors.append(QVector3D(colorArray[0], colorArray[1], colorArray[2]));
    }

    // generate VAO and bind it
    m_funcs->glGenVertexArrays(1, &VAO);
    m_funcs->glBindVertexArray(VAO);

    // generate color and vertice buffers
    m_funcs->glGenBuffers(1, &vertices);
    m_funcs->glGenBuffers(1, &colors);

    // bind vertice buffer and fill it with the vertices
    m_funcs->glBindBuffer(GL_ARRAY_BUFFER, vertices);
    m_funcs->glBufferData(GL_ARRAY_BUFFER, sizeof(QVector3D) * objectVectors.length(),objectVectors.data(), GL_STATIC_DRAW);

    m_funcs->glEnableVertexAttribArray(0);
    m_funcs->glVertexAttribPointer(0,3, GL_FLOAT, GL_FALSE, 0,0);

    // Bind color buffer, fill it with the generated colors
    m_funcs->glBindBuffer(GL_ARRAY_BUFFER, colors);
    m_funcs->glBufferData(GL_ARRAY_BUFFER, sizeof(QVector3D) * generatedColors.length(), generatedColors.data(), GL_STATIC_DRAW);

    m_funcs->glEnableVertexAttribArray(1);
    m_funcs->glVertexAttribPointer(1,3, GL_FLOAT, GL_FALSE, 0,0);

    // Create your Vertex Array Object (VAO) and Vertex Buffer Objects (VBO) here.



    // Set OpenGL to use Filled triangles (can be used to render points, or lines)
    m_funcs->glPolygonMode( GL_FRONT_AND_BACK, GL_FILL );

    // Enable Z-buffering
    glEnable(GL_DEPTH_TEST);
    glDepthFunc(GL_LESS);
    qDebug() << "Depth Buffer size:" <<  this->format().depthBufferSize() << "bits";

    // Function for culling, removing faces which don't face the camera
    //glEnable(GL_CULL_FACE);
    //glCullFace(GL_BACK);

    // Set the clear color to be black (color used for resetting the screen)
    glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
}