float CalculateError(const Mat_<float>& ground_truth_shape, const Mat_<float>& predicted_shape) {
    Mat_<float> temp;
    //temp = ground_truth_shape.rowRange(36, 37)-ground_truth_shape.rowRange(45, 46);
    temp = ground_truth_shape.rowRange(1, 2) - ground_truth_shape.rowRange(6, 7);
    float x = mean(temp.col(0))[0];
    float y = mean(temp.col(1))[0];   // mean() of a single-channel Mat stores its result in Scalar[0]
    float interocular_distance = sqrt(x*x + y*y);

    float sum = 0;
    for (int i = 0; i < ground_truth_shape.rows; i++) {
        sum += norm(ground_truth_shape.row(i) - predicted_shape.row(i));
    }
    return sum / (ground_truth_shape.rows*interocular_distance);
}
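// Example (not from the original source): a minimal, hypothetical call to CalculateError.
// The shapes are Nx2 matrices of (x, y) landmark coordinates; rows 1 and 6 are used as the
// inter-ocular reference, so N must be at least 7. Values below are placeholders and the
// snippet assumes the same OpenCV headers and using-declarations as the surrounding code.
static float ExampleCalculateError()
{
    Mat_<float> gt(7, 2, 0.0f), pred(7, 2, 0.0f);
    gt(1, 0) = 100; gt(1, 1) = 120;   // "left eye" reference landmark
    gt(6, 0) = 160; gt(6, 1) = 120;   // "right eye" reference landmark
    pred = gt.clone();
    pred(0, 0) += 2.0f;               // perturb one predicted landmark
    return CalculateError(gt, pred);  // mean landmark error / inter-ocular distance
}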
// MARK: implementation not complete yet.
bool SimpleNN::train(const Mat_<double> &train_X, const Mat_<double> &train_Y, string &err_msg) {
    /*
     Parameters
     ----------
     - Mat_<double> train_X: the training input data. It should be an NxM matrix where N is the
       number of training samples and M is the size of the input layer.
     - Mat_<double> train_Y: the target training output data. It should be an NxO matrix where N is
       the number of training samples and O is the size of the output layer.
     - string err_msg: error message (if any). It will be "" if there is no error.
     */
    int N = train_X.rows;
    if (train_Y.rows != N) {
        err_msg = "the number of training input samples does not match the number of output samples.";
        return false;
    }

    srand((unsigned int) time(NULL));
    vector<Mat_<double> > deltas(this->weights.size());

    for (int iter_index = 0; iter_index < this->num_iteration; ++iter_index) {
        if (iter_index % 100 == 0 && iter_index > 0) {
            cout << "Iteration " << iter_index << " of " << this->num_iteration << endl;
        }

        // back-propagation with stochastic gradient descent.
        int rand_index = rand() % N;
        Mat_<double> one_sample = train_X.row(rand_index).reshape(0, 1); // make it a 1xM row vector.
        Mat_<double> one_target = train_Y.row(rand_index).reshape(0, 1);

        Mat_<double> output;
        if (!this->predict(one_sample, output, err_msg)) { // write into the err_msg parameter, do not shadow it
            cerr << err_msg << endl;
            return false;
        } else {
            deltas[this->weights.size() - 1] = 2.0 * (one_target - output);
        }
    }
    err_msg = "";
    return true;
}
//==============================================================================
vector<uchar> ANN::predict(Mat_<float> &testData) {
    int numberOfSamples = testData.rows;
    Mat_<float> classifResult(1, this->numberOfClasses);
    vector<uchar> predictedLabels(numberOfSamples);

    for (int i = 0; i < numberOfSamples; i++) {
        nnetwork->predict(testData.row(i), classifResult);
#if DEBUG
        cout << classifResult << endl;
#endif
        // int max = 0;
        // int maxI = 0;
        // for (int j = 0; j < this->numberOfClasses; ++j) {
        //     if (classifResult(0, j) > max) {
        //         max = classifResult(0, j);
        //         maxI = j;
        //     }
        // }
        Point2i max_loc;
        minMaxLoc(classifResult, 0, 0, 0, &max_loc);

        // add row into predictions
        predictions.push_back(classifResult);
        //predictedLabels.push_back(maxI);
        predictedLabels[i] = static_cast<unsigned char>(max_loc.x);
    }

    predictLabels.insert(predictLabels.end(), predictedLabels.begin(), predictedLabels.end());
    return predictLabels;
}
static Matx31d NLtriangulation(Point2f P1, Point2f P2, Matx34f M1, Matx34f M2)
{
    // build the homogeneous linear system A*X = 0 from the two projections
    Matx44d A(P1.x*M1(2,0)-M1(0,0), P1.x*M1(2,1)-M1(0,1), P1.x*M1(2,2)-M1(0,2), P1.x*M1(2,3)-M1(0,3),
              P1.y*M1(2,0)-M1(1,0), P1.y*M1(2,1)-M1(1,1), P1.y*M1(2,2)-M1(1,2), P1.y*M1(2,3)-M1(1,3),
              P2.x*M2(2,0)-M2(0,0), P2.x*M2(2,1)-M2(0,1), P2.x*M2(2,2)-M2(0,2), P2.x*M2(2,3)-M2(0,3),
              P2.y*M2(2,0)-M2(1,0), P2.y*M2(2,1)-M2(1,1), P2.y*M2(2,2)-M2(1,2), P2.y*M2(2,3)-M2(1,3));

    // the solution is the right singular vector of the smallest singular value
    SVD svd(A, cv::SVD::MODIFY_A);
    Mat_<double> vt = svd.vt;
    Mat_<double> t = vt.row(3);
    t = t / t(3);   // de-homogenize

    Matx31d XX(t(0), t(1), t(2));
    return XX;
}
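// Example (not from the original source): a minimal, hypothetical two-view setup for
// NLtriangulation. M1 = K[I|0], M2 = K[I|t] with a pure horizontal baseline; the
// intrinsics, baseline and matched points are placeholder values, and the snippet
// assumes the same OpenCV headers and using-declarations as the surrounding code.
static Matx31d ExampleTriangulate()
{
    float fx = 800.f, fy = 800.f, cx = 320.f, cy = 240.f, baseline = 0.1f;
    Matx34f M1(fx, 0, cx, 0,
               0, fy, cy, 0,
               0,  0,  1, 0);                       // K [I | 0]
    Matx34f M2(fx, 0, cx, -fx * baseline,
               0, fy, cy, 0,
               0,  0,  1, 0);                       // K [I | t], t = (-baseline, 0, 0)
    Point2f p1(330.f, 245.f), p2(310.f, 244.f);     // matched image points
    return NLtriangulation(p1, p2, M1, M2);         // 3D point in the first camera frame
}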
/**
 * @author      JIA Pei
 * @version     2010-02-05
 * @brief       Calculate statistics for all profiles; compute all landmarks' mean profiles and covariance matrices
 * @return      void
 */
void VO_ASMNDProfiles::VO_CalcStatistics4AllProfiles()
{
    // Calculate the inverse of Sg
    for(unsigned int i = 0; i < this->m_iNbOfPyramidLevels; i++)
    {
        Mat_<float> allProfiles = Mat_<float>::zeros(this->m_iNbOfSamples,
            this->m_vvvNormalizedProfiles[0][i][0].GetProfileLength());
        Mat_<float> Covar = Mat_<float>::zeros(this->m_vvvNormalizedProfiles[0][i][0].GetProfileLength(),
            this->m_vvvNormalizedProfiles[0][i][0].GetProfileLength());
        Mat_<float> Mean = Mat_<float>::zeros(1, this->m_vvvNormalizedProfiles[0][i][0].GetProfileLength());

        for(unsigned int j = 0; j < this->m_iNbOfPoints; j++)
        {
            for(unsigned int k = 0; k < this->m_iNbOfProfileDim; k++)
            {
                for(unsigned int l = 0; l < this->m_iNbOfSamples; l++)
                {
                    Mat_<float> tmpRow = allProfiles.row(l);
                    Mat_<float> tmp = this->m_vvvNormalizedProfiles[l][i][j].GetTheProfile().col(k).t();
                    tmp.copyTo(tmpRow);
                }

                // OK! Now we calculate the covariance matrix of the profiles for landmark j
                cv::calcCovarMatrix(allProfiles, Covar, Mean,
                                    CV_COVAR_NORMAL + CV_COVAR_ROWS + CV_COVAR_SCALE, CV_32F);
                // cv::calcCovarMatrix( allProfiles, Covar, Mean, CV_COVAR_SCRAMBLED+CV_COVAR_ROWS, CV_32F);

                this->m_vvMeanNormalizedProfile[i][j].Set1DimProfile(Mean.t(), k);

                // Explained by YAO Wei, 2008-1-29.
                // The covariance matrix is positive semi-definite, but it may not be
                // invertible (since C = X.t() * X), so use the SVD-based pseudo-inverse.
                cv::invert(Covar, this->m_vvvCVMInverseOfSg[i][j][k], DECOMP_SVD);
            }
        }
    }
}
void KNN::Compute_Centroid_(Mat_<double>& X, Mat_<double>& Centroid, Mat_<int>& Index, Scalar& K)
{
    Scalar _Counter_ = 0;
    Mat_<double> _Sum_(1, X.cols);

    for (int _i_ = 0; _i_ < K[0]; _i_++) {
        Initialize_Zero_(_Sum_);
        _Counter_ = 0;

        // accumulate all samples currently assigned to centroid _i_
        for (int _j_ = 0; _j_ < Index.rows; _j_++) {
            if (Index(_j_, 0) == _i_) {
                add(_Sum_, X.row(_j_), _Sum_);
                _Counter_[0]++;
            }
        }

        divide(_Sum_, _Counter_[0], Centroid.row(_i_));
    }
}
void KNN::Compute_Closest_(Mat_<double>& X, Mat_<double>& Centroids, Mat_<int>& Indices, Distance type)
{
    Scalar _Number_Of_Centroids_ = Centroids.rows;
    Mat_<int> _Index_(X.rows, 1);
    Mat_<double> _temp_((int)_Number_Of_Centroids_[0], 1);
    Mat_<double> _Mult_temp_(1, X.cols);
    double _min_, _max_;
    Point _minLoc_;

    for (int _i_ = 0; _i_ < X.rows; _i_++) {
        // squared Euclidean distance from sample _i_ to every centroid
        for (int _j_ = 0; _j_ < _Number_Of_Centroids_[0]; _j_++) {
            multiply(X.row(_i_) - Centroids.row(_j_), X.row(_i_) - Centroids.row(_j_), _Mult_temp_);
            _temp_(_j_, 0) = sum(_Mult_temp_)[0];
        }
        // index of the closest centroid
        minMaxLoc(_temp_, &_min_, &_max_, &_minLoc_);
        Indices(_i_, 0) = _minLoc_.y;
    }
}
//==============================================================================
vector<uchar> ANN::predictMouth(Mat_<float> &testData) {
    int numberOfSamples = testData.rows;
    Mat_<float> classifResult(1, (int)MOUTH_CLASSES);
    vector<uchar> predictedLabels(numberOfSamples);

    for (int i = 0; i < numberOfSamples; i++) {
        nnetwork->predict(testData.row(i), classifResult);

        Point2i max_loc;
        minMaxLoc(classifResult, 0, 0, 0, &max_loc);

        // add row into predictions
        predictions.push_back(classifResult);
        predictedLabels[i] = static_cast<unsigned char>(max_loc.x) + 2;
    }

    predictLabels.insert(predictLabels.end(), predictedLabels.begin(), predictedLabels.end());
    return predictedLabels;
}
float TrainableStatModel::leaveOneOutCrossValidation(const Mat_<float> &samples, const Mat_<int> &classes) {
    int correctResults = 0;
    Mat_<int> sampleIdx(samples.rows - 1, 1);

    // start by training on samples 1..n-1, leaving sample 0 out
    for (int i = 1; i < samples.rows; i++) {
        sampleIdx(i - 1, 0) = i;
    }

    for (int i = 0; i < samples.rows; i++) {
        this->clear();
        this->train(samples, classes, sampleIdx);

        int actual = (int)this->predict(samples.row(i));
        if (actual == classes(i, 0)) {
            correctResults++;
        }
        cout << "actual = " << actual << ", expected = " << classes(i, 0) << endl;

        // put sample i back into the training set; the next iteration then leaves sample i+1 out
        if (i < samples.rows - 1) {
            sampleIdx(i, 0) = i;
        }
    }
    return (float)correctResults / (float)samples.rows;
}
void NNLSOptimizer::estimateIdentityParameters(const vector<vector<Point2f> > &featurePointsVector,
        const Mat &cameraMatrix, const Mat &lensDist, Face *face_ptr,
        const vector<vector<int> > &point_indices_vector,
        const vector<Mat> &rotation, const vector<Mat> &translation,
        const vector<vector<double> > &weights_ex, vector<double> &weights_id)
{
    Mat_<double> weakCamera(2,3);
    //matrices for A_id * w_id = f
    Mat_<double> A_id, seg_A_id;
    //matrix Z = kron(weights, Identity_matrix)
    Mat_<double> Z_id;
    Mat_<double> ZU;
    Mat_<double> pr, Pt;
    //featurePoints converted to 2x1 matrices
    Mat_<double> fi;
    Mat_<double> f;
    int index = 0;
    //total number of feature points
    int featurePointNum = 0;

    const int exr_size = model->getExpSize();
    const int id_size = model->getIdSize();

    double *w_id = new double[id_size];
    double *w_exp = new double[exr_size];

    Mat_<double> rmatrix;
    Mat_<double> x_t(1,exr_size);
    Mat_<double> y_t(1,id_size);
    Mat_<double> x(exr_size,1);
    Mat_<double> y(id_size,1);
    //used for x_t*U_ex and y_t*U_id
    Mat_<double> lin_comb_x(exr_size,1);

    double Z_avg;
    double average_depth;
    Mat_<double> proP;
    Mat_<double> pM(3,1);

    //get weights from the current face instance
    face_ptr->getWeights(w_id,id_size,w_exp,exr_size);

    /***************************************************************/
    /*create weak-perspective camera matrix from camera matrix*/
    /***************************************************************/
    for(int i=0;i<2;i++)
        for(int j=0;j<3;j++)
            weakCamera(i,j) = cameraMatrix.at<double>(i,j);

    for(unsigned int i=0;i<featurePointsVector.size();i++)
        featurePointNum += featurePointsVector[i].size();

    f = Mat_<double>(featurePointNum*2,1);
    fi = Mat_<double>(2,1);

    //preprocess points and core tensor rows
    //for points subtract translation too
    for(unsigned int i=0, count=0;i<featurePointsVector.size();i++)
    {
        //precompute translation
        Pt = (1./translation[i].at<double>(2,0)) * (weakCamera*translation[i]);

        for(unsigned int j=0;j<featurePointsVector[i].size();j++)
        {
            fi = Mat_<double>(2,1);
            fi(0,0) = featurePointsVector[i][j].x;
            fi(1,0) = featurePointsVector[i][j].y;
            fi = fi - Pt;
            f.row(count + 2*j) = fi.row(0) + 0.0;
            f.row(count + 2*j+1) = fi.row(1) + 0.0;
        }
        count += 2*featurePointsVector[i].size();
    }

    A_id = Mat_<double>(2*featurePointNum,id_size);
    seg_A_id = Mat_<double>(2,id_size);

    average_depth = face_ptr->getAverageDepth();
    pM(0,0) = 1;
    pM(1,0) = 1;
    pM(2,0) = average_depth;

    for(unsigned int i=0, count = 0;i<point_indices_vector.size();++i)
    {
        Rodrigues(rotation[i],rmatrix);

        proP = cameraMatrix*rmatrix*pM;
        proP = proP + cameraMatrix*translation[i];
        Z_avg = proP(2,0);

        pr = (1.0/Z_avg)*weakCamera*rmatrix;

        //load the appropriate weights for the i-th frame
        for(unsigned int k=0;k<weights_ex[i].size();k++)
            x_t(0,k) = weights_ex[i][k];

        lin_comb_x = (x_t*u_ex).t();
        Z_id = Matrix::kronecker(Mat_<double>::eye(id_size,id_size),lin_comb_x);
        ZU = Z_id*(u_id.t());

        for(unsigned int j=0;j<point_indices_vector[i].size();++j)
        {
            index = point_indices_vector[i][j];
            seg_A_id = pr*M[index]*ZU;
            A_id.row(count + 2*j) = seg_A_id.row(0) + 0.0;
            A_id.row(count + 2*j+1) = seg_A_id.row(1) + 0.0;
        }
        count += 2*point_indices_vector[i].size();
    }

    //do not use the guess in the first frame but do for every following frame
    for(int i=0;i<id_size;i++)
        y_t(0,i) = y(i,0) = w_id[i];

    // Mat_<double> temp = (A_id*y - f);
    // Mat_<double> e = temp.t()*temp;
    // cout << "id, error before " << e(0,0) << endl;

    //optimize using sequential coordinate descent
    this->scannls(A_id,f,y);

    // temp = (A_id*y - f);
    // e = temp.t()*temp;
    // cout << "id, error after " << e(0,0) << endl;

    for(int i=0;i<id_size;i++){
        w_id[i] = y(i,0);
    }
    for(int i=0;i<exr_size;i++){
        w_exp[i] = weights_ex[0][i];
    }
    for(int i=0;i<id_size;i++){
        weights_id.push_back(w_id[i]);
    }

    face_ptr->setNewIdentityAndExpression(w_id,w_exp);
    face_ptr->setAverageDepth(average_depth);

    ZU.release();
    Z_id.release();
    A_id.release();
    f.release();

    delete[] w_id;
    delete[] w_exp;
}
int MultipleGraphsClassifier::predict(DisjointSetForest &segmentation, const Mat_<Vec3b> &image, const Mat_<float> &mask) {
    int maxGraphSize = max(segmentation.getNumberOfComponents(), this->maxTrainingGraphSize);
    int minGraphSize = min(segmentation.getNumberOfComponents(), this->minTrainingGraphSize);

    if (minGraphSize <= this->k) {
        cout<<"the smallest graph has size "<<minGraphSize<<", cannot compute "<<this->k<<" eigenvectors"<<endl;
        exit(EXIT_FAILURE);
    }

    // compute each feature graph for the test sample
    vector<WeightedGraph> featureGraphs;
    featureGraphs.reserve(this->features.size());

    for (int i = 0; i < (int)this->features.size(); i++) {
        featureGraphs.push_back(this->computeFeatureGraph(i, segmentation, image, mask));
    }

    // order graphs by feature, to compute pattern vectors feature by feature.
    vector<vector<WeightedGraph> > graphsByFeatures(this->features.size());

    // add feature graphs for the test sample at index 0
    for (int i = 0; i < (int)this->features.size(); i++) {
        graphsByFeatures[i].reserve(this->trainingFeatureGraphs.size() + 1);
        graphsByFeatures[i].push_back(featureGraphs[i]);
    }

    // add feature graphs for each training sample
    for (int i = 0; i < (int)this->trainingFeatureGraphs.size(); i++) {
        for (int j = 0; j < (int)this->features.size(); j++) {
            graphsByFeatures[j].push_back(get<0>(this->trainingFeatureGraphs[i])[j]);
        }
    }

    // compute the corresponding pattern vectors
    vector<vector<VectorXd> > patternsByFeatures(this->features.size());

    for (int i = 0; i < (int)this->features.size(); i++) {
        patternsByFeatures[i] = patternVectors(graphsByFeatures[i], this->k, maxGraphSize);
    }

    // concatenate pattern vectors by image, converting to opencv format
    // to work with cv's ML module.
    Mat_<float> longPatterns = Mat_<float>::zeros(this->trainingFeatureGraphs.size() + 1, maxGraphSize * k * this->features.size());

    for (int i = 0; i < (int)this->trainingFeatureGraphs.size() + 1; i++) {
        VectorXd longPattern(maxGraphSize * k * this->features.size());

        for (int j = 0; j < (int)this->features.size(); j++) {
            longPattern.block(j * maxGraphSize * k, 0, maxGraphSize * k, 1) = patternsByFeatures[j][i];
        }

        Mat_<double> cvLongPattern;
        eigenToCv(longPattern, cvLongPattern);
        Mat_<float> castLongPattern = Mat_<float>(cvLongPattern);
        castLongPattern.copyTo(longPatterns.row(i));
    }

    // classification of long patterns using k nearest neighbors
    CvKNearest knnClassifier;
    Mat_<int> classes(this->trainingFeatureGraphs.size(), 1);

    for (int i = 0; i < (int)this->trainingFeatureGraphs.size(); i++) {
        classes(i,0) = get<1>(this->trainingFeatureGraphs[i]);
    }

    knnClassifier.train(longPatterns.rowRange(1, longPatterns.rows), classes);

    return (int)knnClassifier.find_nearest(longPatterns.row(0), 1);
}
void NNLSOptimizer::estimateExpressionParameters(const vector<Point2f> &featurePoints,
        const Mat &cameraMatrix, const Mat &lensDist, Face *face_ptr,
        const vector<int> &point_indices, const Mat &rotation, const Mat &translation,
        vector<double> &weights_ex)
{
    Mat_<double> weakCamera(2,3);
    //matrices for A_ex * w_ex = f and A_id * w_id = f
    Mat_<double> A_ex, seg_A_ex;
    //matrix Z = kron(weights, Identity_matrix)
    Mat_<double> Z_ex;
    Mat_<double> ZU;
    Mat_<double> pr, Pt;
    //core tensor rows
    Mat_<double> Mi;
    //featurePoints converted to 2x1 matrices
    Mat_<double> fi;
    Mat_<double> f;
    int index = 0;

    const int exr_size = model->getExpSize();
    const int id_size = model->getIdSize();

    double *w_id = new double[id_size];
    double *w_exp = new double[exr_size];

    Mat_<double> rmatrix;
    Mat_<double> x_t(1,exr_size);
    Mat_<double> y_t(1,id_size);
    Mat_<double> x(exr_size,1);
    Mat_<double> y(id_size,1);
    //used for x_t*U_ex and y_t*U_id
    Mat_<double> lin_comb_x(exr_size,1);
    Mat_<double> lin_comb_y(id_size,1);

    double Z_avg;
    double average_depth;
    Point3f p;
    Mat_<double> proP;
    Mat_<double> pM(3,1);

    Rodrigues(rotation,rmatrix);

    //get weights from the current face instance
    face_ptr->getWeights(w_id,id_size,w_exp,exr_size);

    //find average depth
    average_depth = face_ptr->getAverageDepth();
    pM(0,0) = 1;
    pM(1,0) = 1;
    pM(2,0) = average_depth;

    proP = cameraMatrix*rmatrix*pM;
    proP = proP + cameraMatrix*translation;
    Z_avg = proP(2,0);

    /***************************************************************/
    /*create weak-perspective camera matrix from camera matrix*/
    /***************************************************************/
    for(int i=0;i<2;i++)
        for(int j=0;j<3;j++)
            weakCamera(i,j) = cameraMatrix.at<double>(i,j);

    f = Mat_<double>(featurePoints.size()*2,1);
    fi = Mat_<double>(2,1);

    //precompute translation
    Pt = (1./translation.at<double>(2,0)) * (weakCamera*translation);

    //preprocess points and core tensor rows
    //for points subtract translation too
    for(unsigned int i=0;i<featurePoints.size();i++)
    {
        fi = Mat_<double>(2,1);
        fi(0,0) = featurePoints[i].x;
        fi(1,0) = featurePoints[i].y;
        fi = fi - Pt;
        f.row(2*i) = fi.row(0) + 0.0;
        f.row(2*i+1) = fi.row(1) + 0.0;
    }

    pr = (1.0/Z_avg)*weakCamera*rmatrix;

    A_ex = Mat_<double>::zeros(2*featurePoints.size(),exr_size);
    seg_A_ex = Mat_<double>::zeros(2*featurePoints.size(),exr_size);

    for(int i=0;i<id_size;i++)
        y_t(0,i) = w_id[i];

    lin_comb_y = (y_t*u_id).t();
    Z_ex = Matrix::kronecker(lin_comb_y,Mat_<double>::eye(exr_size,exr_size));
    ZU = Z_ex*(u_ex.t());

    for(unsigned int i=0;i<point_indices.size();++i)
    {
        index = point_indices[i];
        seg_A_ex = pr*M[index]*ZU;
        A_ex.row(2*i) = seg_A_ex.row(0) + 0.0;
        A_ex.row(2*i+1) = seg_A_ex.row(1) + 0.0;
    }

    //do not use the guess in the first frame but do for every following frame
    for(int i=0;i<exr_size;i++)
        x_t(0,i) = x(i,0) = w_exp[i];

    // Mat_<double> temp = (A_ex*x - f);
    // Mat_<double> e = temp.t()*temp;
    // cout << "exp, error before " << e(0,0) << endl;

    //optimize using sequential coordinate descent
    this->scannls(A_ex,f,x);

    // temp = (A_ex*x - f);
    // e = temp.t()*temp;
    // cout << "exp, error after " << e(0,0) << endl;

    for(int i=0;i<exr_size;i++){
        w_exp[i] = x(i,0);
    }
    for(int i=0;i<exr_size;i++){
        weights_ex.push_back(w_exp[i]);
    }

    face_ptr->setNewIdentityAndExpression(w_id,w_exp);
    //unnecessary, it seems
    face_ptr->setAverageDepth(average_depth);

    ZU.release();
    Z_ex.release();
    A_ex.release();
    f.release();

    delete[] w_id;
    delete[] w_exp;
}
// Need to move this all to opengl
void DrawBox(Mat image, Vec6d pose, Scalar color, int thickness, float fx, float fy, float cx, float cy)
{
    float boxVerts[] = {-1,  1, -1,
                         1,  1, -1,
                         1,  1,  1,
                        -1,  1,  1,
                         1, -1,  1,
                         1, -1, -1,
                        -1, -1, -1,
                        -1, -1,  1};
    Mat_<float> box = Mat(8, 3, CV_32F, boxVerts).clone() * 100;

    Matx33f rot = Euler2RotMat(Vec3d(pose[3], pose[4], pose[5]));
    Mat_<float> rotBox;

    Mat((Mat(rot) * box.t())).copyTo(rotBox);
    rotBox = rotBox.t();

    rotBox.col(0) = rotBox.col(0) + pose[0];
    rotBox.col(1) = rotBox.col(1) + pose[1];
    rotBox.col(2) = rotBox.col(2) + pose[2];

    // draw the lines
    Mat_<float> rotBoxProj;
    Project(rotBoxProj, rotBox, image.size(), fx, fy, cx, cy);

    // vertex index pairs for the twelve edges of the box
    int edges[12][2] = {{0,1},{1,2},{2,3},{0,3},{2,4},{1,5},{0,6},{3,7},{6,5},{5,4},{4,7},{7,6}};

    Mat begin;
    Mat end;
    for (int i = 0; i < 12; ++i)
    {
        rotBoxProj.row(edges[i][0]).copyTo(begin);
        rotBoxProj.row(edges[i][1]).copyTo(end);
        cv::line(image,
                 Point((int)begin.at<float>(0), (int)begin.at<float>(1)),
                 Point((int)end.at<float>(0), (int)end.at<float>(1)),
                 color, thickness);
    }
}
static int initialize_simplex(Mat_<double>& c, Mat_<double>& b, double& v,
        vector<int>& N, vector<int>& B, vector<unsigned int>& indexToRow){
    N.resize(c.cols);
    N[0]=0;
    for (std::vector<int>::iterator it = N.begin()+1 ; it != N.end(); ++it){
        *it=it[-1]+1;
    }
    B.resize(b.rows);
    B[0]=(int)N.size();
    for (std::vector<int>::iterator it = B.begin()+1 ; it != B.end(); ++it){
        *it=it[-1]+1;
    }
    indexToRow.resize(c.cols+b.rows);
    indexToRow[0]=0;
    for (std::vector<unsigned int>::iterator it = indexToRow.begin()+1 ; it != indexToRow.end(); ++it){
        *it=it[-1]+1;
    }
    v=0;

    int k=0;
    {
        double min=DBL_MAX;
        for(int i=0;i<b.rows;i++){
            if(b(i,b.cols-1)<min){
                min=b(i,b.cols-1);
                k=i;
            }
        }
    }

    if(b(k,b.cols-1)>=0){
        N.erase(N.begin());
        for (std::vector<unsigned int>::iterator it = indexToRow.begin()+1 ; it != indexToRow.end(); ++it){
            --(*it);
        }
        return 0;
    }

    Mat_<double> old_c=c.clone();
    c=0;
    c(0,0)=-1;
    for(int i=0;i<b.rows;i++){
        b(i,0)=-1;
    }

    print_simplex_state(c,b,v,N,B);

    dprintf(("\tWE MAKE PIVOT\n"));
    pivot(c,b,v,N,B,k,0,indexToRow);

    print_simplex_state(c,b,v,N,B);

    inner_simplex(c,b,v,N,B,indexToRow);

    dprintf(("\tAFTER INNER_SIMPLEX\n"));
    print_simplex_state(c,b,v,N,B);

    unsigned int nsize = (unsigned int)N.size();
    if(indexToRow[0]>=nsize){
        int iterator_offset=indexToRow[0]-nsize;
        if(b(iterator_offset,b.cols-1)>0){
            return SOLVELP_UNFEASIBLE;
        }
        pivot(c,b,v,N,B,iterator_offset,0,indexToRow);
    }

    vector<int>::iterator iterator;
    {
        int iterator_offset=indexToRow[0];
        iterator=N.begin()+iterator_offset;
        std::iter_swap(iterator,N.begin());
        SWAP(int,indexToRow[*iterator],indexToRow[0]);
        swap_columns(c,iterator_offset,0);
        swap_columns(b,iterator_offset,0);
    }

    dprintf(("after swaps\n"));
    print_simplex_state(c,b,v,N,B);

    //start from 1, because we ignore x_0
    c=0;
    v=0;
    for(int I=1;I<old_c.cols;I++){
        if(indexToRow[I]<nsize){
            dprintf(("I=%d from nonbasic\n",I));
            int iterator_offset=indexToRow[I];
            c(0,iterator_offset)+=old_c(0,I);
            print_matrix(c);
        }else{
            dprintf(("I=%d from basic\n",I));
            int iterator_offset=indexToRow[I]-nsize;
            c-=old_c(0,I)*b.row(iterator_offset).colRange(0,b.cols-1);
            v+=old_c(0,I)*b(iterator_offset,b.cols-1);
            print_matrix(c);
        }
    }

    dprintf(("after restore\n"));
    print_simplex_state(c,b,v,N,B);

    N.erase(N.begin());
    for (std::vector<unsigned int>::iterator it = indexToRow.begin()+1 ; it != indexToRow.end(); ++it){
        --(*it);
    }
    return 0;
}
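// Example (not from the original source): initialize_simplex is the phase-one step of a
// simplex LP solver; if this is the implementation behind cv::solveLP, that public entry
// point is the intended way to call it. A minimal sketch, maximizing 3x + y + 2z subject to
// three inequality constraints given as [A | b] rows; values are placeholders and the
// snippet assumes the same OpenCV headers and using-declarations as the surrounding code.
static void ExampleSolveLP()
{
    Mat_<double> func = (Mat_<double>(1, 3) << 3, 1, 2);          // objective coefficients
    Mat_<double> constr = (Mat_<double>(3, 4) << 1, 1, 3, 30,
                                                 2, 2, 5, 24,
                                                 4, 1, 2, 36);    // Ax <= b, last column is b
    Mat_<double> z;
    int res = cv::solveLP(func, constr, z);   // res: SOLVELP_SINGLE, SOLVELP_MULTI, SOLVELP_UNFEASIBLE, ...
    cout << "status " << res << ", argmax " << z << endl;
}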
void locallyLinearEmbeddings(const Mat_<float> &samples, int outDim, Mat_<float> &embeddings, int k) {
    assert(outDim < samples.cols);
    assert(k >= 1);
    Mat_<int> nearestNeighbors(samples.rows, k);

    // determining k nearest neighbors for each sample
    flann::Index flannIndex(samples, flann::LinearIndexParams());

    for (int i = 0; i < samples.rows; i++) {
        Mat_<int> nearest;
        Mat dists;

        flannIndex.knnSearch(samples.row(i), nearest, dists, k + 1);
        nearest.colRange(1, nearest.cols).copyTo(nearestNeighbors.row(i));
    }

    // determining weights for each sample
    vector<Triplet<double> > tripletList;
    tripletList.reserve(samples.rows * k);

    for (int i = 0; i < samples.rows; i++) {
        Mat_<double> C(k,k);

        for (int u = 0; u < k; u++) {
            for (int v = u; v < k; v++) {
                C(u,v) = (samples.row(i) - samples.row(nearestNeighbors(i,u))).dot(samples.row(i) - samples.row(nearestNeighbors(i,v)));
                C(v,u) = C(u,v);
            }
        }

        // regularize when the number of neighbors is greater than the input
        // dimension
        if (k > samples.cols) {
            C = C + Mat_<double>::eye(k,k) * 10E-3 * trace(C)[0];
        }

        Map<MatrixXd,RowMajor> eigC((double*)C.data, C.rows, C.cols);
        LDLT<MatrixXd> solver(eigC);

        Mat_<double> weights(k,1);
        Map<MatrixXd,RowMajor> eigWeights((double*)weights.data, weights.rows, weights.cols);

        eigWeights = solver.solve(MatrixXd::Ones(k,1));

        Mat_<double> normWeights;
        double weightsSum = sum(weights)[0];

        if (weightsSum == 0) {
            cout<<"error: cannot reconstruct point "<<i<<" from its neighbors"<<endl;
            exit(EXIT_FAILURE);
        }

        normWeights = weights / weightsSum;

        for (int j = 0; j < k; j++) {
            tripletList.push_back(Triplet<double>(nearestNeighbors(i,j), i, normWeights(j)));
        }
    }

    SparseMatrix<double> W(samples.rows, samples.rows);
    W.setFromTriplets(tripletList.begin(), tripletList.end());

    // constructing vectors in lower dimensional space from the weights
    VectorXd eigenvalues;
    MatrixXd eigenvectors;
    LLEMult mult(&W);

    symmetricSparseEigenSolver(samples.rows, "SM", outDim + 1, samples.rows, eigenvalues, eigenvectors, mult);
    embeddings = Mat_<double>(samples.rows, outDim);

    if (DEBUG_LLE) {
        cout<<"actual eigenvalues : "<<eigenvalues<<endl;
        cout<<"actual : "<<endl<<eigenvectors<<endl;

        MatrixXd denseW(W);
        MatrixXd tmp = MatrixXd::Identity(W.rows(), W.cols()) - denseW;
        MatrixXd M = tmp.transpose() * tmp;
        SelfAdjointEigenSolver<MatrixXd> eigenSolver(M);
        MatrixXd expectedEigenvectors = eigenSolver.eigenvectors();

        cout<<"expected eigenvalues : "<<eigenSolver.eigenvalues()<<endl;
        cout<<"expected : "<<endl<<expectedEigenvectors<<endl;
    }

    for (int i = 0; i < samples.rows; i++) {
        for (int j = 0; j < outDim; j++) {
            embeddings(i,j) = eigenvectors(i, j + 1);
        }
    }
}
/**
 * @param avgSParam         - input mean shape parameters
 * @param icovSParam        - input covariance matrix of shape parameters
 * @param avgTParam         - input mean texture parameters
 * @param icovTParam        - input covariance matrix of texture parameters
 * @param iSParams          - input the vector of multiple input shape parameters
 * @param iTParams          - input the vector of multiple input texture parameters
 * @param ShapeDistMean     - input mean of the shape parameter distance
 * @param ShapeDistStddev   - input standard deviation of the shape parameter distance
 * @param TextureDistMean   - input mean of the texture parameter distance
 * @param TextureDistStddev - input standard deviation of the texture parameter distance
 * @param WeakFitting       - input whether only the shape parameter is used
 * @return whether the fitting is acceptable
 */
bool CRecognitionAlgs::CalcFittingEffect4ImageSequence(
    const Mat_<float>& avgSParam,
    const Mat_<float>& icovSParam,
    const Mat_<float>& avgTParam,
    const Mat_<float>& icovTParam,
    const Mat_<float>& iSParams,
    const Mat_<float>& iTParams,
    const Scalar& ShapeDistMean,
    const Scalar& ShapeDistStddev,
    const Scalar& TextureDistMean,
    const Scalar& TextureDistStddev,
    bool WeakFitting )
{
    assert(iSParams.rows == iTParams.rows);
    unsigned int NbOfSamples = iSParams.rows;
    vector<float> sDists, tDists;
    sDists.resize(NbOfSamples);
    tDists.resize(NbOfSamples);

    for(unsigned int i = 0; i < NbOfSamples; ++i)
    {
        CRecognitionAlgs::CalcFittingEffect4StaticImage(
            avgSParam,
            icovSParam,
            avgTParam,
            icovTParam,
            iSParams.row(i),
            iTParams.row(i),
            ShapeDistMean,
            ShapeDistStddev,
            TextureDistMean,
            TextureDistStddev,
            sDists[i],
            tDists[i],
            WeakFitting );
    }

    unsigned int NbOfGood1 = 0;
    unsigned int NbOfGood2 = 0;

    for(unsigned int i = 0; i < NbOfSamples; ++i)
    {
        if( fabs( sDists[i] - ShapeDistMean.val[0] ) < 1.5f * ShapeDistStddev.val[0] )
        {
            NbOfGood1++;
            if( fabs( tDists[i] - TextureDistMean.val[0] ) < 3.0f * TextureDistStddev.val[0] )
            {
                NbOfGood2++;
            }
        }
    }

    if(WeakFitting)
    {
        if(NbOfGood1 >= (unsigned int)(0.75*NbOfSamples) )
            return true;
        else
            return false;
    }
    else
    {
        if(NbOfGood2 >= (unsigned int)(0.75*NbOfGood1) )
            return true;
        else
            return false;
    }
}
void NNLSOptimizer::estimateModelParameters(const vector<Point2f> &featurePoints,
        const Mat &cameraMatrix, const Mat &lensDist, Face *face_ptr,
        const vector<int> &point_indices, const Mat &rotation, const Mat &translation,
        vector<double> &weights_id, vector<double> &weights_ex)
{
    Mat_<double> weakCamera(2,3);
    //matrices for A_ex * w_ex = f and A_id * w_id = f
    Mat_<double> A_ex , A_id;
    //matrix Z = kron(weights, Identity_matrix)
    Mat_<double> Z_ex, Z_id;
    Mat_<double> ZU;
    Mat_<double> pr, Pt, PRM, prm;
    //core tensor rows
    Mat_<double> Mi;
    //featurePoints converted to 2x1 matrices
    Mat_<double> fi;
    Mat_<double> f;
    Mat_<double> O;
    int index = 0;

    const int exr_size = model->getExpSize();
    const int id_size = model->getIdSize();

    double *w_id = new double[id_size];
    double *w_exp = new double[exr_size];

    Mat_<double> rmatrix;
    Mat_<double> x_t(1,exr_size);
    Mat_<double> y_t(1,id_size);
    Mat_<double> x(exr_size,1);
    Mat_<double> y(id_size,1);
    //used for x_t*U_ex and y_t*U_id
    Mat_<double> lin_comb_x(exr_size,1);
    Mat_<double> lin_comb_y(id_size,1);

    double Z_avg;
    double average_depth;
    Point3f p;
    Mat_<double> proP;
    Mat_<double> pM(3,1);

    Rodrigues(rotation,rmatrix);

    //get weights from the current face instance
    face_ptr->getWeights(w_id,id_size,w_exp,exr_size);

    average_depth = face_ptr->getAverageDepth();
    pM(0,0) = 1;
    pM(1,0) = 1;
    pM(2,0) = average_depth;

    proP = cameraMatrix*rmatrix*pM;
    proP = proP + cameraMatrix*translation;
    Z_avg = proP(2,0);

    /***************************************************************/
    /*create weak-perspective camera matrix from camera matrix*/
    /***************************************************************/
    for(int i=0;i<2;i++)
        for(int j=0;j<3;j++)
            weakCamera(i,j) = cameraMatrix.at<double>(i,j);

    f = Mat_<double>(featurePoints.size()*2,1);
    fi = Mat_<double>(2,1);

    //precompute translation
    Pt = (1./translation.at<double>(2,0)) * (weakCamera*translation);

    //preprocess points and core tensor rows
    //for points subtract translation too
    for(unsigned int i=0;i<featurePoints.size();i++)
    {
        fi = Mat_<double>(2,1);
        fi(0,0) = featurePoints[i].x;
        fi(1,0) = featurePoints[i].y;
        fi = fi - Pt;
        f.row(2*i) = fi.row(0) + 0.0;
        f.row(2*i+1) = fi.row(1) + 0.0;
    }

    pr = (1.0/Z_avg)*weakCamera*rmatrix;

    PRM = Mat_<double>(2*featurePoints.size(),exr_size*id_size);
    prm = Mat_<double>(2,exr_size*id_size);

    for(unsigned int i=0;i<point_indices.size();++i)
    {
        index = point_indices[i];
        prm = pr*M[index];
        PRM.row(2*i) = prm.row(0) + 0.0;
        PRM.row(2*i+1) = prm.row(1) + 0.0;
    }

    //do not use the guess in the first frame but do for every following frame
    for(int i=0;i<exr_size;i++)
        x_t(0,i) = x(i,0) = w_exp[i];
    for(int i=0;i<id_size;i++)
        y_t(0,i) = y(i,0) = w_id[i];

    A_ex = Mat_<double>::zeros(2*featurePoints.size(),exr_size);
    A_id = Mat_<double>::zeros(2*featurePoints.size(),id_size);

    //alternate between expression and identity estimation
    for(int count = 0;count < max_iterations; count++)
    {
        //put the guesses into matrix y and x
        y_t = y.t();
        lin_comb_y = (y_t*u_id).t();
        Z_ex = Matrix::kronecker(lin_comb_y,Mat_<double>::eye(exr_size,exr_size));
        ZU = Z_ex*(u_ex.t());
        A_ex = PRM*ZU;

        this->scannls(A_ex,f,x);

        x_t = x.t();
        lin_comb_x = (x_t*u_ex).t();
        Z_id = Matrix::kronecker(Mat_<double>::eye(id_size,id_size),lin_comb_x);
        ZU = Z_id*(u_id.t());

        /* compute the formula :
         * Sum( U*Z'*Mi'*R'*PW'*PW*R*Mi*Z*U' )*y = Sum( U*Z'*Mi'*R'*PW'*fi - (1/tz)*U*Z'*Mi'*R'*PW'*PW'*t )
         */
        A_id = PRM*ZU;

        // Mat_<double> temp = (A_id*y - f);
        // Mat_<double> e = temp.t()*temp;
        // cout << "id, error before " << e(0,0) << endl;

        this->scannls(A_id,f,y);

        // temp = (A_id*y - f);
        // e = temp.t()*temp;
        // cout << "id, error after " << e(0,0) << endl;
    }

    for(int i=0;i<exr_size;i++){
        w_exp[i] = x(i,0);
    }
    for(int i=0;i<id_size;i++){
        w_id[i] = y(i,0);
    }

    //we cannot normalize because then we lose 1/z_avg which is the scale
    // Vector3::normalize(w_exp, exr_size);
    // Vector3::normalize(w_id, id_size);

    for(int i=0;i<exr_size;i++){
        weights_ex.push_back(w_exp[i]);
    }
    for(int i=0;i<id_size;i++){
        weights_id.push_back(w_id[i]);
    }

    face_ptr->setNewIdentityAndExpression(w_id,w_exp);
    face_ptr->setAverageDepth(average_depth);

    prm.release();
    PRM.release();
    ZU.release();
    Z_id.release();
    A_id.release();
    Z_ex.release();
    A_ex.release();
    f.release();

    delete[] w_id;
    delete[] w_exp;
}