Vector solve_laplacian_matrix(const SparseMatrix &h,
                              const Vector &g,
                              bool verbose = false)
{
  size_t N = h.rows();
  assert(N == h.cols());

  auto v = h.diagonal();
  if (v.minCoeff() == 0)
    {
      Eigen::Index i;
      std::cerr << "Error: hessian of Kantorovich's functional "
                << "is not invertible:\n";
      std::cerr << "diag = " << v.head(10).transpose()
                << " ... in [" << v.minCoeff(&i) << ","
                << v.maxCoeff() << "]\n";
      std::cerr << "minCoeff => " << i << "\n";
    }

  // remove last row and column so that the linear system is
  // invertible
  Vector gs = g.head(N-1);
  SparseMatrix hs = h.block(0, 0, N-1, N-1); // top-left submatrix

  Eigen::SimplicialLLT<SparseMatrix> solver(hs);
  Vector ds = solver.solve(gs);

  // if the Cholesky solve failed, fall back to QR decomposition
  double err = (hs * ds - gs).norm();
  if (err > 1e-7) // FIXME: threshold
    {
      std::cerr << "WARNING: in solve_laplacian_matrix: err=" << err << "\n";
#ifdef MA_USE_SUITESPARSE_QR
      std::cerr << "Resorting to QR decomposition\n";
      Eigen::SPQR<SparseMatrix> qr(hs);
      ds = qr.solve(gs);
      if (verbose)
        std::cerr << "rank(h) = " << qr.rank() << "\n";
#endif
    }

  // assemble result; the removed unknown is pinned to zero
  Vector d(N);
  d.head(N-1) = ds;
  d(N-1) = 0;
  return d;
}
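// Usage sketch. Assumed typedefs (not shown above):
//   SparseMatrix = Eigen::SparseMatrix<double>, Vector = Eigen::VectorXd.
// The 3x3 graph Laplacian below is illustrative; its kernel is spanned by
// the constant vector, which is why the function pins the last unknown to
// zero. The right-hand side must sum to zero for the system to be solvable.
#include <iostream>
#include <Eigen/Sparse>

int example_solve_laplacian()
{
  SparseMatrix h(3, 3);
  h.insert(0, 0) =  2; h.insert(0, 1) = -1; h.insert(0, 2) = -1;
  h.insert(1, 0) = -1; h.insert(1, 1) =  2; h.insert(1, 2) = -1;
  h.insert(2, 0) = -1; h.insert(2, 1) = -1; h.insert(2, 2) =  2;
  h.makeCompressed();

  Vector g(3);
  g << 1, -1, 0; // sums to zero, i.e. orthogonal to the kernel

  Vector d = solve_laplacian_matrix(h, g);
  std::cout << "d = " << d.transpose() << "\n"; // expected: 1/3 -1/3 0
  return 0;
}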
template <typename Scalar, int Rows, int Cols>
Scalar condition(Eigen::Matrix<Scalar, Rows, Cols>& matrix, Scalar maxWanted)
{
    // TODO Separate case for self-adjoint matrices, which would be the case for
    // TODO covariance matrices.
    // TODO matrix.selfadjointView<Lower>().eigenvalues();
    auto values = matrix.eigenvalues().cwiseAbs();
    Scalar max = values.maxCoeff();
    Scalar min = values.minCoeff();
    // TODO Should I be using the signed min and max eigenvalues?
    // TODO I'm not sure how to deal generally with complex values if so.
    Scalar condition = max / min;
    if (condition > maxWanted)
    {
        // TODO If maxWanted is (near?) 1, then just set to identity?
        // Shift the whole spectrum by a constant chosen so that
        // (max + bonus) / (min + bonus) == maxWanted.
        Scalar bonus = (max - min * maxWanted) / (maxWanted - 1);
        matrix.diagonal().array() += bonus;
    }
    return condition;
}
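// Usage sketch: shrink the condition number of an ill-conditioned matrix by
// shifting its diagonal. The 2x2 matrix below is illustrative.
#include <iostream>
#include <Eigen/Dense>

int example_condition()
{
    Eigen::Matrix2d m;
    m << 100, 0,
           0, 1; // condition number 100
    double before = condition(m, 10.0); // adds bonus = 10 to the diagonal
    double after  = condition(m, 10.0); // no-op: already at the bound
    std::cout << before << " -> " << after << "\n"; // prints "100 -> 10"
    return 0;
}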
void Mesher::check_feature()
{
    auto contour = get_contour();
    const auto normals = get_normals(contour);

    // Find the largest cone and the normals that enclose
    // the largest angle as n0, n1.
    float theta = 1;
    Vec3f n0, n1;
    for (auto ni : normals)
    {
        for (auto nj : normals)
        {
            float dot = ni.dot(nj);
            if (dot < theta)
            {
                theta = dot;
                n0 = ni;
                n1 = nj;
            }
        }
    }

    // If there isn't a feature in this fan, then return immediately.
    if (theta > 0.9)
        return;

    // Decide whether this is a corner or edge feature.
    const Vec3f nstar = n0.cross(n1);
    float phi = 0;
    for (auto n : normals)
        phi = fmax(phi, fabs(nstar.dot(n)));
    bool edge = phi < 0.7;

    // Find the center of the contour.
    Vec3f center(0, 0, 0);
    for (auto c : contour)
        center += c;
    center /= contour.size();

    // Construct the matrices for use in our least-squares fit.
    Eigen::MatrixX3d A(normals.size(), 3);
    {
        int i = 0;
        for (auto n : normals)
            A.row(i++) << n.transpose().cast<double>();
    }

    // When building the second matrix, shift position values to be centered
    // about the origin (because that's what the least-squares fit will
    // minimize).
    Eigen::VectorXd B(normals.size());
    {
        auto n = normals.begin();
        auto c = contour.begin();
        int i = 0;
        while (n != normals.end())
            B.row(i++) << (n++)->dot(*(c++) - center);
    }

    // Use singular value decomposition to solve the least-squares fit.
    Eigen::JacobiSVD<Eigen::MatrixX3d> svd(A, Eigen::ComputeFullU |
                                              Eigen::ComputeFullV);

    // Raise the threshold so that the smallest singular value is treated as
    // zero, which makes the fitting happier for edge features.
    if (edge)
    {
        auto singular = svd.singularValues();
        svd.setThreshold(singular.minCoeff() / singular.maxCoeff() * 1.01);
    }

    // Solve for the new point's position.
    const Eigen::Vector3d sol = svd.solve(B);
    const Vec3f new_pt = sol.cast<float>() + center;

    // Erase this triangle fan, as we'll be inserting a vertex in the center.
    triangles.erase(fan_start, voxel_start);

    // Construct a new triangle fan.
    contour.push_back(contour.front());
    {
        auto p0 = contour.begin();
        auto p1 = contour.begin();
        p1++;
        while (p1 != contour.end())
            push_swappable_triangle(Triangle(*(p0++), *(p1++), new_pt));
    }
}
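// Standalone sketch of the least-squares fit used in check_feature(),
// isolated from the Mesher context. For an edge feature the normal matrix A
// is (nearly) rank 2, so dropping the smallest singular value makes
// svd.solve() return the minimum-norm point on the edge line, i.e. the point
// on the edge closest to the contour's center. Sample data is illustrative.
#include <Eigen/Dense>

Eigen::Vector3d example_edge_fit()
{
    Eigen::MatrixX3d A(2, 3);
    A << 1, 0, 0,
         0, 1, 0; // two planes meeting along the z axis

    Eigen::VectorXd B(2);
    B << 0.5, -0.25; // plane offsets n_i . (c_i - center)

    Eigen::JacobiSVD<Eigen::MatrixX3d> svd(A, Eigen::ComputeFullU |
                                              Eigen::ComputeFullV);
    return svd.solve(B); // (0.5, -0.25, 0): on the edge, closest to origin
}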
template<class scalar_t, class real_t = typename Eigen::NumTraits<scalar_t>::Real>
LanczosBounds<real_t> minmax_eigenvalues(SparseMatrixX<scalar_t> const& matrix,
                                         double precision_percent) {
    auto const precision = static_cast<real_t>(precision_percent / 100);
    auto const matrix_size = static_cast<int>(matrix.rows());

    auto left = VectorX<scalar_t>{VectorX<scalar_t>::Zero(matrix_size)};
    auto right_previous = VectorX<scalar_t>{VectorX<scalar_t>::Zero(matrix_size)};

    auto right = num::make_random<VectorX<scalar_t>>(matrix_size);
    right.normalize();

    // Alpha and beta are the diagonals of the tridiagonal matrix.
    // The final size is not known ahead of time, but it will be small.
    auto alpha = std::vector<real_t>();
    alpha.reserve(100);
    auto beta = std::vector<real_t>();
    beta.reserve(100);

    // Energy values from the previous iteration, used to test convergence.
    // Initial values are as far away from the expected ones as possible.
    auto previous_min = std::numeric_limits<real_t>::max();
    auto previous_max = std::numeric_limits<real_t>::lowest();

    constexpr auto loop_limit = 1000;
    // This may iterate up to matrix_size, but since only the extreme eigenvalues
    // are required it will converge very quickly. Exceeding `loop_limit` would
    // suggest something is wrong.
    for (int i = 0; i < loop_limit; ++i) {
        // PART 1: Calculate tridiagonal matrix elements a and b
        // =====================================================
        // left = matrix * right
        // matrix-vector multiplication (the most compute-intensive part of each iteration)
        compute::matrix_vector_mul(matrix, right, left);
        auto const a = std::real(compute::dot_product(left, right));
        auto const b_prev = !beta.empty() ? beta.back() : real_t{0};

        // left -= a*right + b_prev*right_previous;
        compute::axpy(scalar_t{-a}, right, left);
        compute::axpy(scalar_t{-b_prev}, right_previous, left);
        auto const b = left.norm();

        right_previous.swap(right);
        right = (1/b) * left;

        alpha.push_back(a);
        beta.push_back(b);

        // PART 2: Check if the largest magnitude eigenvalues have converged
        // ==================================================================
        auto const eigenvalues = compute::tridiagonal_eigenvalues(
            eigen_cast<ArrayX>(alpha), eigen_cast<ArrayX>(beta));
        auto const min = eigenvalues.minCoeff();
        auto const max = eigenvalues.maxCoeff();
        auto const is_converged_min = std::abs((previous_min - min) / min) < precision;
        auto const is_converged_max = std::abs((previous_max - max) / max) < precision;

        if (is_converged_min && is_converged_max) {
            return {min, max, i};
        }

        previous_min = min;
        previous_max = max;
    }

    throw std::runtime_error{"Lanczos algorithm did not converge for the min/max eigenvalues."};
}
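// Usage sketch, assuming scalar_t = double and the SparseMatrixX/VectorX
// aliases used above. The LanczosBounds field names (min, max, loops) are
// assumptions based on the `return {min, max, i}` statement.
#include <iostream>

int example_lanczos()
{
    constexpr auto n = 500;
    SparseMatrixX<double> matrix(n, n);
    for (auto i = 0; i < n - 1; ++i) {
        matrix.insert(i, i + 1) = 1.0; // nearest-neighbour hopping
        matrix.insert(i + 1, i) = 1.0; // keep the matrix Hermitian
    }
    matrix.makeCompressed();

    // The spectrum of this chain lies in (-2, 2); ask for 1% precision.
    auto const bounds = minmax_eigenvalues<double>(matrix, 1.0);
    std::cout << "min = " << bounds.min << ", max = " << bounds.max
              << " (" << bounds.loops << " iterations)\n";
    return 0;
}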