void write_faces(std::ofstream& fout, const VectorI& faces,
        const size_t vertex_per_face,
        const VectorI& uv_indices=VectorI::Zero(0)) {
    if (vertex_per_face != 3 && vertex_per_face != 4) {
        std::stringstream err_msg;
        err_msg << "OBJ format does not support faces with "
            << vertex_per_face
            << " vertices; only triangles and quads are supported."
            << std::endl;
        throw RuntimeError(err_msg.str());
    }
    const size_t num_faces = faces.size() / vertex_per_face;
    const size_t num_uvs = uv_indices.size() / 2;
    if (num_uvs == 0) {
        // Plain "f v1 v2 v3 [v4]" lines; OBJ indices are 1-based.
        for (size_t i=0; i<num_faces; i++) {
            const auto& f = faces.segment(i*vertex_per_face, vertex_per_face);
            fout << "f";
            for (size_t j=0; j<vertex_per_face; j++) {
                fout << " " << f[j] + 1;
            }
            fout << std::endl;
        }
    } else {
        // "f v1/vt1 v2/vt2 ..." lines with texture coordinate indices.
        for (size_t i=0; i<num_faces; i++) {
            const auto& f = faces.segment(i*vertex_per_face, vertex_per_face);
            const auto& uv = uv_indices.segment(i*vertex_per_face, vertex_per_face);
            fout << "f ";
            for (size_t j=0; j<vertex_per_face; j++) {
                fout << f[j] + 1 << "/" << uv[j] + 1 << " ";
            }
            fout << std::endl;
        }
    }
}
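// Hedged usage sketch (not part of the writer above): assuming VectorI is an
// Eigen::VectorXi-style typedef, as it appears to be elsewhere in this listing,
// writing one textured triangle would look roughly like this and would emit
// the line "f 1/1 2/2 3/3" (indices are converted from 0-based to 1-based).
#include <fstream>

void write_faces_example() {
    std::ofstream fout("example.obj");
    VectorI faces(3);
    faces << 0, 1, 2;   // 0-based vertex indices of a single triangle
    VectorI uvs(3);
    uvs << 0, 1, 2;     // 0-based texture coordinate indices
    write_faces(fout, faces, 3, uvs);
}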
bool match(HashGrid::Ptr grid, const Vector3F& v) {
    VectorI candidates = grid->get_items_near_point(v);
    if (candidates.size() != 1) {
        std::cout << "<" << v.transpose() << "> : "
            << candidates.transpose() << std::endl;
    }
    return candidates.size() > 0;
}
inline void assign_vector(
        VectorI &v_src, VectorI &v_ind, VectorI &v_val,
        VectorJ &v_out, Accum accum) {
    using namespace detail;
    if (v_val.size() != v_ind.size()) { return; }
    assign_vector_helper(v_src, v_ind, v_val.begin(), v_out, accum);
}
ZSparseMatrix Assembler2D::getDisplacementStrainMatrix() {
    typedef Eigen::Triplet<double> T;
    std::vector<T> triplets;

    for (size_t i=0; i<m_mesh->getNbrElements(); i++) {
        Eigen::MatrixXd dN = m_DN[i];
        VectorI idx = m_mesh->getElement(i);
        assert(idx.size() == 3);
        double V = m_mesh->getElementVolume(i);  // element area (not used below)

        // e_xx
        size_t row = i * 3;
        for (size_t k=0; k<3; k++) {
            triplets.push_back(T(row, idx[k]*2, dN(k,0)));
        }

        // e_yy
        row++;
        for (size_t k=0; k<3; k++) {
            triplets.push_back(T(row, idx[k]*2+1, dN(k,1)));
        }

        // e_xy
        row++;
        for (size_t k=0; k<3; k++) {
            triplets.push_back(T(row, idx[k]*2  , dN(k,1) / 2.0));
            triplets.push_back(T(row, idx[k]*2+1, dN(k,0) / 2.0));
        }
    }

    Eigen::SparseMatrix<double> B = Eigen::SparseMatrix<double>(
            3*m_mesh->getNbrElements(), 2*m_mesh->getNbrNodes());
    B.setFromTriplets(triplets.begin(), triplets.end());
    return ZSparseMatrix(B);
}
void Assembler2D::precomputeShapeFunctionDerivatives() {
    Eigen::MatrixXd selector = Eigen::MatrixXd::Zero(3,2);
    selector << 0.0, 0.0,
                1.0, 0.0,
                0.0, 1.0;

    m_DN.resize(m_mesh->getNbrElements());
    for (size_t i=0; i<m_mesh->getNbrElements(); ++i) {
        VectorI idx = m_mesh->getElement(i);
        assert(idx.size() == 3);
        VectorF u[3];
        u[0] = m_mesh->getNode(idx[0]);
        u[1] = m_mesh->getNode(idx[1]);
        u[2] = m_mesh->getNode(idx[2]);

        Eigen::MatrixXd P = Eigen::MatrixXd::Zero(3,3);
        P <<     1.0,     1.0,     1.0,
             u[0][0], u[1][0], u[2][0],
             u[0][1], u[1][1], u[2][1];

        // DN is a 3x2 matrix containing the gradients of the
        // 3 linear shape functions (one per triangle node).
        //
        m_DN[i] = P.inverse() * selector /* * -1.0 */;
    }
}
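// Hedged verification sketch (illustrative, not part of the assembler): for
// linear triangle shape functions N_k(x,y) = a_k + b_k*x + c_k*y with
// N_k(v_j) = delta_kj, the coefficient matrix is exactly P^{-1}, so
// P.inverse() * selector picks out the gradient components (b_k, c_k).
// For the reference triangle (0,0), (1,0), (0,1) this should produce
// DN = [[-1,-1], [1,0], [0,1]], and the gradients sum to zero.
#include <Eigen/Dense>
#include <iostream>

void check_triangle_shape_gradients() {
    Eigen::MatrixXd selector(3,2);
    selector << 0.0, 0.0,
                1.0, 0.0,
                0.0, 1.0;
    Eigen::Matrix3d P;
    P << 1.0, 1.0, 1.0,   // row of ones
         0.0, 1.0, 0.0,   // x coordinates of the three vertices
         0.0, 0.0, 1.0;   // y coordinates of the three vertices
    Eigen::MatrixXd DN = P.inverse() * selector;
    std::cout << DN << std::endl;                  // expect [-1 -1; 1 0; 0 1]
    std::cout << DN.colwise().sum() << std::endl;  // expect [0 0]
}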
std::vector<VectorI> enumerate(const VectorI& repetitions) {
    std::vector<VectorI> result;
    const size_t dim = repetitions.size();
    if (dim == 2) {
        for (size_t i=0; i<repetitions[0]; i++) {
            for (size_t j=0; j<repetitions[1]; j++) {
                result.push_back(Vector2I(i,j));
            }
        }
    } else if (dim == 3) {
        for (size_t i=0; i<repetitions[0]; i++) {
            for (size_t j=0; j<repetitions[1]; j++) {
                for (size_t k=0; k<repetitions[2]; k++) {
                    result.push_back(Vector3I(i,j,k));
                }
            }
        }
    } else {
        std::stringstream err_msg;
        err_msg << "Unsupported dim: " << dim;
        throw NotImplementedError(err_msg.str());
    }
    return result;
}
void VertexNormalAttribute::compute_vertex_normals_from_face(Mesh& mesh) {
    const size_t dim = mesh.get_dim();
    const size_t num_vertices = mesh.get_num_vertices();
    const size_t num_faces = mesh.get_num_faces();
    const size_t vertex_per_face = mesh.get_vertex_per_face();

    const VectorF& normals = get_attribute(mesh, "face_normal");
    const VectorF& areas = get_attribute(mesh, "face_area");
    assert(normals.size() == 3 * num_faces);
    assert(areas.size() == num_faces);

    VectorF& v_normals = m_values;
    v_normals = VectorF::Zero(dim * num_vertices);

    // Accumulate area-weighted face normals onto each incident vertex.
    for (size_t i=0; i<num_faces; i++) {
        VectorI face = mesh.get_face(i);
        assert(face.size() == vertex_per_face);
        VectorF face_normal = normals.segment(i*dim, dim);
        Float face_area = areas[i];
        for (size_t j=0; j<vertex_per_face; j++) {
            size_t vi = face[j];
            v_normals.segment(vi*dim, dim) += face_normal * face_area;
        }
    }

    // Normalize the accumulated normals.
    for (size_t i=0; i<num_vertices; i++) {
        VectorF n = v_normals.segment(dim*i, dim);
        Float n_len = n.norm();
        if (n_len > 0.0) n /= n_len;
        v_normals.segment(dim*i, dim) = n;
    }
}
VectorF Assembler::getCurlNorm(double* vector_field, int num_vector_field) {
    size_t num_node = m_mesh->getNbrNodes();
    size_t num_elem = m_mesh->getNbrElements();
    size_t dim = m_mesh->getDim();
    assert(num_vector_field == dim*num_node);

    VectorF curl_norm(num_elem);
    for (size_t i=0; i<num_elem; i++) {
        VectorI elem = m_mesh->getElement(i);
        const Eigen::MatrixXd& DN = m_DN[i];
        Eigen::Vector3d curl(0, 0, 0);
        for (size_t j=0; j<elem.size(); j++) {
            Eigen::Vector3d v(0, 0, 0);
            Eigen::Vector3d grad(0, 0, 0);
            for (size_t k=0; k<dim; k++) {
                v[k] = vector_field[elem[j]*dim+k];
                grad[k] = DN(j, k);
            }
            curl = curl + grad.cross(v);
        }
        curl_norm[i] = curl.norm();
    }
    return curl_norm;
}
ZSparseMatrix Assembler2D::getLaplacianMatrix() {
    typedef Eigen::Triplet<double> T;
    std::vector<T> triplets;

    for (size_t i=0; i<m_mesh->getNbrElements(); ++i) {
        VectorI idx = m_mesh->getElement(i);
        assert(idx.size() == 3);
        Eigen::MatrixXd& dN = m_DN[i];

        // Shape function gradient matrix (each column is the gradient of one
        // nodal shape function).
        //
        Eigen::MatrixXd B(2,3);
        B << dN(0,0), dN(1,0), dN(2,0),
             dN(0,1), dN(1,1), dN(2,1);

        Eigen::MatrixXd k_el = B.transpose() * B * m_mesh->getElementVolume(i);

        for (size_t j=0; j<3; ++j)
            for (size_t k=0; k<3; ++k)
                triplets.push_back(T(idx[j], idx[k], k_el(j,k)));
    }

    Eigen::SparseMatrix<double> L = Eigen::SparseMatrix<double>(
            m_mesh->getNbrNodes(), m_mesh->getNbrNodes());
    L.setFromTriplets(triplets.begin(), triplets.end());
    return ZSparseMatrix(L);
}
void Deform::set_soft_ctrs(const VectorF &T, const VectorI &idx_T) {
    assert(T.size()/3 == idx_T.size());
    for (int i = 0, i_end = idx_T.size(); i < i_end; ++ i) {
        int cid = idx_T[i];
        Eigen::Vector3f ctr;
        ctr << T[3*i], T[3*i+1], T[3*i+2];
        soft_ctrs.push_back(Constraint(ctr, cid));
    }
    std::sort(soft_ctrs.begin(), soft_ctrs.end(), ConstraintCompare());
}
ZSparseMatrix Assembler2D::getMassMatrix(bool lumped, int repeats) {
    double p = m_density;

    typedef Eigen::Triplet<double> T;
    std::vector<T> triplets;

    if (lumped) {
        // Lumped mass: one third of each element's mass goes to each node.
        for (size_t i=0; i<m_mesh->getNbrElements(); i++) {
            VectorI idx = m_mesh->getElement(i);
            assert(idx.size() == 3);
            double V = m_mesh->getElementVolume(i);
            for (size_t j=0; j<idx.size(); j++)
                for (size_t l=0; l<repeats; l++)
                    triplets.push_back(T(repeats*idx[j]+l, repeats*idx[j]+l, p*V/3.0));
        }
    } else {
        // Consistent mass: the integral of N_j*N_k over a linear triangle of
        // area V is V/6 for j==k and V/12 for j!=k.
        double coeff_jj = 1.0 / 6.0,
               coeff_jk = 1.0 / 12.0;
        for (size_t i=0; i<m_mesh->getNbrElements(); ++i) {
            VectorI idx = m_mesh->getElement(i);
            assert(idx.size() == 3);
            double V = m_mesh->getElementVolume(i);
            for (size_t j=0; j<3; ++j) {
                for (size_t k=0; k<3; ++k) {
                    if (idx[j] == idx[k]) {
                        for (size_t l=0; l<repeats; ++l)
                            triplets.push_back(T(repeats*idx[j]+l, repeats*idx[k]+l, p*V*coeff_jj));
                    } else {
                        for (size_t l=0; l<repeats; ++l)
                            triplets.push_back(T(repeats*idx[j]+l, repeats*idx[k]+l, p*V*coeff_jk));
                    }
                }
            }
        }
    }

    Eigen::SparseMatrix<double> M = Eigen::SparseMatrix<double>(
            repeats*m_mesh->getNbrNodes(), repeats*m_mesh->getNbrNodes());
    M.setFromTriplets(triplets.begin(), triplets.end());
    return ZSparseMatrix(M);
}
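// Hedged sanity check (illustrative only): the 1/6 and 1/12 mass coefficients
// above follow from integrating products of linear shape functions over a
// triangle.  Edge-midpoint quadrature (three points, weight A/3 each) is exact
// for quadratic integrands, so it reproduces the expected values.
#include <cstdio>

void check_mass_coefficients() {
    const double A = 1.0;  // any triangle area works; the integrals scale with A
    // Barycentric coordinates (values of N_0, N_1, N_2) at the three edge midpoints.
    const double N[3][3] = {
        {0.5, 0.5, 0.0},
        {0.0, 0.5, 0.5},
        {0.5, 0.0, 0.5},
    };
    for (int j = 0; j < 3; j++) {
        for (int k = 0; k < 3; k++) {
            double integral = 0.0;
            for (int q = 0; q < 3; q++) integral += (A / 3.0) * N[q][j] * N[q][k];
            std::printf("int N_%d N_%d dA = %g (expect %g)\n",
                    j, k, integral, (j == k ? A / 6.0 : A / 12.0));
        }
    }
}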
VectorI map_indices(const VectorI& face, const VectorI& index_map) {
    const size_t vertex_per_face = face.size();
    VectorI index(vertex_per_face);
    for (size_t i=0; i<vertex_per_face; i++) {
        index[i] = index_map[face[i]];
    }
    return index;
}
Float ElementWiseMaterial::get_material_tensor(
        size_t i, size_t j, size_t k, size_t l, VectorF coord) const {
    const VectorI voxel_ids = look_up_voxels(coord);
    assert(voxel_ids.size() > 0);
    const size_t voxel_id = voxel_ids[0];
    assert(voxel_id < m_materials.size());
    return m_materials[voxel_id]->get_material_tensor(i,j,k,l,coord);
}
MatrixF ElementWiseMaterial::strain_to_stress(
        const MatrixF& strain, VectorF coord) const {
    const VectorI voxel_ids = look_up_voxels(coord);
    assert(voxel_ids.size() > 0);
    const size_t voxel_id = voxel_ids[0];
    assert(voxel_id < m_materials.size());
    return m_materials[voxel_id]->strain_to_stress(strain, coord);
}
void MeshGeometry::extract_faces_from_hexes() {
    const VectorI& voxels = m_voxels;

    typedef std::map<Multiplet, int> FaceCounter;
    FaceCounter face_counter;

    for (size_t i=0; i<voxels.size(); i+= m_vertex_per_voxel) {
        VectorI voxel = voxels.segment(i, m_vertex_per_voxel);
        // Note that the order of the vertices below is predefined by the MSH
        // format; each face should have its normal pointing outward.
        assert(voxel.size() == 8);
        Multiplet voxel_faces[6] = {
            Multiplet(Vector4I(voxel[0], voxel[1], voxel[5], voxel[4])), // Bottom
            Multiplet(Vector4I(voxel[2], voxel[3], voxel[7], voxel[6])), // Top
            Multiplet(Vector4I(voxel[0], voxel[4], voxel[7], voxel[3])), // Left
            Multiplet(Vector4I(voxel[1], voxel[2], voxel[6], voxel[5])), // Right
            Multiplet(Vector4I(voxel[4], voxel[5], voxel[6], voxel[7])), // Front
            Multiplet(Vector4I(voxel[0], voxel[3], voxel[2], voxel[1]))  // Back
        };

        for (size_t j=0; j<6; j++) {
            if (face_counter.find(voxel_faces[j]) == face_counter.end()) {
                face_counter[voxel_faces[j]] = 1;
            } else {
                face_counter[voxel_faces[j]] += 1;
            }
        }
    }

    std::vector<int> vertex_buffer;
    for (FaceCounter::const_iterator itr = face_counter.begin();
            itr!=face_counter.end(); itr++) {
        if (itr->second != 1 && itr->second != 2) {
            const Vector4I& face = itr->first.get_ori_data();
            std::stringstream err_msg;
            err_msg << "Non-manifold mesh detected!" << std::endl;
            err_msg << "Face <" << face[0] << ", " << face[1] << ", "
                << face[2] << ", " << face[3] << "> has "
                << itr->second << " adjacent volume elements";
            throw RuntimeError(err_msg.str());
        }
        if (itr->second == 1) {
            // Faces seen exactly once are on the boundary.
            const VectorI& f = itr->first.get_ori_data();
            assert(f.size() == 4);
            vertex_buffer.push_back(f[0]);
            vertex_buffer.push_back(f[1]);
            vertex_buffer.push_back(f[2]);
            vertex_buffer.push_back(f[3]);
        }
    }

    m_faces.resize(vertex_buffer.size());
    std::copy(vertex_buffer.begin(), vertex_buffer.end(), m_faces.data());
    m_vertex_per_face = 4;
}
ALGEB minpoly(MKernelVector kv, ALGEB* args) {
    int i;
    ALGEB retlist, blank;
    char err[] = "ERROR! Associated blackbox object does not exist!";
    int key = MapleToInteger32(kv,args[1]), flag;
    std::map<int,int>::iterator f_i;
    std::map<int,void*>::iterator h_i;

    // Get the data from the hash table
    f_i = typeTable.find(key);
    if( f_i == typeTable.end() )
        MapleRaiseError(kv,err);
    else
        flag = f_i->second;

    h_i = hashTable.find(key);
    if(h_i != hashTable.end() ) {
        // We've got data
        switch( flag ) {
        // Getting the minimal polynomial is rather complicated, so both
        // instances of this code were wrapped up inside a block, to cut down
        // on the clutter at the top of this function.  Each block first
        // declares a vector of the proper type and casts the pointer, then
        // computes the minimal polynomial, and finally builds the proper
        // Maple list structure for this application.
        case BlackBoxi: {
            Vectorl mpreturn;
            Vectorl::iterator mp_i;
            TriplesBBi* BB = (TriplesBBi*) h_i->second;
            LinBox::minpoly( mpreturn, *BB, BB->field() );
            retlist = MapleListAlloc(kv, mpreturn.size() );
            for(i = 1, mp_i = mpreturn.begin(); mp_i != mpreturn.end(); ++mp_i, ++i)
                MapleListAssign(kv, retlist, i, ToMapleInteger(kv, *mp_i));
        } break;

        case BlackBoxI: {
            VectorI mpreturn;
            VectorI::iterator mp_i;
            TriplesBBI* BB = (TriplesBBI*) h_i->second;
            LinBox::minpoly( mpreturn, *BB, BB->field() );
            retlist = MapleListAlloc(kv, mpreturn.size());
            for(i = 1, mp_i = mpreturn.begin(); mp_i != mpreturn.end(); ++mp_i, ++i)
                MapleListAssign(kv, retlist, i, LiToM(kv, *mp_i, blank));
        } break;
        }
    }
    else
        MapleRaiseError(kv,err);

    return retlist;
}
bool SimpleInflator::belong_to_the_same_loop(
        const VectorI& indices, const VectorI& source_ids) const {
    const size_t size = indices.size();
    assert(size > 0);
    const int id = source_ids[indices[0]];
    for (size_t i=1; i<size; i++) {
        if (id != source_ids[indices[i]]) return false;
    }
    return true;
}
ZSparseMatrix Assembler::getBoundaryBlurMatrix(double radius) {
    typedef std::priority_queue<Item, std::vector<Item>, ItemComp> Queue;
    typedef Eigen::Triplet<double> T;
    std::vector<T> triplets;

    size_t num_bd_vertices = m_mesh->getNbrBoundaryNodes();
    double sd = radius / 3.0;
    const double A = 2.50662827463; // A = sqrt(2*pi);

    std::vector<int> visited(num_bd_vertices, -1);
    for (size_t i=0; i<num_bd_vertices; i++) {
        size_t seed = i;
        Queue Q;
        Q.push(Item(i, 0.0));
        VectorF seed_n = m_mesh->getBoundaryNodeNormal(seed);

        while (!Q.empty()) {
            Item cur_item = Q.top();
            Q.pop();

            // Check if visited before
            if (visited[cur_item.first] == seed) continue;
            visited[cur_item.first] = seed;

            size_t curr_glob_idx = m_mesh->getBoundaryNode(cur_item.first);
            VectorF curr_n = m_mesh->getBoundaryNodeNormal(cur_item.first);
            double normal_factor = fmax(0.0, curr_n.dot(seed_n));

            // Use gaussian distribution as weights
            triplets.push_back(T(seed, cur_item.first,
                    exp(-0.5 * pow(cur_item.second / sd, 2)) / (sd*A) * normal_factor));

            VectorI neighbors = m_mesh->getBoundaryNodeAdjacentBoundaryNodes(
                    cur_item.first);
            for (size_t j=0; j<neighbors.size(); j++) {
                size_t next = neighbors[j];
                if (visited[next] == seed) continue;
                size_t next_glob_idx = m_mesh->getBoundaryNode(next);
                double next_dist = cur_item.second +
                    (m_mesh->getNode(curr_glob_idx) - m_mesh->getNode(next_glob_idx)).norm();
                if (next_dist > 2*sd) continue;
                Q.push(Item(next, next_dist));
            }
        }
    }

    Eigen::SparseMatrix<double> Bd_Blur =
        Eigen::SparseMatrix<double>(num_bd_vertices, num_bd_vertices);
    Bd_Blur.setFromTriplets(triplets.begin(), triplets.end());
    return ZSparseMatrix(Bd_Blur);
}
void Deform::find_share_vertex(int pi, int pj, VectorI &share_vertex) {
    vector<int> vertices;
    set_intersection(adj_list[pi].begin(), adj_list[pi].end(),
            adj_list[pj].begin(), adj_list[pj].end(),
            back_inserter(vertices));
    for (auto &i : vertices) {
        vector<int> f;
        f.push_back(pi);
        f.push_back(pj);
        f.push_back(i);
        sort(f.begin(), f.end());
        vector<Vector3i>::iterator it =
            find(face_list.begin(), face_list.end(), Map<Vector3i>(&f[0]));
        if (it != face_list.end()) {
            if ((*it)(0) != pi && (*it)(0) != pj) share_vertex.push_back((*it)(0));
            else if ((*it)(1) != pi && (*it)(1) != pj) share_vertex.push_back((*it)(1));
            else share_vertex.push_back((*it)(2));
        }
    }
    if (share_vertex.size() > 2) {
        cout << "share vertices number warning: " << share_vertex.size() << endl;
    }
}
void MeshGeometry::extract_faces_from_tets() {
    const VectorI& voxels = m_voxels;

    typedef std::map<Triplet, unsigned short> FaceCounter;
    FaceCounter face_counter;

    for (size_t i=0; i<voxels.size(); i+= m_vertex_per_voxel) {
        VectorI voxel = voxels.segment(i, m_vertex_per_voxel);
        // Note that the order of the vertices below is predefined by the MSH
        // format; each face should have its normal pointing outward.
        assert(voxel.size() == 4);
        Triplet voxel_faces[4] = {
            {voxel[0], voxel[2], voxel[1]},
            {voxel[0], voxel[1], voxel[3]},
            {voxel[0], voxel[3], voxel[2]},
            {voxel[1], voxel[2], voxel[3]}
        };

        for (size_t j=0; j<4; j++) {
            if (face_counter.find(voxel_faces[j]) == face_counter.end()) {
                face_counter[voxel_faces[j]] = 1;
            } else {
                face_counter[voxel_faces[j]] += 1;
            }
        }
    }

    std::vector<int> vertex_buffer;
    for (FaceCounter::const_iterator itr = face_counter.begin();
            itr!=face_counter.end(); itr++) {
        if (itr->second != 1 && itr->second != 2) {
            const Vector3I& triplet = itr->first.get_ori_data();
            std::stringstream err_msg;
            err_msg << "Non-manifold mesh detected!" << std::endl;
            err_msg << "Face <" << triplet[0] << ", " << triplet[1] << ", "
                << triplet[2] << "> has " << itr->second
                << " adjacent volume elements";
            throw RuntimeError(err_msg.str());
        }
        if (itr->second == 1) {
            // Faces seen exactly once are on the boundary.
            const VectorI& f = itr->first.get_ori_data();
            assert(f.size() == 3);
            vertex_buffer.push_back(f[0]);
            vertex_buffer.push_back(f[1]);
            vertex_buffer.push_back(f[2]);
        }
    }

    m_faces.resize(vertex_buffer.size());
    std::copy(vertex_buffer.begin(), vertex_buffer.end(), m_faces.data());
    m_vertex_per_face = 3;
}
void correct_tet_orientation(const VectorF& vertices, VectorI& voxels) {
    const size_t num_voxels = voxels.size() / 4;
    for (size_t i=0; i<num_voxels; i++) {
        const VectorI tet = voxels.segment(i*4, 4);
        const Vector3F& v1 = vertices.segment(tet[0]*3, 3);
        const Vector3F& v2 = vertices.segment(tet[1]*3, 3);
        const Vector3F& v3 = vertices.segment(tet[2]*3, 3);
        const Vector3F& v4 = vertices.segment(tet[3]*3, 3);
        if (!positive_orientated(v1, v2, v3, v4)) {
            // Swap the first two vertices to flip the tet's orientation.
            voxels[i*4]   = tet[1];
            voxels[i*4+1] = tet[0];
        }
    }
}
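// Hedged sketch: positive_orientated() is not shown in this listing.  One
// common definition, assumed here purely for illustration (the actual
// predicate may differ in name or sign convention), tests the sign of the
// tet's signed volume via a scalar triple product.
bool positive_orientated_sketch(
        const Vector3F& v1, const Vector3F& v2,
        const Vector3F& v3, const Vector3F& v4) {
    // Signed volume of the tet is det([v2-v1, v3-v1, v4-v1]) / 6; only the
    // sign matters for orientation.
    return (v2 - v1).cross(v3 - v1).dot(v4 - v1) > 0.0;
}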
void MSHWriter::write(const VectorF& vertices, const VectorI& faces, const VectorI& voxels,
        size_t dim, size_t vertex_per_face, size_t vertex_per_voxel) {
    MshSaver saver(m_filename, !m_in_ascii);
    MshSaver::ElementType type;
    if (voxels.size() == 0) {
        type = get_face_type(vertex_per_face);
        saver.save_mesh(vertices, faces, dim, type);
    } else {
        type = get_voxel_type(vertex_per_voxel);
        saver.save_mesh(vertices, voxels, dim, type);
    }
    if (m_attr_names.size() != 0) {
        std::cerr << "Warning: all attributes are ignored." << std::endl;
    }
}
VectorF Assembler::getDivergence(double* vector_field, int num_vector_field) {
    size_t num_node = m_mesh->getNbrNodes();
    size_t num_elem = m_mesh->getNbrElements();
    size_t dim = m_mesh->getDim();
    assert(num_vector_field == dim*num_node);

    VectorF div(num_elem);
    for (size_t i=0; i<num_elem; i++) {
        div[i] = 0;
        VectorI elem = m_mesh->getElement(i);
        const Eigen::MatrixXd& DN = m_DN[i];
        for (size_t j=0; j<elem.size(); j++) {
            for (size_t k=0; k<dim; k++) {
                div[i] += DN(j,k) * vector_field[elem[j]*dim+k];
            }
        }
    }
    return div;
}
ZSparseMatrix Assembler2D::getStiffnessMatrix() {
    // Elastic moduli
    //
    Eigen::MatrixXd& D = m_D;
    Eigen::MatrixXd& C = m_C;

    typedef Eigen::Triplet<double> T;
    std::vector<T> triplets;

    for (size_t i=0; i<m_mesh->getNbrElements(); ++i) {
        VectorI idx = m_mesh->getElement(i);
        assert(idx.size() == 3);
        Eigen::MatrixXd& dN = m_DN[i];

        // Small strain-displacement matrix
        //
        Eigen::MatrixXd B(3,6);
        B <<     dN(0,0),         0.0,     dN(1,0),         0.0,     dN(2,0),         0.0,
                     0.0,     dN(0,1),         0.0,     dN(1,1),         0.0,     dN(2,1),
             0.5*dN(0,1), 0.5*dN(0,0), 0.5*dN(1,1), 0.5*dN(1,0), 0.5*dN(2,1), 0.5*dN(2,0);

        Eigen::MatrixXd k_el = B.transpose() * D * C * B * m_mesh->getElementVolume(i);

        for (size_t j=0; j<3; ++j)
            for (size_t k=0; k<3; ++k)
                for (size_t l=0; l<2; ++l)
                    for (size_t m=0; m<2; ++m)
                        triplets.push_back(T(2*idx[j]+l, 2*idx[k]+m, k_el(2*j+l, 2*k+m)));
    }

    Eigen::SparseMatrix<double> K = Eigen::SparseMatrix<double>(
            2*m_mesh->getNbrNodes(), 2*m_mesh->getNbrNodes());
    K.setFromTriplets(triplets.begin(), triplets.end());
    ZSparseMatrix tmp = ZSparseMatrix(K);
    return tmp;
}
ZSparseMatrix Assembler2D::getBdLaplacianMatrix() {
    typedef Eigen::Triplet<double> T;
    std::vector<T> triplets;

    size_t num_bdv = m_mesh->getNbrBoundaryNodes();
    size_t num_bdf = m_mesh->getNbrBoundaryFaces();

    // Compute lumped mass
    VectorF lumped_mass(num_bdv);
    for (size_t i=0; i<num_bdv; i++) {
        VectorI neighbor_faces = m_mesh->getBoundaryNodeAdjacentBoundaryFaces(i);
        assert(neighbor_faces.size() == 2);
        double total_weight =
            m_mesh->getBoundaryFaceArea(neighbor_faces[0]) +
            m_mesh->getBoundaryFaceArea(neighbor_faces[1]);
        lumped_mass[i] = 0.5 * total_weight;
    }

    // Compute laplacian matrix.
    for (size_t i=0; i<num_bdf; i++) {
        VectorI face = m_mesh->getBoundaryFace(i);
        assert(face.size() == 2);
        double l = m_mesh->getBoundaryFaceArea(i);
        size_t v1 = m_mesh->getBoundaryIndex(face[0]);
        size_t v2 = m_mesh->getBoundaryIndex(face[1]);
        double weight = 1.0 / l;
        triplets.push_back(T(v1, v1, -weight / lumped_mass[v1]));
        triplets.push_back(T(v1, v2,  weight / lumped_mass[v1]));
        triplets.push_back(T(v2, v1,  weight / lumped_mass[v2]));
        triplets.push_back(T(v2, v2, -weight / lumped_mass[v2]));
    }

    Eigen::SparseMatrix<double> Lb = Eigen::SparseMatrix<double>(num_bdv, num_bdv);
    Lb.setFromTriplets(triplets.begin(), triplets.end());
    return ZSparseMatrix(Lb);
}
void OffsetParameters::add(const VectorI& roi,
        const std::string& formula, Float value, size_t axis) {
    const Float tol = 1e-12;
    const size_t dim = m_wire_network->get_dim();
    const size_t num_vertices = m_wire_network->get_num_vertices();
    const VectorF bbox_min = m_wire_network->get_bbox_min();
    const VectorF bbox_max = m_wire_network->get_bbox_max();
    const VectorF bbox_center = 0.5 * (bbox_min + bbox_max);
    const MatrixFr& vertices = m_wire_network->get_vertices();
    assert(axis < dim);

    // Compute the bounding box of the region of interest.
    VectorF roi_min = bbox_max;
    VectorF roi_max = bbox_min;
    const size_t num_roi = roi.size();
    for (size_t i=0; i<num_roi; i++) {
        size_t v_idx = roi[i];
        assert(v_idx < num_vertices);
        const VectorF& v = vertices.row(v_idx);
        roi_min = roi_min.cwiseMin(v);
        roi_max = roi_max.cwiseMax(v);
    }

    if (fabs(roi_max[axis] - bbox_center[axis]) < tol &&
            fabs(roi_min[axis] - bbox_center[axis]) < tol) {
        // No dof in this axis without destroying symmetry.
        return;
    }
    if (roi_min[axis] > bbox_min[axis] + tol &&
            roi_max[axis] < bbox_max[axis] - tol) {
        m_params.emplace_back(PatternParameter::Ptr(
                    new VertexOffsetParameter(m_wire_network, axis)));
        PatternParameter::Ptr param = m_params.back();
        param->set_roi(roi);
        param->set_value(value);
        param->set_formula(formula);
    }
}
size_t DuplicatedVertexRemoval::run(Float tol) {
    const size_t dim = m_vertices.cols();
    HashGrid::Ptr grid = HashGrid::create(tol, dim);

    const size_t num_vertices = m_vertices.rows();
    const size_t num_faces = m_faces.rows();
    const size_t vertex_per_face = m_faces.cols();
    m_index_map.resize(num_vertices);
    std::vector<size_t> source_index;

    size_t count = 0;
    size_t num_duplications = 0;
    for (size_t i=0; i<num_vertices; i++) {
        int curr_importance_level = m_importance_level[i];
        if (curr_importance_level < 0) {
            // Vertices with negative importance are never merged.
            m_index_map[i] = count;
            source_index.push_back(i);
            count++;
            continue;
        }
        const VectorF& v = m_vertices.row(i);
        VectorI candidates = grid->get_items_near_point(v);
        const size_t num_candidates = candidates.size();
        if (num_candidates > 0) {
            VectorF dists(num_candidates);
            for (size_t j=0; j<num_candidates; j++) {
                dists[j] = (m_vertices.row(candidates[j]) - v.transpose()).norm();
            }
            size_t min_idx;
            Float min_dist = dists.minCoeff(&min_idx);
            if (min_dist < tol) {
                size_t best_match_idx = candidates[min_idx];
                size_t output_idx = m_index_map[best_match_idx];
                m_index_map[i] = output_idx;
                int matched_importance_level =
                    m_importance_level[source_index[output_idx]];
                if (curr_importance_level > matched_importance_level) {
                    // Keep the coordinates of the more important vertex.
                    source_index[output_idx] = i;
                }
                num_duplications++;
                continue;
            }
        }

        // No match, add this vertex to the book.
        grid->insert(i, v);
        m_index_map[i] = count;
        source_index.push_back(i);
        count++;
    }
    assert(source_index.size() == count);

    // Compact the vertex array and remap face indices.
    MatrixFr vertices(count, dim);
    for (size_t i=0; i<count; i++) {
        assert(m_index_map[source_index[i]] == i);
        vertices.row(i) = m_vertices.row(source_index[i]);
    }
    m_vertices = vertices;

    for (size_t i=0; i<num_faces; i++) {
        for (size_t j=0; j<vertex_per_face; j++) {
            size_t v_index = m_faces(i,j);
            m_faces(i,j) = m_index_map[v_index];
        }
    }
    return num_duplications;
}
ALGEB getVector(MKernelVector kv, ALGEB* args) {
    // Get the key, declare variables
    int key = MapleToInteger32(kv,args[1]), flag;
    char err[] = "ERROR! The associated Vector object does not exist!";
    M_INT index, bound[2];
    RTableData d;
    RTableSettings s;
    ALGEB rtable, blank;
    char MapleStatement[100] = "rtable(1..";

    // Check to see if the object pointed to by key is in the type table. If not, panic
    std::map<int,int>::iterator f_i = typeTable.find(key);
    if(f_i == typeTable.end() ) {
        MapleRaiseError(kv, err);
    }
    // Otherwise, we have our object
    flag = f_i->second;

    // Get a pointer to the actual data
    std::map<int,void*>::iterator h_i = hashTable.find(key);
    if(h_i != hashTable.end() ) {
        // Diverge over whether we are using Maple 7 or 8 (and 5 & 6).
        // In Maple, arg 3 is a flag indicating which method to use.
        switch( MapleToInteger32(kv, args[3])) {

        // In this case, Maple 7 is being used; we have to construct a call using
        // "EvalMapleStatement()" to call the RTable constructor.
        case 1:
            switch(flag) {
            case SmallV: {
                // Get the vector
                Vectorl* V = (Vectorl*) h_i->second;
                Vectorl::const_iterator V_i;

                // Create the Maple object
                sprintf(MapleStatement + strlen(MapleStatement), "%d", V->size() );
                strcat(MapleStatement, ", subtype=Vector[column], storage=sparse)");
                rtable = kv->evalMapleStatement(MapleStatement);

                // Populate the Maple vector w/ the entries from V above
                for(index = 1, V_i = V->begin(); V_i != V->end(); ++V_i, ++index) {
                    d.dag = ToMapleInteger(kv, *V_i); // d is a union, dag is the
                                                      // ALGEB union field
                    RTableAssign(kv, rtable, &index, d);
                }
            } break;

            case LargeV: {
                // This part works the same way as above
                VectorI* V = (VectorI*) h_i->second;
                VectorI::const_iterator V_i;
                sprintf(MapleStatement + strlen(MapleStatement), "%d", V->size() );
                strcat(MapleStatement, ",subtype=Vector[column], storage=sparse)");
                rtable = kv->evalMapleStatement(MapleStatement);

                // Use a Maple callback to call the procedure from Maple that
                // translates a GMP integer into a large Maple integer, then put
                // the result into the Maple vector.
                for(index = 1, V_i = V->begin(); V_i != V->end(); ++V_i, ++index) {
                    /* Here's how this line works.  In order to set the entries of
                     * this RTable to multi-precision integers, first use our own
                     * conversion method, LiToM, to convert the integer entry to an
                     * ALGEB structure, then do a callback into Maple that calls the
                     * ExToM procedure, which converts the result of LiToM into a
                     * Maple multi-precision integer.  At the moment, this is the
                     * best available way to convert a GMP integer into a Maple
                     * representation in one shot.
                     */
                    d.dag = EvalMapleProc(kv,args[2],1,LiToM(kv, *V_i, blank));
                    RTableAssign(kv, rtable, &index, d);
                }
            } break;

            default:
                MapleRaiseError(kv, err);
                break;
            }
            break;

        // In this case, use the simpler RTableCreate function, rather than building
        // a string that must be parsed by Maple.
        case 2:
            kv->rtableGetDefaults(&s);  // Get default settings - set datatype to Maple,
                                        // DAGTAG to anything
            s.subtype = 2;          // Subtype set to column vector
            s.storage = 4;          // Storage set to rectangular
            s.num_dimensions = 1;   // What do you think this means :-)
            bound[0] = 1;           // Set the lower bound of each dimension to 1

            switch(flag) {  // Switch on data type of vector
            case SmallV: {
                // Single word integer entry vector
                Vectorl* V = (Vectorl*) h_i->second;
                Vectorl::const_iterator V_i;
                bound[1] = V->size();
                rtable = kv->rtableCreate(&s, NULL, bound); // Create the Maple vector
                for(index = 1, V_i = V->begin(); V_i != V->end(); ++V_i, ++index) {
                    d.dag = ToMapleInteger(kv, *V_i); // d is a union, dag is the
                                                      // ALGEB union field
                    RTableAssign(kv, rtable, &index, d);
                }
            } break;

            case LargeV: {
                // Same as above for multi-word integer entry vector
                VectorI* V = (VectorI*) h_i->second;
                VectorI::const_iterator V_i;
                bound[1] = V->size();
                rtable = kv->rtableCreate(&s, NULL, bound);
                for(index = 1, V_i = V->begin(); V_i != V->end(); ++V_i, ++index) {
                    // Same LiToM + ExToM callback trick as described above.
                    d.dag = EvalMapleProc(kv,args[2],1,LiToM(kv, *V_i, blank));
                    RTableAssign(kv, rtable, &index, d);
                }
            } break;

            default:
                MapleRaiseError(kv, err);
                break;
            }
            break;  // breaks case 2.  This was causing a wicked error :-)

        default:
            MapleRaiseError(kv, err);
            break;
        }
    }
    else {
        MapleRaiseError(kv, err);
    }

    return rtable;
}